From 2cbf261032dc8aca56c846971c090c991ac594a6 Mon Sep 17 00:00:00 2001
From: Dawid Weiss
Date: Tue, 5 Jan 2021 13:44:05 +0100
Subject: [PATCH] LUCENE-9570: code reformatting [final].

---
 gradle/generation/javacc.gradle | 17 +-
 gradle/validation/spotless.gradle | 107 +-
 lucene/CHANGES.txt | 4 +
 .../org/apache/lucene/search/HitQueue.java | 7 +-
 .../apache/lucene/store/NIOFSDirectory.java | 8 +-
 .../grouping/AllGroupHeadsCollector.java | 84 +-
 .../search/grouping/AllGroupsCollector.java | 23 +-
 .../grouping/BlockGroupingCollector.java | 193 +-
 .../search/grouping/CollectedSearchGroup.java | 10 +-
 .../grouping/DistinctValuesCollector.java | 27 +-
 .../lucene/search/grouping/DoubleRange.java | 10 +-
 .../search/grouping/DoubleRangeFactory.java | 20 +-
 .../grouping/DoubleRangeGroupSelector.java | 16 +-
 .../grouping/FirstPassGroupingCollector.java | 100 +-
 .../lucene/search/grouping/GroupDocs.java | 40 +-
 .../search/grouping/GroupFacetCollector.java | 93 +-
 .../lucene/search/grouping/GroupReducer.java | 35 +-
 .../lucene/search/grouping/GroupSelector.java | 39 +-
 .../search/grouping/GroupingSearch.java | 135 +-
 .../lucene/search/grouping/LongRange.java | 9 +-
 .../search/grouping/LongRangeFactory.java | 20 +-
 .../grouping/LongRangeGroupSelector.java | 16 +-
 .../lucene/search/grouping/SearchGroup.java | 103 +-
 .../grouping/SecondPassGroupingCollector.java | 28 +-
 .../grouping/TermGroupFacetCollector.java | 113 +-
 .../search/grouping/TermGroupSelector.java | 25 +-
 .../lucene/search/grouping/TopGroups.java | 176 +-
 .../search/grouping/TopGroupsCollector.java | 106 +-
 .../grouping/ValueSourceGroupSelector.java | 13 +-
 .../lucene/search/grouping/package-info.java | 187 +-
 .../grouping/AbstractGroupingTestCase.java | 17 +-
 .../grouping/BaseGroupSelectorTestCase.java | 128 +-
 .../grouping/TestAllGroupHeadsCollector.java | 188 +-
 .../grouping/TestAllGroupsCollector.java | 14 +-
 .../search/grouping/TestBlockGrouping.java | 60 +-
 .../grouping/TestDistinctValuesCollector.java | 176 +-
 .../grouping/TestDoubleRangeFactory.java | 2 -
 .../TestDoubleRangeGroupSelector.java | 6 +-
 .../grouping/TestGroupFacetCollector.java | 255 +-
 .../lucene/search/grouping/TestGrouping.java | 796 +++--
 .../search/grouping/TestGroupingSearch.java | 38 +-
 .../search/grouping/TestLongRangeFactory.java | 2 -
 .../grouping/TestLongRangeGroupSelector.java | 4 +-
 .../grouping/TestTermGroupSelector.java | 2 +-
 .../lucene/search/grouping/TestTopGroups.java | 145 +-
 .../TestValueSourceGroupSelector.java | 1 -
 .../lucene/luke/app/AbstractHandler.java | 2 -
 .../lucene/luke/app/DirectoryHandler.java | 5 +-
 .../lucene/luke/app/DirectoryObserver.java | 1 -
 .../apache/lucene/luke/app/IndexHandler.java | 18 +-
 .../apache/lucene/luke/app/IndexObserver.java | 1 -
 .../org/apache/lucene/luke/app/LukeState.java | 5 +-
 .../org/apache/lucene/luke/app/Observer.java | 3 +-
 .../lucene/luke/app/desktop/LukeMain.java | 38 +-
 .../luke/app/desktop/MessageBroker.java | 1 -
 .../lucene/luke/app/desktop/Preferences.java | 8 +-
 .../luke/app/desktop/PreferencesFactory.java | 3 +-
 .../luke/app/desktop/PreferencesImpl.java | 14 +-
 .../components/AnalysisPanelProvider.java | 185 +-
 .../components/AnalysisTabOperator.java | 2 -
 .../components/CommitsPanelProvider.java | 204 +-
 .../components/ComponentOperatorRegistry.java | 4 +-
 .../components/DocumentsPanelProvider.java | 345 ++-
 .../components/DocumentsTabOperator.java | 2 +-
 .../desktop/components/LogsPanelProvider.java | 6 +-
 .../components/LukeWindowProvider.java | 33 +-
 .../desktop/components/MenuBarProvider.java | 45 +-
 .../components/OverviewPanelProvider.java | 172 +-
 .../components/SearchPanelProvider.java | 340 ++-
 .../desktop/components/TabSwitcherProxy.java | 1 -
 .../components/TabbedPaneProvider.java | 10 +-
 .../desktop/components/TableColumnInfo.java | 1 -
 .../desktop/components/TableModelBase.java | 9 +-
 .../dialog/ConfirmDialogFactory.java | 23 +-
 .../components/dialog/HelpDialogFactory.java | 15 +-
 .../analysis/AnalysisChainDialogFactory.java | 48 +-
 .../analysis/EditFiltersDialogFactory.java | 140 +-
 .../dialog/analysis/EditFiltersMode.java | 3 +-
 .../analysis/EditParamsDialogFactory.java | 106 +-
 .../dialog/analysis/EditParamsMode.java | 4 +-
 .../analysis/TokenAttributeDialogFactory.java | 40 +-
 .../dialog/analysis/package-info.java | 2 +-
 .../documents/AddDocumentDialogFactory.java | 254 +-
 .../documents/AddDocumentDialogOperator.java | 1 -
 .../documents/DocValuesDialogFactory.java | 64 +-
 .../documents/IndexOptionsDialogFactory.java | 27 +-
 .../documents/StoredValueDialogFactory.java | 34 +-
 .../documents/TermVectorDialogFactory.java | 63 +-
 .../dialog/documents/package-info.java | 2 +-
 .../dialog/menubar/AboutDialogFactory.java | 102 +-
 .../menubar/CheckIndexDialogFactory.java | 181 +-
 .../menubar/CreateIndexDialogFactory.java | 154 +-
 .../menubar/ExportTermsDialogFactory.java | 125 +-
 .../menubar/OpenIndexDialogFactory.java | 76 +-
 .../menubar/OptimizeIndexDialogFactory.java | 95 +-
 .../dialog/menubar/package-info.java | 2 +-
 .../components/dialog/package-info.java | 2 +-
 .../dialog/search/ExplainDialogFactory.java | 38 +-
 .../dialog/search/package-info.java | 2 +-
 .../analysis/CustomAnalyzerPanelOperator.java | 1 -
 .../analysis/CustomAnalyzerPanelProvider.java | 267 +-
 .../analysis/PresetAnalyzerPanelOperator.java | 1 -
 .../analysis/PresetAnalyzerPanelProvider.java | 20 +-
 .../SimpleAnalyzeResultPanelOperator.java | 4 +-
 .../SimpleAnalyzeResultPanelProvider.java | 55 +-
 .../StepByStepAnalyzeResultPanelOperator.java | 3 +-
 .../StepByStepAnalyzeResultPanelProvider.java | 99 +-
 .../fragments/analysis/package-info.java | 2 +-
 .../components/fragments/package-info.java | 2 +-
 .../search/AnalyzerPaneProvider.java | 43 +-
 .../fragments/search/AnalyzerTabOperator.java | 1 -
 .../search/FieldValuesPaneProvider.java | 37 +-
 .../search/FieldValuesTabOperator.java | 1 -
 .../fragments/search/MLTPaneProvider.java | 59 +-
 .../fragments/search/MLTTabOperator.java | 1 -
 .../search/QueryParserPaneProvider.java | 109 +-
 .../search/QueryParserTabOperator.java | 2 -
 .../search/SimilarityPaneProvider.java | 18 +-
 .../fragments/search/SortPaneProvider.java | 75 +-
 .../fragments/search/SortTabOperator.java | 2 -
 .../fragments/search/package-info.java | 2 +-
 .../app/desktop/components/package-info.java | 2 +-
 .../app/desktop/dto/documents/NewField.java | 5 +-
 .../desktop/dto/documents/package-info.java | 2 +-
 .../lucene/luke/app/desktop/package-info.java | 2 +-
 .../luke/app/desktop/util/DialogOpener.java | 17 +-
 .../app/desktop/util/ExceptionHandler.java | 2 -
 .../luke/app/desktop/util/FontUtils.java | 13 +-
 .../app/desktop/util/HelpHeaderRenderer.java | 67 +-
 .../luke/app/desktop/util/ImageUtils.java | 9 +-
 .../luke/app/desktop/util/ListUtils.java | 8 +-
 .../luke/app/desktop/util/MessageUtils.java | 37 +-
 .../luke/app/desktop/util/NumericUtils.java | 31 +-
 .../luke/app/desktop/util/StringUtils.java | 3 +-
 .../luke/app/desktop/util/StyleConstants.java | 4 +-
 .../luke/app/desktop/util/TabUtils.java | 12 +-
 .../luke/app/desktop/util/TableUtils.java | 27 +-
 .../app/desktop/util/TextAreaAppender.java | 28 +-
 .../app/desktop/util/TextAreaPrintStream.java | 2 +-
 .../luke/app/desktop/util/URLLabel.java | 16 +-
 .../app/desktop/util/inifile/IniFile.java | 1 -
 .../desktop/util/inifile/IniFileReader.java | 1 -
 .../desktop/util/inifile/IniFileWriter.java | 1 -
 .../app/desktop/util/inifile/OptionMap.java | 1 -
 .../desktop/util/inifile/SimpleIniFile.java | 4 +-
 .../util/inifile/SimpleIniFileReader.java | 37 +-
 .../desktop/util/inifile/package-info.java | 2 +-
 .../app/desktop/util/lang/package-info.java | 2 +-
 .../luke/app/desktop/util/package-info.java | 2 +-
 .../apache/lucene/luke/app/package-info.java | 2 +-
 .../lucene/luke/models/LukeException.java | 1 -
 .../apache/lucene/luke/models/LukeModel.java | 7 +-
 .../lucene/luke/models/analysis/Analysis.java | 70 +-
 .../luke/models/analysis/AnalysisFactory.java | 1 -
 .../luke/models/analysis/AnalysisImpl.java | 86 +-
 .../models/analysis/CustomAnalyzerConfig.java | 20 +-
 .../luke/models/analysis/package-info.java | 2 +-
 .../lucene/luke/models/commits/Commit.java | 8 +-
 .../lucene/luke/models/commits/Commits.java | 12 +-
 .../luke/models/commits/CommitsFactory.java | 1 -
 .../luke/models/commits/CommitsImpl.java | 39 +-
 .../lucene/luke/models/commits/File.java | 7 +-
 .../lucene/luke/models/commits/Segment.java | 8 +-
 .../luke/models/commits/package-info.java | 2 +-
 .../luke/models/documents/DocValues.java | 35 +-
 .../models/documents/DocValuesAdapter.java | 58 +-
 .../luke/models/documents/DocumentField.java | 34 +-
 .../luke/models/documents/Documents.java | 58 +-
 .../models/documents/DocumentsFactory.java | 1 -
 .../luke/models/documents/DocumentsImpl.java | 48 +-
 .../luke/models/documents/TermPosting.java | 24 +-
 .../models/documents/TermVectorEntry.java | 68 +-
 .../models/documents/TermVectorsAdapter.java | 10 +-
 .../luke/models/documents/package-info.java | 2 +-
 .../lucene/luke/models/overview/Overview.java | 57 +-
 .../luke/models/overview/OverviewFactory.java | 1 -
 .../luke/models/overview/OverviewImpl.java | 18 +-
 .../luke/models/overview/TermCounts.java | 17 +-
 .../luke/models/overview/TermCountsOrder.java | 20 +-
 .../luke/models/overview/TermStats.java | 34 +-
 .../lucene/luke/models/overview/TopTerms.java | 13 +-
 .../luke/models/overview/package-info.java | 2 +-
 .../lucene/luke/models/package-info.java | 2 +-
 .../lucene/luke/models/search/MLTConfig.java | 6 +-
 .../luke/models/search/QueryParserConfig.java | 63 +-
 .../lucene/luke/models/search/Search.java | 47 +-
 .../luke/models/search/SearchFactory.java | 1 -
 .../lucene/luke/models/search/SearchImpl.java | 114 +-
 .../luke/models/search/SearchResults.java | 58 +-
 .../luke/models/search/SimilarityConfig.java | 26 +-
 .../luke/models/search/package-info.java | 2 +-
 .../lucene/luke/models/tools/IndexTools.java | 15 +-
 .../luke/models/tools/IndexToolsFactory.java | 1 -
 .../luke/models/tools/IndexToolsImpl.java | 23 +-
 .../luke/models/tools/package-info.java | 2 +-
 .../lucene/luke/models/util/IndexUtils.java | 93 +-
 .../lucene/luke/models/util/package-info.java | 2 +-
 .../models/util/twentynewsgroups/Message.java | 8 +-
 .../twentynewsgroups/MessageFilesParser.java | 12 +-
 .../util/twentynewsgroups/package-info.java | 2 +-
 .../org/apache/lucene/luke/package-info.java | 2 +-
 .../lucene/luke/util/BytesRefUtils.java | 7 +-
 .../lucene/luke/util/LoggerFactory.java | 37 +-
 .../apache/lucene/luke/util/package-info.java | 2 +-
 .../luke/util/reflection/ClassScanner.java | 10 +-
 .../util/reflection/SubtypeCollector.java | 8 +-
 .../luke/util/reflection/package-info.java | 2 +-
 .../util/inifile/SimpleIniFileTest.java | 1 -
 .../models/analysis/AnalysisImplTest.java | 60 +-
 .../luke/models/commits/CommitsImplTest.java | 10 +-
 .../documents/DocValuesAdapterTest.java | 16 +-
 .../models/documents/DocumentsImplTest.java | 7 +-
 .../models/documents/DocumentsTestBase.java | 34 +-
 .../documents/TermVectorsAdapterTest.java | 4 +-
 .../models/overview/OverviewImplTest.java | 8 +-
 .../models/overview/OverviewTestBase.java | 4 +-
 .../luke/models/overview/TermCountsTest.java | 4 +-
 .../luke/models/overview/TopTermsTest.java | 2 -
 .../luke/models/search/SearchImplTest.java | 111 +-
 .../lucene/misc/CollectorMemoryTracker.java | 5 +-
 .../org/apache/lucene/misc/GetTermInfo.java | 38 +-
 .../org/apache/lucene/misc/HighFreqTerms.java | 88 +-
 .../apache/lucene/misc/IndexMergeTool.java | 42 +-
 .../lucene/misc/SweetSpotSimilarity.java | 165 +-
 .../org/apache/lucene/misc/TermStats.java | 18 +-
 .../lucene/misc/document/LazyDocument.java | 63 +-
 .../lucene/misc/document/package-info.java | 2 +-
 .../lucene/misc/index/IndexSplitter.java | 65 +-
 .../misc/index/MultiPassIndexSplitter.java | 71 +-
 .../lucene/misc/index/PKIndexSplitter.java | 82 +-
 .../lucene/misc/index/package-info.java | 2 +-
 .../org/apache/lucene/misc/package-info.java | 4 +-
 .../search/DiversifiedTopDocsCollector.java | 81 +-
 .../lucene/misc/search/DocValuesStats.java | 43 +-
 .../misc/search/DocValuesStatsCollector.java | 15 +-
 .../MemoryAccountingBitsetCollector.java | 1 -
 .../lucene/misc/search/package-info.java | 2 +-
 .../similarity/LegacyBM25Similarity.java | 31 +-
 .../misc/search/similarity/package-info.java | 2 +-
 .../store/HardlinkCopyDirectoryWrapper.java | 78 +-
 .../lucene/misc/store/NativePosixUtil.java | 39 +-
 .../misc/store/NativeUnixDirectory.java | 180 +-
 .../lucene/misc/store/RAFDirectory.java | 95 +-
 .../lucene/misc/store/WindowsDirectory.java | 66 +-
 .../lucene/misc/store/package-info.java | 2 +-
 .../lucene/misc/util/MemoryTracker.java | 5 +-
 .../lucene/misc/util/fst/ListOfOutputs.java | 66 +-
 .../util/fst/UpToTwoPositiveIntOutputs.java | 48 +-
 .../lucene/misc/util/fst/package-info.java | 2 +-
 .../apache/lucene/misc/util/package-info.java | 5 +-
 .../lucene/misc/SweetSpotSimilarityTest.java | 213 +-
 .../apache/lucene/misc/TestHighFreqTerms.java | 203 +-
 .../lucene/misc/TestIndexMergeTool.java | 40 +-
 .../misc/document/TestLazyDocument.java | 80 +-
 .../lucene/misc/index/TestIndexSplitter.java | 31 +-
 .../index/TestMultiPassIndexSplitter.java | 44 +-
 .../misc/index/TestPKIndexSplitter.java | 67 +-
 .../TestDiversifiedTopDocsCollector.java | 298 +-
 .../search/TestDocValuesStatsCollector.java | 47 +-
 .../TestMemoryAccountingBitsetCollector.java | 14 +-
 .../similarity/TestLegacyBM25Similarity.java | 64 +-
 .../misc/store/NativeLibEnableRule.java | 9 +-
 .../misc/store/NativeUnixDirectoryTest.java | 20 +-
 .../TestHardLinkCopyDirectoryWrapper.java | 30 +-
 .../lucene/misc/store/TestRAFDirectory.java | 12 +-
 .../misc/store/WindowsDirectoryTest.java | 11 +-
 .../misc/util/TestCollectorMemoryTracker.java | 22 +-
 .../lucene/misc/util/fst/TestFSTsMisc.java | 31 +-
 .../lucene/monitor/CandidateMatcher.java | 60 +-
 .../lucene/monitor/CollectingMatcher.java | 9 +-
 .../lucene/monitor/ConcurrentQueryLoader.java | 59 +-
 .../lucene/monitor/CustomQueryHandler.java | 15 +-
 .../apache/lucene/monitor/DocumentBatch.java | 11 +-
 .../lucene/monitor/ExplainingMatch.java | 54 +-
 .../monitor/ForceNoBulkScoringQuery.java | 12 +-
 .../lucene/monitor/HighlightsMatch.java | 146 +-
 .../apache/lucene/monitor/MatcherFactory.java | 5 +-
 .../lucene/monitor/MatchingQueries.java | 33 +-
 .../org/apache/lucene/monitor/Monitor.java | 159 +-
 .../lucene/monitor/MonitorConfiguration.java | 30 +-
 .../apache/lucene/monitor/MonitorQuery.java | 44 +-
 .../monitor/MonitorQuerySerializer.java | 22 +-
 .../lucene/monitor/MonitorUpdateListener.java | 40 +-
 .../lucene/monitor/MultiMatchingQueries.java | 37 +-
 .../MultipassTermFilteredPresearcher.java | 77 +-
 .../lucene/monitor/ParallelMatcher.java | 64 +-
 .../lucene/monitor/PartitionMatcher.java | 58 +-
 .../apache/lucene/monitor/Presearcher.java | 41 +-
 .../lucene/monitor/PresearcherMatch.java | 12 +-
 .../lucene/monitor/PresearcherMatches.java | 12 +-
 .../apache/lucene/monitor/QueryAnalyzer.java | 41 +-
 .../lucene/monitor/QueryCacheEntry.java | 21 +-
 .../lucene/monitor/QueryDecomposer.java | 24 +-
 .../org/apache/lucene/monitor/QueryIndex.java | 86 +-
 .../org/apache/lucene/monitor/QueryMatch.java | 30 +-
 .../lucene/monitor/QueryTimeListener.java | 20 +-
 .../org/apache/lucene/monitor/QueryTree.java | 77 +-
 .../lucene/monitor/RegexpQueryHandler.java | 44 +-
 .../apache/lucene/monitor/ScoringMatch.java | 15 +-
 .../org/apache/lucene/monitor/SlowLog.java | 22 +-
 .../monitor/SuffixingNGramTokenFilter.java | 29 +-
 .../monitor/TermFilteredPresearcher.java | 126 +-
 .../apache/lucene/monitor/TermWeightor.java | 48 +-
 .../lucene/monitor/TermsEnumTokenStream.java | 8 +-
 .../apache/lucene/monitor/package-info.java | 127 +-
 .../monitor/ConcurrentMatcherTestBase.java | 23 +-
 ...eldFilterPresearcherComponentTestBase.java | 22 +-
 .../lucene/monitor/MonitorTestBase.java | 1 -
 .../lucene/monitor/PresearcherTestBase.java | 51 +-
 .../monitor/TestBooleanClauseWeightings.java | 36 +-
 .../monitor/TestBooleanTermExtractor.java | 16 +-
 .../lucene/monitor/TestCachePurging.java | 74 +-
 .../monitor/TestConcurrentQueryLoader.java | 1 -
 .../lucene/monitor/TestDocumentBatch.java | 17 +-
 .../lucene/monitor/TestExplainingMatcher.java | 4 +-
 .../apache/lucene/monitor/TestExtractors.java | 26 +-
 ...TestFieldFilteredMultipassPresearcher.java | 4 +-
 .../TestFieldTermFilteredPresearcher.java | 3 +-
 .../monitor/TestForceNoBulkScoringQuery.java | 11 +-
 .../monitor/TestHighlightingMatcher.java | 307 +-
 .../monitor/TestMatchAllPresearcher.java | 1 -
 .../apache/lucene/monitor/TestMonitor.java | 103 +-
 .../monitor/TestMonitorErrorHandling.java | 15 +-
 .../monitor/TestMonitorPersistence.java | 29 +-
 .../monitor/TestMultipassPresearcher.java | 67 +-
 .../lucene/monitor/TestParallelMatcher.java | 3 +-
 .../lucene/monitor/TestPartitionMatcher.java | 4 +-
 .../TestPresearcherMatchCollector.java | 6 +-
 .../lucene/monitor/TestQueryAnalyzer.java | 72 +-
 .../lucene/monitor/TestQueryDecomposer.java | 44 +-
 .../monitor/TestQueryTermComparators.java | 17 +-
 .../lucene/monitor/TestQueryTermFilter.java | 12 +-
 .../monitor/TestRegexpQueryHandler.java | 68 +-
 .../lucene/monitor/TestSimilarities.java | 19 +-
 .../lucene/monitor/TestSimpleMatcher.java | 4 +-
 .../lucene/monitor/TestSpanExtractors.java | 66 +-
 .../monitor/TestSuffixingNGramTokenizer.java | 61 +-
 .../lucene/monitor/TestTermPresearcher.java | 62 +-
 .../monitor/TestTermsEnumTokenFilter.java | 23 +-
 .../monitor/TestWildcardTermPresearcher.java | 36 +-
 .../queryparser/charstream/CharStream.java | 77 +-
 .../charstream/FastCharStream.java | 43 +-
 .../queryparser/charstream/package-info.java | 4 +-
 .../classic/MultiFieldQueryParser.java | 193 +-
 .../queryparser/classic/QueryParserBase.java | 556 ++--
 .../queryparser/classic/package-info.java | 481 ++--
 .../ComplexPhraseQueryParser.java | 157 +-
 .../complexPhrase/package-info.java | 7 +-
 .../ext/ExtendableQueryParser.java | 102 +-
 .../queryparser/ext/ExtensionQuery.java | 23 +-
 .../lucene/queryparser/ext/Extensions.java | 184 +-
 .../queryparser/ext/ParserExtension.java | 35 +-
 .../lucene/queryparser/ext/package-info.java | 8 +-
 .../flexible/core/QueryNodeError.java | 23 +-
 .../flexible/core/QueryNodeException.java | 20 +-
 .../core/QueryNodeParseException.java | 46 +-
 .../flexible/core/QueryParserHelper.java | 163 +-
 .../flexible/core/builders/QueryBuilder.java | 12 +-
 .../core/builders/QueryTreeBuilder.java | 99 +-
 .../flexible/core/builders/package-info.java | 16 +-
 .../core/config/AbstractQueryConfig.java | 60 +-
 .../core/config/ConfigurationKey.java | 17 +-
 .../flexible/core/config/FieldConfig.java | 19 +-
 .../core/config/FieldConfigListener.java | 14 +-
 .../core/config/QueryConfigHandler.java | 54 +-
 .../flexible/core/config/package-info.java | 28 +-
 .../core/messages/QueryParserMessages.java | 5 +-
 .../flexible/core/messages/package-info.java | 9 +-
 .../flexible/core/nodes/AndQueryNode.java | 31 +-
 .../flexible/core/nodes/AnyQueryNode.java | 49 +-
 .../flexible/core/nodes/BooleanQueryNode.java | 28 +-
 .../flexible/core/nodes/BoostQueryNode.java | 48 +-
 .../flexible/core/nodes/DeletedQueryNode.java | 8 +-
 .../flexible/core/nodes/FieldQueryNode.java | 85 +-
 .../core/nodes/FieldValuePairQueryNode.java | 12 +-
 .../flexible/core/nodes/FieldableNode.java | 19 +-
 .../flexible/core/nodes/FuzzyQueryNode.java | 43 +-
 .../flexible/core/nodes/GroupQueryNode.java | 24 +-
 .../core/nodes/MatchAllDocsQueryNode.java | 4 +-
 .../core/nodes/MatchNoDocsQueryNode.java | 6 +-
 .../core/nodes/ModifierQueryNode.java | 92 +-
 .../core/nodes/NoTokenFoundQueryNode.java | 4 +-
 .../flexible/core/nodes/OpaqueQueryNode.java | 22 +-
 .../flexible/core/nodes/OrQueryNode.java | 30 +-
 .../flexible/core/nodes/PathQueryNode.java | 87 +-
 .../core/nodes/PhraseSlopQueryNode.java | 40 +-
 .../core/nodes/ProximityQueryNode.java | 140 +-
 .../flexible/core/nodes/QueryNode.java | 47 +-
 .../flexible/core/nodes/QueryNodeImpl.java | 68 +-
 .../core/nodes/QuotedFieldQueryNode.java | 32 +-
 .../flexible/core/nodes/RangeQueryNode.java | 15 +-
 .../flexible/core/nodes/SlopQueryNode.java | 46 +-
 .../core/nodes/TextableQueryNode.java | 5 +-
 .../core/nodes/TokenizedPhraseQueryNode.java | 18 +-
 .../flexible/core/nodes/ValueQueryNode.java | 10 +-
 .../flexible/core/nodes/package-info.java | 108 +-
 .../flexible/core/package-info.java | 54 +-
 .../core/parser/EscapeQuerySyntax.java | 20 +-
 .../flexible/core/parser/SyntaxParser.java | 13 +-
 .../flexible/core/parser/package-info.java | 29 +-
 ...NoChildOptimizationQueryNodeProcessor.java | 31 +-
 .../core/processors/QueryNodeProcessor.java | 52 +-
 .../processors/QueryNodeProcessorImpl.java | 119 +-
 .../QueryNodeProcessorPipeline.java | 160 +-
 .../RemoveDeletedQueryNodesProcessor.java | 27 +-
 .../core/processors/package-info.java | 42 +-
 .../core/util/QueryNodeOperation.java | 86 +-
 .../flexible/core/util/StringUtils.java | 12 +-
 .../core/util/UnescapedCharSequence.java | 45 +-
 .../flexible/core/util/package-info.java | 10 +-
 .../flexible/messages/Message.java | 5 +-
 .../flexible/messages/MessageImpl.java | 6 +-
 .../queryparser/flexible/messages/NLS.java | 69 +-
 .../flexible/messages/NLSException.java | 14 +-
 .../flexible/messages/package-info.java | 73 +-
 .../precedence/PrecedenceQueryParser.java | 38 +-
 .../flexible/precedence/package-info.java | 22 +-
 .../BooleanModifiersQueryNodeProcessor.java | 45 +-
 .../PrecedenceQueryNodeProcessorPipeline.java | 34 +-
 .../precedence/processors/package-info.java | 38 +-
 .../CommonQueryParserConfiguration.java | 133 +-
 .../flexible/standard/QueryParserUtil.java | 119 +-
 .../standard/StandardQueryParser.java | 347 +--
 .../builders/AnyQueryNodeBuilder.java | 22 +-
 .../builders/BooleanQueryNodeBuilder.java | 48 +-
 .../builders/BoostQueryNodeBuilder.java | 12 +-
 .../builders/DummyQueryNodeBuilder.java | 15 +-
 .../builders/FieldQueryNodeBuilder.java | 9 +-
 .../builders/FuzzyQueryNodeBuilder.java | 19 +-
 .../builders/GroupQueryNodeBuilder.java | 10 +-
 .../MatchAllDocsQueryNodeBuilder.java | 18 +-
 .../builders/MatchNoDocsQueryNodeBuilder.java | 16 +-
 .../builders/ModifierQueryNodeBuilder.java | 10 +-
 .../builders/MultiPhraseQueryNodeBuilder.java | 18 +-
 .../builders/PhraseQueryNodeBuilder.java | 12 +-
 .../builders/PointRangeQueryNodeBuilder.java | 29 +-
 .../PrefixWildcardQueryNodeBuilder.java | 18 +-
 .../builders/RegexpQueryNodeBuilder.java | 13 +-
 .../builders/SlopQueryNodeBuilder.java | 19 +-
 .../builders/StandardQueryBuilder.java | 7 +-
 .../builders/StandardQueryTreeBuilder.java | 24 +-
 .../builders/TermRangeQueryNodeBuilder.java | 38 +-
 .../builders/WildcardQueryNodeBuilder.java | 18 +-
 .../standard/builders/package-info.java | 23 +-
 .../config/FieldBoostMapFCListener.java | 18 +-
 .../config/FieldDateResolutionFCListener.java | 20 +-
 .../flexible/standard/config/FuzzyConfig.java | 9 +-
 .../standard/config/NumberDateFormat.java | 34 +-
 .../standard/config/PointsConfig.java | 67 +-
 .../standard/config/PointsConfigListener.java | 28 +-
 .../config/StandardQueryConfigHandler.java | 157 +-
 .../standard/config/package-info.java | 20 +-
 .../nodes/AbstractRangeQueryNode.java | 115 +-
 .../standard/nodes/BooleanModifierNode.java | 9 +-
 .../standard/nodes/MultiPhraseQueryNode.java | 20 +-
 .../standard/nodes/PointQueryNode.java | 78 +-
 .../standard/nodes/PointRangeQueryNode.java | 98 +-
 .../nodes/PrefixWildcardQueryNode.java | 25 +-
 .../standard/nodes/RegexpQueryNode.java | 32 +-
 .../standard/nodes/SynonymQueryNode.java | 1 -
 .../standard/nodes/TermRangeQueryNode.java | 25 +-
 .../standard/nodes/WildcardQueryNode.java | 20 +-
 .../flexible/standard/nodes/package-info.java | 13 +-
 .../flexible/standard/package-info.java | 31 +-
 .../parser/EscapeQuerySyntaxImpl.java | 131 +-
 .../standard/parser/package-info.java | 17 +-
 .../AllowLeadingWildcardProcessor.java | 42 +-
 .../AnalyzerQueryNodeProcessor.java | 153 +-
 .../BooleanQuery2ModifierNodeProcessor.java | 112 +-
 ...leChildOptimizationQueryNodeProcessor.java | 25 +-
 .../processors/BoostQueryNodeProcessor.java | 25 +-
 .../DefaultPhraseSlopQueryNodeProcessor.java | 34 +-
 .../processors/FuzzyQueryNodeProcessor.java | 28 +-
 .../MatchAllDocsQueryNodeProcessor.java | 19 +-
 .../MultiFieldQueryNodeProcessor.java | 26 +-
 .../MultiTermRewriteMethodProcessor.java | 17 +-
 .../OpenRangeQueryNodeProcessor.java | 42 +-
 .../PhraseSlopQueryNodeProcessor.java | 17 +-
 .../processors/PointQueryNodeProcessor.java | 92 +-
 .../PointRangeQueryNodeProcessor.java | 56 +-
 .../processors/RegexpQueryNodeProcessor.java | 5 +-
 .../RemoveEmptyNonLeafQueryNodeProcessor.java | 28 +-
 .../StandardQueryNodeProcessorPipeline.java | 25 +-
 .../TermRangeQueryNodeProcessor.java | 85 +-
 .../WildcardQueryNodeProcessor.java | 85 +-
 .../standard/processors/package-info.java | 18 +-
 .../queryparser/simple/SimpleQueryParser.java | 200 +-
 .../queryparser/simple/package-info.java | 6 +-
 .../surround/parser/package-info.java | 9 +-
 .../queryparser/surround/query/AndQuery.java | 22 +-
 .../surround/query/BasicQueryFactory.java | 46 +-
 .../surround/query/ComposedQuery.java | 68 +-
 .../surround/query/DistanceQuery.java | 59 +-
 .../surround/query/DistanceRewriteQuery.java | 7 +-
 .../surround/query/DistanceSubQuery.java | 18 +-
 .../surround/query/FieldsQuery.java | 53 +-
 .../queryparser/surround/query/NotQuery.java | 34 +-
 .../queryparser/surround/query/OrQuery.java | 27 +-
 .../surround/query/RewriteQuery.java | 37 +-
 .../surround/query/SimpleTerm.java | 87 +-
 .../query/SimpleTermRewriteQuery.java | 35 +-
 .../surround/query/SpanNearClauseFactory.java | 97 +-
 .../surround/query/SrndBooleanQuery.java | 18 +-
 .../surround/query/SrndPrefixQuery.java | 51 +-
 .../queryparser/surround/query/SrndQuery.java | 75 +-
 .../surround/query/SrndTermQuery.java | 34 +-
 .../surround/query/SrndTruncQuery.java | 65 +-
 .../surround/query/TooManyBasicQueries.java | 12 +-
 .../surround/query/package-info.java | 21 +-
 .../lucene/queryparser/xml/CoreParser.java | 58 +-
 .../xml/CorePlusExtensionsParser.java | 10 +-
 .../xml/CorePlusQueriesParser.java | 9 +-
 .../lucene/queryparser/xml/DOMUtils.java | 74 +-
 .../queryparser/xml/ParserException.java | 5 +-
 .../lucene/queryparser/xml/QueryBuilder.java | 4 +-
 .../queryparser/xml/QueryBuilderFactory.java | 8 +-
 .../xml/builders/BooleanQueryBuilder.java | 15 +-
 .../xml/builders/BoostingTermBuilder.java | 14 +-
 .../builders/ConstantScoreQueryBuilder.java | 6 +-
 .../builders/DisjunctionMaxQueryBuilder.java | 12 +-
 .../builders/FuzzyLikeThisQueryBuilder.java | 8 +-
 .../xml/builders/LikeThisQueryBuilder.java | 41 +-
 .../builders/MatchAllDocsQueryBuilder.java | 11 +-
 .../xml/builders/PointRangeQueryBuilder.java | 22 +-
 .../xml/builders/RangeQueryBuilder.java | 8 +-
 .../xml/builders/SpanBuilderBase.java | 10 +-
 .../xml/builders/SpanFirstBuilder.java | 10 +-
 .../xml/builders/SpanNearBuilder.java | 13 +-
 .../xml/builders/SpanNotBuilder.java | 10 +-
 .../xml/builders/SpanOrBuilder.java | 13 +-
 .../xml/builders/SpanOrTermsBuilder.java | 22 +-
 .../builders/SpanPositionRangeBuilder.java | 5 +-
 .../xml/builders/SpanQueryBuilder.java | 7 +-
 .../xml/builders/SpanQueryBuilderFactory.java | 15 +-
 .../xml/builders/SpanTermBuilder.java | 10 +-
 .../xml/builders/TermQueryBuilder.java | 12 +-
 .../xml/builders/TermsQueryBuilder.java | 13 +-
 .../xml/builders/UserInputQueryBuilder.java | 20 +-
 .../xml/builders/package-info.java | 7 +-
 .../lucene/queryparser/xml/package-info.java | 7 +-
 .../classic/TestMultiAnalyzer.java | 144 +-
 .../classic/TestMultiFieldQueryParser.java | 188 +-
 .../classic/TestMultiPhraseQueryParsing.java | 41 +-
 .../queryparser/classic/TestQueryParser.java | 531 ++--
 .../complexPhrase/TestComplexPhraseQuery.java | 84 +-
 .../lucene/queryparser/ext/ExtensionStub.java | 4 +-
 .../ext/TestExtendableQueryParser.java | 64 +-
 .../queryparser/ext/TestExtensions.java | 21 +-
 .../core/builders/TestQueryTreeBuilder.java | 24 +-
 .../flexible/core/nodes/TestQueryNode.java | 17 +-
 .../flexible/messages/TestNLS.java | 68 +-
 .../precedence/TestPrecedenceQueryParser.java | 180 +-
 .../spans/SpanOrQueryNodeBuilder.java | 7 +-
 .../spans/SpanTermQueryNodeBuilder.java | 11 +-
 .../spans/SpansQueryConfigHandler.java | 9 +-
 .../flexible/spans/SpansQueryTreeBuilder.java | 10 +-
 .../SpansValidatorQueryNodeProcessor.java | 29 +-
 .../flexible/spans/TestSpanQueryParser.java | 140 +-
 .../TestSpanQueryParserSimpleSample.java | 67 +-
 .../flexible/spans/UniqueFieldAttribute.java | 7 +-
 .../spans/UniqueFieldAttributeImpl.java | 17 +-
 .../spans/UniqueFieldQueryNodeProcessor.java | 20 +-
 .../standard/TestMultiAnalyzerQPHelper.java | 96 +-
 .../standard/TestMultiFieldQPHelper.java | 168 +-
 .../standard/TestPointQueryParser.java | 93 +-
 .../flexible/standard/TestQPHelper.java | 454 +--
 .../flexible/standard/TestStandardQP.java | 109 +-
 .../simple/TestSimpleQueryParser.java | 70 +-
 .../surround/query/BooleanQueryTst.java | 27 +-
 .../surround/query/ExceptionQueryTst.java | 17 +-
 .../surround/query/SingleFieldTestDb.java | 29 +-
 .../surround/query/SrndQueryTest.java | 11 +-
 .../surround/query/Test01Exceptions.java | 4 -
 .../surround/query/Test02Boolean.java | 101 +-
 .../surround/query/Test03Distance.java | 227 +-
 .../queryparser/util/QueryParserTestBase.java | 743 ++---
 .../xml/CoreParserTestIndexData.java | 20 +-
 .../queryparser/xml/TestCoreParser.java | 75 +-
 .../xml/TestCorePlusExtensionsParser.java | 3 +-
 .../xml/TestCorePlusQueriesParser.java | 1 -
 .../IndexAndTaxonomyReplicationHandler.java | 104 +-
 .../replicator/IndexAndTaxonomyRevision.java | 120 +-
 .../replicator/IndexInputInputStream.java | 26 +-
 .../replicator/IndexReplicationHandler.java | 166 +-
 .../lucene/replicator/IndexRevision.java | 69 +-
 .../lucene/replicator/LocalReplicator.java | 117 +-
 .../PerSessionDirectoryFactory.java | 17 +-
 .../lucene/replicator/ReplicationClient.java | 213 +-
 .../apache/lucene/replicator/Replicator.java | 75 +-
 .../apache/lucene/replicator/Revision.java | 55 +-
 .../lucene/replicator/RevisionFile.java | 20 +-
 .../replicator/SessionExpiredException.java | 25 +-
 .../lucene/replicator/SessionToken.java | 40 +-
 .../replicator/http/HttpClientBase.java | 140 +-
 .../replicator/http/HttpReplicator.java | 97 +-
 .../replicator/http/ReplicationService.java | 96 +-
 .../lucene/replicator/http/package-info.java | 8 +-
 .../apache/lucene/replicator/nrt/CopyJob.java | 97 +-
 .../lucene/replicator/nrt/CopyOneFile.java | 53 +-
 .../lucene/replicator/nrt/CopyState.java | 21 +-
 .../lucene/replicator/nrt/FileMetaData.java | 12 +-
 .../apache/lucene/replicator/nrt/Node.java | 130 +-
 .../nrt/NodeCommunicationException.java | 4 +-
 .../nrt/PreCopyMergedSegmentWarmer.java | 25 +-
 .../lucene/replicator/nrt/PrimaryNode.java | 131 +-
 .../replicator/nrt/ReplicaFileDeleter.java | 39 +-
 .../lucene/replicator/nrt/ReplicaNode.java | 473 +--
 .../nrt/SegmentInfosSearcherManager.java | 45 +-
 .../lucene/replicator/nrt/package-info.java | 6 +-
 .../lucene/replicator/package-info.java | 61 +-
 .../lucene/replicator/ReplicatorTestCase.java | 73 +-
 ...TestIndexAndTaxonomyReplicationClient.java | 356 +--
 .../TestIndexAndTaxonomyRevision.java | 38 +-
 .../TestIndexReplicationClient.java | 270 +-
 .../lucene/replicator/TestIndexRevision.java | 30 +-
 .../replicator/TestLocalReplicator.java | 100 +-
 .../lucene/replicator/TestSessionToken.java | 6 +-
 .../replicator/http/ReplicationServlet.java | 11 +-
 .../replicator/http/TestHttpReplicator.java | 62 +-
 .../apache/lucene/replicator/nrt/Jobs.java | 18 +-
 .../lucene/replicator/nrt/NodeProcess.java | 31 +-
 .../lucene/replicator/nrt/SimpleCopyJob.java | 104 +-
 .../replicator/nrt/SimplePrimaryNode.java | 435 +--
 .../replicator/nrt/SimpleReplicaNode.java | 367 +--
 .../lucene/replicator/nrt/SimpleServer.java | 161 +-
 .../lucene/replicator/nrt/SimpleTransLog.java | 87 +-
 .../replicator/nrt/TestNRTReplication.java | 187 +-
 .../nrt/TestStressNRTReplication.java | 506 ++--
 .../lucene/replicator/nrt/ThreadPumper.java | 62 +-
 .../lucene/payloads/PayloadSpanCollector.java | 18 +-
 .../lucene/payloads/PayloadSpanUtil.java | 39 +-
 .../apache/lucene/payloads/package-info.java | 4 +-
 .../idversion/IDVersionPostingsFormat.java | 90 +-
 .../idversion/IDVersionPostingsReader.java | 34 +-
 .../idversion/IDVersionPostingsWriter.java | 55 +-
 .../idversion/IDVersionSegmentTermsEnum.java | 547 ++--
 .../IDVersionSegmentTermsEnumFrame.java | 159 +-
 .../codecs/idversion/SingleDocsEnum.java | 3 +-
 .../codecs/idversion/SinglePostingsEnum.java | 2 +-
 .../VersionBlockTreeTermsReader.java | 126 +-
 .../VersionBlockTreeTermsWriter.java | 340 ++-
 .../codecs/idversion/VersionFieldReader.java | 62 +-
 .../codecs/idversion/package-info.java | 10 +-
 .../sandbox/document/BigIntegerPoint.java | 153 +-
 .../DoublePointMultiRangeBuilder.java | 11 +-
 .../document/FloatPointMultiRangeBuilder.java | 14 +-
 .../document/FloatPointNearestNeighbor.java | 136 +-
 .../sandbox/document/HalfFloatPoint.java | 192 +-
 .../document/IntPointMultiRangeBuilder.java | 10 +-
 .../sandbox/document/LatLonBoundingBox.java | 145 +-
 .../document/LongPointMultiRangeBuilder.java | 10 +-
 .../lucene/sandbox/document/package-info.java | 9 +-
 .../sandbox/queries/FuzzyLikeThisQuery.java | 277 +-
 .../lucene/sandbox/queries/package-info.java | 6 +-
 .../lucene/sandbox/search/BM25FQuery.java | 105 +-
 .../lucene/sandbox/search/CoveringQuery.java | 61 +-
 .../lucene/sandbox/search/CoveringScorer.java | 182 +-
 .../sandbox/search/DocValuesNumbersQuery.java | 75 +-
 .../sandbox/search/DocValuesTermsQuery.java | 161 +-
 ...xSortSortedNumericDocValuesRangeQuery.java | 88 +-
 .../search/LargeNumHitsTopDocsCollector.java | 35 +-
 .../search/LatLonPointPrototypeQueries.java | 54 +-
 .../lucene/sandbox/search/LongHashSet.java | 9 +-
 .../search/MultiNormsLeafSimScorer.java | 46 +-
 .../sandbox/search/MultiRangeQuery.java | 166 +-
 .../sandbox/search/NearestNeighbor.java | 208 +-
 .../sandbox/search/PhraseWildcardQuery.java | 402 +--
 .../sandbox/search/TermAutomatonQuery.java | 161 +-
 .../sandbox/search/TermAutomatonScorer.java | 86 +-
 .../TokenStreamToTermAutomatonQuery.java | 29 +-
 .../lucene/sandbox/search/package-info.java | 7 +-
 .../lucene/payloads/TestPayloadSpanUtil.java | 19 +-
 .../idversion/StringAndPayloadField.java | 4 +-
 .../TestIDVersionPostingsFormat.java | 665 +++--
 .../sandbox/document/TestBigIntegerPoint.java | 86 +-
 .../document/TestDoubleRangeField.java | 38 +-
 .../TestFloatPointNearestNeighbor.java | 98 +-
 .../sandbox/document/TestHalfFloatPoint.java | 43 +-
 .../queries/TestFuzzyLikeThisQuery.java | 22 +-
 .../sandbox/search/LongHashSetTests.java | 11 +-
 .../lucene/sandbox/search/TestBM25FQuery.java | 66 +-
 .../sandbox/search/TestCoveringQuery.java | 29 +-
 .../search/TestDocValuesNumbersQuery.java | 39 +-
 .../search/TestDocValuesTermsQuery.java | 29 +-
 .../search/TestFieldCacheTermsFilter.java | 36 +-
 ...xSortSortedNumericDocValuesRangeQuery.java | 51 +-
 .../TestLargeNumHitsTopDocsCollector.java | 36 +-
 .../search/TestLatLonBoundingBoxQueries.java | 100 +-
 .../sandbox/search/TestMultiRangeQueries.java | 66 +-
 .../lucene/sandbox/search/TestNearest.java | 120 +-
 .../search/TestPhraseWildcardQuery.java | 378 ++-
 .../search/TestTermAutomatonQuery.java | 209 +-
 .../apache/lucene/spatial/ShapeValues.java | 11 +-
 .../lucene/spatial/ShapeValuesSource.java | 10 +-
 .../lucene/spatial/SpatialStrategy.java | 115 +-
 .../bbox/BBoxOverlapRatioValueSource.java | 169 +-
 .../bbox/BBoxSimilarityValueSource.java | 60 +-
 .../lucene/spatial/bbox/BBoxStrategy.java | 227 +-
 .../lucene/spatial/bbox/BBoxValueSource.java | 15 +-
 .../lucene/spatial/bbox/package-info.java | 6 +-
 .../composite/CompositeSpatialStrategy.java | 64 +-
 .../composite/CompositeVerifyQuery.java | 32 +-
 .../composite/IntersectsRPTVerifyQuery.java | 103 +-
 .../spatial/composite/package-info.java | 2 +-
 .../apache/lucene/spatial/package-info.java | 2 +-
 .../prefix/AbstractPrefixTreeQuery.java | 33 +-
 .../AbstractVisitingPrefixTreeQuery.java | 242 +-
 .../prefix/BytesRefIteratorTokenStream.java | 6 +-
 .../prefix/CellToBytesRefIterator.java | 6 +-
 .../prefix/ContainsPrefixTreeQuery.java | 148 +-
 .../spatial/prefix/HeatmapFacetCounter.java | 251 +-
 .../prefix/IntersectsPrefixTreeQuery.java | 29 +-
 .../prefix/NumberRangePrefixTreeStrategy.java | 121 +-
 .../PointPrefixTreeFieldCacheProvider.java | 16 +-
 .../prefix/PrefixTreeFacetCounter.java | 128 +-
 .../spatial/prefix/PrefixTreeStrategy.java | 151 +-
 .../prefix/RecursivePrefixTreeStrategy.java | 107 +-
 .../prefix/TermQueryPrefixTreeStrategy.java | 52 +-
 .../spatial/prefix/WithinPrefixTreeQuery.java | 147 +-
 .../lucene/spatial/prefix/package-info.java | 4 +-
 .../lucene/spatial/prefix/tree/Cell.java | 91 +-
 .../spatial/prefix/tree/CellCanPrune.java | 9 +-
 .../spatial/prefix/tree/CellIterator.java | 26 +-
 .../prefix/tree/DateRangePrefixTree.java | 361 +--
 .../prefix/tree/FilterCellIterator.java | 15 +-
 .../prefix/tree/GeohashPrefixTree.java | 58 +-
 .../spatial/prefix/tree/LegacyCell.java | 97 +-
 .../spatial/prefix/tree/LegacyPrefixTree.java | 39 +-
 .../prefix/tree/NumberRangePrefixTree.java | 526 ++--
 .../prefix/tree/PackedQuadPrefixTree.java | 203 +-
 .../spatial/prefix/tree/QuadPrefixTree.java | 117 +-
 .../spatial/prefix/tree/S2PrefixTree.java | 221 +-
 .../spatial/prefix/tree/S2PrefixTreeCell.java | 448 +--
 .../spatial/prefix/tree/S2ShapeFactory.java | 7 +-
 .../prefix/tree/SingletonCellIterator.java | 3 +-
 .../prefix/tree/SpatialPrefixTree.java | 61 +-
 .../prefix/tree/SpatialPrefixTreeFactory.java | 48 +-
 .../spatial/prefix/tree/TreeCellIterator.java | 53 +-
 .../spatial/prefix/tree/package-info.java | 19 +-
 .../lucene/spatial/query/SpatialArgs.java | 40 +-
 .../spatial/query/SpatialArgsParser.java | 50 +-
 .../spatial/query/SpatialOperation.java | 194 +-
 .../query/UnsupportedSpatialOperation.java | 4 +-
 .../lucene/spatial/query/package-info.java | 4 +-
 .../serialized/SerializedDVStrategy.java | 94 +-
 .../spatial/serialized/package-info.java | 6 +-
 .../spatial/spatial4j/Geo3dBinaryCodec.java | 28 +-
 .../spatial/spatial4j/Geo3dCircleShape.java | 24 +-
 .../spatial4j/Geo3dDistanceCalculator.java | 28 +-
 .../spatial/spatial4j/Geo3dPointShape.java | 10 +-
 .../spatial4j/Geo3dRectangleShape.java | 63 +-
 .../lucene/spatial/spatial4j/Geo3dShape.java | 36 +-
 .../spatial/spatial4j/Geo3dShapeFactory.java | 133 +-
 .../spatial4j/Geo3dSpatialContextFactory.java | 13 +-
 .../spatial/spatial4j/package-info.java | 2 +-
 .../util/CachingDoubleValueSource.java | 14 +-
 .../util/DistanceToShapeValueSource.java | 40 +-
 .../util/ReciprocalDoubleValuesSource.java | 22 +-
 .../spatial/util/ShapeAreaValueSource.java | 34 +-
 .../lucene/spatial/util/ShapeFieldCache.java | 26 +-
 .../ShapeFieldCacheDistanceValueSource.java | 58 +-
 .../spatial/util/ShapeFieldCacheProvider.java | 25 +-
 .../spatial/util/ShapeValuesPredicate.java | 25 +-
 .../lucene/spatial/util/package-info.java | 2 +-
 .../spatial/vector/DistanceValueSource.java | 49 +-
 .../spatial/vector/PointVectorStrategy.java | 154 +-
 .../lucene/spatial/vector/package-info.java | 4 +-
 .../apache/lucene/spatial/SpatialExample.java | 110 +-
 .../lucene/spatial/SpatialMatchConcern.java | 11 +-
 .../lucene/spatial/SpatialTestCase.java | 53 +-
 .../lucene/spatial/SpatialTestData.java | 21 +-
 .../lucene/spatial/SpatialTestQuery.java | 37 +-
 .../lucene/spatial/StrategyTestCase.java | 74 +-
 .../lucene/spatial/TestDistanceStrategy.java | 39 +-
 .../lucene/spatial/TestPortedSolr3.java | 69 +-
 .../spatial/TestQueryEqualsHashCode.java | 60 +-
 .../lucene/spatial/TestSpatialArgs.java | 9 +-
 .../lucene/spatial/TestTestFramework.java | 40 +-
 .../lucene/spatial/bbox/TestBBoxStrategy.java | 162 +-
 .../composite/TestCompositeStrategy.java | 51 +-
 .../RandomSpatialOpStrategyTestCase.java | 65 +-
 .../spatial/prefix/TestDateNRStrategy.java | 44 +-
 .../prefix/TestHeatmapFacetCounter.java | 137 +-
 .../lucene/spatial/prefix/TestJtsPolygon.java | 53 +-
 .../spatial/prefix/TestNumberRangeFacets.java | 82 +-
 .../TestRandomSpatialOpFuzzyPrefixTree.java | 307 +-
 .../TestRandomSpatialOpFuzzyPrefixTree50.java | 5 +-
 .../TestRecursivePrefixTreeStrategy.java | 50 +-
 .../TestTermQueryPrefixGridStrategy.java | 30 +-
 .../prefix/tree/TestDateRangePrefixTree.java | 240 +-
 .../spatial/prefix/tree/TestS2PrefixTree.java | 34 +-
 .../prefix/tree/TestSpatialPrefixTree.java | 32 +-
 .../spatial/query/TestSpatialArgsParser.java | 28 +-
 .../serialized/TestSerializedStrategy.java | 3 +-
 .../spatial4j/RandomizedShapeTestCase.java | 19 +-
 .../spatial4j/ShapeRectRelationTestCase.java | 42 +-
 .../lucene/spatial/spatial4j/TestGeo3d.java | 49 +-
 .../spatial/spatial4j/TestGeo3dRpt.java | 51 +-
 ...TestGeo3dShapeSphereModelRectRelation.java | 44 +-
 .../TestGeo3dShapeWGS84ModelRectRelation.java | 120 +-
 .../vector/TestPointVectorStrategy.java | 31 +-
 .../lucene/spatial3d/Geo3DDocValuesField.java | 313 +-
 .../apache/lucene/spatial3d/Geo3DPoint.java | 151 +-
 .../Geo3DPointDistanceComparator.java | 72 +-
 .../Geo3DPointOutsideDistanceComparator.java | 55 +-
 .../spatial3d/Geo3DPointOutsideSortField.java | 17 +-
 .../lucene/spatial3d/Geo3DPointSortField.java | 15 +-
 .../apache/lucene/spatial3d/Geo3DUtil.java | 162 +-
 .../spatial3d/PointInGeo3DShapeQuery.java | 35 +-
 .../PointInShapeIntersectVisitor.java | 108 +-
 .../lucene/spatial3d/geom/ArcDistance.java | 53 +-
 .../spatial3d/geom/BasePlanetObject.java | 30 +-
 .../lucene/spatial3d/geom/BaseXYZSolid.java | 91 +-
 .../apache/lucene/spatial3d/geom/Bounded.java | 7 +-
 .../apache/lucene/spatial3d/geom/Bounds.java | 185 +-
 .../lucene/spatial3d/geom/DistanceStyle.java | 110 +-
 .../apache/lucene/spatial3d/geom/GeoArea.java | 36 +-
 .../lucene/spatial3d/geom/GeoAreaFactory.java | 28 +-
 .../lucene/spatial3d/geom/GeoAreaShape.java | 16 +-
 .../apache/lucene/spatial3d/geom/GeoBBox.java | 6 +-
 .../lucene/spatial3d/geom/GeoBBoxFactory.java | 85 +-
 .../spatial3d/geom/GeoBaseAreaShape.java | 62 +-
 .../lucene/spatial3d/geom/GeoBaseBBox.java | 12 +-
 .../lucene/spatial3d/geom/GeoBaseCircle.java | 8 +-
 .../geom/GeoBaseCompositeAreaShape.java | 74 +-
 .../geom/GeoBaseCompositeMembershipShape.java | 21 +-
 .../spatial3d/geom/GeoBaseCompositeShape.java | 37 +-
 .../spatial3d/geom/GeoBaseDistanceShape.java | 38 +-
 .../geom/GeoBaseMembershipShape.java | 22 +-
 .../lucene/spatial3d/geom/GeoBasePath.java | 8 +-
 .../lucene/spatial3d/geom/GeoBasePolygon.java | 8 +-
 .../lucene/spatial3d/geom/GeoBaseShape.java | 15 +-
 .../lucene/spatial3d/geom/GeoCircle.java | 3 +-
 .../spatial3d/geom/GeoCircleFactory.java | 47 +-
 .../spatial3d/geom/GeoComplexPolygon.java | 1855 ++++++++----
 .../spatial3d/geom/GeoCompositeAreaShape.java | 14 +-
 .../geom/GeoCompositeMembershipShape.java | 17 +-
 .../spatial3d/geom/GeoCompositePolygon.java | 20 +-
 .../spatial3d/geom/GeoConcavePolygon.java | 315 +-
 .../spatial3d/geom/GeoConvexPolygon.java | 320 ++-
 .../geom/GeoDegenerateHorizontalLine.java | 118 +-
 .../geom/GeoDegenerateLatitudeZone.java | 46 +-
 .../geom/GeoDegenerateLongitudeSlice.java | 68 +-
 .../spatial3d/geom/GeoDegeneratePath.java | 790 +++--
 .../spatial3d/geom/GeoDegeneratePoint.java | 87 +-
 .../geom/GeoDegenerateVerticalLine.java | 125 +-
 .../lucene/spatial3d/geom/GeoDistance.java | 56 +-
 .../spatial3d/geom/GeoDistanceShape.java | 21 +-
 .../lucene/spatial3d/geom/GeoExactCircle.java | 314 +-
 .../spatial3d/geom/GeoLatitudeZone.java | 99 +-
 .../spatial3d/geom/GeoLongitudeSlice.java | 109 +-
 .../spatial3d/geom/GeoMembershipShape.java | 7 +-
 .../spatial3d/geom/GeoNorthLatitudeZone.java | 61 +-
 .../spatial3d/geom/GeoNorthRectangle.java | 196 +-
 .../spatial3d/geom/GeoOutsideDistance.java | 23 +-
 .../apache/lucene/spatial3d/geom/GeoPath.java | 37 +-
 .../lucene/spatial3d/geom/GeoPathFactory.java | 12 +-
 .../lucene/spatial3d/geom/GeoPoint.java | 228 +-
 .../lucene/spatial3d/geom/GeoPointShape.java | 7 +-
 .../spatial3d/geom/GeoPointShapeFactory.java | 11 +-
 .../lucene/spatial3d/geom/GeoPolygon.java | 4 +-
 .../spatial3d/geom/GeoPolygonFactory.java | 1435 +++++----
 .../lucene/spatial3d/geom/GeoRectangle.java | 227 +-
 .../lucene/spatial3d/geom/GeoS2Shape.java | 348 +--
 .../spatial3d/geom/GeoS2ShapeFactory.java | 20 +-
 .../lucene/spatial3d/geom/GeoShape.java | 39 +-
 .../lucene/spatial3d/geom/GeoSizeable.java | 4 +-
 .../spatial3d/geom/GeoSouthLatitudeZone.java | 51 +-
 .../spatial3d/geom/GeoSouthRectangle.java | 159 +-
 .../spatial3d/geom/GeoStandardCircle.java | 93 +-
 .../spatial3d/geom/GeoStandardPath.java | 1439 ++++++----
 .../geom/GeoWideDegenerateHorizontalLine.java | 142 +-
 .../spatial3d/geom/GeoWideLongitudeSlice.java | 132 +-
 .../spatial3d/geom/GeoWideNorthRectangle.java | 178 +-
 .../spatial3d/geom/GeoWideRectangle.java | 221 +-
 .../spatial3d/geom/GeoWideSouthRectangle.java | 156 +-
 .../lucene/spatial3d/geom/GeoWorld.java | 33 +-
 .../lucene/spatial3d/geom/LatLonBounds.java | 181 +-
 .../lucene/spatial3d/geom/LinearDistance.java | 53 +-
 .../spatial3d/geom/LinearSquaredDistance.java | 53 +-
 .../lucene/spatial3d/geom/Membership.java | 1 -
 .../lucene/spatial3d/geom/NormalDistance.java | 53 +-
 .../spatial3d/geom/NormalSquaredDistance.java | 53 +-
 .../apache/lucene/spatial3d/geom/Plane.java | 1699 +++++++----
 .../lucene/spatial3d/geom/PlanetModel.java | 410 ++-
 .../lucene/spatial3d/geom/PlanetObject.java | 1 -
 .../spatial3d/geom/SerializableObject.java | 279 +-
 .../lucene/spatial3d/geom/SidedPlane.java | 171 +-
 .../spatial3d/geom/StandardObjects.java | 92 +-
 .../spatial3d/geom/StandardXYZSolid.java | 460 ++-
 .../apache/lucene/spatial3d/geom/Tools.java | 14 +-
 .../apache/lucene/spatial3d/geom/Vector.java | 268 +-
 .../lucene/spatial3d/geom/XYZBounds.java | 219 +-
 .../lucene/spatial3d/geom/XYZSolid.java | 4 +-
 .../spatial3d/geom/XYZSolidFactory.java | 42 +-
 .../lucene/spatial3d/geom/XYdZSolid.java | 183 +-
 .../lucene/spatial3d/geom/XdYZSolid.java | 176 +-
 .../lucene/spatial3d/geom/XdYdZSolid.java | 86 +-
 .../lucene/spatial3d/geom/dXYZSolid.java | 191 +-
 .../lucene/spatial3d/geom/dXYdZSolid.java | 86 +-
 .../lucene/spatial3d/geom/dXdYZSolid.java | 86 +-
 .../lucene/spatial3d/geom/dXdYdZSolid.java | 90 +-
 .../lucene/spatial3d/geom/package-info.java | 2 +-
 .../apache/lucene/spatial3d/package-info.java | 3 +-
 .../lucene/spatial3d/TestGeo3DDocValues.java | 17 +-
 .../lucene/spatial3d/TestGeo3DPoint.java | 1420 +++++----
 .../lucene/spatial3d/geom/Geo3DUtil.java | 6 +-
 .../geom/RandomGeo3dShapeGenerator.java | 559 ++--
 .../TestCompositeGeoPolygonRelationships.java | 881 +++---
 .../lucene/spatial3d/geom/TestGeoBBox.java | 245 +-
 .../lucene/spatial3d/geom/TestGeoCircle.java | 310 +-
 .../spatial3d/geom/TestGeoConvexPolygon.java | 12 +-
 .../spatial3d/geom/TestGeoExactCircle.java | 181 +-
 .../lucene/spatial3d/geom/TestGeoModel.java | 21 +-
 .../lucene/spatial3d/geom/TestGeoPath.java | 260 +-
 .../lucene/spatial3d/geom/TestGeoPoint.java | 59 +-
 .../lucene/spatial3d/geom/TestGeoPolygon.java | 2553 +++++++++++------
 .../lucene/spatial3d/geom/TestPlane.java | 29 +-
 .../spatial3d/geom/TestRandomBinaryCodec.java | 13 +-
 .../spatial3d/geom/TestRandomGeoPolygon.java | 172 +-
 .../geom/TestRandomGeoShapeRelationship.java | 120 +-
 .../spatial3d/geom/TestRandomPlane.java | 58 +-
 .../TestSimpleGeoPolygonRelationships.java | 1127 ++++---
 .../lucene/spatial3d/geom/TestXYZSolid.java | 32 +-
 .../search/spell/CombineSuggestion.java | 23 +-
 .../lucene/search/spell/Dictionary.java | 9 +-
 .../search/spell/DirectSpellChecker.java | 352 ++-
 .../search/spell/HighFrequencyDictionary.java | 33 +-
 .../search/spell/JaroWinklerDistance.java | 39 +-
 .../search/spell/LevenshteinDistance.java | 157 +-
 .../lucene/search/spell/LuceneDictionary.java | 14 +-
 .../spell/LuceneLevenshteinDistance.java | 71 +-
 .../lucene/search/spell/NGramDistance.java | 134 +-
 .../search/spell/PlainTextDictionary.java | 17 +-
 .../lucene/search/spell/SpellChecker.java | 292 +-
 .../lucene/search/spell/StringDistance.java | 14 +-
 .../lucene/search/spell/SuggestMode.java | 15 +-
 .../lucene/search/spell/SuggestWord.java | 30 +-
 .../spell/SuggestWordFrequencyComparator.java | 12 +-
 .../lucene/search/spell/SuggestWordQueue.java | 16 +-
 .../spell/SuggestWordScoreComparator.java | 13 +-
 .../search/spell/WordBreakSpellChecker.java | 334 +--
 .../lucene/search/spell/package-info.java | 5 +-
 .../lucene/search/suggest/BitsProducer.java | 9 +-
 .../search/suggest/BufferedInputIterator.java | 8 +-
 .../search/suggest/DocumentDictionary.java | 99 +-
 .../DocumentValueSourceDictionary.java | 108 +-
 .../lucene/search/suggest/FileDictionary.java | 75 +-
 .../lucene/search/suggest/InMemorySorter.java | 9 +-
 .../lucene/search/suggest/InputIterator.java | 45 +-
 .../apache/lucene/search/suggest/Lookup.java | 156 +-
 .../search/suggest/SortedInputIterator.java | 147 +-
 .../search/suggest/UnsortedInputIterator.java | 19 +-
 .../analyzing/AnalyzingInfixSuggester.java | 548 ++--
 .../suggest/analyzing/AnalyzingSuggester.java | 481 ++--
 .../analyzing/BlendedInfixSuggester.java | 168 +-
 .../search/suggest/analyzing/FSTUtil.java | 64 +-
 .../suggest/analyzing/FreeTextSuggester.java | 459 +--
 .../suggest/analyzing/FuzzySuggester.java | 220 +-
 .../suggest/analyzing/SuggestStopFilter.java | 23 +-
 .../analyzing/SuggestStopFilterFactory.java | 58 +-
 .../suggest/analyzing/package-info.java | 6 +-
 .../document/Completion50PostingsFormat.java | 19 +-
 .../document/Completion84PostingsFormat.java | 14 +-
 .../suggest/document/CompletionAnalyzer.java | 92 +-
 .../document/CompletionFieldsConsumer.java | 73 +-
 .../document/CompletionFieldsProducer.java | 66 +-
 .../document/CompletionPostingsFormat.java | 130 +-
 .../suggest/document/CompletionQuery.java | 57 +-
 .../suggest/document/CompletionScorer.java | 40 +-
 .../suggest/document/CompletionTerms.java | 26 +-
 .../document/CompletionTokenStream.java | 40 +-
 .../suggest/document/CompletionWeight.java | 35 +-
 .../document/CompletionsTermsReader.java | 21 +-
 .../search/suggest/document/ContextQuery.java | 149 +-
 .../suggest/document/ContextSuggestField.java | 65 +-
 .../document/FuzzyCompletionQuery.java | 138 +-
 .../search/suggest/document/NRTSuggester.java | 312 +-
 .../suggest/document/NRTSuggesterBuilder.java | 56 +-
 .../document/PrefixCompletionQuery.java | 30 +-
 .../document/RegexCompletionQuery.java | 53 +-
 .../search/suggest/document/SuggestField.java | 58 +-
 .../document/SuggestIndexSearcher.java | 33 +-
 .../SuggestScoreDocPriorityQueue.java | 13 +-
 .../suggest/document/TopSuggestDocs.java | 53 +-
 .../document/TopSuggestDocsCollector.java | 87 +-
 .../search/suggest/document/package-info.java | 4 +-
 .../search/suggest/fst/BytesRefSorter.java | 26 +-
 .../search/suggest/fst/ExternalRefSorter.java | 54 +-
 .../search/suggest/fst/FSTCompletion.java | 178 +-
 .../suggest/fst/FSTCompletionBuilder.java | 240 +-
 .../suggest/fst/FSTCompletionLookup.java | 179 +-
 .../suggest/fst/WFSTCompletionLookup.java | 135 +-
 .../search/suggest/fst/package-info.java | 6 +-
 .../search/suggest/jaspell/JaspellLookup.java | 45 +-
 .../jaspell/JaspellTernarySearchTrie.java | 654 ++---
 .../search/suggest/jaspell/package-info.java | 8 +-
 .../lucene/search/suggest/package-info.java | 4 +-
 .../search/suggest/tst/TSTAutocomplete.java | 65 +-
 .../lucene/search/suggest/tst/TSTLookup.java | 120 +-
 .../search/suggest/tst/TernaryTreeNode.java | 25 +-
 .../search/suggest/tst/package-info.java | 6 +-
 .../search/spell/TestDirectSpellChecker.java | 149 +-
 .../search/spell/TestJaroWinklerDistance.java | 9 +-
 .../search/spell/TestLevenshteinDistance.java | 23 +-
 .../search/spell/TestLuceneDictionary.java | 56 +-
 .../search/spell/TestNGramDistance.java | 76 +-
 .../search/spell/TestPlainTextDictionary.java | 7 +-
 .../lucene/search/spell/TestSpellChecker.java | 279 +-
 .../spell/TestWordBreakSpellChecker.java | 313 +-
 .../apache/lucene/search/suggest/Average.java | 74 +-
 .../suggest/DocumentDictionaryTest.java | 148 +-
 .../DocumentValueSourceDictionaryTest.java | 136 +-
 .../search/suggest/FileDictionaryTest.java | 74 +-
 .../apache/lucene/search/suggest/Input.java | 28 +-
 .../search/suggest/InputArrayIterator.java | 10 +-
 .../search/suggest/LookupBenchmarkTest.java | 183 +-
 .../search/suggest/PersistenceTest.java | 48 +-
 .../search/suggest/TestInputIterator.java | 71 +-
 .../AnalyzingInfixSuggesterTest.java | 713 +++--
 .../analyzing/AnalyzingSuggesterTest.java | 595 ++--
 .../analyzing/BlendedInfixSuggesterTest.java | 298 +-
 .../suggest/analyzing/FuzzySuggesterTest.java | 610 ++--
 .../analyzing/TestFreeTextSuggester.java | 370 ++-
 .../analyzing/TestSuggestStopFilter.java | 137 +-
 .../TestSuggestStopFilterFactory.java | 62 +-
 .../suggest/document/TestContextQuery.java | 189 +-
 .../document/TestContextSuggestField.java | 127 +-
 .../document/TestFuzzyCompletionQuery.java | 41 +-
 .../document/TestPrefixCompletionQuery.java | 196 +-
 .../document/TestRegexCompletionQuery.java | 50 +-
 .../suggest/document/TestSuggestField.java | 298 +-
 .../suggest/fst/BytesRefSortersTest.java | 15 +-
 .../search/suggest/fst/FSTCompletionTest.java | 157 +-
 .../suggest/fst/WFSTCompletionTest.java | 109 +-
 .../BaseTokenStreamFactoryTestCase.java | 120 +-
 .../analysis/BaseTokenStreamTestCase.java | 828 ++++--
 .../analysis/CannedBinaryTokenStream.java | 16 +-
 .../lucene/analysis/CannedTokenStream.java | 14 +-
 .../lucene/analysis/CollationTestBase.java | 108 +-
 .../lucene/analysis/CrankyTokenFilter.java | 14 +-
 .../lucene/analysis/LookaheadTokenFilter.java | 117 +-
 .../apache/lucene/analysis/MockAnalyzer.java | 102 +-
 .../lucene/analysis/MockBytesAnalyzer.java | 14 +-
 .../lucene/analysis/MockCharFilter.java | 46 +-
 .../MockFixedLengthPayloadFilter.java | 5 +-
 .../lucene/analysis/MockGraphTokenFilter.java | 29 +-
 .../MockHoleInjectingTokenFilter.java | 10 +-
 .../lucene/analysis/MockLowerCaseFilter.java | 6 +-
 .../lucene/analysis/MockPayloadAnalyzer.java | 22 +-
 .../MockRandomLookaheadTokenFilter.java | 11 +-
 .../lucene/analysis/MockReaderWrapper.java | 9 +-
 .../lucene/analysis/MockSynonymAnalyzer.java | 1 -
 .../lucene/analysis/MockSynonymFilter.java | 5 +-
 .../lucene/analysis/MockTokenFilter.java | 74 +-
 .../apache/lucene/analysis/MockTokenizer.java | 166 +-
 .../analysis/MockUTF16TermAttributeImpl.java | 12 +-
 .../MockVariableLengthPayloadFilter.java | 5 +-
 .../lucene/analysis/SimplePayloadFilter.java | 1 -
 .../org/apache/lucene/analysis/Token.java | 112 +-
 .../lucene/analysis/TokenStreamToDot.java | 27 +-
 .../analysis/ValidatingTokenFilter.java | 72 +-
 .../lucene/analysis/VocabularyAssert.java | 34 +-
 .../lucene/codecs/MissingOrdRemapper.java | 21 +-
 .../codecs/asserting/AssertingCodec.java | 66 +-
 .../asserting/AssertingDocValuesFormat.java | 75 +-
 .../asserting/AssertingLiveDocsFormat.java | 15 +-
 .../asserting/AssertingNormsFormat.java | 27 +-
 .../asserting/AssertingPointsFormat.java | 26 +-
 .../asserting/AssertingPostingsFormat.java | 46 +-
 .../AssertingStoredFieldsFormat.java | 26 +-
 .../asserting/AssertingTermVectorsFormat.java | 32 +-
 .../lucene/codecs/asserting/package-info.java | 4 +-
 .../codecs/blockterms/LuceneFixedGap.java | 8 +-
 .../LuceneVarGapDocFreqInterval.java | 17 +-
 .../blockterms/LuceneVarGapFixedInterval.java | 13 +-
 .../TestBloomFilteredLucenePostings.java | 35 +-
 .../cheapbastard/CheapBastardCodec.java | 6 +-
 .../codecs/cheapbastard/package-info.java | 6 +-
 .../codecs/compressing/CompressingCodec.java | 141 +-
 .../DeflateWithPresetCompressingCodec.java | 16 +-
 .../compressing/FastCompressingCodec.java | 13 +-
 .../FastDecompressionCompressingCodec.java | 13 +-
 .../HighCompressionCompressingCodec.java | 13 +-
 .../LZ4WithPresetCompressingCodec.java | 16 +-
 .../dummy/DummyCompressingCodec.java | 100 +-
 .../compressing/dummy/package-info.java | 4 +-
 .../lucene/codecs/cranky/CrankyCodec.java | 8 +-
 .../codecs/cranky/CrankyCompoundFormat.java | 10 +-
 .../codecs/cranky/CrankyDocValuesFormat.java | 26 +-
 .../codecs/cranky/CrankyFieldInfosFormat.java | 15 +-
 .../codecs/cranky/CrankyLiveDocsFormat.java | 10 +-
 .../codecs/cranky/CrankyNormsFormat.java | 9 +-
 .../codecs/cranky/CrankyPointsFormat.java | 23 +-
 .../codecs/cranky/CrankyPostingsFormat.java | 17 +-
 .../cranky/CrankySegmentInfoFormat.java | 9 +-
 .../cranky/CrankyStoredFieldsFormat.java | 21 +-
 .../cranky/CrankyTermVectorsFormat.java | 34 +-
 .../lucene/codecs/cranky/package-info.java | 4 +-
 .../mockrandom/MockRandomPostingsFormat.java | 101 +-
 .../codecs/mockrandom/package-info.java | 4 +-
 .../codecs/ramonly/RAMOnlyPostingsFormat.java | 107 +-
 .../lucene/codecs/ramonly/package-info.java | 4 +-
 .../uniformsplit/Rot13CypherTestUtil.java | 11 +-
 .../UniformSplitRot13PostingsFormat.java | 25 +-
 .../STUniformSplitRot13PostingsFormat.java | 25 +-
 .../lucene/geo/BaseGeoPointTestCase.java | 869 ++++--
 .../lucene/geo/BaseXYPointTestCase.java | 653 +++--
 .../org/apache/lucene/geo/EarthDebugger.java | 124 +-
 .../org/apache/lucene/geo/GeoTestUtil.java | 288 +-
 .../org/apache/lucene/geo/ShapeTestUtil.java | 107 +-
 .../lucene/index/AlcoholicMergePolicy.java | 51 +-
 .../lucene/index/AllDeletedFilterReader.java | 6 +-
 .../index/AssertingDirectoryReader.java | 6 +-
 .../lucene/index/AssertingLeafReader.java | 224 +-
 .../index/BaseCompoundFormatTestCase.java | 413 +--
 ...aseCompressingDocValuesFormatTestCase.java | 4 +-
 .../index/BaseDocValuesFormatTestCase.java | 989 ++++---
 .../index/BaseFieldInfoFormatTestCase.java | 246 +-
 .../index/BaseIndexFileFormatTestCase.java | 537 ++--
 .../index/BaseLiveDocsFormatTestCase.java | 46 +-
 .../lucene/index/BaseMergePolicyTestCase.java | 373 +--
 .../lucene/index/BaseNormsFormatTestCase.java | 505 ++--
 .../index/BasePointsFormatTestCase.java | 577 ++--
 .../index/BasePostingsFormatTestCase.java | 668 +++--
 .../index/BaseSegmentInfoFormatTestCase.java | 543 ++--
 .../index/BaseStoredFieldsFormatTestCase.java | 240 +-
 .../index/BaseTermVectorsFormatTestCase.java | 406 +--
 .../lucene/index/BaseTestCheckIndex.java | 44 +-
 .../org/apache/lucene/index/DocHelper.java | 197 +-
 .../lucene/index/FieldFilterLeafReader.java | 78 +-
 .../apache/lucene/index/ForceMergePolicy.java | 10 +-
 .../index/IndexWriterMaxDocsChanger.java | 16 +-
 .../lucene/index/MergingCodecReader.java | 41 +-
 .../index/MergingDirectoryReaderWrapper.java | 22 +-
 .../index/MismatchedDirectoryReader.java | 9 +-
 .../lucene/index/MismatchedLeafReader.java | 59 +-
 .../lucene/index/MockRandomMergePolicy.java | 98 +-
 .../lucene/index/OwnCacheKeyMultiReader.java | 35 +-
 .../lucene/index/PerThreadPKLookup.java | 33 +-
 .../lucene/index/PointsStackTracker.java | 34 +-
 .../org/apache/lucene/index/RandomCodec.java | 210 +-
 .../lucene/index/RandomIndexWriter.java | 226 +-
 .../lucene/index/RandomPostingsTester.java | 697 +++--
 .../ThreadedIndexingAndSearchingTestCase.java | 682 +++--
 .../lucene/mockfile/DisableFsyncFS.java | 25 +-
 .../org/apache/lucene/mockfile/ExtrasFS.java | 43 +-
 .../FilterAsynchronousFileChannel.java | 40 +-
 .../mockfile/FilterDirectoryStream.java | 31 +-
 .../lucene/mockfile/FilterFileChannel.java | 25 +-
 .../lucene/mockfile/FilterFileStore.java | 26 +-
 .../lucene/mockfile/FilterFileSystem.java | 39 +-
 .../mockfile/FilterFileSystemProvider.java | 114 +-
 .../lucene/mockfile/FilterInputStream2.java | 37 +-
 .../lucene/mockfile/FilterOutputStream2.java | 39 +-
 .../apache/lucene/mockfile/FilterPath.java | 72 +-
 .../mockfile/FilterSeekableByteChannel.java | 25 +-
 .../apache/lucene/mockfile/HandleLimitFS.java | 11 +-
 .../lucene/mockfile/HandleTrackingFS.java | 411 +--
 .../org/apache/lucene/mockfile/LeakFS.java | 16 +-
 .../mockfile/MockFileSystemTestCase.java | 22 +-
 .../org/apache/lucene/mockfile/ShuffleFS.java | 23 +-
 .../org/apache/lucene/mockfile/VerboseFS.java | 54 +-
 .../lucene/mockfile/VirusCheckingFS.java | 25 +-
 .../org/apache/lucene/mockfile/WindowsFS.java | 53 +-
 .../apache/lucene/mockfile/package-info.java | 13 +-
 .../lucene/search/AssertingBulkScorer.java | 11 +-
 .../lucene/search/AssertingCollector.java | 14 +-
 .../lucene/search/AssertingIndexSearcher.java | 24 +-
 .../lucene/search/AssertingLeafCollector.java | 4 +-
 .../lucene/search/AssertingMatches.java | 5 +-
 .../search/AssertingMatchesIterator.java | 9 +-
 .../apache/lucene/search/AssertingQuery.java | 11 +-
 .../lucene/search/AssertingScorable.java | 20 +-
 .../apache/lucene/search/AssertingScorer.java | 116 +-
 .../apache/lucene/search/AssertingWeight.java | 14 +-
 .../search/BaseExplanationTestCase.java | 91 +-
 .../search/BaseRangeFieldQueryTestCase.java | 131 +-
 .../lucene/search/BlockScoreQueryWrapper.java | 9 +-
 .../search/BulkScorerWrapperScorer.java | 38 +-
 .../org/apache/lucene/search/CheckHits.java | 397 +--
 .../org/apache/lucene/search/QueryUtils.java | 632 ++--
 .../search/RandomApproximationQuery.java | 24 +-
 .../lucene/search/ScorerIndexSearcher.java | 31 +-
.../search/SearchEquivalenceTestBase.java | 106 +- .../lucene/search/ShardSearchingTestBase.java | 148 +- .../similarities/AssertingSimilarity.java | 15 +- .../similarities/BaseSimilarityTestCase.java | 171 +- .../search/similarities/RandomSimilarity.java | 37 +- .../search/spans/AssertingSpanQuery.java | 9 +- .../search/spans/AssertingSpanWeight.java | 10 +- .../lucene/search/spans/AssertingSpans.java | 129 +- .../lucene/search/spans/SpanTestUtil.java | 106 +- .../lucene/store/BaseDirectoryTestCase.java | 402 +-- .../lucene/store/BaseDirectoryWrapper.java | 18 +- .../lucene/store/BaseLockFactoryTestCase.java | 150 +- .../lucene/store/CorruptingIndexOutput.java | 21 +- .../lucene/store/MockDirectoryWrapper.java | 642 +++-- .../lucene/store/MockIndexInputWrapper.java | 30 +- .../lucene/store/MockIndexOutputWrapper.java | 43 +- .../lucene/store/RawDirectoryWrapper.java | 14 +- .../SlowClosingMockIndexInputWrapper.java | 14 +- .../SlowOpeningMockIndexInputWrapper.java | 13 +- .../lucene/util/AbstractBeforeAfterRule.java | 11 +- .../lucene/util/BaseBitSetTestCase.java | 16 +- .../lucene/util/BaseDocIdSetTestCase.java | 15 +- .../lucene/util/CloseableDirectory.java | 10 +- .../java/org/apache/lucene/util/English.java | 26 +- .../util/FailOnNonBulkMergesInfoStream.java | 5 +- .../org/apache/lucene/util/FailureMarker.java | 5 +- .../org/apache/lucene/util/LineFileDocs.java | 64 +- .../util/LuceneJUnit3MethodProvider.java | 21 +- .../apache/lucene/util/LuceneTestCase.java | 1936 +++++++------ .../apache/lucene/util/NullInfoStream.java | 10 +- .../lucene/util/QuickPatchThreadsFilter.java | 11 +- .../apache/lucene/util/RamUsageTester.java | 276 +- .../apache/lucene/util/RemoveUponClose.java | 13 +- .../java/org/apache/lucene/util/Rethrow.java | 16 +- .../util/RunListenerPrintReproduceInfo.java | 93 +- .../util/TestRuleAssertionsRequired.java | 6 +- .../apache/lucene/util/TestRuleDelegate.java | 5 +- .../util/TestRuleIgnoreAfterMaxFailures.java | 34 +- .../lucene/util/TestRuleIgnoreTestSuites.java | 37 +- .../lucene/util/TestRuleLimitSysouts.java | 171 +- .../lucene/util/TestRuleMarkFailure.java | 28 +- .../util/TestRuleRestoreSystemProperties.java | 26 +- .../util/TestRuleSetupAndRestoreClassEnv.java | 169 +- .../TestRuleSetupAndRestoreInstanceEnv.java | 4 +- .../util/TestRuleSetupTeardownChained.java | 23 +- .../lucene/util/TestRuleStoreClassName.java | 13 +- .../util/TestRuleTemporaryFilesCleanup.java | 119 +- .../util/TestRuleThreadAndTestName.java | 13 +- .../lucene/util/TestSecurityManager.java | 72 +- .../java/org/apache/lucene/util/TestUtil.java | 1123 +++++--- .../lucene/util/ThrottledIndexOutput.java | 45 +- .../org/apache/lucene/util/TimeUnits.java | 2 +- .../util/automaton/AutomatonTestUtil.java | 182 +- .../org/apache/lucene/util/fst/FSTTester.java | 297 +- .../analysis/TestLookaheadTokenFilter.java | 94 +- .../lucene/analysis/TestMockAnalyzer.java | 307 +- .../lucene/analysis/TestMockCharFilter.java | 50 +- .../analysis/TestMockSynonymFilter.java | 197 +- .../apache/lucene/analysis/TestPosition.java | 5 +- .../analysis/TrivialLookaheadFilter.java | 20 +- .../TestAssertingDocValuesFormat.java | 2 +- .../asserting/TestAssertingNormsFormat.java | 4 +- .../TestAssertingPostingsFormat.java | 2 +- .../TestAssertingStoredFieldsFormat.java | 4 +- .../TestAssertingTermVectorsFormat.java | 2 +- .../TestCompressingStoredFieldsFormat.java | 75 +- .../TestCompressingTermVectorsFormat.java | 20 +- .../lucene/index/TestAssertingLeafReader.java | 20 +- .../lucene/index/TestForceMergePolicy.java | 3 +- 
.../lucene/mockfile/TestDisableFsyncFS.java | 16 +- .../apache/lucene/mockfile/TestExtrasFS.java | 18 +- .../lucene/mockfile/TestHandleLimitFS.java | 21 +- .../lucene/mockfile/TestHandleTrackingFS.java | 52 +- .../apache/lucene/mockfile/TestLeakFS.java | 31 +- .../apache/lucene/mockfile/TestShuffleFS.java | 55 +- .../apache/lucene/mockfile/TestVerboseFS.java | 72 +- .../lucene/mockfile/TestVirusCheckingFS.java | 11 +- .../apache/lucene/mockfile/TestWindowsFS.java | 88 +- .../search/TestBaseExplanationTestCase.java | 66 +- .../store/TestMockDirectoryWrapper.java | 35 +- .../org/apache/lucene/util/SorePoint.java | 7 +- .../lucene/util/TestBeforeAfterOverrides.java | 18 +- .../apache/lucene/util/TestCodecReported.java | 5 +- .../util/TestExceptionInBeforeClassHooks.java | 83 +- .../apache/lucene/util/TestExpectThrows.java | 128 +- .../util/TestFailIfDirectoryNotClosed.java | 10 +- .../util/TestFailIfUnreferencedFiles.java | 15 +- .../lucene/util/TestGroupFiltering.java | 12 +- .../lucene/util/TestJUnitRuleOrder.java | 40 +- .../org/apache/lucene/util/TestJvmInfo.java | 28 +- .../lucene/util/TestMaxFailuresRule.java | 137 +- .../apache/lucene/util/TestPleaseFail.java | 5 +- .../util/TestRamUsageTesterOnWildAnimals.java | 8 +- .../lucene/util/TestReproduceMessage.java | 122 +- .../TestReproduceMessageWithRepeated.java | 10 +- .../TestRunWithRestrictedPermissions.java | 17 +- .../lucene/util/TestSeedFromUncaught.java | 22 +- .../util/TestSetupTeardownChaining.java | 33 +- .../apache/lucene/util/TestSysoutsLimits.java | 39 +- .../util/TestWorstCaseTestBehavior.java | 44 +- .../apache/lucene/util/WithNestedTests.java | 146 +- 1285 files changed, 62211 insertions(+), 49131 deletions(-) diff --git a/gradle/generation/javacc.gradle b/gradle/generation/javacc.gradle index 339eada7857..4a3dcedf9a2 100644 --- a/gradle/generation/javacc.gradle +++ b/gradle/generation/javacc.gradle @@ -227,6 +227,22 @@ configure(project(":lucene:queryparser")) { } } + task regenerate() { + description "Regenerate any generated sources" + group "generation" + + // Run regeneration tasks. + dependsOn javaccParserClassic, javaccParserSurround, javaccParserFlexible + + // Clean up and reformat the generated sources after generation. + dependsOn "tidy" + } + + // Make sure tidy runs after generation, if they're defined. + tasks.matching { it.name == "tidy" }.configureEach { + mustRunAfter javaccParserClassic, javaccParserSurround, javaccParserFlexible + } + task javacc() { description "Regenerate query parsers (javacc syntax definitions)." group "generation" @@ -244,7 +260,6 @@ configure(project(":solr:core")) { javaccFile = file('src/java/org/apache/solr/parser/QueryParser.jj') - afterGenerate << commonCleanups afterGenerate << { FileTree generatedFiles -> generatedFiles.matching { include "QueryParser.java" }.each { file -> diff --git a/gradle/validation/spotless.gradle b/gradle/validation/spotless.gradle index 3dc3deb4045..148ec5bf8d2 100644 --- a/gradle/validation/spotless.gradle +++ b/gradle/validation/spotless.gradle @@ -16,116 +16,89 @@ */ /* - * LUCENE-9564: This adds automatic (and enforced) code formatting. + * LUCENE-9564: This adds automatic (and enforced) code formatting using + * spotless and Google Java Format. 
*/ def resources = scriptResources(buildscript) -allprojects { prj -> +configure(project(":lucene").subprojects) { prj -> plugins.withType(JavaPlugin) { prj.apply plugin: 'com.diffplug.spotless' spotless { java { - // TODO: work out how to have multiple different header files (we have - // classes in the codebase that have original headers). + // TODO: Work out how to support multiple different header files (we have + // classes in the codebase that have original headers). We currently use + // Apache RAT to enforce headers so this is of lesser priority. + // // licenseHeaderFile file("${resources}/asl-header.txt"), '^(\\s*package)' lineEndings 'UNIX' endWithNewline() googleJavaFormat('1.9') + // Apply to all Java sources + target "src/**/*.java" + + // Exclude certain files (generated ones, mostly). switch (project.path) { - // These modules are complete - all sources scanned. case ":lucene:core": - target "src/java/**/*.java", - "src/test/**/*.java" - targetExclude "**/resources/**", "**/StandardTokenizerImpl.java" - break - - case ":lucene:highlighter": - target "src/java/**/*.java", - "src/test/**/*.java" - targetExclude "**/resources/**" - break - - case ":lucene:queries": - target "src/java/**/*.java", - "src/test/**/*.java" - targetExclude "**/resources/**" + targetExclude "**/StandardTokenizerImpl.java" break case ":lucene:analysis:common": - target "src/**/*.java" - targetExclude "**/resources/**", - "**/HTMLStripCharFilter.java", + targetExclude "**/HTMLStripCharFilter.java", "**/UAX29URLEmailTokenizerImpl.java", "**/tartarus/**" break - case ":lucene:demo": - case ":lucene:analysis:morfologik": - case ":lucene:analysis:icu": - case ":lucene:analysis:kuromoji": - case ":lucene:memory": - case ":lucene:benchmark": - case ":lucene:analysis:nori": - case ":lucene:analysis:opennlp": - case ":lucene:analysis:phonetic": - case ":lucene:analysis:smartcn": - case ":lucene:analysis:stempel": - case ":lucene:classification": - case ":lucene:backward-codecs": - case ":lucene:codecs": - case ":lucene:join": - target "src/**/*.java" - targetExclude "**/resources/**" + case ":lucene:test-framework": + targetExclude "**/EmojiTokenizationTestUnicode_11_0.java", + "**/WordBreakTestUnicode_9_0_0.java" break case ":lucene:expressions": - target "src/**/*.java" - targetExclude "**/resources/**", "**/JavascriptLexer.java", "**/JavascriptParser.java", + targetExclude "**/JavascriptLexer.java", + "**/JavascriptParser.java", "**/JavascriptVisitor.java" break - // Partially complete. - - case ":lucene:facet": - target "src/**/*.java" - targetExclude "**/taxonomy.8.6.3-cfs.zip" - break - - // All others - disable reformatting/ checks for now. 
- case ":lucene:grouping": - case ":lucene:luke": - case ":lucene:misc": - case ":lucene:monitor": case ":lucene:queryparser": - case ":lucene:replicator": - case ":lucene:sandbox": - case ":lucene:spatial3d": - case ":lucene:spatial-extras": - case ":lucene:suggest": - case ":lucene:test-framework": - - default: - target 'non-existing/**' + targetExclude "**/classic/ParseException.java", + "**/classic/QueryParser.java", + "**/classic/QueryParserConstants.java", + "**/classic/QueryParserTokenManager.java", + "**/classic/Token.java", + "**/classic/TokenMgrError.java", + "**/standard/parser/ParseException.java", + "**/standard/parser/StandardSyntaxParser.java", + "**/standard/parser/StandardSyntaxParserConstants.java", + "**/standard/parser/StandardSyntaxParserTokenManager.java", + "**/standard/parser/Token.java", + "**/standard/parser/TokenMgrError.java", + "**/surround/parser/ParseException.java", + "**/surround/parser/QueryParser.java", + "**/surround/parser/QueryParserConstants.java", + "**/surround/parser/QueryParserTokenManager.java", + "**/surround/parser/Token.java", + "**/surround/parser/TokenMgrError.java" break } } } + // Workaround for an odd problem in spotless where it fails because + // of a missing folder. spotlessJava { doFirst { project.mkdir("${buildDir}/spotless/spotlessJava") } } } -} -// Add an alias to 'spotlessApply' simply called 'tidy' and add -// spotlessCheck to check. -allprojects { prj -> + // Add an alias to 'spotlessApply' simply called 'tidy' and wire up + // spotlessCheck to convention's check. task tidy() { description "Applies formatters and cleanups to sources." group "verification" diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt index 742aed2605a..bb5bdb3b9be 100644 --- a/lucene/CHANGES.txt +++ b/lucene/CHANGES.txt @@ -191,6 +191,10 @@ Bug fixes Other +* LUCENE-9570, LUCENE-9564: Apply google java format and enforce it on source Java files. + Review diffs and correct automatic formatting oddities. (Erick Erickson, + Bruno Roustant, Dawid Weiss) + * LUCENE-9631: Properly override slice() on subclasses of OffsetRange. (Dawid Weiss) * LUCENE-9312: Allow gradle builds against arbitrary JVMs. 
(Tomoko Uchida, Dawid Weiss) diff --git a/lucene/core/src/java/org/apache/lucene/search/HitQueue.java b/lucene/core/src/java/org/apache/lucene/search/HitQueue.java index 9998452b604..dc2a7e5bff5 100644 --- a/lucene/core/src/java/org/apache/lucene/search/HitQueue.java +++ b/lucene/core/src/java/org/apache/lucene/search/HitQueue.java @@ -76,7 +76,10 @@ public final class HitQueue extends PriorityQueue { @Override protected final boolean lessThan(ScoreDoc hitA, ScoreDoc hitB) { - if (hitA.score == hitB.score) return hitA.doc > hitB.doc; - else return hitA.score < hitB.score; + if (hitA.score == hitB.score) { + return hitA.doc > hitB.doc; + } else { + return hitA.score < hitB.score; + } } } diff --git a/lucene/core/src/java/org/apache/lucene/store/NIOFSDirectory.java b/lucene/core/src/java/org/apache/lucene/store/NIOFSDirectory.java index a9835a0c730..39a67ffe1c3 100644 --- a/lucene/core/src/java/org/apache/lucene/store/NIOFSDirectory.java +++ b/lucene/core/src/java/org/apache/lucene/store/NIOFSDirectory.java @@ -177,9 +177,8 @@ public class NIOFSDirectory extends FSDirectory { b.limit(b.position() + toRead); assert b.remaining() == toRead; final int i = channel.read(b, pos); - if (i - < 0) { // be defensive here, even though we checked before hand, something could have - // changed + if (i < 0) { + // be defensive here, even though we checked before hand, something could have changed throw new EOFException( "read past EOF: " + this @@ -191,7 +190,8 @@ public class NIOFSDirectory extends FSDirectory { + end); } assert i > 0 - : "FileChannel.read with non zero-length bb.remaining() must always read at least one byte (FileChannel is in blocking mode, see spec of ReadableByteChannel)"; + : "FileChannel.read with non zero-length bb.remaining() must always read at least " + + "one byte (FileChannel is in blocking mode, see spec of ReadableByteChannel)"; pos += i; readLength -= i; } diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/AllGroupHeadsCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/AllGroupHeadsCollector.java index f6803858325..56a2d3d41cd 100644 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/AllGroupHeadsCollector.java +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/AllGroupHeadsCollector.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.Collection; import java.util.HashMap; import java.util.Map; - import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.FieldComparator; @@ -33,14 +32,14 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.util.FixedBitSet; /** - * This collector specializes in collecting the most relevant document (group head) for each - * group that matches the query. + * This collector specializes in collecting the most relevant document (group head) for each group + * that matches the query. * - * Clients should create new collectors by calling {@link #newCollector(GroupSelector, Sort)} + *
<p>
Clients should create new collectors by calling {@link #newCollector(GroupSelector, Sort)} * * @lucene.experimental */ -@SuppressWarnings({"unchecked","rawtypes"}) +@SuppressWarnings({"unchecked", "rawtypes"}) public abstract class AllGroupHeadsCollector extends SimpleCollector { private final GroupSelector groupSelector; @@ -56,13 +55,15 @@ public abstract class AllGroupHeadsCollector extends SimpleCollector { /** * Create a new AllGroupHeadsCollector based on the type of within-group Sort required + * * @param selector a GroupSelector to define the groups - * @param sort the within-group sort to use to choose the group head document - * @param the group value type + * @param sort the within-group sort to use to choose the group head document + * @param the group value type */ public static AllGroupHeadsCollector newCollector(GroupSelector selector, Sort sort) { - if (sort.equals(Sort.RELEVANCE)) + if (sort.equals(Sort.RELEVANCE)) { return new ScoringGroupHeadsCollector<>(selector, sort); + } return new SortingGroupHeadsCollector<>(selector, sort); } @@ -93,7 +94,8 @@ public abstract class AllGroupHeadsCollector extends SimpleCollector { } /** - * @return an int array containing all group heads. The size of the array is equal to number of collected unique groups. + * @return an int array containing all group heads. The size of the array is equal to number of + * collected unique groups. */ public int[] retrieveGroupHeads() { Collection> groupHeads = getCollectedGroupHeads(); @@ -107,16 +109,13 @@ public abstract class AllGroupHeadsCollector extends SimpleCollector { return docHeads; } - /** - * @return the number of group heads found for a query. - */ + /** @return the number of group heads found for a query. */ public int groupHeadsSize() { return getCollectedGroupHeads().size(); } /** - * Returns the collected group heads. - * Subsequent calls should return the same group heads. + * Returns the collected group heads. Subsequent calls should return the same group heads. * * @return the collected group heads */ @@ -179,49 +178,46 @@ public abstract class AllGroupHeadsCollector extends SimpleCollector { /** * Create a new GroupHead for the given group value, initialized with a doc, context and scorer */ - protected abstract GroupHead newGroupHead(int doc, T value, LeafReaderContext context, Scorable scorer) throws IOException; + protected abstract GroupHead newGroupHead( + int doc, T value, LeafReaderContext context, Scorable scorer) throws IOException; /** - * Represents a group head. A group head is the most relevant document for a particular group. - * The relevancy is based is usually based on the sort. + * Represents a group head. A group head is the most relevant document for a particular group. The + * relevancy is based is usually based on the sort. * - * The group head contains a group value with its associated most relevant document id. + *
<p>
The group head contains a group value with its associated most relevant document id. */ - public static abstract class GroupHead { + public abstract static class GroupHead { public final T groupValue; public int doc; protected int docBase; - /** - * Create a new GroupHead for the given value - */ + /** Create a new GroupHead for the given value */ protected GroupHead(T groupValue, int doc, int docBase) { this.groupValue = groupValue; this.doc = doc + docBase; this.docBase = docBase; } - /** - * Called for each segment - */ + /** Called for each segment */ protected void setNextReader(LeafReaderContext ctx) throws IOException { this.docBase = ctx.docBase; } - /** - * Called for each segment - */ + /** Called for each segment */ protected abstract void setScorer(Scorable scorer) throws IOException; /** - * Compares the specified document for a specified comparator against the current most relevant document. + * Compares the specified document for a specified comparator against the current most relevant + * document. * * @param compIDX The comparator index of the specified comparator. * @param doc The specified document. - * @return -1 if the specified document wasn't competitive against the current most relevant document, 1 if the - * specified document was competitive against the current most relevant document. Otherwise 0. + * @return -1 if the specified document wasn't competitive against the current most relevant + * document, 1 if the specified document was competitive against the current most relevant + * document. Otherwise 0. * @throws IOException If I/O related errors occur */ protected abstract int compare(int compIDX, int doc) throws IOException; @@ -233,12 +229,9 @@ public abstract class AllGroupHeadsCollector extends SimpleCollector { * @throws IOException If I/O related errors occur */ protected abstract void updateDocHead(int doc) throws IOException; - } - /** - * General implementation using a {@link FieldComparator} to select the group head - */ + /** General implementation using a {@link FieldComparator} to select the group head */ private static class SortingGroupHeadsCollector extends AllGroupHeadsCollector { protected SortingGroupHeadsCollector(GroupSelector selector, Sort sort) { @@ -246,7 +239,8 @@ public abstract class AllGroupHeadsCollector extends SimpleCollector { } @Override - protected GroupHead newGroupHead(int doc, T value, LeafReaderContext ctx, Scorable scorer) throws IOException { + protected GroupHead newGroupHead(int doc, T value, LeafReaderContext ctx, Scorable scorer) + throws IOException { return new SortingGroupHead<>(sort, value, doc, ctx, scorer); } } @@ -256,7 +250,9 @@ public abstract class AllGroupHeadsCollector extends SimpleCollector { final FieldComparator[] comparators; final LeafFieldComparator[] leafComparators; - protected SortingGroupHead(Sort sort, T groupValue, int doc, LeafReaderContext context, Scorable scorer) throws IOException { + protected SortingGroupHead( + Sort sort, T groupValue, int doc, LeafReaderContext context, Scorable scorer) + throws IOException { super(groupValue, doc, context.docBase); final SortField[] sortFields = sort.getSort(); comparators = new FieldComparator[sortFields.length]; @@ -300,9 +296,7 @@ public abstract class AllGroupHeadsCollector extends SimpleCollector { } } - /** - * Specialized implementation for sorting by score - */ + /** Specialized implementation for sorting by score */ private static class ScoringGroupHeadsCollector extends AllGroupHeadsCollector { protected 
ScoringGroupHeadsCollector(GroupSelector selector, Sort sort) { @@ -310,7 +304,8 @@ public abstract class AllGroupHeadsCollector extends SimpleCollector { } @Override - protected GroupHead newGroupHead(int doc, T value, LeafReaderContext context, Scorable scorer) throws IOException { + protected GroupHead newGroupHead( + int doc, T value, LeafReaderContext context, Scorable scorer) throws IOException { return new ScoringGroupHead<>(scorer, value, doc, context.docBase); } } @@ -320,7 +315,8 @@ public abstract class AllGroupHeadsCollector extends SimpleCollector { private Scorable scorer; private float topScore; - protected ScoringGroupHead(Scorable scorer, T groupValue, int doc, int docBase) throws IOException { + protected ScoringGroupHead(Scorable scorer, T groupValue, int doc, int docBase) + throws IOException { super(groupValue, doc, docBase); assert scorer.docID() == doc; this.scorer = scorer; @@ -338,8 +334,9 @@ public abstract class AllGroupHeadsCollector extends SimpleCollector { assert compIDX == 0; float score = scorer.score(); int c = Float.compare(score, topScore); - if (c > 0) + if (c > 0) { topScore = score; + } return c; } @@ -348,5 +345,4 @@ public abstract class AllGroupHeadsCollector extends SimpleCollector { this.doc = doc + docBase; } } - } diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/AllGroupsCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/AllGroupsCollector.java index 30ea49032e1..41d6a99a35f 100644 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/AllGroupsCollector.java +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/AllGroupsCollector.java @@ -20,17 +20,14 @@ import java.io.IOException; import java.util.Collection; import java.util.HashSet; import java.util.Set; - import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Scorable; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.SimpleCollector; /** - * A collector that collects all groups that match the - * query. Only the group value is collected, and the order - * is undefined. This collector does not determine - * the most relevant document of a group. + * A collector that collects all groups that match the query. Only the group value is collected, and + * the order is undefined. This collector does not determine the most relevant document of a group. * * @lucene.experimental */ @@ -42,6 +39,7 @@ public class AllGroupsCollector extends SimpleCollector { /** * Create a new AllGroupsCollector + * * @param groupSelector the GroupSelector to determine groups */ public AllGroupsCollector(GroupSelector groupSelector) { @@ -49,8 +47,10 @@ public class AllGroupsCollector extends SimpleCollector { } /** - * Returns the total number of groups for the executed search. - * This is a convenience method. The following code snippet has the same effect:
<pre>getGroups().size()</pre>
+ * Returns the total number of groups for the executed search. This is a convenience method. The + * following code snippet has the same effect: + * + *
<pre>getGroups().size()</pre>
* * @return The total number of groups for the executed search */ @@ -60,8 +60,8 @@ public class AllGroupsCollector extends SimpleCollector { /** * Returns the group values - *
<p>
- * This is an unordered collections of group values. + * + *
<p>
This is an unordered collections of group values. * * @return the group values */ @@ -80,8 +80,9 @@ public class AllGroupsCollector extends SimpleCollector { @Override public void collect(int doc) throws IOException { groupSelector.advanceTo(doc); - if (groups.contains(groupSelector.currentValue())) + if (groups.contains(groupSelector.currentValue())) { return; + } groups.add(groupSelector.copyValue()); } @@ -89,4 +90,4 @@ public class AllGroupsCollector extends SimpleCollector { public ScoreMode scoreMode() { return ScoreMode.COMPLETE_NO_SCORES; // the result is unaffected by relevancy } -} \ No newline at end of file +} diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java index bbeb2ee204b..226aca061d8 100644 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java @@ -17,7 +17,6 @@ package org.apache.lucene.search.grouping; import java.io.IOException; - import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DocIdSetIterator; @@ -40,29 +39,22 @@ import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.PriorityQueue; // TODO: this sentence is too long for the class summary. -/** BlockGroupingCollector performs grouping with a - * single pass collector, as long as you are grouping by a - * doc block field, ie all documents sharing a given group - * value were indexed as a doc block using the atomic - * {@link IndexWriter#addDocuments IndexWriter.addDocuments()} - * or {@link IndexWriter#updateDocuments IndexWriter.updateDocuments()} - * API. +/** + * BlockGroupingCollector performs grouping with a single pass collector, as long as you are + * grouping by a doc block field, ie all documents sharing a given group value were indexed as a doc + * block using the atomic {@link IndexWriter#addDocuments IndexWriter.addDocuments()} or {@link + * IndexWriter#updateDocuments IndexWriter.updateDocuments()} API. * - *
<p>
This results in faster performance (~25% faster QPS) - * than the two-pass grouping collectors, with the tradeoff - * being that the documents in each group must always be - * indexed as a block. This collector also fills in - * TopGroups.totalGroupCount without requiring the separate - * {@link org.apache.lucene.search.grouping.AllGroupsCollector}. However, this collector does - * not fill in the groupValue of each group; this field - * will always be null. + *
<p>
This results in faster performance (~25% faster QPS) than the two-pass grouping collectors, + * with the tradeoff being that the documents in each group must always be indexed as a block. This + * collector also fills in TopGroups.totalGroupCount without requiring the separate {@link + * org.apache.lucene.search.grouping.AllGroupsCollector}. However, this collector does not fill in + * the groupValue of each group; this field will always be null. * - *
<p>
NOTE: this collector makes no effort to verify - * the docs were in fact indexed as a block, so it's up to - * you to ensure this was the case. + *
<p>
NOTE: this collector makes no effort to verify the docs were in fact indexed as a + * block, so it's up to you to ensure this was the case. * - *
<p>
See {@link org.apache.lucene.search.grouping} for more - * details including a full code example.
<p>
+ *
<p>
See {@link org.apache.lucene.search.grouping} for more details including a full code example. * * @lucene.experimental */ @@ -104,14 +96,14 @@ public class BlockGroupingCollector extends SimpleCollector { private static final class OneGroup { LeafReaderContext readerContext; - //int groupOrd; + // int groupOrd; int topGroupDoc; int[] docs; float[] scores; int count; int comparatorSlot; } - + // Sorts by groupSort. Not static -- uses comparators, reversed private final class GroupQueue extends PriorityQueue { @@ -122,13 +114,15 @@ public class BlockGroupingCollector extends SimpleCollector { @Override protected boolean lessThan(final OneGroup group1, final OneGroup group2) { - //System.out.println(" ltcheck"); + // System.out.println(" ltcheck"); assert group1 != group2; assert group1.comparatorSlot != group2.comparatorSlot; final int numComparators = comparators.length; - for (int compIDX=0;compIDX < numComparators; compIDX++) { - final int c = reversed[compIDX] * comparators[compIDX].compare(group1.comparatorSlot, group2.comparatorSlot); + for (int compIDX = 0; compIDX < numComparators; compIDX++) { + final int c = + reversed[compIDX] + * comparators[compIDX].compare(group1.comparatorSlot, group2.comparatorSlot); if (c != 0) { // Short circuit return c > 0; @@ -144,7 +138,8 @@ public class BlockGroupingCollector extends SimpleCollector { // group is competitive we insert into the group queue private void processGroup() throws IOException { totalGroupCount++; - //System.out.println(" processGroup ord=" + lastGroupOrd + " competes=" + groupCompetes + " count=" + subDocUpto + " groupDoc=" + topGroupDoc); + // System.out.println(" processGroup ord=" + lastGroupOrd + " competes=" + groupCompetes + " + // count=" + subDocUpto + " groupDoc=" + topGroupDoc); if (groupCompetes) { if (!queueFull) { // Startup transient: always add a new OneGroup @@ -158,20 +153,21 @@ public class BlockGroupingCollector extends SimpleCollector { pendingSubScores = new float[10]; } og.readerContext = currentReaderContext; - //og.groupOrd = lastGroupOrd; + // og.groupOrd = lastGroupOrd; og.comparatorSlot = bottomSlot; final OneGroup bottomGroup = groupQueue.add(og); - //System.out.println(" ADD group=" + getGroupString(lastGroupOrd) + " newBottom=" + getGroupString(bottomGroup.groupOrd)); + // System.out.println(" ADD group=" + getGroupString(lastGroupOrd) + " newBottom=" + + // getGroupString(bottomGroup.groupOrd)); queueFull = groupQueue.size() == topNGroups; if (queueFull) { // Queue just became full; now set the real bottom // in the comparators: bottomSlot = bottomGroup.comparatorSlot; - //System.out.println(" set bottom=" + bottomSlot); + // System.out.println(" set bottom=" + bottomSlot); for (int i = 0; i < comparators.length; i++) { leafComparators[i].setBottom(bottomSlot); } - //System.out.println(" QUEUE FULL"); + // System.out.println(" QUEUE FULL"); } else { // Queue not full yet -- just advance bottomSlot: bottomSlot = groupQueue.size(); @@ -193,10 +189,10 @@ public class BlockGroupingCollector extends SimpleCollector { pendingSubScores = savScores; } og.readerContext = currentReaderContext; - //og.groupOrd = lastGroupOrd; + // og.groupOrd = lastGroupOrd; bottomSlot = groupQueue.updateTop().comparatorSlot; - //System.out.println(" set bottom=" + bottomSlot); + // System.out.println(" set bottom=" + bottomSlot); for (int i = 0; i < comparators.length; i++) { leafComparators[i].setBottom(bottomSlot); } @@ -208,22 +204,17 @@ public class BlockGroupingCollector extends SimpleCollector { /** * Create the 
single pass collector. * - * @param groupSort The {@link Sort} used to sort the - * groups. The top sorted document within each group - * according to groupSort, determines how that group - * sorts against other groups. This must be non-null, - * ie, if you want to groupSort by relevance use - * Sort.RELEVANCE. - * @param topNGroups How many top groups to keep. - * @param needsScores true if the collected documents - * require scores, either because relevance is included - * in the withinGroupSort or because you plan to pass true - * for either getSscores or getMaxScores to {@link - * #getTopGroups} - * @param lastDocPerGroup a {@link Weight} that marks the - * last document in each group. + * @param groupSort The {@link Sort} used to sort the groups. The top sorted document within each + * group according to groupSort, determines how that group sorts against other groups. This + * must be non-null, ie, if you want to groupSort by relevance use Sort.RELEVANCE. + * @param topNGroups How many top groups to keep. + * @param needsScores true if the collected documents require scores, either because relevance is + * included in the withinGroupSort or because you plan to pass true for either getSscores or + * getMaxScores to {@link #getTopGroups} + * @param lastDocPerGroup a {@link Weight} that marks the last document in each group. */ - public BlockGroupingCollector(Sort groupSort, int topNGroups, boolean needsScores, Weight lastDocPerGroup) { + public BlockGroupingCollector( + Sort groupSort, int topNGroups, boolean needsScores, Weight lastDocPerGroup) { if (topNGroups < 1) { throw new IllegalArgumentException("topNGroups must be >= 1 (got " + topNGroups + ")"); @@ -239,7 +230,7 @@ public class BlockGroupingCollector extends SimpleCollector { this.lastDocPerGroup = lastDocPerGroup; this.groupSort = groupSort; - + this.topNGroups = topNGroups; final SortField[] sortFields = groupSort.getSort(); @@ -259,29 +250,26 @@ public class BlockGroupingCollector extends SimpleCollector { // typically they will be presented as a "single" result // in the UI? - /** Returns the grouped results. Returns null if the - * number of groups collected is <= groupOffset. + /** + * Returns the grouped results. Returns null if the number of groups collected is <= + * groupOffset. * - *
<p>
NOTE: This collector is unable to compute - * the groupValue per group so it will always be null. - * This is normally not a problem, as you can obtain the - * value just like you obtain other values for each - * matching document (eg, via stored fields, via - * DocValues, etc.) + *
<p>
NOTE: This collector is unable to compute the groupValue per group so it will always + * be null. This is normally not a problem, as you can obtain the value just like you obtain other + * values for each matching document (eg, via stored fields, via DocValues, etc.) * - * @param withinGroupSort The {@link Sort} used to sort - * documents within each group. - * @param groupOffset Which group to start from - * @param withinGroupOffset Which document to start from - * within each group - * @param maxDocsPerGroup How many top documents to keep - * within each group. + * @param withinGroupSort The {@link Sort} used to sort documents within each group. + * @param groupOffset Which group to start from + * @param withinGroupOffset Which document to start from within each group + * @param maxDocsPerGroup How many top documents to keep within each group. */ - public TopGroups getTopGroups(Sort withinGroupSort, int groupOffset, int withinGroupOffset, int maxDocsPerGroup) throws IOException { + public TopGroups getTopGroups( + Sort withinGroupSort, int groupOffset, int withinGroupOffset, int maxDocsPerGroup) + throws IOException { - //if (queueFull) { - //System.out.println("getTopGroups groupOffset=" + groupOffset + " topNGroups=" + topNGroups); - //} + // if (queueFull) { + // System.out.println("getTopGroups groupOffset=" + groupOffset + " topNGroups=" + topNGroups); + // } if (subDocUpto != 0) { processGroup(); } @@ -294,9 +282,9 @@ public class BlockGroupingCollector extends SimpleCollector { float maxScore = Float.MIN_VALUE; - @SuppressWarnings({"unchecked","rawtypes"}) + @SuppressWarnings({"unchecked", "rawtypes"}) final GroupDocs[] groups = new GroupDocs[groupQueue.size() - groupOffset]; - for(int downTo=groupQueue.size()-groupOffset-1;downTo>=0;downTo--) { + for (int downTo = groupQueue.size() - groupOffset - 1; downTo >= 0; downTo--) { final OneGroup og = groupQueue.pop(); // At this point we hold all docs w/ in each group, @@ -305,18 +293,21 @@ public class BlockGroupingCollector extends SimpleCollector { if (withinGroupSort.equals(Sort.RELEVANCE)) { // Sort by score if (!needsScores) { - throw new IllegalArgumentException("cannot sort by relevance within group: needsScores=false"); + throw new IllegalArgumentException( + "cannot sort by relevance within group: needsScores=false"); } collector = TopScoreDocCollector.create(maxDocsPerGroup, Integer.MAX_VALUE); } else { // Sort by fields - collector = TopFieldCollector.create(withinGroupSort, maxDocsPerGroup, Integer.MAX_VALUE); // TODO: disable exact counts? + collector = + TopFieldCollector.create( + withinGroupSort, maxDocsPerGroup, Integer.MAX_VALUE); // TODO: disable exact counts? } float groupMaxScore = needsScores ? 
Float.NEGATIVE_INFINITY : Float.NaN; LeafCollector leafCollector = collector.getLeafCollector(og.readerContext); leafCollector.setScorer(fakeScorer); - for(int docIDX=0;docIDX[comparators.length]; - for(int sortFieldIDX=0;sortFieldIDX(Float.NaN, - groupMaxScore, - new TotalHits(og.count, TotalHits.Relation.EQUAL_TO), - topDocs.scoreDocs, - null, - groupSortValues); + groups[downTo] = + new GroupDocs<>( + Float.NaN, + groupMaxScore, + new TotalHits(og.count, TotalHits.Relation.EQUAL_TO), + topDocs.scoreDocs, + null, + groupSortValues); maxScore = Math.max(maxScore, groupMaxScore); } @@ -355,10 +348,15 @@ public class BlockGroupingCollector extends SimpleCollector { } */ - return new TopGroups<>(new TopGroups<>(groupSort.getSort(), - withinGroupSort.getSort(), - totalHitCount, totalGroupedHitCount, groups, maxScore), - totalGroupCount); + return new TopGroups<>( + new TopGroups<>( + groupSort.getSort(), + withinGroupSort.getSort(), + totalHitCount, + totalGroupedHitCount, + groups, + maxScore), + totalGroupCount); } @Override @@ -380,7 +378,7 @@ public class BlockGroupingCollector extends SimpleCollector { processGroup(); } groupEndDocID = lastDocPerGroupBits.advance(doc); - //System.out.println(" adv " + groupEndDocID + " " + lastDocPerGroupBits); + // System.out.println(" adv " + groupEndDocID + " " + lastDocPerGroupBits); subDocUpto = 0; groupCompetes = !queueFull; } @@ -404,15 +402,15 @@ public class BlockGroupingCollector extends SimpleCollector { if (subDocUpto == 1) { assert !queueFull; - //System.out.println(" init copy to bottomSlot=" + bottomSlot); + // System.out.println(" init copy to bottomSlot=" + bottomSlot); for (LeafFieldComparator fc : leafComparators) { fc.copy(bottomSlot, doc); fc.setBottom(bottomSlot); - } + } topGroupDoc = doc; } else { // Compare to bottomSlot - for (int compIDX = 0;; compIDX++) { + for (int compIDX = 0; ; compIDX++) { final int c = reversed[compIDX] * leafComparators[compIDX].compareBottom(doc); if (c < 0) { // Definitely not competitive -- done @@ -428,25 +426,25 @@ public class BlockGroupingCollector extends SimpleCollector { } } - //System.out.println(" best w/in group!"); - + // System.out.println(" best w/in group!"); + for (LeafFieldComparator fc : leafComparators) { fc.copy(bottomSlot, doc); // Necessary because some comparators cache // details of bottom slot; this forces them to // re-cache: fc.setBottom(bottomSlot); - } + } topGroupDoc = doc; } } else { // We're not sure this group will make it into the // queue yet - for (int compIDX = 0;; compIDX++) { + for (int compIDX = 0; ; compIDX++) { final int c = reversed[compIDX] * leafComparators[compIDX].compareBottom(doc); if (c < 0) { // Definitely not competitive -- done - //System.out.println(" doc doesn't compete w/ top groups"); + // System.out.println(" doc doesn't compete w/ top groups"); return; } else if (c > 0) { // Definitely competitive. 
@@ -455,7 +453,7 @@ public class BlockGroupingCollector extends SimpleCollector { // Ties with bottom, except we know this docID is // > docID in the queue (docs are visited in // order), so not competitive: - //System.out.println(" doc doesn't compete w/ top groups"); + // System.out.println(" doc doesn't compete w/ top groups"); return; } } @@ -468,7 +466,7 @@ public class BlockGroupingCollector extends SimpleCollector { fc.setBottom(bottomSlot); } topGroupDoc = doc; - //System.out.println(" doc competes w/ top groups"); + // System.out.println(" doc competes w/ top groups"); } } @@ -479,7 +477,7 @@ public class BlockGroupingCollector extends SimpleCollector { } subDocUpto = 0; docBase = readerContext.docBase; - //System.out.println("setNextReader base=" + docBase + " r=" + readerContext.reader); + // System.out.println("setNextReader base=" + docBase + " r=" + readerContext.reader); Scorer s = lastDocPerGroup.scorer(readerContext); if (s == null) { lastDocPerGroupBits = null; @@ -489,7 +487,7 @@ public class BlockGroupingCollector extends SimpleCollector { groupEndDocID = -1; currentReaderContext = readerContext; - for (int i=0; i extends SearchGroup { int topDoc; int comparatorSlot; diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/DistinctValuesCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/DistinctValuesCollector.java index ee381597bcc..0d7d3e8d145 100644 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/DistinctValuesCollector.java +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/DistinctValuesCollector.java @@ -22,13 +22,13 @@ import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.Set; - import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.SimpleCollector; /** - * A second pass grouping collector that keeps track of distinct values for a specified field for the top N group. + * A second pass grouping collector that keeps track of distinct values for a specified field for + * the top N group. 
* * @lucene.experimental */ @@ -36,12 +36,15 @@ public class DistinctValuesCollector extends SecondPassGroupingCollector groupSelector, Collection> groups, - GroupSelector valueSelector) { + public DistinctValuesCollector( + GroupSelector groupSelector, + Collection> groups, + GroupSelector valueSelector) { super(groupSelector, groups, new DistinctValuesReducer<>(valueSelector)); } @@ -58,12 +61,9 @@ public class DistinctValuesCollector extends SecondPassGroupingCollector extends SecondPassGroupingCollector { @@ -126,5 +126,4 @@ public class DistinctValuesCollector extends SecondPassGroupingCollector { private final DoubleValuesSource source; @@ -45,8 +42,10 @@ public class DoubleRangeGroupSelector extends GroupSelector { /** * Creates a new DoubleRangeGroupSelector - * @param source a DoubleValuesSource to retrieve double values per document - * @param rangeFactory a DoubleRangeFactory that defines how to group the double values into range buckets + * + * @param source a DoubleValuesSource to retrieve double values per document + * @param rangeFactory a DoubleRangeFactory that defines how to group the double values into range + * buckets */ public DoubleRangeGroupSelector(DoubleValuesSource source, DoubleRangeFactory rangeFactory) { this.source = source; @@ -91,10 +90,11 @@ public class DoubleRangeGroupSelector extends GroupSelector { inSecondPass = new HashSet<>(); includeEmpty = false; for (SearchGroup group : searchGroups) { - if (group.groupValue == null) + if (group.groupValue == null) { includeEmpty = true; - else + } else { inSecondPass.add(group.groupValue); + } } } } diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/FirstPassGroupingCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/FirstPassGroupingCollector.java index f5b05974e41..6873eb350dd 100644 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/FirstPassGroupingCollector.java +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/FirstPassGroupingCollector.java @@ -22,7 +22,6 @@ import java.util.Collection; import java.util.Comparator; import java.util.HashMap; import java.util.TreeSet; - import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.LeafFieldComparator; @@ -32,12 +31,11 @@ import org.apache.lucene.search.SimpleCollector; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; -/** FirstPassGroupingCollector is the first of two passes necessary - * to collect grouped hits. This pass gathers the top N sorted - * groups. Groups are defined by a {@link GroupSelector} +/** + * FirstPassGroupingCollector is the first of two passes necessary to collect grouped hits. This + * pass gathers the top N sorted groups. Groups are defined by a {@link GroupSelector} * - *
<p>
See {@link org.apache.lucene.search.grouping} for more - * details including a full code example.
<p>
+ *
<p>
See {@link org.apache.lucene.search.grouping} for more details including a full code example. * * @lucene.experimental */ @@ -56,6 +54,7 @@ public class FirstPassGroupingCollector extends SimpleCollector { // Set once we reach topNGroups unique groups: /** @lucene.internal */ protected TreeSet> orderedGroups; + private int docBase; private int spareSlot; @@ -63,16 +62,14 @@ public class FirstPassGroupingCollector extends SimpleCollector { * Create the first pass collector. * * @param groupSelector a GroupSelector used to defined groups - * @param groupSort The {@link Sort} used to sort the - * groups. The top sorted document within each group - * according to groupSort, determines how that group - * sorts against other groups. This must be non-null, - * ie, if you want to groupSort by relevance use - * Sort.RELEVANCE. + * @param groupSort The {@link Sort} used to sort the groups. The top sorted document within each + * group according to groupSort, determines how that group sorts against other groups. This + * must be non-null, ie, if you want to groupSort by relevance use Sort.RELEVANCE. * @param topNGroups How many top groups to keep. */ @SuppressWarnings({"unchecked", "rawtypes"}) - public FirstPassGroupingCollector(GroupSelector groupSelector, Sort groupSort, int topNGroups) { + public FirstPassGroupingCollector( + GroupSelector groupSelector, Sort groupSort, int topNGroups) { this.groupSelector = groupSelector; if (topNGroups < 1) { throw new IllegalArgumentException("topNGroups must be >= 1 (got " + topNGroups + ")"); @@ -91,7 +88,8 @@ public class FirstPassGroupingCollector extends SimpleCollector { for (int i = 0; i < sortFields.length; i++) { final SortField sortField = sortFields[i]; - // use topNGroups + 1 so we have a spare slot to use for comparing (tracked by this.spareSlot): + // use topNGroups + 1 so we have a spare slot to use for comparing (tracked by + // this.spareSlot): comparators[i] = sortField.getComparator(topNGroups + 1, i); reversed[i] = sortField.getReverse() ? -1 : 1; } @@ -106,16 +104,16 @@ public class FirstPassGroupingCollector extends SimpleCollector { } /** - * Returns top groups, starting from offset. This may - * return null, if no groups were collected, or if the - * number of unique groups collected is <= offset. + * Returns top groups, starting from offset. This may return null, if no groups were collected, or + * if the number of unique groups collected is <= offset. * * @param groupOffset The offset in the collected groups * @return top groups, starting from offset */ public Collection> getTopGroups(int groupOffset) throws IOException { - //System.out.println("FP.getTopGroups groupOffset=" + groupOffset + " fillFields=" + fillFields + " groupMap.size()=" + groupMap.size()); + // System.out.println("FP.getTopGroups groupOffset=" + groupOffset + " fillFields=" + fillFields + // + " groupMap.size()=" + groupMap.size()); if (groupOffset < 0) { throw new IllegalArgumentException("groupOffset must be >= 0 (got " + groupOffset + ")"); @@ -132,20 +130,22 @@ public class FirstPassGroupingCollector extends SimpleCollector { final Collection> result = new ArrayList<>(); int upto = 0; final int sortFieldCount = comparators.length; - for(CollectedSearchGroup group : orderedGroups) { + for (CollectedSearchGroup group : orderedGroups) { if (upto++ < groupOffset) { continue; } - // System.out.println(" group=" + (group.groupValue == null ? "null" : group.groupValue.toString())); + // System.out.println(" group=" + (group.groupValue == null ? 
"null" : + // group.groupValue.toString())); SearchGroup searchGroup = new SearchGroup<>(); searchGroup.groupValue = group.groupValue; searchGroup.sortValues = new Object[sortFieldCount]; - for(int sortFieldIDX=0;sortFieldIDX extends SimpleCollector { // Downside: if the number of unique groups is very low, this is // wasted effort as we will most likely be updating an existing group. if (orderedGroups != null) { - for (int compIDX = 0;; compIDX++) { + for (int compIDX = 0; ; compIDX++) { final int c = reversed[compIDX] * leafComparators[compIDX].compareBottom(doc); if (c < 0) { // Definitely not competitive. So don't even bother to continue @@ -190,8 +190,9 @@ public class FirstPassGroupingCollector extends SimpleCollector { @Override public void collect(int doc) throws IOException { - if (isCompetitive(doc) == false) + if (isCompetitive(doc) == false) { return; + } // TODO: should we add option to mean "ignore docs that // don't have the group field" (instead of stuffing them @@ -236,7 +237,7 @@ public class FirstPassGroupingCollector extends SimpleCollector { // We already tested that the document is competitive, so replace // the bottom group with this new group. final CollectedSearchGroup bottomGroup = orderedGroups.pollLast(); - assert orderedGroups.size() == topNGroups -1; + assert orderedGroups.size() == topNGroups - 1; groupMap.remove(bottomGroup.groupValue); @@ -261,16 +262,17 @@ public class FirstPassGroupingCollector extends SimpleCollector { } // Update existing group: - for (int compIDX = 0;; compIDX++) { + for (int compIDX = 0; ; compIDX++) { leafComparators[compIDX].copy(spareSlot, doc); - final int c = reversed[compIDX] * comparators[compIDX].compare(group.comparatorSlot, spareSlot); + final int c = + reversed[compIDX] * comparators[compIDX].compare(group.comparatorSlot, spareSlot); if (c < 0) { // Definitely not competitive. 
return; } else if (c > 0) { // Definitely competitive; set remaining comparators: - for (int compIDX2=compIDX+1; compIDX2 extends SimpleCollector { if (orderedGroups != null) { prevLast = orderedGroups.last(); orderedGroups.remove(group); - assert orderedGroups.size() == topNGroups-1; + assert orderedGroups.size() == topNGroups - 1; } else { prevLast = null; } @@ -306,7 +308,8 @@ public class FirstPassGroupingCollector extends SimpleCollector { orderedGroups.add(group); assert orderedGroups.size() == topNGroups; final CollectedSearchGroup newLast = orderedGroups.last(); - // If we changed the value of the last group, or changed which group was last, then update bottom: + // If we changed the value of the last group, or changed which group was last, then update + // bottom: if (group == newLast || prevLast != newLast) { for (LeafFieldComparator fc : leafComparators) { fc.setBottom(newLast.comparatorSlot); @@ -316,20 +319,21 @@ public class FirstPassGroupingCollector extends SimpleCollector { } private void buildSortedSet() throws IOException { - final Comparator> comparator = new Comparator>() { - @Override - public int compare(CollectedSearchGroup o1, CollectedSearchGroup o2) { - for (int compIDX = 0;; compIDX++) { - FieldComparator fc = comparators[compIDX]; - final int c = reversed[compIDX] * fc.compare(o1.comparatorSlot, o2.comparatorSlot); - if (c != 0) { - return c; - } else if (compIDX == compIDXEnd) { - return o1.topDoc - o2.topDoc; + final Comparator> comparator = + new Comparator>() { + @Override + public int compare(CollectedSearchGroup o1, CollectedSearchGroup o2) { + for (int compIDX = 0; ; compIDX++) { + FieldComparator fc = comparators[compIDX]; + final int c = reversed[compIDX] * fc.compare(o1.comparatorSlot, o2.comparatorSlot); + if (c != 0) { + return c; + } else if (compIDX == compIDXEnd) { + return o1.topDoc - o2.topDoc; + } + } } - } - } - }; + }; orderedGroups = new TreeSet<>(comparator); orderedGroups.addAll(groupMap.values()); @@ -343,18 +347,14 @@ public class FirstPassGroupingCollector extends SimpleCollector { @Override protected void doSetNextReader(LeafReaderContext readerContext) throws IOException { docBase = readerContext.docBase; - for (int i=0; i getGroupSelector() { return groupSelector; } - } - diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupDocs.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupDocs.java index 78861d05141..9c0eccf9512 100644 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupDocs.java +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupDocs.java @@ -19,39 +19,43 @@ package org.apache.lucene.search.grouping; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TotalHits; -/** Represents one group in the results. - * - * @lucene.experimental */ +/** + * Represents one group in the results. + * + * @lucene.experimental + */ public class GroupDocs { - /** The groupField value for all docs in this group; this - * may be null if hits did not have the groupField. */ + /** + * The groupField value for all docs in this group; this may be null if hits did not have the + * groupField. + */ public final T groupValue; /** Max score in this group */ public final float maxScore; - /** Overall aggregated score of this group (currently only - * set by join queries). */ + /** Overall aggregated score of this group (currently only set by join queries). 
*/ public final float score; - /** Hits; this may be {@link - * org.apache.lucene.search.FieldDoc} instances if the - * withinGroupSort sorted by fields. */ + /** + * Hits; this may be {@link org.apache.lucene.search.FieldDoc} instances if the withinGroupSort + * sorted by fields. + */ public final ScoreDoc[] scoreDocs; /** Total hits within this group */ public final TotalHits totalHits; - /** Matches the groupSort passed to {@link - * FirstPassGroupingCollector}. */ + /** Matches the groupSort passed to {@link FirstPassGroupingCollector}. */ public final Object[] groupSortValues; - public GroupDocs(float score, - float maxScore, - TotalHits totalHits, - ScoreDoc[] scoreDocs, - T groupValue, - Object[] groupSortValues) { + public GroupDocs( + float score, + float maxScore, + TotalHits totalHits, + ScoreDoc[] scoreDocs, + T groupValue, + Object[] groupSortValues) { this.score = score; this.maxScore = maxScore; this.totalHits = totalHits; diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupFacetCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupFacetCollector.java index 5abd64570c7..5dfc0d2d4bb 100644 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupFacetCollector.java +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupFacetCollector.java @@ -23,7 +23,6 @@ import java.util.LinkedList; import java.util.List; import java.util.NavigableSet; import java.util.TreeSet; - import org.apache.lucene.search.Scorable; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.SimpleCollector; @@ -55,17 +54,19 @@ public abstract class GroupFacetCollector extends SimpleCollector { } /** - * Returns grouped facet results that were computed over zero or more segments. - * Grouped facet counts are merged from zero or more segment results. + * Returns grouped facet results that were computed over zero or more segments. Grouped facet + * counts are merged from zero or more segment results. * * @param size The total number of facets to include. This is typically offset + limit - * @param minCount The minimum count a facet entry should have to be included in the grouped facet result - * @param orderByCount Whether to sort the facet entries by facet entry count. If false then the facets - * are sorted lexicographically in ascending order. + * @param minCount The minimum count a facet entry should have to be included in the grouped facet + * result + * @param orderByCount Whether to sort the facet entries by facet entry count. If false + * then the facets are sorted lexicographically in ascending order. * @return grouped facet results * @throws IOException If I/O related errors occur during merging segment grouped facet counts. 
*/ - public GroupedFacetResult mergeSegmentResults(int size, int minCount, boolean orderByCount) throws IOException { + public GroupedFacetResult mergeSegmentResults(int size, int minCount, boolean orderByCount) + throws IOException { if (segmentFacetCounts != null) { segmentResults.add(createSegmentResult()); segmentFacetCounts = null; // reset @@ -83,7 +84,8 @@ public abstract class GroupFacetCollector extends SimpleCollector { segments.add(segmentResult); } - GroupedFacetResult facetResult = new GroupedFacetResult(size, minCount, orderByCount, totalCount, missingCount); + GroupedFacetResult facetResult = + new GroupedFacetResult(size, minCount, orderByCount, totalCount, missingCount); while (segments.size() > 0) { SegmentResult segmentResult = segments.top(); BytesRef currentFacetValue = BytesRef.deepCopyOf(segmentResult.mergeTerm); @@ -110,8 +112,7 @@ public abstract class GroupFacetCollector extends SimpleCollector { protected abstract SegmentResult createSegmentResult() throws IOException; @Override - public void setScorer(Scorable scorer) throws IOException { - } + public void setScorer(Scorable scorer) throws IOException {} @Override public ScoreMode scoreMode() { @@ -119,31 +120,32 @@ public abstract class GroupFacetCollector extends SimpleCollector { } /** - * The grouped facet result. Containing grouped facet entries, total count and total missing count. + * The grouped facet result. Containing grouped facet entries, total count and total missing + * count. */ public static class GroupedFacetResult { - private final static Comparator orderByCountAndValue = new Comparator() { + private static final Comparator orderByCountAndValue = + new Comparator() { - @Override - public int compare(FacetEntry a, FacetEntry b) { - int cmp = b.count - a.count; // Highest count first! - if (cmp != 0) { - return cmp; - } - return a.value.compareTo(b.value); - } + @Override + public int compare(FacetEntry a, FacetEntry b) { + int cmp = b.count - a.count; // Highest count first! + if (cmp != 0) { + return cmp; + } + return a.value.compareTo(b.value); + } + }; - }; + private static final Comparator orderByValue = + new Comparator() { - private final static Comparator orderByValue = new Comparator() { - - @Override - public int compare(FacetEntry a, FacetEntry b) { - return a.value.compareTo(b.value); - } - - }; + @Override + public int compare(FacetEntry a, FacetEntry b) { + return a.value.compareTo(b.value); + } + }; private final int maxSize; private final NavigableSet facetEntries; @@ -152,7 +154,8 @@ public abstract class GroupFacetCollector extends SimpleCollector { private int currentMin; - public GroupedFacetResult(int size, int minCount, boolean orderByCount, int totalCount, int totalMissingCount) { + public GroupedFacetResult( + int size, int minCount, boolean orderByCount, int totalCount, int totalMissingCount) { this.facetEntries = new TreeSet<>(orderByCount ? orderByCountAndValue : orderByValue); this.totalMissingCount = totalMissingCount; this.totalCount = totalCount; @@ -180,8 +183,8 @@ public abstract class GroupFacetCollector extends SimpleCollector { } /** - * Returns a list of facet entries to be rendered based on the specified offset and limit. - * The facet entries are retrieved from the facet entries collected during merging. + * Returns a list of facet entries to be rendered based on the specified offset and limit. The + * facet entries are retrieved from the facet entries collected during merging. 
* * @param offset The offset in the collected facet entries during merging * @param limit The number of facets to return starting from the offset. @@ -224,9 +227,7 @@ public abstract class GroupFacetCollector extends SimpleCollector { } } - /** - * Represents a facet entry with a value and a count. - */ + /** Represents a facet entry with a value and a count. */ public static class FacetEntry { private final BytesRef value; @@ -259,30 +260,23 @@ public abstract class GroupFacetCollector extends SimpleCollector { @Override public String toString() { - return "FacetEntry{" + - "value=" + value.utf8ToString() + - ", count=" + count + - '}'; + return "FacetEntry{" + "value=" + value.utf8ToString() + ", count=" + count + '}'; } - /** - * @return The value of this facet entry - */ + /** @return The value of this facet entry */ public BytesRef getValue() { return value; } - /** - * @return The count (number of groups) of this facet entry. - */ + /** @return The count (number of groups) of this facet entry. */ public int getCount() { return count; } } /** - * Contains the local grouped segment counts for a particular segment. - * Each SegmentResult must be added together. + * Contains the local grouped segment counts for a particular segment. Each SegmentResult + * must be added together. */ protected abstract static class SegmentResult { @@ -302,12 +296,12 @@ public abstract class GroupFacetCollector extends SimpleCollector { } /** - * Go to next term in this SegmentResult in order to retrieve the grouped facet counts. + * Go to next term in this SegmentResult in order to retrieve the grouped facet + * counts. * * @throws IOException If I/O related errors occur */ protected abstract void nextTerm() throws IOException; - } private static class SegmentResultPriorityQueue extends PriorityQueue { @@ -321,5 +315,4 @@ public abstract class GroupFacetCollector extends SimpleCollector { return a.mergeTerm.compareTo(b.mergeTerm) < 0; } } - } diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupReducer.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupReducer.java index d3fb1d9b5d3..313172ac26e 100644 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupReducer.java +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupReducer.java @@ -21,22 +21,19 @@ import java.io.IOException; import java.util.Collection; import java.util.HashMap; import java.util.Map; - import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Collector; import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.Scorable; /** - * Concrete implementations of this class define what to collect for individual - * groups during the second-pass of a grouping search. + * Concrete implementations of this class define what to collect for individual groups during the + * second-pass of a grouping search. * - * Each group is assigned a Collector returned by {@link #newCollector()}, and - * {@link LeafCollector#collect(int)} is called for each document that is in - * a group + *
<p>
Each group is assigned a Collector returned by {@link #newCollector()}, and {@link + * LeafCollector#collect(int)} is called for each document that is in a group * * @see SecondPassGroupingCollector - * * @param the type of the value used for grouping * @param the type of {@link Collector} used to reduce each group */ @@ -47,7 +44,7 @@ public abstract class GroupReducer { /** * Define which groups should be reduced. * - * Called by {@link SecondPassGroupingCollector} + *
<p>
Called by {@link SecondPassGroupingCollector} */ public void setGroups(Collection> groups) { for (SearchGroup group : groups) { @@ -55,25 +52,20 @@ public abstract class GroupReducer { } } - /** - * Whether or not this reducer requires collected documents to be scored - */ + /** Whether or not this reducer requires collected documents to be scored */ public abstract boolean needsScores(); - /** - * Creates a new Collector for each group - */ + /** Creates a new Collector for each group */ protected abstract C newCollector(); - /** - * Get the Collector for a given group - */ + /** Get the Collector for a given group */ public final C getCollector(T value) { return groups.get(value).collector; } /** * Collect a given document into a given group + * * @throws IOException on error */ public final void collect(T value, int doc) throws IOException { @@ -81,18 +73,14 @@ public abstract class GroupReducer { collector.leafCollector.collect(doc); } - /** - * Set the Scorer on all group collectors - */ + /** Set the Scorer on all group collectors */ public final void setScorer(Scorable scorer) throws IOException { for (GroupCollector collector : groups.values()) { collector.leafCollector.setScorer(scorer); } } - /** - * Called when the parent {@link SecondPassGroupingCollector} moves to a new segment - */ + /** Called when the parent {@link SecondPassGroupingCollector} moves to a new segment */ public final void setNextReader(LeafReaderContext ctx) throws IOException { for (GroupCollector collector : groups.values()) { collector.leafCollector = collector.collector.getLeafCollector(ctx); @@ -108,5 +96,4 @@ public abstract class GroupReducer { this.collector = collector; } } - } diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupSelector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupSelector.java index 92962a4d457..a5e4ddcf657 100644 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupSelector.java +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupSelector.java @@ -19,61 +19,52 @@ package org.apache.lucene.search.grouping; import java.io.IOException; import java.util.Collection; - import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Scorable; /** * Defines a group, for use by grouping collectors * - * A GroupSelector acts as an iterator over documents. For each segment, clients - * should call {@link #setNextReader(LeafReaderContext)}, and then {@link #advanceTo(int)} - * for each matching document. + *
<p>
A GroupSelector acts as an iterator over documents. For each segment, clients should call + * {@link #setNextReader(LeafReaderContext)}, and then {@link #advanceTo(int)} for each matching + * document. * * @param the type of the group value */ public abstract class GroupSelector { - /** - * What to do with the current value - */ - public enum State { SKIP, ACCEPT } + /** What to do with the current value */ + public enum State { + SKIP, + ACCEPT + } - /** - * Set the LeafReaderContext - */ + /** Set the LeafReaderContext */ public abstract void setNextReader(LeafReaderContext readerContext) throws IOException; - /** - * Set the current Scorer - */ + /** Set the current Scorer */ public abstract void setScorer(Scorable scorer) throws IOException; - /** - * Advance the GroupSelector's iterator to the given document - */ + /** Advance the GroupSelector's iterator to the given document */ public abstract State advanceTo(int doc) throws IOException; /** * Get the group value of the current document * - * N.B. this object may be reused, for a persistent version use {@link #copyValue()} + *
<p>
N.B. this object may be reused, for a persistent version use {@link #copyValue()} */ public abstract T currentValue() throws IOException; - /** - * @return a copy of the group value of the current document - */ + /** @return a copy of the group value of the current document */ public abstract T copyValue() throws IOException; /** * Set a restriction on the group values returned by this selector * - * If the selector is positioned on a document whose group value is not contained - * within this set, then {@link #advanceTo(int)} will return {@link State#SKIP} + *
<p>
If the selector is positioned on a document whose group value is not contained within this + * set, then {@link #advanceTo(int)} will return {@link State#SKIP} * * @param groups a set of {@link SearchGroup} objects to limit selections to */ public abstract void setGroups(Collection> groups); - } diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupingSearch.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupingSearch.java index 25ed3770add..ce97e2a8705 100644 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupingSearch.java +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupingSearch.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.Collection; import java.util.Collections; import java.util.Map; - import org.apache.lucene.queries.function.ValueSource; import org.apache.lucene.search.CachingCollector; import org.apache.lucene.search.Collector; @@ -62,8 +61,9 @@ public class GroupingSearch { private Bits matchingGroupHeads; /** - * Constructs a GroupingSearch instance that groups documents by index terms using DocValues. - * The group field can only have one token per document. This means that the field must not be analysed. + * Constructs a GroupingSearch instance that groups documents by index terms using + * DocValues. The group field can only have one token per document. This means that the field must + * not be analysed. * * @param groupField The name of the field to group by. */ @@ -72,7 +72,9 @@ public class GroupingSearch { } /** - * Constructs a GroupingSearch instance that groups documents using a {@link GroupSelector} + * Constructs a GroupingSearch instance that groups documents using a {@link + * GroupSelector} + * * @param groupSelector a {@link GroupSelector} that defines groups for this GroupingSearch */ public GroupingSearch(GroupSelector groupSelector) { @@ -80,10 +82,10 @@ public class GroupingSearch { } /** - * Constructs a GroupingSearch instance that groups documents by function using a {@link ValueSource} - * instance. + * Constructs a GroupingSearch instance that groups documents by function using a + * {@link ValueSource} instance. * - * @param groupFunction The function to group by specified as {@link ValueSource} + * @param groupFunction The function to group by specified as {@link ValueSource} * @param valueSourceContext The context of the specified groupFunction */ public GroupingSearch(ValueSource groupFunction, Map valueSourceContext) { @@ -91,8 +93,8 @@ public class GroupingSearch { } /** - * Constructor for grouping documents by doc block. - * This constructor can only be used when documents belonging in a group are indexed in one block. + * Constructor for grouping documents by doc block. This constructor can only be used when + * documents belonging in a group are indexed in one block. * * @param groupEndDocs The query that marks the last document in all doc blocks */ @@ -106,36 +108,44 @@ public class GroupingSearch { } /** - * Executes a grouped search. Both the first pass and second pass are executed on the specified searcher. + * Executes a grouped search. Both the first pass and second pass are executed on the specified + * searcher. * - * @param searcher The {@link org.apache.lucene.search.IndexSearcher} instance to execute the grouped search on. - * @param query The query to execute with the grouping + * @param searcher The {@link org.apache.lucene.search.IndexSearcher} instance to execute the + * grouped search on. 
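
// Illustrative sketch (not part of this patch): a typical invocation of the
// GroupingSearch API documented above. Assumes an IndexSearcher over an index
// whose "category" field is indexed with doc values; the field name, query and
// page sizes are hypothetical.
import java.io.IOException;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.grouping.GroupingSearch;
import org.apache.lucene.search.grouping.TopGroups;
import org.apache.lucene.util.BytesRef;

class GroupingSearchSketch {
  static TopGroups<BytesRef> firstPage(IndexSearcher searcher) throws IOException {
    GroupingSearch groupingSearch =
        new GroupingSearch("category") // hypothetical DocValues group field
            .setGroupSort(Sort.RELEVANCE) // order of the groups themselves
            .setSortWithinGroup(Sort.RELEVANCE) // order of documents inside each group
            .setGroupDocsLimit(5) // top documents kept per group
            .setAllGroups(true); // also gather all matching groups, for pagination
    // groupOffset = 0, groupLimit = 10: the first page of ten groups.
    return groupingSearch.search(searcher, new MatchAllDocsQuery(), 0, 10);
  }
}
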
+ * @param query The query to execute with the grouping * @param groupOffset The group offset - * @param groupLimit The number of groups to return from the specified group offset + * @param groupLimit The number of groups to return from the specified group offset * @return the grouped result as a {@link TopGroups} instance * @throws IOException If any I/O related errors occur */ @SuppressWarnings("unchecked") - public TopGroups search(IndexSearcher searcher, Query query, int groupOffset, int groupLimit) throws IOException { + public TopGroups search( + IndexSearcher searcher, Query query, int groupOffset, int groupLimit) throws IOException { if (grouper != null) { return groupByFieldOrFunction(searcher, query, groupOffset, groupLimit); } else if (groupEndDocs != null) { return (TopGroups) groupByDocBlock(searcher, query, groupOffset, groupLimit); } else { - throw new IllegalStateException("Either groupField, groupFunction or groupEndDocs must be set."); // This can't happen... + throw new IllegalStateException( + "Either groupField, groupFunction or groupEndDocs must be set."); // This can't happen... } } @SuppressWarnings({"unchecked", "rawtypes"}) - protected TopGroups groupByFieldOrFunction(IndexSearcher searcher, Query query, int groupOffset, int groupLimit) throws IOException { + protected TopGroups groupByFieldOrFunction( + IndexSearcher searcher, Query query, int groupOffset, int groupLimit) throws IOException { int topN = groupOffset + groupLimit; - final FirstPassGroupingCollector firstPassCollector = new FirstPassGroupingCollector(grouper, groupSort, topN); - final AllGroupsCollector allGroupsCollector = allGroups ? new AllGroupsCollector(grouper) : null; - final AllGroupHeadsCollector allGroupHeadsCollector - = allGroupHeads ? AllGroupHeadsCollector.newCollector(grouper, sortWithinGroup) : null; + final FirstPassGroupingCollector firstPassCollector = + new FirstPassGroupingCollector(grouper, groupSort, topN); + final AllGroupsCollector allGroupsCollector = + allGroups ? new AllGroupsCollector(grouper) : null; + final AllGroupHeadsCollector allGroupHeadsCollector = + allGroupHeads ? AllGroupHeadsCollector.newCollector(grouper, sortWithinGroup) : null; - final Collector firstRound = MultiCollector.wrap(firstPassCollector, allGroupsCollector, allGroupHeadsCollector); + final Collector firstRound = + MultiCollector.wrap(firstPassCollector, allGroupsCollector, allGroupHeadsCollector); CachingCollector cachedCollector = null; if (maxCacheRAMMB != null || maxDocsToCache != null) { @@ -150,8 +160,10 @@ public class GroupingSearch { } matchingGroups = allGroups ? allGroupsCollector.getGroups() : Collections.emptyList(); - matchingGroupHeads = allGroupHeads ? allGroupHeadsCollector.retrieveGroupHeads(searcher.getIndexReader().maxDoc()) - : new Bits.MatchNoBits(searcher.getIndexReader().maxDoc()); + matchingGroupHeads = + allGroupHeads + ? 
allGroupHeadsCollector.retrieveGroupHeads(searcher.getIndexReader().maxDoc()) + : new Bits.MatchNoBits(searcher.getIndexReader().maxDoc()); Collection topSearchGroups = firstPassCollector.getTopGroups(groupOffset); if (topSearchGroups == null) { @@ -159,8 +171,9 @@ public class GroupingSearch { } int topNInsideGroup = groupDocsOffset + groupDocsLimit; - TopGroupsCollector secondPassCollector - = new TopGroupsCollector(grouper, topSearchGroups, groupSort, sortWithinGroup, topNInsideGroup, includeMaxScore); + TopGroupsCollector secondPassCollector = + new TopGroupsCollector( + grouper, topSearchGroups, groupSort, sortWithinGroup, topNInsideGroup, includeMaxScore); if (cachedCollector != null && cachedCollector.isCached()) { cachedCollector.replay(secondPassCollector); @@ -169,29 +182,38 @@ public class GroupingSearch { } if (allGroups) { - return new TopGroups(secondPassCollector.getTopGroups(groupDocsOffset), matchingGroups.size()); + return new TopGroups( + secondPassCollector.getTopGroups(groupDocsOffset), matchingGroups.size()); } else { return secondPassCollector.getTopGroups(groupDocsOffset); } } - protected TopGroups groupByDocBlock(IndexSearcher searcher, Query query, int groupOffset, int groupLimit) throws IOException { + protected TopGroups groupByDocBlock( + IndexSearcher searcher, Query query, int groupOffset, int groupLimit) throws IOException { int topN = groupOffset + groupLimit; final Query endDocsQuery = searcher.rewrite(this.groupEndDocs); - final Weight groupEndDocs = searcher.createWeight(endDocsQuery, ScoreMode.COMPLETE_NO_SCORES, 1); - BlockGroupingCollector c = new BlockGroupingCollector(groupSort, topN, groupSort.needsScores() || sortWithinGroup.needsScores(), groupEndDocs); + final Weight groupEndDocs = + searcher.createWeight(endDocsQuery, ScoreMode.COMPLETE_NO_SCORES, 1); + BlockGroupingCollector c = + new BlockGroupingCollector( + groupSort, + topN, + groupSort.needsScores() || sortWithinGroup.needsScores(), + groupEndDocs); searcher.search(query, c); int topNInsideGroup = groupDocsOffset + groupDocsLimit; return c.getTopGroups(sortWithinGroup, groupOffset, groupDocsOffset, topNInsideGroup); } /** - * Enables caching for the second pass search. The cache will not grow over a specified limit in MB. - * The cache is filled during the first pass searched and then replayed during the second pass searched. - * If the cache grows beyond the specified limit, then the cache is purged and not used in the second pass search. + * Enables caching for the second pass search. The cache will not grow over a specified limit in + * MB. The cache is filled during the first pass searched and then replayed during the second pass + * searched. If the cache grows beyond the specified limit, then the cache is purged and not used + * in the second pass search. * * @param maxCacheRAMMB The maximum amount in MB the cache is allowed to hold - * @param cacheScores Whether to cache the scores + * @param cacheScores Whether to cache the scores * @return this */ public GroupingSearch setCachingInMB(double maxCacheRAMMB, boolean cacheScores) { @@ -202,12 +224,13 @@ public class GroupingSearch { } /** - * Enables caching for the second pass search. The cache will not contain more than the maximum specified documents. - * The cache is filled during the first pass searched and then replayed during the second pass searched. - * If the cache grows beyond the specified limit, then the cache is purged and not used in the second pass search. + * Enables caching for the second pass search. 
The cache will not contain more than the maximum + * specified documents. The cache is filled during the first pass searched and then replayed + * during the second pass searched. If the cache grows beyond the specified limit, then the cache + * is purged and not used in the second pass search. * * @param maxDocsToCache The maximum number of documents the cache is allowed to hold - * @param cacheScores Whether to cache the scores + * @param cacheScores Whether to cache the scores * @return this */ public GroupingSearch setCaching(int maxDocsToCache, boolean cacheScores) { @@ -229,8 +252,7 @@ public class GroupingSearch { } /** - * Specifies how groups are sorted. - * Defaults to {@link Sort#RELEVANCE}. + * Specifies how groups are sorted. Defaults to {@link Sort#RELEVANCE}. * * @param groupSort The sort for the groups. * @return this @@ -241,8 +263,7 @@ public class GroupingSearch { } /** - * Specified how documents inside a group are sorted. - * Defaults to {@link Sort#RELEVANCE}. + * Specified how documents inside a group are sorted. Defaults to {@link Sort#RELEVANCE}. * * @param sortWithinGroup The sort for documents inside a group * @return this @@ -286,11 +307,11 @@ public class GroupingSearch { } /** - * Whether to also compute all groups matching the query. - * This can be used to determine the number of groups, which can be used for accurate pagination. - *
<p>
- * When grouping by doc block the number of groups are automatically included in the {@link TopGroups} and this - * option doesn't have any influence. + * Whether to also compute all groups matching the query. This can be used to determine the number + * of groups, which can be used for accurate pagination. + * + *
<p>
When grouping by doc block the number of groups are automatically included in the {@link + * TopGroups} and this option doesn't have any influence. * * @param allGroups to also compute all groups matching the query * @return this @@ -301,11 +322,11 @@ public class GroupingSearch { } /** - * If {@link #setAllGroups(boolean)} was set to true then all matching groups are returned, otherwise - * an empty collection is returned. + * If {@link #setAllGroups(boolean)} was set to true then all matching groups are + * returned, otherwise an empty collection is returned. * - * @param The group value type. This can be a {@link BytesRef} or a {@link MutableValue} instance. If grouping - * by doc block this the group value is always null. + * @param The group value type. This can be a {@link BytesRef} or a {@link MutableValue} + * instance. If grouping by doc block this the group value is always null. * @return all matching groups are returned, or an empty collection */ @SuppressWarnings({"unchecked", "rawtypes"}) @@ -315,10 +336,11 @@ public class GroupingSearch { /** * Whether to compute all group heads (most relevant document per group) matching the query. - *
<p>
- * This feature isn't enabled when grouping by doc block. * - * @param allGroupHeads Whether to compute all group heads (most relevant document per group) matching the query + *
<p>
This feature isn't enabled when grouping by doc block. + * + * @param allGroupHeads Whether to compute all group heads (most relevant document per group) + * matching the query * @return this */ public GroupingSearch setAllGroupHeads(boolean allGroupHeads) { @@ -327,12 +349,13 @@ public class GroupingSearch { } /** - * Returns the matching group heads if {@link #setAllGroupHeads(boolean)} was set to true or an empty bit set. + * Returns the matching group heads if {@link #setAllGroupHeads(boolean)} was set to true or an + * empty bit set. * - * @return The matching group heads if {@link #setAllGroupHeads(boolean)} was set to true or an empty bit set + * @return The matching group heads if {@link #setAllGroupHeads(boolean)} was set to true or an + * empty bit set */ public Bits getAllGroupHeads() { return matchingGroupHeads; } - } diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/LongRange.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/LongRange.java index 7b6c845aa4a..9b631d1d963 100644 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/LongRange.java +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/LongRange.java @@ -19,10 +19,7 @@ package org.apache.lucene.search.grouping; import java.util.Objects; -/** - * Represents a contiguous range of long values, with an inclusive minimum and - * exclusive maximum - */ +/** Represents a contiguous range of long values, with an inclusive minimum and exclusive maximum */ public class LongRange { /** The inclusive minimum value of this range */ @@ -30,9 +27,7 @@ public class LongRange { /** The exclusive maximum value of this range */ public long max; - /** - * Creates a new double range, running from {@code min} inclusive to {@code max} exclusive - */ + /** Creates a new double range, running from {@code min} inclusive to {@code max} exclusive */ public LongRange(long min, long max) { this.min = min; this.max = max; diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/LongRangeFactory.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/LongRangeFactory.java index be66647a5e9..c2795d8c31d 100644 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/LongRangeFactory.java +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/LongRangeFactory.java @@ -17,9 +17,7 @@ package org.apache.lucene.search.grouping; -/** - * Groups double values into ranges - */ +/** Groups double values into ranges */ public class LongRangeFactory { private final long min; @@ -28,11 +26,12 @@ public class LongRangeFactory { /** * Creates a new LongRangeFactory - * @param min a minimum value; all longs below this value are grouped into a single range - * @param width a standard width; all ranges between {@code min} and {@code max} are this wide, - * with the exception of the final range which may be up to this width. Ranges - * are inclusive at the lower end, and exclusive at the upper end. - * @param max a maximum value; all longs above this value are grouped into a single range + * + * @param min a minimum value; all longs below this value are grouped into a single range + * @param width a standard width; all ranges between {@code min} and {@code max} are this wide, + * with the exception of the final range which may be up to this width. Ranges are inclusive + * at the lower end, and exclusive at the upper end. 
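
// Illustrative sketch (not part of this patch): how the min/width/max contract
// described in this constructor's javadoc buckets values. The concrete numbers
// are hypothetical.
import org.apache.lucene.search.grouping.LongRange;
import org.apache.lucene.search.grouping.LongRangeFactory;

class LongRangeFactorySketch {
  public static void main(String[] args) {
    LongRangeFactory factory = new LongRangeFactory(0, 10, 100); // min=0, width=10, max=100
    LongRange reuse = null;
    reuse = factory.getRange(-5, reuse); // below min: [Long.MIN_VALUE, 0)
    System.out.println(reuse.min + ".." + reuse.max);
    reuse = factory.getRange(37, reuse); // regular bucket: [30, 40)
    System.out.println(reuse.min + ".." + reuse.max);
    reuse = factory.getRange(250, reuse); // at or above max: [100, Long.MAX_VALUE)
    System.out.println(reuse.min + ".." + reuse.max);
  }
}
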
+ * @param max a maximum value; all longs above this value are grouped into a single range */ public LongRangeFactory(long min, long width, long max) { this.min = min; @@ -42,12 +41,14 @@ public class LongRangeFactory { /** * Finds the LongRange that a value should be grouped into + * * @param value the value to group * @param reuse an existing LongRange object to reuse */ public LongRange getRange(long value, LongRange reuse) { - if (reuse == null) + if (reuse == null) { reuse = new LongRange(Long.MIN_VALUE, Long.MAX_VALUE); + } if (value < min) { reuse.max = min; reuse.min = Long.MIN_VALUE; @@ -63,5 +64,4 @@ public class LongRangeFactory { reuse.max = reuse.min + width; return reuse; } - } diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/LongRangeGroupSelector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/LongRangeGroupSelector.java index 7dd0c238d92..ba8623d7128 100644 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/LongRangeGroupSelector.java +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/LongRangeGroupSelector.java @@ -21,16 +21,13 @@ import java.io.IOException; import java.util.Collection; import java.util.HashSet; import java.util.Set; - import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DoubleValuesSource; import org.apache.lucene.search.LongValues; import org.apache.lucene.search.LongValuesSource; import org.apache.lucene.search.Scorable; -/** - * A GroupSelector implementation that groups documents by long values - */ +/** A GroupSelector implementation that groups documents by long values */ public class LongRangeGroupSelector extends GroupSelector { private final LongValuesSource source; @@ -46,8 +43,10 @@ public class LongRangeGroupSelector extends GroupSelector { /** * Creates a new LongRangeGroupSelector - * @param source a LongValuesSource to retrieve long values per document - * @param rangeFactory a LongRangeFactory that defines how to group the long values into range buckets + * + * @param source a LongValuesSource to retrieve long values per document + * @param rangeFactory a LongRangeFactory that defines how to group the long values into range + * buckets */ public LongRangeGroupSelector(LongValuesSource source, LongRangeFactory rangeFactory) { this.source = source; @@ -92,10 +91,11 @@ public class LongRangeGroupSelector extends GroupSelector { inSecondPass = new HashSet<>(); includeEmpty = false; for (SearchGroup group : searchGroups) { - if (group.groupValue == null) + if (group.groupValue == null) { includeEmpty = true; - else + } else { inSecondPass.add(group.groupValue); + } } } } diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/SearchGroup.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/SearchGroup.java index 58e1f74350a..f859e47a2f9 100644 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/SearchGroup.java +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/SearchGroup.java @@ -26,7 +26,6 @@ import java.util.List; import java.util.Map; import java.util.NavigableSet; import java.util.TreeSet; - import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; @@ -38,19 +37,23 @@ import org.apache.lucene.search.SortField; */ public class SearchGroup { - /** The value that defines this group */ + /** The value that defines this group */ public T groupValue; - /** The sort values used during sorting. 
These are the - * groupSort field values of the highest rank document - * (by the groupSort) within the group. Can be - * null if fillFields=false had - * been passed to {@link FirstPassGroupingCollector#getTopGroups} */ + /** + * The sort values used during sorting. These are the groupSort field values of the highest rank + * document (by the groupSort) within the group. Can be null if + * fillFields=false had been passed to {@link FirstPassGroupingCollector#getTopGroups} + */ public Object[] sortValues; @Override public String toString() { - return("SearchGroup(groupValue=" + groupValue + " sortValues=" + Arrays.toString(sortValues) + ")"); + return ("SearchGroup(groupValue=" + + groupValue + + " sortValues=" + + Arrays.toString(sortValues) + + ")"); } @Override @@ -90,11 +93,12 @@ public class SearchGroup { assert iter.hasNext(); final SearchGroup group = iter.next(); if (group.sortValues == null) { - throw new IllegalArgumentException("group.sortValues is null; you must pass fillFields=true to the first pass collector"); + throw new IllegalArgumentException( + "group.sortValues is null; you must pass fillFields=true to the first pass collector"); } return group; } - + @Override public String toString() { return "ShardIter(shard=" + shardIndex + ")"; @@ -162,7 +166,7 @@ public class SearchGroup { @SuppressWarnings("rawtypes") public final FieldComparator[] comparators; - + public final int[] reversed; @SuppressWarnings({"unchecked", "rawtypes"}) @@ -178,18 +182,19 @@ public class SearchGroup { } @Override - @SuppressWarnings({"unchecked","rawtypes"}) + @SuppressWarnings({"unchecked", "rawtypes"}) public int compare(MergedGroup group, MergedGroup other) { if (group == other) { return 0; } - //System.out.println("compare group=" + group + " other=" + other); + // System.out.println("compare group=" + group + " other=" + other); final Object[] groupValues = group.topValues; final Object[] otherValues = other.topValues; - //System.out.println(" groupValues=" + groupValues + " otherValues=" + otherValues); - for (int compIDX = 0;compIDX < comparators.length; compIDX++) { - final int c = reversed[compIDX] * comparators[compIDX].compareValues(groupValues[compIDX], - otherValues[compIDX]); + // System.out.println(" groupValues=" + groupValues + " otherValues=" + otherValues); + for (int compIDX = 0; compIDX < comparators.length; compIDX++) { + final int c = + reversed[compIDX] + * comparators[compIDX].compareValues(groupValues[compIDX], otherValues[compIDX]); if (c != 0) { return c; } @@ -205,7 +210,7 @@ public class SearchGroup { private final GroupComparator groupComp; private final NavigableSet> queue; - private final Map> groupsSeen; + private final Map> groupsSeen; public GroupMerger(Sort groupSort) { groupComp = new GroupComparator<>(groupSort); @@ -213,17 +218,18 @@ public class SearchGroup { groupsSeen = new HashMap<>(); } - @SuppressWarnings({"unchecked","rawtypes"}) + @SuppressWarnings({"unchecked", "rawtypes"}) private void updateNextGroup(int topN, ShardIter shard) { - while(shard.iter.hasNext()) { + while (shard.iter.hasNext()) { final SearchGroup group = shard.next(); MergedGroup mergedGroup = groupsSeen.get(group.groupValue); final boolean isNew = mergedGroup == null; - //System.out.println(" next group=" + (group.groupValue == null ? "null" : ((BytesRef) group.groupValue).utf8ToString()) + " sort=" + Arrays.toString(group.sortValues)); + // System.out.println(" next group=" + (group.groupValue == null ? 
"null" : ((BytesRef) + // group.groupValue).utf8ToString()) + " sort=" + Arrays.toString(group.sortValues)); if (isNew) { // Start a new group: - //System.out.println(" new"); + // System.out.println(" new"); mergedGroup = new MergedGroup<>(group.groupValue); mergedGroup.minShardIndex = shard.shardIndex; assert group.sortValues != null; @@ -236,11 +242,13 @@ public class SearchGroup { // processed; move on to next group... continue; } else { - //System.out.println(" old"); + // System.out.println(" old"); boolean competes = false; - for(int compIDX=0;compIDX { } else if (cmp > 0) { // Definitely does not compete break; - } else if (compIDX == groupComp.comparators.length-1) { + } else if (compIDX == groupComp.comparators.length - 1) { if (shard.shardIndex < mergedGroup.minShardIndex) { competes = true; } } } - //System.out.println(" competes=" + competes); + // System.out.println(" competes=" + competes); if (competes) { // Group's sort changed -- remove & re-insert @@ -274,23 +282,24 @@ public class SearchGroup { } // Prune un-competitive groups: - while(queue.size() > topN) { + while (queue.size() > topN) { final MergedGroup group = queue.pollLast(); - //System.out.println("PRUNE: " + group); + // System.out.println("PRUNE: " + group); group.inQueue = false; } } - public Collection> merge(List>> shards, int offset, int topN) { + public Collection> merge( + List>> shards, int offset, int topN) { final int maxQueueSize = offset + topN; - //System.out.println("merge"); + // System.out.println("merge"); // Init queue: - for(int shardIDX=0;shardIDX> shard = shards.get(shardIDX); if (!shard.isEmpty()) { - //System.out.println(" insert shard=" + shardIDX); + // System.out.println(" insert shard=" + shardIDX); updateNextGroup(maxQueueSize, new ShardIter<>(shard, shardIDX)); } } @@ -300,10 +309,12 @@ public class SearchGroup { int count = 0; - while(!queue.isEmpty()) { + while (!queue.isEmpty()) { final MergedGroup group = queue.pollFirst(); group.processed = true; - //System.out.println(" pop: shards=" + group.shards + " group=" + (group.groupValue == null ? "null" : (((BytesRef) group.groupValue).utf8ToString())) + " sortValues=" + Arrays.toString(group.topValues)); + // System.out.println(" pop: shards=" + group.shards + " group=" + (group.groupValue == + // null ? "null" : (((BytesRef) group.groupValue).utf8ToString())) + " sortValues=" + + // Arrays.toString(group.topValues)); if (count++ >= offset) { final SearchGroup newGroup = new SearchGroup<>(); newGroup.groupValue = group.groupValue; @@ -312,12 +323,12 @@ public class SearchGroup { if (newTopGroups.size() == topN) { break; } - //} else { - // System.out.println(" skip < offset"); + // } else { + // System.out.println(" skip < offset"); } // Advance all iters in this group: - for(ShardIter shardIter : group.shards) { + for (ShardIter shardIter : group.shards) { updateNextGroup(maxQueueSize, shardIter); } } @@ -330,16 +341,16 @@ public class SearchGroup { } } - /** Merges multiple collections of top groups, for example - * obtained from separate index shards. The provided - * groupSort must match how the groups were sorted, and - * the provided SearchGroups must have been computed - * with fillFields=true passed to {@link - * FirstPassGroupingCollector#getTopGroups}. + /** + * Merges multiple collections of top groups, for example obtained from separate index shards. 
The + * provided groupSort must match how the groups were sorted, and the provided SearchGroups must + * have been computed with fillFields=true passed to {@link + * FirstPassGroupingCollector#getTopGroups}. * *
<p>
NOTE: this returns null if the topGroups is empty. */ - public static Collection> merge(List>> topGroups, int offset, int topN, Sort groupSort) { + public static Collection> merge( + List>> topGroups, int offset, int topN, Sort groupSort) { if (topGroups.isEmpty()) { return null; } else { diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/SecondPassGroupingCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/SecondPassGroupingCollector.java index dc7d0aabde7..77cd049e62e 100644 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/SecondPassGroupingCollector.java +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/SecondPassGroupingCollector.java @@ -19,19 +19,17 @@ package org.apache.lucene.search.grouping; import java.io.IOException; import java.util.Collection; import java.util.Objects; - import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Scorable; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.SimpleCollector; /** - * SecondPassGroupingCollector runs over an already collected set of - * groups, further applying a {@link GroupReducer} to each group + * SecondPassGroupingCollector runs over an already collected set of groups, further applying a + * {@link GroupReducer} to each group * * @see TopGroupsCollector * @see DistinctValuesCollector - * * @lucene.experimental */ public class SecondPassGroupingCollector extends SimpleCollector { @@ -45,13 +43,17 @@ public class SecondPassGroupingCollector extends SimpleCollector { /** * Create a new SecondPassGroupingCollector - * @param groupSelector the GroupSelector that defines groups for this search - * @param groups the groups to collect documents for - * @param reducer the reducer to apply to each group + * + * @param groupSelector the GroupSelector that defines groups for this search + * @param groups the groups to collect documents for + * @param reducer the reducer to apply to each group */ - public SecondPassGroupingCollector(GroupSelector groupSelector, Collection> groups, GroupReducer reducer) { + public SecondPassGroupingCollector( + GroupSelector groupSelector, + Collection> groups, + GroupReducer reducer) { - //System.out.println("SP init"); + // System.out.println("SP init"); if (groups.isEmpty()) { throw new IllegalArgumentException("no groups to collect (groups is empty)"); } @@ -64,9 +66,7 @@ public class SecondPassGroupingCollector extends SimpleCollector { reducer.setGroups(groups); } - /** - * @return the GroupSelector used in this collector - */ + /** @return the GroupSelector used in this collector */ public GroupSelector getGroupSelector() { return groupSelector; } @@ -85,8 +85,9 @@ public class SecondPassGroupingCollector extends SimpleCollector { @Override public void collect(int doc) throws IOException { totalHitCount++; - if (groupSelector.advanceTo(doc) == GroupSelector.State.SKIP) + if (groupSelector.advanceTo(doc) == GroupSelector.State.SKIP) { return; + } totalGroupedHitCount++; T value = groupSelector.currentValue(); groupReducer.collect(value, doc); @@ -97,5 +98,4 @@ public class SecondPassGroupingCollector extends SimpleCollector { groupReducer.setNextReader(readerContext); groupSelector.setNextReader(readerContext); } - } diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/TermGroupFacetCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/TermGroupFacetCollector.java index 39d28a57996..96c948db872 100644 --- 
a/lucene/grouping/src/java/org/apache/lucene/search/grouping/TermGroupFacetCollector.java +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/TermGroupFacetCollector.java @@ -19,7 +19,6 @@ package org.apache.lucene.search.grouping; import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.lucene.index.DocValues; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedDocValues; @@ -31,8 +30,8 @@ import org.apache.lucene.util.SentinelIntSet; import org.apache.lucene.util.UnicodeUtil; /** - * An implementation of {@link GroupFacetCollector} that computes grouped facets based on the indexed terms - * from DocValues. + * An implementation of {@link GroupFacetCollector} that computes grouped facets based on the + * indexed terms from DocValues. * * @lucene.experimental */ @@ -44,23 +43,24 @@ public abstract class TermGroupFacetCollector extends GroupFacetCollector { SortedDocValues groupFieldTermsIndex; /** - * Factory method for creating the right implementation based on the fact whether the facet field contains - * multiple tokens per documents. + * Factory method for creating the right implementation based on the fact whether the facet field + * contains multiple tokens per documents. * * @param groupField The group field * @param facetField The facet field * @param facetFieldMultivalued Whether the facet field has multiple tokens per document * @param facetPrefix The facet prefix a facet entry should start with to be included. - * @param initialSize The initial allocation size of the internal int set and group facet list which should roughly - * match the total number of expected unique groups. Be aware that the heap usage is - * 4 bytes * initialSize. + * @param initialSize The initial allocation size of the internal int set and group facet list + * which should roughly match the total number of expected unique groups. Be aware that the + * heap usage is 4 bytes * initialSize. 
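
// Illustrative sketch (not part of this patch): wiring the factory method
// documented above into a search. The group/facet field names and sizes are
// hypothetical; the facet field is assumed to be multi-valued.
import java.io.IOException;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.grouping.TermGroupFacetCollector;

class TermGroupFacetSketch {
  static TermGroupFacetCollector collect(IndexSearcher searcher) throws IOException {
    TermGroupFacetCollector collector =
        TermGroupFacetCollector.createTermGroupFacetCollector(
            "hotel" /* groupField */,
            "amenity" /* facetField */,
            true /* facetFieldMultivalued */,
            null /* facetPrefix */,
            128 /* initialSize */);
    searcher.search(new MatchAllDocsQuery(), collector); // gathers per-segment counts
    return collector; // call mergeSegmentResults(...) next for the global counts
  }
}
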
* @return TermGroupFacetCollector implementation */ - public static TermGroupFacetCollector createTermGroupFacetCollector(String groupField, - String facetField, - boolean facetFieldMultivalued, - BytesRef facetPrefix, - int initialSize) { + public static TermGroupFacetCollector createTermGroupFacetCollector( + String groupField, + String facetField, + boolean facetFieldMultivalued, + BytesRef facetPrefix, + int initialSize) { if (facetFieldMultivalued) { return new MV(groupField, facetField, facetPrefix, initialSize); } else { @@ -68,7 +68,8 @@ public abstract class TermGroupFacetCollector extends GroupFacetCollector { } } - TermGroupFacetCollector(String groupField, String facetField, BytesRef facetPrefix, int initialSize) { + TermGroupFacetCollector( + String groupField, String facetField, BytesRef facetPrefix, int initialSize) { super(groupField, facetField, facetPrefix); groupedFacetHits = new ArrayList<>(initialSize); segmentGroupedFacetHits = new SentinelIntSet(initialSize, Integer.MIN_VALUE); @@ -95,7 +96,7 @@ public abstract class TermGroupFacetCollector extends GroupFacetCollector { } else { facetOrd = -1; } - + if (facetOrd < startFacetOrd || facetOrd >= endFacetOrd) { return; } @@ -110,13 +111,14 @@ public abstract class TermGroupFacetCollector extends GroupFacetCollector { } else { groupOrd = -1; } - int segmentGroupedFacetsIndex = groupOrd * (facetFieldTermsIndex.getValueCount()+1) + facetOrd; + int segmentGroupedFacetsIndex = + groupOrd * (facetFieldTermsIndex.getValueCount() + 1) + facetOrd; if (segmentGroupedFacetHits.exists(segmentGroupedFacetsIndex)) { return; } segmentTotalCount++; - segmentFacetCounts[facetOrd+1]++; + segmentFacetCounts[facetOrd + 1]++; segmentGroupedFacetHits.put(segmentGroupedFacetsIndex); @@ -147,22 +149,29 @@ public abstract class TermGroupFacetCollector extends GroupFacetCollector { facetFieldTermsIndex = DocValues.getSorted(context.reader(), facetField); // 1+ to allow for the -1 "not set": - segmentFacetCounts = new int[facetFieldTermsIndex.getValueCount()+1]; + segmentFacetCounts = new int[facetFieldTermsIndex.getValueCount() + 1]; segmentTotalCount = 0; segmentGroupedFacetHits.clear(); for (GroupedFacetHit groupedFacetHit : groupedFacetHits) { - int facetOrd = groupedFacetHit.facetValue == null ? -1 : facetFieldTermsIndex.lookupTerm(groupedFacetHit.facetValue); + int facetOrd = + groupedFacetHit.facetValue == null + ? -1 + : facetFieldTermsIndex.lookupTerm(groupedFacetHit.facetValue); if (groupedFacetHit.facetValue != null && facetOrd < 0) { continue; } - int groupOrd = groupedFacetHit.groupValue == null ? -1 : groupFieldTermsIndex.lookupTerm(groupedFacetHit.groupValue); + int groupOrd = + groupedFacetHit.groupValue == null + ? 
-1 + : groupFieldTermsIndex.lookupTerm(groupedFacetHit.groupValue); if (groupedFacetHit.groupValue != null && groupOrd < 0) { continue; } - int segmentGroupedFacetsIndex = groupOrd * (facetFieldTermsIndex.getValueCount()+1) + facetOrd; + int segmentGroupedFacetsIndex = + groupOrd * (facetFieldTermsIndex.getValueCount() + 1) + facetOrd; segmentGroupedFacetHits.put(segmentGroupedFacetsIndex); } @@ -186,17 +195,23 @@ public abstract class TermGroupFacetCollector extends GroupFacetCollector { @Override protected SegmentResult createSegmentResult() throws IOException { - return new SegmentResult(segmentFacetCounts, segmentTotalCount, facetFieldTermsIndex.termsEnum(), startFacetOrd, endFacetOrd); + return new SegmentResult( + segmentFacetCounts, + segmentTotalCount, + facetFieldTermsIndex.termsEnum(), + startFacetOrd, + endFacetOrd); } private static class SegmentResult extends GroupFacetCollector.SegmentResult { final TermsEnum tenum; - SegmentResult(int[] counts, int total, TermsEnum tenum, int startFacetOrd, int endFacetOrd) throws IOException { - super(counts, total - counts[0], counts[0], endFacetOrd+1); + SegmentResult(int[] counts, int total, TermsEnum tenum, int startFacetOrd, int endFacetOrd) + throws IOException { + super(counts, total - counts[0], counts[0], endFacetOrd + 1); this.tenum = tenum; - this.mergePos = startFacetOrd == -1 ? 1 : startFacetOrd+1; + this.mergePos = startFacetOrd == -1 ? 1 : startFacetOrd + 1; if (mergePos < maxTermPos) { assert tenum != null; tenum.seekExact(startFacetOrd == -1 ? 0 : startFacetOrd); @@ -234,7 +249,7 @@ public abstract class TermGroupFacetCollector extends GroupFacetCollector { } else { groupOrd = -1; } - + if (facetFieldNumTerms == 0) { int segmentGroupedFacetsIndex = groupOrd * (facetFieldNumTerms + 1); if (facetPrefix != null || segmentGroupedFacetHits.exists(segmentGroupedFacetsIndex)) { @@ -266,12 +281,14 @@ public abstract class TermGroupFacetCollector extends GroupFacetCollector { empty = false; } } - + if (empty) { - process(groupOrd, facetFieldNumTerms); // this facet ord is reserved for docs not containing facet field. + process( + groupOrd, + facetFieldNumTerms); // this facet ord is reserved for docs not containing facet field. } } - + private void process(int groupOrd, int facetOrd) throws IOException { if (facetOrd < startFacetOrd || facetOrd >= endFacetOrd) { return; @@ -317,20 +334,25 @@ public abstract class TermGroupFacetCollector extends GroupFacetCollector { } else { facetOrdTermsEnum = facetFieldDocTermOrds.termsEnum(); } - // [facetFieldNumTerms() + 1] for all possible facet values and docs not containing facet field + // [facetFieldNumTerms() + 1] for all possible facet values and docs not containing facet + // field segmentFacetCounts = new int[facetFieldNumTerms + 1]; segmentTotalCount = 0; segmentGroupedFacetHits.clear(); for (GroupedFacetHit groupedFacetHit : groupedFacetHits) { - int groupOrd = groupedFacetHit.groupValue == null ? -1 : groupFieldTermsIndex.lookupTerm(groupedFacetHit.groupValue); + int groupOrd = + groupedFacetHit.groupValue == null + ? 
-1 + : groupFieldTermsIndex.lookupTerm(groupedFacetHit.groupValue); if (groupedFacetHit.groupValue != null && groupOrd < 0) { continue; } int facetOrd; if (groupedFacetHit.facetValue != null) { - if (facetOrdTermsEnum == null || !facetOrdTermsEnum.seekExact(groupedFacetHit.facetValue)) { + if (facetOrdTermsEnum == null + || !facetOrdTermsEnum.seekExact(groupedFacetHit.facetValue)) { continue; } facetOrd = (int) facetOrdTermsEnum.ord(); @@ -338,7 +360,8 @@ public abstract class TermGroupFacetCollector extends GroupFacetCollector { facetOrd = facetFieldNumTerms; } - // (facetFieldDocTermOrds.numTerms() + 1) for all possible facet values and docs not containing facet field + // (facetFieldDocTermOrds.numTerms() + 1) for all possible facet values and docs not + // containing facet field int segmentGroupedFacetsIndex = groupOrd * (facetFieldNumTerms + 1) + facetOrd; segmentGroupedFacetHits.put(segmentGroupedFacetsIndex); } @@ -376,16 +399,32 @@ public abstract class TermGroupFacetCollector extends GroupFacetCollector { @Override protected SegmentResult createSegmentResult() throws IOException { - return new SegmentResult(segmentFacetCounts, segmentTotalCount, facetFieldNumTerms, facetOrdTermsEnum, startFacetOrd, endFacetOrd); + return new SegmentResult( + segmentFacetCounts, + segmentTotalCount, + facetFieldNumTerms, + facetOrdTermsEnum, + startFacetOrd, + endFacetOrd); } private static class SegmentResult extends GroupFacetCollector.SegmentResult { final TermsEnum tenum; - SegmentResult(int[] counts, int total, int missingCountIndex, TermsEnum tenum, int startFacetOrd, int endFacetOrd) throws IOException { - super(counts, total - counts[missingCountIndex], counts[missingCountIndex], - endFacetOrd == missingCountIndex + 1 ? missingCountIndex : endFacetOrd); + SegmentResult( + int[] counts, + int total, + int missingCountIndex, + TermsEnum tenum, + int startFacetOrd, + int endFacetOrd) + throws IOException { + super( + counts, + total - counts[missingCountIndex], + counts[missingCountIndex], + endFacetOrd == missingCountIndex + 1 ? 
missingCountIndex : endFacetOrd); this.tenum = tenum; this.mergePos = startFacetOrd; if (tenum != null) { diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/TermGroupSelector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/TermGroupSelector.java index 65213b2dc9a..802776eb8b1 100644 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/TermGroupSelector.java +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/TermGroupSelector.java @@ -21,7 +21,6 @@ import java.io.IOException; import java.util.Collection; import java.util.HashMap; import java.util.Map; - import org.apache.lucene.index.DocValues; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedDocValues; @@ -29,9 +28,7 @@ import org.apache.lucene.search.Scorable; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefHash; -/** - * A GroupSelector implementation that groups via SortedDocValues - */ +/** A GroupSelector implementation that groups via SortedDocValues */ public class TermGroupSelector extends GroupSelector { private final String field; @@ -46,6 +43,7 @@ public class TermGroupSelector extends GroupSelector { /** * Create a new TermGroupSelector + * * @param field the SortedDocValues field to use for grouping */ public TermGroupSelector(String field) { @@ -60,13 +58,14 @@ public class TermGroupSelector extends GroupSelector { for (int i = 0; i < values.size(); i++) { values.get(i, scratch); int ord = this.docValues.lookupTerm(scratch); - if (ord >= 0) + if (ord >= 0) { ordsToGroupIds.put(ord, i); + } } } @Override - public void setScorer(Scorable scorer) throws IOException { } + public void setScorer(Scorable scorer) throws IOException {} @Override public State advanceTo(int doc) throws IOException { @@ -79,8 +78,9 @@ public class TermGroupSelector extends GroupSelector { groupId = ordsToGroupIds.get(ord); return State.ACCEPT; } - if (secondPass) + if (secondPass) { return State.SKIP; + } groupId = values.add(docValues.binaryValue()); ordsToGroupIds.put(ord, groupId); return State.ACCEPT; @@ -90,16 +90,18 @@ public class TermGroupSelector extends GroupSelector { @Override public BytesRef currentValue() { - if (groupId == -1) + if (groupId == -1) { return null; + } values.get(groupId, scratch); return scratch; } @Override public BytesRef copyValue() { - if (groupId == -1) + if (groupId == -1) { return null; + } return BytesRef.deepCopyOf(currentValue()); } @@ -108,10 +110,11 @@ public class TermGroupSelector extends GroupSelector { this.values.clear(); this.values.reinit(); for (SearchGroup sg : searchGroups) { - if (sg.groupValue == null) + if (sg.groupValue == null) { includeEmpty = true; - else + } else { this.values.add(sg.groupValue); + } } this.secondPass = true; } diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/TopGroups.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/TopGroups.java index b14e6753629..3ceb388ca80 100644 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/TopGroups.java +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/TopGroups.java @@ -24,9 +24,11 @@ import org.apache.lucene.search.TopFieldDocs; import org.apache.lucene.search.TotalHits; import org.apache.lucene.search.TotalHits.Relation; -/** Represents result returned by a grouping search. +/** + * Represents result returned by a grouping search. 
* - * @lucene.experimental */ + * @lucene.experimental + */ public class TopGroups { /** Number of documents matching the search */ public final int totalHitCount; @@ -46,11 +48,16 @@ public class TopGroups { /** How docs are sorted within each group */ public final SortField[] withinGroupSort; - /** Highest score across all hits, or - * Float.NaN if scores were not computed. */ + /** Highest score across all hits, or Float.NaN if scores were not computed. */ public final float maxScore; - public TopGroups(SortField[] groupSort, SortField[] withinGroupSort, int totalHitCount, int totalGroupedHitCount, GroupDocs[] groups, float maxScore) { + public TopGroups( + SortField[] groupSort, + SortField[] withinGroupSort, + int totalHitCount, + int totalGroupedHitCount, + GroupDocs[] groups, + float maxScore) { this.groupSort = groupSort; this.withinGroupSort = withinGroupSort; this.totalHitCount = totalHitCount; @@ -73,7 +80,7 @@ public class TopGroups { /** How the GroupDocs score (if any) should be merged. */ public enum ScoreMergeMode { /** Set score to Float.NaN */ - None, + None, /* Sum score across all shards for this group. */ Total, /* Avg score across all shards for this group. */ @@ -81,8 +88,9 @@ public class TopGroups { } /** - * If either value is NaN then return the other value, otherwise - * return the greater of the two values by calling Math.max. + * If either value is NaN then return the other value, otherwise return the greater of the two + * values by calling Math.max. + * * @param a - one value * @param b - another value * @return ignoring any NaN return the greater of a and b @@ -93,26 +101,27 @@ public class TopGroups { return Math.max(a, b); } - /** Merges an array of TopGroups, for example obtained - * from the second-pass collector across multiple - * shards. Each TopGroups must have been sorted by the - * same groupSort and docSort, and the top groups passed - * to all second-pass collectors must be the same. + /** + * Merges an array of TopGroups, for example obtained from the second-pass collector across + * multiple shards. Each TopGroups must have been sorted by the same groupSort and docSort, and + * the top groups passed to all second-pass collectors must be the same. * - * NOTE: We can't always compute an exact totalGroupCount. - * Documents belonging to a group may occur on more than - * one shard and thus the merged totalGroupCount can be - * higher than the actual totalGroupCount. In this case the - * totalGroupCount represents a upper bound. If the documents - * of one group do only reside in one shard then the - * totalGroupCount is exact. + *
<p>
NOTE: We can't always compute an exact totalGroupCount. Documents belonging to a
+ * group may occur on more than one shard and thus the merged totalGroupCount can be higher than
+ * the actual totalGroupCount. In this case the totalGroupCount represents an upper bound. If the
+ * documents of one group reside in only one shard then the totalGroupCount is exact.
*
- * NOTE: the topDocs in each GroupDocs is actually
- * an instance of TopDocsAndShards
+ *
<p>
NOTE: the topDocs in each GroupDocs is actually an instance of TopDocsAndShards */ - public static TopGroups merge(TopGroups[] shardGroups, Sort groupSort, Sort docSort, int docOffset, int docTopN, ScoreMergeMode scoreMergeMode) { + public static TopGroups merge( + TopGroups[] shardGroups, + Sort groupSort, + Sort docSort, + int docOffset, + int docTopN, + ScoreMergeMode scoreMergeMode) { - //System.out.println("TopGroups.merge"); + // System.out.println("TopGroups.merge"); if (shardGroups.length == 0) { return null; @@ -124,9 +133,10 @@ public class TopGroups { Integer totalGroupCount = null; final int numGroups = shardGroups[0].groups.length; - for(TopGroups shard : shardGroups) { + for (TopGroups shard : shardGroups) { if (numGroups != shard.groups.length) { - throw new IllegalArgumentException("number of groups differs across shards; you must pass same top groups to all shards' second-pass collector"); + throw new IllegalArgumentException( + "number of groups differs across shards; you must pass same top groups to all shards' second-pass collector"); } totalHitCount += shard.totalHitCount; totalGroupedHitCount += shard.totalGroupedHitCount; @@ -139,7 +149,7 @@ public class TopGroups { } } - @SuppressWarnings({"unchecked","rawtypes"}) + @SuppressWarnings({"unchecked", "rawtypes"}) final GroupDocs[] mergedGroupDocs = new GroupDocs[numGroups]; final TopDocs[] shardTopDocs; @@ -150,22 +160,25 @@ public class TopGroups { } float totalMaxScore = Float.NaN; - for(int groupIDX=0;groupIDX { */ if (docSort.equals(Sort.RELEVANCE)) { - shardTopDocs[shardIDX] = new TopDocs(shardGroupDocs.totalHits, - shardGroupDocs.scoreDocs); + shardTopDocs[shardIDX] = new TopDocs(shardGroupDocs.totalHits, shardGroupDocs.scoreDocs); } else { - shardTopDocs[shardIDX] = new TopFieldDocs(shardGroupDocs.totalHits, - shardGroupDocs.scoreDocs, - docSort.getSort()); + shardTopDocs[shardIDX] = + new TopFieldDocs( + shardGroupDocs.totalHits, shardGroupDocs.scoreDocs, docSort.getSort()); } for (int i = 0; i < shardTopDocs[shardIDX].scoreDocs.length; i++) { shardTopDocs[shardIDX].scoreDocs[i].shardIndex = shardIDX; } - maxScore = nonNANmax(maxScore, shardGroupDocs.maxScore); + maxScore = nonNANmax(maxScore, shardGroupDocs.maxScore); assert shardGroupDocs.totalHits.relation == Relation.EQUAL_TO; totalHits += shardGroupDocs.totalHits.value; scoreSum += shardGroupDocs.score; @@ -208,57 +220,63 @@ public class TopGroups { mergedScoreDocs = new ScoreDoc[0]; } else { mergedScoreDocs = new ScoreDoc[mergedTopDocs.scoreDocs.length - docOffset]; - System.arraycopy(mergedTopDocs.scoreDocs, - docOffset, - mergedScoreDocs, - 0, - mergedTopDocs.scoreDocs.length - docOffset); + System.arraycopy( + mergedTopDocs.scoreDocs, + docOffset, + mergedScoreDocs, + 0, + mergedTopDocs.scoreDocs.length - docOffset); } final float groupScore; - switch(scoreMergeMode) { - case None: - groupScore = Float.NaN; - break; - case Avg: - if (totalHits > 0) { - groupScore = (float) (scoreSum / totalHits); - } else { + switch (scoreMergeMode) { + case None: groupScore = Float.NaN; - } - break; - case Total: - groupScore = (float) scoreSum; - break; - default: - throw new IllegalArgumentException("can't handle ScoreMergeMode " + scoreMergeMode); + break; + case Avg: + if (totalHits > 0) { + groupScore = (float) (scoreSum / totalHits); + } else { + groupScore = Float.NaN; + } + break; + case Total: + groupScore = (float) scoreSum; + break; + default: + throw new IllegalArgumentException("can't handle ScoreMergeMode " + scoreMergeMode); } - - 
//System.out.println("SHARDS=" + Arrays.toString(mergedTopDocs.shardIndex)); - mergedGroupDocs[groupIDX] = new GroupDocs<>(groupScore, - maxScore, - new TotalHits(totalHits, TotalHits.Relation.EQUAL_TO), - mergedScoreDocs, - groupValue, - shardGroups[0].groups[groupIDX].groupSortValues); + + // System.out.println("SHARDS=" + Arrays.toString(mergedTopDocs.shardIndex)); + mergedGroupDocs[groupIDX] = + new GroupDocs<>( + groupScore, + maxScore, + new TotalHits(totalHits, TotalHits.Relation.EQUAL_TO), + mergedScoreDocs, + groupValue, + shardGroups[0].groups[groupIDX].groupSortValues); totalMaxScore = nonNANmax(totalMaxScore, maxScore); } if (totalGroupCount != null) { - TopGroups result = new TopGroups<>(groupSort.getSort(), - docSort.getSort(), - totalHitCount, - totalGroupedHitCount, - mergedGroupDocs, - totalMaxScore); + TopGroups result = + new TopGroups<>( + groupSort.getSort(), + docSort.getSort(), + totalHitCount, + totalGroupedHitCount, + mergedGroupDocs, + totalMaxScore); return new TopGroups<>(result, totalGroupCount); } else { - return new TopGroups<>(groupSort.getSort(), - docSort.getSort(), - totalHitCount, - totalGroupedHitCount, - mergedGroupDocs, - totalMaxScore); + return new TopGroups<>( + groupSort.getSort(), + docSort.getSort(), + totalHitCount, + totalGroupedHitCount, + mergedGroupDocs, + totalMaxScore); } } } diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/TopGroupsCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/TopGroupsCollector.java index 01e99282225..b3cbb25005c 100644 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/TopGroupsCollector.java +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/TopGroupsCollector.java @@ -21,7 +21,6 @@ import java.io.IOException; import java.util.Collection; import java.util.Objects; import java.util.function.Supplier; - import org.apache.lucene.search.FilterCollector; import org.apache.lucene.search.MultiCollector; import org.apache.lucene.search.Scorable; @@ -36,8 +35,8 @@ import org.apache.lucene.search.TopScoreDocCollector; import org.apache.lucene.util.ArrayUtil; /** - * A second-pass collector that collects the TopDocs for each group, and - * returns them as a {@link TopGroups} object + * A second-pass collector that collects the TopDocs for each group, and returns them as a {@link + * TopGroups} object * * @param the type of the group value */ @@ -49,21 +48,28 @@ public class TopGroupsCollector extends SecondPassGroupingCollector { /** * Create a new TopGroupsCollector - * @param groupSelector the group selector used to define groups - * @param groups the groups to collect TopDocs for - * @param groupSort the order in which groups are returned - * @param withinGroupSort the order in which documents are sorted in each group - * @param maxDocsPerGroup the maximum number of docs to collect for each group - * @param getMaxScores if true, record the maximum score for each group + * + * @param groupSelector the group selector used to define groups + * @param groups the groups to collect TopDocs for + * @param groupSort the order in which groups are returned + * @param withinGroupSort the order in which documents are sorted in each group + * @param maxDocsPerGroup the maximum number of docs to collect for each group + * @param getMaxScores if true, record the maximum score for each group */ - public TopGroupsCollector(GroupSelector groupSelector, Collection> groups, Sort groupSort, Sort withinGroupSort, - int maxDocsPerGroup, boolean getMaxScores) { - 
super(groupSelector, groups, + public TopGroupsCollector( + GroupSelector groupSelector, + Collection> groups, + Sort groupSort, + Sort withinGroupSort, + int maxDocsPerGroup, + boolean getMaxScores) { + super( + groupSelector, + groups, new TopDocsReducer<>(withinGroupSort, maxDocsPerGroup, getMaxScores)); this.groupSort = Objects.requireNonNull(groupSort); this.withinGroupSort = Objects.requireNonNull(withinGroupSort); this.maxDocsPerGroup = maxDocsPerGroup; - } private static class MaxScoreCollector extends SimpleCollector { @@ -98,8 +104,11 @@ public class TopGroupsCollector extends SecondPassGroupingCollector { private final TopDocsCollector topDocsCollector; private final MaxScoreCollector maxScoreCollector; private final boolean sortedByScore; - - public TopDocsAndMaxScoreCollector(boolean sortedByScore, TopDocsCollector topDocsCollector, MaxScoreCollector maxScoreCollector) { + + public TopDocsAndMaxScoreCollector( + boolean sortedByScore, + TopDocsCollector topDocsCollector, + MaxScoreCollector maxScoreCollector) { super(MultiCollector.wrap(topDocsCollector, maxScoreCollector)); this.sortedByScore = sortedByScore; this.topDocsCollector = topDocsCollector; @@ -112,17 +121,24 @@ public class TopGroupsCollector extends SecondPassGroupingCollector { private final Supplier supplier; private final boolean needsScores; - TopDocsReducer(Sort withinGroupSort, - int maxDocsPerGroup, boolean getMaxScores) { + TopDocsReducer(Sort withinGroupSort, int maxDocsPerGroup, boolean getMaxScores) { this.needsScores = getMaxScores || withinGroupSort.needsScores(); if (withinGroupSort == Sort.RELEVANCE) { - supplier = () -> new TopDocsAndMaxScoreCollector(true, TopScoreDocCollector.create(maxDocsPerGroup, Integer.MAX_VALUE), null); + supplier = + () -> + new TopDocsAndMaxScoreCollector( + true, TopScoreDocCollector.create(maxDocsPerGroup, Integer.MAX_VALUE), null); } else { - supplier = () -> { - TopFieldCollector topDocsCollector = TopFieldCollector.create(withinGroupSort, maxDocsPerGroup, Integer.MAX_VALUE); // TODO: disable exact counts? - MaxScoreCollector maxScoreCollector = getMaxScores ? new MaxScoreCollector() : null; - return new TopDocsAndMaxScoreCollector(false, topDocsCollector, maxScoreCollector); - }; + supplier = + () -> { + TopFieldCollector topDocsCollector = + TopFieldCollector.create( + withinGroupSort, + maxDocsPerGroup, + Integer.MAX_VALUE); // TODO: disable exact counts? + MaxScoreCollector maxScoreCollector = getMaxScores ? 
new MaxScoreCollector() : null; + return new TopDocsAndMaxScoreCollector(false, topDocsCollector, maxScoreCollector); + }; } } @@ -139,25 +155,34 @@ public class TopGroupsCollector extends SecondPassGroupingCollector { /** * Get the TopGroups recorded by this collector + * * @param withinGroupOffset the offset within each group to start collecting documents */ public TopGroups getTopGroups(int withinGroupOffset) { - @SuppressWarnings({"unchecked","rawtypes"}) + @SuppressWarnings({"unchecked", "rawtypes"}) final GroupDocs[] groupDocsResult = (GroupDocs[]) new GroupDocs[groups.size()]; int groupIDX = 0; float maxScore = Float.MIN_VALUE; - for(SearchGroup group : groups) { - TopDocsAndMaxScoreCollector collector = (TopDocsAndMaxScoreCollector) groupReducer.getCollector(group.groupValue); + for (SearchGroup group : groups) { + TopDocsAndMaxScoreCollector collector = + (TopDocsAndMaxScoreCollector) groupReducer.getCollector(group.groupValue); final TopDocs topDocs; final float groupMaxScore; if (collector.sortedByScore) { TopDocs allTopDocs = collector.topDocsCollector.topDocs(); - groupMaxScore = allTopDocs.scoreDocs.length == 0 ? Float.NaN : allTopDocs.scoreDocs[0].score; + groupMaxScore = + allTopDocs.scoreDocs.length == 0 ? Float.NaN : allTopDocs.scoreDocs[0].score; if (allTopDocs.scoreDocs.length <= withinGroupOffset) { topDocs = new TopDocs(allTopDocs.totalHits, new ScoreDoc[0]); } else { - topDocs = new TopDocs(allTopDocs.totalHits, ArrayUtil.copyOfSubArray(allTopDocs.scoreDocs, withinGroupOffset, Math.min(allTopDocs.scoreDocs.length, withinGroupOffset + maxDocsPerGroup))); + topDocs = + new TopDocs( + allTopDocs.totalHits, + ArrayUtil.copyOfSubArray( + allTopDocs.scoreDocs, + withinGroupOffset, + Math.min(allTopDocs.scoreDocs.length, withinGroupOffset + maxDocsPerGroup))); } } else { topDocs = collector.topDocsCollector.topDocs(withinGroupOffset, maxDocsPerGroup); @@ -167,21 +192,24 @@ public class TopGroupsCollector extends SecondPassGroupingCollector { groupMaxScore = collector.maxScoreCollector.getMaxScore(); } } - - groupDocsResult[groupIDX++] = new GroupDocs<>(Float.NaN, - groupMaxScore, - topDocs.totalHits, - topDocs.scoreDocs, - group.groupValue, - group.sortValues); + + groupDocsResult[groupIDX++] = + new GroupDocs<>( + Float.NaN, + groupMaxScore, + topDocs.totalHits, + topDocs.scoreDocs, + group.groupValue, + group.sortValues); maxScore = Math.max(maxScore, groupMaxScore); } - return new TopGroups<>(groupSort.getSort(), + return new TopGroups<>( + groupSort.getSort(), withinGroupSort.getSort(), - totalHitCount, totalGroupedHitCount, groupDocsResult, + totalHitCount, + totalGroupedHitCount, + groupDocsResult, maxScore); } - - } diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/ValueSourceGroupSelector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/ValueSourceGroupSelector.java index 54373de8be2..63e95d8bccb 100644 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/ValueSourceGroupSelector.java +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/ValueSourceGroupSelector.java @@ -22,16 +22,13 @@ import java.util.Collection; import java.util.HashSet; import java.util.Map; import java.util.Set; - import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.queries.function.FunctionValues; import org.apache.lucene.queries.function.ValueSource; import org.apache.lucene.search.Scorable; import org.apache.lucene.util.mutable.MutableValue; -/** - * A GroupSelector that groups via a ValueSource - */ +/** A 
GroupSelector that groups via a ValueSource */ public class ValueSourceGroupSelector extends GroupSelector { private final ValueSource valueSource; @@ -41,8 +38,9 @@ public class ValueSourceGroupSelector extends GroupSelector { /** * Create a new ValueSourceGroupSelector + * * @param valueSource the ValueSource to group by - * @param context a context map for the ValueSource + * @param context a context map for the ValueSource */ public ValueSourceGroupSelector(ValueSource valueSource, Map context) { this.valueSource = valueSource; @@ -58,14 +56,15 @@ public class ValueSourceGroupSelector extends GroupSelector { } @Override - public void setScorer(Scorable scorer) throws IOException { } + public void setScorer(Scorable scorer) throws IOException {} @Override public State advanceTo(int doc) throws IOException { this.filler.fillValue(doc); if (secondPassGroups != null) { - if (secondPassGroups.contains(filler.getValue()) == false) + if (secondPassGroups.contains(filler.getValue()) == false) { return State.SKIP; + } } return State.ACCEPT; } diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/package-info.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/package-info.java index 36d94a5a92e..5c1b1247e0a 100644 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/package-info.java +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/package-info.java @@ -15,143 +15,125 @@ * limitations under the License. */ -/** +/** * Grouping. - *

- * This module enables search result grouping with Lucene, where hits - * with the same value in the specified single-valued group field are - * grouped together. For example, if you group by the author - * field, then all documents with the same value in the author - * field fall into a single group. - *

- * - *

Grouping requires a number of inputs:

- * + * + *

This module enables search result grouping with Lucene, where hits with the same value in the + * specified single-valued group field are grouped together. For example, if you group by the + * author field, then all documents with the same value in the author field fall + * into a single group. + * + *

Grouping requires a number of inputs (see the sketch after this list): + * *

    - *
  • groupSelector: this defines how groups are created - * from values per-document. The grouping module ships with - * selectors for grouping by term, and by long and double ranges. - * - *
  • groupSort: how the groups are sorted. For sorting - * purposes, each group is "represented" by the highest-sorted - * document according to the groupSort within it. For - * example, if you specify "price" (ascending) then the first group - * is the one with the lowest price book within it. Or if you - * specify relevance group sort, then the first group is the one - * containing the highest scoring book. - * - *
  • topNGroups: how many top groups to keep. For - * example, 10 means the top 10 groups are computed. - * - *
  • groupOffset: which "slice" of top groups you want to - * retrieve. For example, 3 means you'll get 7 groups back - * (assuming topNGroups is 10). This is useful for + *
  • groupSelector: this defines how groups are created from values per-document. + * The grouping module ships with selectors for grouping by term, and by long and double + * ranges. + *
  • groupSort: how the groups are sorted. For sorting purposes, each group is + * "represented" by the highest-sorted document according to the groupSort within + * it. For example, if you specify "price" (ascending) then the first group is the one with + * the lowest price book within it. Or if you specify relevance group sort, then the first + * group is the one containing the highest scoring book. + *
  • topNGroups: how many top groups to keep. For example, 10 means the top 10 + * groups are computed. + *
  • groupOffset: which "slice" of top groups you want to retrieve. For example, 3 + * means you'll get 7 groups back (assuming topNGroups is 10). This is useful for * paging, where you might show 5 groups per page. - * - *
  • withinGroupSort: how the documents within each group - * are sorted. This can be different from the group sort. - * - *
  • maxDocsPerGroup: how many top documents within each - * group to keep. - * - *
  • withinGroupOffset: which "slice" of top - * documents you want to retrieve from each group. - * + *
  • withinGroupSort: how the documents within each group are sorted. This can be + * different from the group sort. + *
  • maxDocsPerGroup: how many top documents within each group to keep. + *
  • withinGroupOffset: which "slice" of top documents you want to retrieve from + * each group. *
- * + * *

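A minimal sketch of wiring these inputs together through the GroupingSearch convenience API; the "author" field and the limit values are illustrative, and the setter names are assumed from this module's GroupingSearch:

    import java.io.IOException;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.Sort;
    import org.apache.lucene.search.grouping.GroupingSearch;
    import org.apache.lucene.search.grouping.TopGroups;
    import org.apache.lucene.util.BytesRef;

    class GroupingInputsSketch {
      static TopGroups<BytesRef> group(
          IndexSearcher searcher, Query query, Sort groupSort, Sort withinGroupSort)
          throws IOException {
        GroupingSearch gs = new GroupingSearch("author"); // groupSelector: group by term
        gs.setGroupSort(groupSort);             // groupSort
        gs.setSortWithinGroup(withinGroupSort); // withinGroupSort
        gs.setGroupDocsLimit(5);                // maxDocsPerGroup
        gs.setGroupDocsOffset(0);               // withinGroupOffset
        return gs.search(searcher, query, 0, 10); // groupOffset = 0, topNGroups = 10
      }
    }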
The implementation is two-pass: the first pass ({@link - * org.apache.lucene.search.grouping.FirstPassGroupingCollector}) - * gathers the top groups, and the second pass ({@link - * org.apache.lucene.search.grouping.SecondPassGroupingCollector}) - * gathers documents within those groups. If the search is costly to - * run you may want to use the {@link - * org.apache.lucene.search.CachingCollector} class, which - * caches hits and can (quickly) replay them for the second pass. This - * way you only run the query once, but you pay a RAM cost to (briefly) - * hold all hits. Results are returned as a {@link - * org.apache.lucene.search.grouping.TopGroups} instance.

- * - *

Groups are defined by {@link org.apache.lucene.search.grouping.GroupSelector} - * implementations:

- *
    - *
  • {@link org.apache.lucene.search.grouping.TermGroupSelector} groups based on - * the value of a {@link org.apache.lucene.index.SortedDocValues} field
- *
  • {@link org.apache.lucene.search.grouping.ValueSourceGroupSelector} groups based on - * the value of a {@link org.apache.lucene.queries.function.ValueSource}
- *
  • {@link org.apache.lucene.search.grouping.DoubleRangeGroupSelector} groups based on - * the value of a {@link org.apache.lucene.search.DoubleValuesSource}
- *
  • {@link org.apache.lucene.search.grouping.LongRangeGroupSelector} groups based on - * the value of a {@link org.apache.lucene.search.LongValuesSource}
- *
- * - *

Known limitations:

+ * org.apache.lucene.search.grouping.FirstPassGroupingCollector}) gathers the top groups, and the + * second pass ({@link org.apache.lucene.search.grouping.SecondPassGroupingCollector}) gathers + * documents within those groups. If the search is costly to run you may want to use the {@link + * org.apache.lucene.search.CachingCollector} class, which caches hits and can (quickly) replay them + * for the second pass. This way you only run the query once, but you pay a RAM cost to (briefly) + * hold all hits. Results are returned as a {@link org.apache.lucene.search.grouping.TopGroups} + * instance. + * + *

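A minimal sketch of this two-pass flow with caching, assuming grouping by a term field named "author"; the sizes and variable names are illustrative:

    import java.io.IOException;
    import java.util.Collection;
    import org.apache.lucene.search.CachingCollector;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.Sort;
    import org.apache.lucene.search.grouping.FirstPassGroupingCollector;
    import org.apache.lucene.search.grouping.SearchGroup;
    import org.apache.lucene.search.grouping.TermGroupSelector;
    import org.apache.lucene.search.grouping.TopGroups;
    import org.apache.lucene.search.grouping.TopGroupsCollector;
    import org.apache.lucene.util.BytesRef;

    class TwoPassSketch {
      static TopGroups<BytesRef> search(IndexSearcher searcher, Query query, Sort groupSort)
          throws IOException {
        // First pass: find the top 10 groups, caching hits (and scores, up to 64MB).
        FirstPassGroupingCollector<BytesRef> first =
            new FirstPassGroupingCollector<>(new TermGroupSelector("author"), groupSort, 10);
        CachingCollector cached = CachingCollector.create(first, true, 64.0);
        searcher.search(query, cached);
        Collection<SearchGroup<BytesRef>> topGroups = first.getTopGroups(0);
        if (topGroups == null) {
          return null; // no matching groups
        }
        // Second pass: collect the top 5 docs of each selected group, by relevance.
        TopGroupsCollector<BytesRef> second =
            new TopGroupsCollector<>(
                new TermGroupSelector("author"), topGroups, groupSort, Sort.RELEVANCE, 5, true);
        if (cached.isCached()) {
          cached.replay(second); // replay cached hits instead of re-running the query
        } else {
          searcher.search(query, second); // cache overflowed; run the query again
        }
        return second.getTopGroups(0);
      }
    }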
Groups are defined by {@link org.apache.lucene.search.grouping.GroupSelector} implementations (see the example after this list): + * *

    - *
  • Sharding is not directly supported, though is not too - * difficult, if you can merge the top groups and top documents per - * group yourself. + *
  • {@link org.apache.lucene.search.grouping.TermGroupSelector} groups based on the value of a + * {@link org.apache.lucene.index.SortedDocValues} field + *
  • {@link org.apache.lucene.search.grouping.ValueSourceGroupSelector} groups based on the + * value of a {@link org.apache.lucene.queries.function.ValueSource} + *
  • {@link org.apache.lucene.search.grouping.DoubleRangeGroupSelector} groups based on the + * value of a {@link org.apache.lucene.search.DoubleValuesSource} + *
  • {@link org.apache.lucene.search.grouping.LongRangeGroupSelector} groups based on the value + * of a {@link org.apache.lucene.search.LongValuesSource} *
- * - *

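As an example of choosing a selector, a minimal sketch that groups by long ranges, assuming a numeric docvalues field named "price"; the bucket bounds are illustrative:

    import org.apache.lucene.search.LongValuesSource;
    import org.apache.lucene.search.grouping.GroupingSearch;
    import org.apache.lucene.search.grouping.LongRangeFactory;
    import org.apache.lucene.search.grouping.LongRangeGroupSelector;

    class RangeSelectorSketch {
      static GroupingSearch byPriceBand() {
        // Buckets of width 100 covering [0, 1000): each document is grouped by
        // the band its "price" value falls into.
        LongRangeGroupSelector selector =
            new LongRangeGroupSelector(
                LongValuesSource.fromLongField("price"), new LongRangeFactory(0, 100, 1000));
        return new GroupingSearch(selector);
      }
    }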
Typical usage for the generic two-pass grouping search looks like this using the grouping convenience utility - * (optionally using caching for the second pass search):

- * + * + *

Known limitations: + * + *

    + *
• Sharding is not directly supported, though it is not too difficult if you can merge the top + * groups and top documents per group yourself; see the sketch below. *
+ * + *

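The merge helpers make this workable: per-shard first-pass results can be combined with SearchGroup.merge, the merged groups passed to each shard's second pass, and the per-shard TopGroups combined with TopGroups.merge (sketched earlier). A minimal sketch of the first step, where shardGroups holds each shard's FirstPassGroupingCollector output and the limits are illustrative:

    import java.util.Collection;
    import java.util.List;
    import org.apache.lucene.search.Sort;
    import org.apache.lucene.search.grouping.SearchGroup;
    import org.apache.lucene.util.BytesRef;

    class ShardFirstPassMergeSketch {
      static Collection<SearchGroup<BytesRef>> mergeFirstPass(
          List<Collection<SearchGroup<BytesRef>>> shardGroups, Sort groupSort) {
        return SearchGroup.merge(shardGroups, 0, 5, groupSort); // offset 0, top 5 groups
      }
    }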
Typical usage for the generic two-pass grouping search looks like this using the grouping + * convenience utility (optionally using caching for the second pass search): + * *

  *   GroupingSearch groupingSearch = new GroupingSearch("author");
  *   groupingSearch.setGroupSort(groupSort);
  *   groupingSearch.setFillSortFields(fillFields);
- * 
+ *
  *   if (useCache) {
  *     // Sets cache in MB
  *     groupingSearch.setCachingInMB(4.0, true);
  *   }
- * 
+ *
  *   if (requiredTotalGroupCount) {
  *     groupingSearch.setAllGroups(true);
  *   }
- * 
+ *
  *   TermQuery query = new TermQuery(new Term("content", searchTerm));
  *   TopGroups<BytesRef> result = groupingSearch.search(indexSearcher, query, groupOffset, groupLimit);
- * 
+ *
  *   // Render groupsResult...
  *   if (requiredTotalGroupCount) {
  *     int totalGroupCount = result.totalGroupCount;
  *   }
  * 
- * - *

To use the single-pass BlockGroupingCollector, - * first, at indexing time, you must ensure all docs in each group - * are added as a block, and you have some way to find the last - * document of each group. One simple way to do this is to add a - * marker binary field:

- * + * + *

To use the single-pass BlockGroupingCollector, first, at indexing time, you must + * ensure all docs in each group are added as a block, and that you have some way to find the last + * document of each group. One simple way to do this is to add a marker binary field: + *

  *   // Create Documents from your source:
  *   List<Document> oneGroup = ...;
- *   
+ *
  *   Field groupEndField = new Field("groupEnd", "x", Field.Store.NO, Field.Index.NOT_ANALYZED);
  *   groupEndField.setIndexOptions(IndexOptions.DOCS_ONLY);
  *   groupEndField.setOmitNorms(true);
  *   oneGroup.get(oneGroup.size()-1).add(groupEndField);
- * 
+ *
  *   // You can also use writer.updateDocuments(); just be sure you
  *   // replace an entire previous doc block with this new one.  For
  *   // example, each group could have a "groupID" field, with the same
  *   // value for all docs in this group:
  *   writer.addDocuments(oneGroup);
  * 
- * + * * Then, at search time: - * + * *
  *   Query groupEndDocs = new TermQuery(new Term("groupEnd", "x"));
  *   BlockGroupingCollector c = new BlockGroupingCollector(groupSort, groupOffset+topNGroups, needsScores, groupEndDocs);
  *   s.search(new TermQuery(new Term("content", searchTerm)), c);
  *   TopGroups groupsResult = c.getTopGroups(withinGroupSort, groupOffset, docOffset, docOffset+docsPerGroup, fillFields);
- * 
+ *
  *   // Render groupsResult...
  * 
- * + * * Or alternatively use the GroupingSearch convenience utility: - * + * *
  *   // Per search:
  *   GroupingSearch groupingSearch = new GroupingSearch(groupEndDocs);
@@ -162,18 +144,18 @@
  *
  *   // Render groupsResult...
  * 
- * - * Note that the groupValue of each GroupDocs - * will be null, so if you need to present this value you'll - * have to separately retrieve it (for example using stored - * fields, FieldCache, etc.). - * - *

Another collector is the AllGroupHeadsCollector that can be used to retrieve all most relevant - * documents per group. Also known as group heads. This can be useful in situations when one wants to compute group - * based facets / statistics on the complete query result. The collector can be executed during the first or second - * phase. This collector can also be used with the GroupingSearch convenience utility, but when if one only - * wants to compute the most relevant documents per group it is better to just use the collector as done here below.

- * + * + * Note that the groupValue of each GroupDocs will be null, + * so if you need to present this value you'll have to separately retrieve it (for example using + * stored fields, FieldCache, etc.). + * + *

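One way to recover a display value in that case is from a stored field of the group's top document; a minimal sketch, where the stored "book" field is illustrative:

    import java.io.IOException;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.grouping.TopGroups;

    class GroupValueLookupSketch {
      static String displayValue(IndexSearcher searcher, TopGroups<?> tg, int groupIndex)
          throws IOException {
        // The group's top-sorted document carries the stored field to display.
        return searcher.doc(tg.groups[groupIndex].scoreDocs[0].doc).get("book");
      }
    }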
Another collector is the AllGroupHeadsCollector that can be used to retrieve the + * most relevant document of each group, also known as the group heads. This can be useful in situations + * when one wants to compute group-based facets / statistics on the complete query result. The + * collector can be executed during the first or second phase. This collector can also be used with + * the GroupingSearch convenience utility, but if one only wants to compute the + * most relevant documents per group it is better to just use the collector directly, as shown below. + * *

  *   TermGroupSelector grouper = new TermGroupSelector(groupField);
  *   AllGroupHeadsCollector c = AllGroupHeadsCollector.newCollector(grouper, sortWithinGroup);
@@ -184,6 +166,5 @@
  *   int maxDoc = s.maxDoc();
  *   FixedBitSet groupHeadsBitSet = c.retrieveGroupHeads(maxDoc)
  * 
- * */ package org.apache.lucene.search.grouping; diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/AbstractGroupingTestCase.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/AbstractGroupingTestCase.java index e25666734db..0156acb395a 100644 --- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/AbstractGroupingTestCase.java +++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/AbstractGroupingTestCase.java @@ -18,7 +18,6 @@ package org.apache.lucene.search.grouping; import java.io.Closeable; import java.io.IOException; - import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; @@ -28,10 +27,9 @@ import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; -/** - * Base class for grouping related tests. - */ -// TODO (MvG) : The grouping tests contain a lot of code duplication. Try to move the common code to this class.. +/** Base class for grouping related tests. */ +// TODO (MvG) : The grouping tests contain a lot of code duplication. Try to move the common code to +// this class.. public abstract class AbstractGroupingTestCase extends LuceneTestCase { protected String generateRandomNonEmptyString() { @@ -41,7 +39,7 @@ public abstract class AbstractGroupingTestCase extends LuceneTestCase { // For that reason we don't generate empty string // groups. randomValue = TestUtil.randomRealisticUnicodeString(random()); - //randomValue = _TestUtil.randomSimpleString(random()); + // randomValue = _TestUtil.randomSimpleString(random()); } while ("".equals(randomValue)); return randomValue; } @@ -62,8 +60,11 @@ public abstract class AbstractGroupingTestCase extends LuceneTestCase { Shard() throws IOException { this.directory = newDirectory(); - this.writer = new RandomIndexWriter(random(), directory, - newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy())); + this.writer = + new RandomIndexWriter( + random(), + directory, + newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy())); } IndexSearcher getIndexSearcher() throws IOException { diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/BaseGroupSelectorTestCase.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/BaseGroupSelectorTestCase.java index bb2a946f2a4..d3e84aff69f 100644 --- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/BaseGroupSelectorTestCase.java +++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/BaseGroupSelectorTestCase.java @@ -21,7 +21,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.List; - import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.NumericDocValuesField; @@ -54,7 +53,7 @@ public abstract class BaseGroupSelectorTestCase extends AbstractGroupingTestC Shard shard = new Shard(); indexRandomDocs(shard.writer); - String[] query = new String[]{ "foo", "bar", "baz" }; + String[] query = new String[] {"foo", "bar", "baz"}; Query topLevel = new TermQuery(new Term("text", query[random().nextInt(query.length)])); IndexSearcher searcher = shard.getIndexSearcher(); @@ -65,10 +64,11 @@ public abstract class BaseGroupSelectorTestCase extends AbstractGroupingTestC for (int i = 0; i < topGroups.groups.length; i++) { // Each group should have a result set equal to that returned by the 
top-level query, // filtered by the group value. - Query filtered = new BooleanQuery.Builder() - .add(topLevel, BooleanClause.Occur.MUST) - .add(filterQuery(topGroups.groups[i].groupValue), BooleanClause.Occur.FILTER) - .build(); + Query filtered = + new BooleanQuery.Builder() + .add(topLevel, BooleanClause.Occur.MUST) + .add(filterQuery(topGroups.groups[i].groupValue), BooleanClause.Occur.FILTER) + .build(); TopDocs td = searcher.search(filtered, 10); assertScoreDocsEquals(topGroups.groups[i].scoreDocs, td.scoreDocs); if (i == 0) { @@ -86,12 +86,15 @@ public abstract class BaseGroupSelectorTestCase extends AbstractGroupingTestC indexRandomDocs(shard.writer); IndexSearcher searcher = shard.getIndexSearcher(); - String[] query = new String[]{ "foo", "bar", "baz" }; + String[] query = new String[] {"foo", "bar", "baz"}; Query topLevel = new TermQuery(new Term("text", query[random().nextInt(query.length)])); GroupingSearch grouper = new GroupingSearch(getGroupSelector()); grouper.setGroupDocsLimit(10); - Sort sort = new Sort(new SortField("sort1", SortField.Type.STRING), new SortField("sort2", SortField.Type.LONG)); + Sort sort = + new Sort( + new SortField("sort1", SortField.Type.STRING), + new SortField("sort2", SortField.Type.LONG)); grouper.setGroupSort(sort); TopGroups topGroups = grouper.search(searcher, topLevel, 0, 5); TopDocs topDoc = searcher.search(topLevel, 1, sort); @@ -99,10 +102,11 @@ public abstract class BaseGroupSelectorTestCase extends AbstractGroupingTestC // We're sorting the groups by a defined Sort, but each group itself should be ordered // by doc relevance, and should be equal to the results of a top-level query filtered // by the group value - Query filtered = new BooleanQuery.Builder() - .add(topLevel, BooleanClause.Occur.MUST) - .add(filterQuery(topGroups.groups[i].groupValue), BooleanClause.Occur.FILTER) - .build(); + Query filtered = + new BooleanQuery.Builder() + .add(topLevel, BooleanClause.Occur.MUST) + .add(filterQuery(topGroups.groups[i].groupValue), BooleanClause.Occur.FILTER) + .build(); TopDocs td = searcher.search(filtered, 10); assertScoreDocsEquals(topGroups.groups[i].scoreDocs, td.scoreDocs); // The top group should have sort values equal to the sort values of the top doc of @@ -111,7 +115,8 @@ public abstract class BaseGroupSelectorTestCase extends AbstractGroupingTestC if (i > 0) { assertSortsBefore(topGroups.groups[i - 1], topGroups.groups[i]); } else { - assertArrayEquals(((FieldDoc)topDoc.scoreDocs[0]).fields, topGroups.groups[0].groupSortValues); + assertArrayEquals( + ((FieldDoc) topDoc.scoreDocs[0]).fields, topGroups.groups[0].groupSortValues); } } @@ -124,12 +129,15 @@ public abstract class BaseGroupSelectorTestCase extends AbstractGroupingTestC indexRandomDocs(shard.writer); IndexSearcher searcher = shard.getIndexSearcher(); - String[] query = new String[]{ "foo", "bar", "baz" }; + String[] query = new String[] {"foo", "bar", "baz"}; Query topLevel = new TermQuery(new Term("text", query[random().nextInt(query.length)])); GroupingSearch grouper = new GroupingSearch(getGroupSelector()); grouper.setGroupDocsLimit(10); - Sort sort = new Sort(new SortField("sort1", SortField.Type.STRING), new SortField("sort2", SortField.Type.LONG)); + Sort sort = + new Sort( + new SortField("sort1", SortField.Type.STRING), + new SortField("sort2", SortField.Type.LONG)); grouper.setSortWithinGroup(sort); TopGroups topGroups = grouper.search(searcher, topLevel, 0, 5); @@ -146,16 +154,16 @@ public abstract class BaseGroupSelectorTestCase extends 
AbstractGroupingTestC } // Groups themselves are ordered by a defined Sort, and each should give the same result as // the top-level query, filtered by the group value, with the same Sort - Query filtered = new BooleanQuery.Builder() - .add(topLevel, BooleanClause.Occur.MUST) - .add(filterQuery(topGroups.groups[i].groupValue), BooleanClause.Occur.FILTER) - .build(); + Query filtered = + new BooleanQuery.Builder() + .add(topLevel, BooleanClause.Occur.MUST) + .add(filterQuery(topGroups.groups[i].groupValue), BooleanClause.Occur.FILTER) + .build(); TopDocs td = searcher.search(filtered, 10, sort); assertScoreDocsEquals(td.scoreDocs, topGroups.groups[i].scoreDocs); } shard.close(); - } public void testGroupHeads() throws IOException { @@ -164,7 +172,7 @@ public abstract class BaseGroupSelectorTestCase extends AbstractGroupingTestC indexRandomDocs(shard.writer); IndexSearcher searcher = shard.getIndexSearcher(); - String[] query = new String[]{ "foo", "bar", "baz" }; + String[] query = new String[] {"foo", "bar", "baz"}; Query topLevel = new TermQuery(new Term("text", query[random().nextInt(query.length)])); GroupSelector groupSelector = getGroupSelector(); @@ -180,10 +188,11 @@ public abstract class BaseGroupSelectorTestCase extends AbstractGroupingTestC int totalHits = searcher.count(topLevel); int groupHits = 0; for (T groupValue : matchingGroups) { - Query filtered = new BooleanQuery.Builder() - .add(topLevel, BooleanClause.Occur.MUST) - .add(filterQuery(groupValue), BooleanClause.Occur.FILTER) - .build(); + Query filtered = + new BooleanQuery.Builder() + .add(topLevel, BooleanClause.Occur.MUST) + .add(filterQuery(groupValue), BooleanClause.Occur.FILTER) + .build(); groupHits += searcher.count(filtered); } assertEquals(totalHits, groupHits); @@ -195,15 +204,17 @@ public abstract class BaseGroupSelectorTestCase extends AbstractGroupingTestC cardinality++; } } - assertEquals(matchingGroups.size(), cardinality); // We should have one set bit per matching group + assertEquals( + matchingGroups.size(), cardinality); // We should have one set bit per matching group // Each group head should correspond to the topdoc of a search filtered by // that group for (T groupValue : matchingGroups) { - Query filtered = new BooleanQuery.Builder() - .add(topLevel, BooleanClause.Occur.MUST) - .add(filterQuery(groupValue), BooleanClause.Occur.FILTER) - .build(); + Query filtered = + new BooleanQuery.Builder() + .add(topLevel, BooleanClause.Occur.MUST) + .add(filterQuery(groupValue), BooleanClause.Occur.FILTER) + .build(); TopDocs td = searcher.search(filtered, 1); assertTrue(groupHeads.get(td.scoreDocs[0].doc)); } @@ -217,10 +228,13 @@ public abstract class BaseGroupSelectorTestCase extends AbstractGroupingTestC indexRandomDocs(shard.writer); IndexSearcher searcher = shard.getIndexSearcher(); - String[] query = new String[]{ "foo", "bar", "baz" }; + String[] query = new String[] {"foo", "bar", "baz"}; Query topLevel = new TermQuery(new Term("text", query[random().nextInt(query.length)])); - Sort sort = new Sort(new SortField("sort1", SortField.Type.STRING), new SortField("sort2", SortField.Type.LONG)); + Sort sort = + new Sort( + new SortField("sort1", SortField.Type.STRING), + new SortField("sort2", SortField.Type.LONG)); GroupSelector groupSelector = getGroupSelector(); GroupingSearch grouping = new GroupingSearch(groupSelector); grouping.setAllGroups(true); @@ -237,15 +251,17 @@ public abstract class BaseGroupSelectorTestCase extends AbstractGroupingTestC cardinality++; } } - 
assertEquals(matchingGroups.size(), cardinality); // We should have one set bit per matching group + assertEquals( + matchingGroups.size(), cardinality); // We should have one set bit per matching group // Each group head should correspond to the topdoc of a search filtered by // that group using the same within-group sort for (T groupValue : matchingGroups) { - Query filtered = new BooleanQuery.Builder() - .add(topLevel, BooleanClause.Occur.MUST) - .add(filterQuery(groupValue), BooleanClause.Occur.FILTER) - .build(); + Query filtered = + new BooleanQuery.Builder() + .add(topLevel, BooleanClause.Occur.MUST) + .add(filterQuery(groupValue), BooleanClause.Occur.FILTER) + .build(); TopDocs td = searcher.search(filtered, 1, sort); assertTrue(groupHeads.get(td.scoreDocs[0].doc)); } @@ -263,7 +279,7 @@ public abstract class BaseGroupSelectorTestCase extends AbstractGroupingTestC shards[i] = new Shard(); } - String[] texts = new String[]{ "foo", "bar", "bar baz", "foo foo bar" }; + String[] texts = new String[] {"foo", "bar", "bar baz", "foo foo bar"}; // Create a bunch of random documents, and index them - once into the control index, // and once into a randomly picked shard. @@ -282,29 +298,35 @@ public abstract class BaseGroupSelectorTestCase extends AbstractGroupingTestC shards[shard].writer.addDocument(doc); } - String[] query = new String[]{ "foo", "bar", "baz" }; + String[] query = new String[] {"foo", "bar", "baz"}; Query topLevel = new TermQuery(new Term("text", query[random().nextInt(query.length)])); - Sort sort = new Sort(new SortField("sort1", SortField.Type.STRING), new SortField("sort2", SortField.Type.LONG)); + Sort sort = + new Sort( + new SortField("sort1", SortField.Type.STRING), + new SortField("sort2", SortField.Type.LONG)); // A grouped query run in two phases against the control should give us the same // result as the query run against shards and merged back together after each phase. 
- FirstPassGroupingCollector singletonFirstPass = new FirstPassGroupingCollector<>(getGroupSelector(), sort, 5); + FirstPassGroupingCollector singletonFirstPass = + new FirstPassGroupingCollector<>(getGroupSelector(), sort, 5); control.getIndexSearcher().search(topLevel, singletonFirstPass); Collection> singletonGroups = singletonFirstPass.getTopGroups(0); List>> shardGroups = new ArrayList<>(); for (Shard shard : shards) { - FirstPassGroupingCollector fc = new FirstPassGroupingCollector<>(getGroupSelector(), sort, 5); + FirstPassGroupingCollector fc = + new FirstPassGroupingCollector<>(getGroupSelector(), sort, 5); shard.getIndexSearcher().search(topLevel, fc); shardGroups.add(fc.getTopGroups(0)); } Collection> mergedGroups = SearchGroup.merge(shardGroups, 0, 5, sort); assertEquals(singletonGroups, mergedGroups); - TopGroupsCollector singletonSecondPass = new TopGroupsCollector<>(getGroupSelector(), singletonGroups, sort, - Sort.RELEVANCE, 5, true); + TopGroupsCollector singletonSecondPass = + new TopGroupsCollector<>( + getGroupSelector(), singletonGroups, sort, Sort.RELEVANCE, 5, true); control.getIndexSearcher().search(topLevel, singletonSecondPass); TopGroups singletonTopGroups = singletonSecondPass.getTopGroups(0); @@ -313,12 +335,14 @@ public abstract class BaseGroupSelectorTestCase extends AbstractGroupingTestC TopGroups[] shardTopGroups = (TopGroups[]) new TopGroups[shards.length]; int j = 0; for (Shard shard : shards) { - TopGroupsCollector sc = new TopGroupsCollector<>(getGroupSelector(), mergedGroups, sort, Sort.RELEVANCE, 5, true); + TopGroupsCollector sc = + new TopGroupsCollector<>(getGroupSelector(), mergedGroups, sort, Sort.RELEVANCE, 5, true); shard.getIndexSearcher().search(topLevel, sc); shardTopGroups[j] = sc.getTopGroups(0); j++; } - TopGroups mergedTopGroups = TopGroups.merge(shardTopGroups, sort, Sort.RELEVANCE, 0, 5, TopGroups.ScoreMergeMode.None); + TopGroups mergedTopGroups = + TopGroups.merge(shardTopGroups, sort, Sort.RELEVANCE, 0, 5, TopGroups.ScoreMergeMode.None); assertNotNull(mergedTopGroups); assertEquals(singletonTopGroups.totalGroupedHitCount, mergedTopGroups.totalGroupedHitCount); @@ -327,18 +351,19 @@ public abstract class BaseGroupSelectorTestCase extends AbstractGroupingTestC assertEquals(singletonTopGroups.groups.length, mergedTopGroups.groups.length); for (int i = 0; i < singletonTopGroups.groups.length; i++) { assertEquals(singletonTopGroups.groups[i].groupValue, mergedTopGroups.groups[i].groupValue); - assertEquals(singletonTopGroups.groups[i].scoreDocs.length, mergedTopGroups.groups[i].scoreDocs.length); + assertEquals( + singletonTopGroups.groups[i].scoreDocs.length, + mergedTopGroups.groups[i].scoreDocs.length); } control.close(); for (Shard shard : shards) { shard.close(); } - } private void indexRandomDocs(RandomIndexWriter w) throws IOException { - String[] texts = new String[]{ "foo", "bar", "bar baz", "foo foo bar" }; + String[] texts = new String[] {"foo", "bar", "bar baz", "foo foo bar"}; int numDocs = atLeast(200); for (int i = 0; i < numDocs; i++) { @@ -356,10 +381,9 @@ public abstract class BaseGroupSelectorTestCase extends AbstractGroupingTestC private void assertSortsBefore(GroupDocs first, GroupDocs second) { Object[] groupSortValues = second.groupSortValues; Object[] prevSortValues = first.groupSortValues; - assertTrue(((BytesRef)prevSortValues[0]).compareTo((BytesRef)groupSortValues[0]) <= 0); + assertTrue(((BytesRef) prevSortValues[0]).compareTo((BytesRef) groupSortValues[0]) <= 0); if 
(prevSortValues[0].equals(groupSortValues[0])) { - assertTrue((long)prevSortValues[1] <= (long)groupSortValues[1]); + assertTrue((long) prevSortValues[1] <= (long) groupSortValues[1]); } } - } diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestAllGroupHeadsCollector.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestAllGroupHeadsCollector.java index fef695ff9aa..9b7baae65a2 100644 --- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestAllGroupHeadsCollector.java +++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestAllGroupHeadsCollector.java @@ -27,7 +27,6 @@ import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Set; - import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.BinaryDocValuesField; import org.apache.lucene.document.Document; @@ -61,10 +60,11 @@ public class TestAllGroupHeadsCollector extends LuceneTestCase { public void testBasic() throws Exception { final String groupField = "author"; Directory dir = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter( - random(), - dir, - newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy())); + RandomIndexWriter w = + new RandomIndexWriter( + random(), + dir, + newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy())); DocValuesType valueType = DocValuesType.SORTED; // 0 @@ -137,34 +137,45 @@ public class TestAllGroupHeadsCollector extends LuceneTestCase { int maxDoc = reader.maxDoc(); Sort sortWithinGroup = new Sort(new SortField("id_1", SortField.Type.INT, true)); - AllGroupHeadsCollector allGroupHeadsCollector = createRandomCollector(groupField, sortWithinGroup); + AllGroupHeadsCollector allGroupHeadsCollector = + createRandomCollector(groupField, sortWithinGroup); indexSearcher.search(new TermQuery(new Term("content", "random")), allGroupHeadsCollector); - assertTrue(arrayContains(new int[]{2, 3, 5, 7}, allGroupHeadsCollector.retrieveGroupHeads())); - assertTrue(openBitSetContains(new int[]{2, 3, 5, 7}, allGroupHeadsCollector.retrieveGroupHeads(maxDoc), maxDoc)); + assertTrue(arrayContains(new int[] {2, 3, 5, 7}, allGroupHeadsCollector.retrieveGroupHeads())); + assertTrue( + openBitSetContains( + new int[] {2, 3, 5, 7}, allGroupHeadsCollector.retrieveGroupHeads(maxDoc), maxDoc)); allGroupHeadsCollector = createRandomCollector(groupField, sortWithinGroup); indexSearcher.search(new TermQuery(new Term("content", "some")), allGroupHeadsCollector); - assertTrue(arrayContains(new int[]{2, 3, 4}, allGroupHeadsCollector.retrieveGroupHeads())); - assertTrue(openBitSetContains(new int[]{2, 3, 4}, allGroupHeadsCollector.retrieveGroupHeads(maxDoc), maxDoc)); + assertTrue(arrayContains(new int[] {2, 3, 4}, allGroupHeadsCollector.retrieveGroupHeads())); + assertTrue( + openBitSetContains( + new int[] {2, 3, 4}, allGroupHeadsCollector.retrieveGroupHeads(maxDoc), maxDoc)); allGroupHeadsCollector = createRandomCollector(groupField, sortWithinGroup); indexSearcher.search(new TermQuery(new Term("content", "blob")), allGroupHeadsCollector); - assertTrue(arrayContains(new int[]{1, 5}, allGroupHeadsCollector.retrieveGroupHeads())); - assertTrue(openBitSetContains(new int[]{1, 5}, allGroupHeadsCollector.retrieveGroupHeads(maxDoc), maxDoc)); + assertTrue(arrayContains(new int[] {1, 5}, allGroupHeadsCollector.retrieveGroupHeads())); + assertTrue( + openBitSetContains( + new int[] {1, 5}, allGroupHeadsCollector.retrieveGroupHeads(maxDoc), maxDoc)); // STRING sort 
type triggers different implementation Sort sortWithinGroup2 = new Sort(new SortField("id_2", SortField.Type.STRING, true)); allGroupHeadsCollector = createRandomCollector(groupField, sortWithinGroup2); indexSearcher.search(new TermQuery(new Term("content", "random")), allGroupHeadsCollector); - assertTrue(arrayContains(new int[]{2, 3, 5, 7}, allGroupHeadsCollector.retrieveGroupHeads())); - assertTrue(openBitSetContains(new int[]{2, 3, 5, 7}, allGroupHeadsCollector.retrieveGroupHeads(maxDoc), maxDoc)); + assertTrue(arrayContains(new int[] {2, 3, 5, 7}, allGroupHeadsCollector.retrieveGroupHeads())); + assertTrue( + openBitSetContains( + new int[] {2, 3, 5, 7}, allGroupHeadsCollector.retrieveGroupHeads(maxDoc), maxDoc)); Sort sortWithinGroup3 = new Sort(new SortField("id_2", SortField.Type.STRING, false)); allGroupHeadsCollector = createRandomCollector(groupField, sortWithinGroup3); indexSearcher.search(new TermQuery(new Term("content", "random")), allGroupHeadsCollector); // 7 b/c higher doc id wins, even if order of field is in not in reverse. - assertTrue(arrayContains(new int[]{0, 3, 4, 6}, allGroupHeadsCollector.retrieveGroupHeads())); - assertTrue(openBitSetContains(new int[]{0, 3, 4, 6}, allGroupHeadsCollector.retrieveGroupHeads(maxDoc), maxDoc)); + assertTrue(arrayContains(new int[] {0, 3, 4, 6}, allGroupHeadsCollector.retrieveGroupHeads())); + assertTrue( + openBitSetContains( + new int[] {0, 3, 4, 6}, allGroupHeadsCollector.retrieveGroupHeads(maxDoc), maxDoc)); indexSearcher.getIndexReader().close(); dir.close(); @@ -174,7 +185,8 @@ public class TestAllGroupHeadsCollector extends LuceneTestCase { int numberOfRuns = atLeast(1); for (int iter = 0; iter < numberOfRuns; iter++) { if (VERBOSE) { - System.out.println(String.format(Locale.ROOT, "TEST: iter=%d total=%d", iter, numberOfRuns)); + System.out.println( + String.format(Locale.ROOT, "TEST: iter=%d total=%d", iter, numberOfRuns)); } final int numDocs = TestUtil.nextInt(random(), 100, 1000) * RANDOM_MULTIPLIER; @@ -188,10 +200,11 @@ public class TestAllGroupHeadsCollector extends LuceneTestCase { for (int i = 0; i < numGroups; i++) { String randomValue; do { - // B/c of DV based impl we can't see the difference between an empty string and a null value. + // B/c of DV based impl we can't see the difference between an empty string and a null + // value. // For that reason we don't generate empty string groups. 
randomValue = TestUtil.randomRealisticUnicodeString(random()); - //randomValue = TestUtil.randomSimpleString(random()); + // randomValue = TestUtil.randomSimpleString(random()); } while ("".equals(randomValue)); groups.add(new BytesRef(randomValue)); } @@ -213,10 +226,8 @@ public class TestAllGroupHeadsCollector extends LuceneTestCase { } Directory dir = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter( - random(), - dir, - newIndexWriterConfig(new MockAnalyzer(random()))); + RandomIndexWriter w = + new RandomIndexWriter(random(), dir, newIndexWriterConfig(new MockAnalyzer(random()))); DocValuesType valueType = DocValuesType.SORTED; Document doc = new Document(); @@ -250,17 +261,29 @@ public class TestAllGroupHeadsCollector extends LuceneTestCase { groupValue = groups.get(random().nextInt(groups.size())); } - final GroupDoc groupDoc = new GroupDoc( - i, - groupValue, - groups.get(random().nextInt(groups.size())), - groups.get(random().nextInt(groups.size())), - new BytesRef(String.format(Locale.ROOT, "%05d", i)), - contentStrings[random().nextInt(contentStrings.length)] - ); + final GroupDoc groupDoc = + new GroupDoc( + i, + groupValue, + groups.get(random().nextInt(groups.size())), + groups.get(random().nextInt(groups.size())), + new BytesRef(String.format(Locale.ROOT, "%05d", i)), + contentStrings[random().nextInt(contentStrings.length)]); if (VERBOSE) { - System.out.println(" doc content=" + groupDoc.content + " id=" + i + " group=" + (groupDoc.group == null ? "null" : groupDoc.group.utf8ToString()) + " sort1=" + groupDoc.sort1.utf8ToString() + " sort2=" + groupDoc.sort2.utf8ToString() + " sort3=" + groupDoc.sort3.utf8ToString()); + System.out.println( + " doc content=" + + groupDoc.content + + " id=" + + i + + " group=" + + (groupDoc.group == null ? 
"null" : groupDoc.group.utf8ToString()) + + " sort1=" + + groupDoc.sort1.utf8ToString() + + " sort2=" + + groupDoc.sort2.utf8ToString() + + " sort3=" + + groupDoc.sort3.utf8ToString()); } groupDocs[i] = groupDoc; @@ -296,7 +319,8 @@ public class TestAllGroupHeadsCollector extends LuceneTestCase { Set seenIDs = new HashSet<>(); for (int contentID = 0; contentID < 3; contentID++) { - final ScoreDoc[] hits = s.search(new TermQuery(new Term("content", "real" + contentID)), numDocs).scoreDocs; + final ScoreDoc[] hits = + s.search(new TermQuery(new Term("content", "real" + contentID)), numDocs).scoreDocs; for (ScoreDoc hit : hits) { int idValue = docIDToFieldId[hit.doc]; final GroupDoc gd = groupDocs[idValue]; @@ -315,19 +339,22 @@ public class TestAllGroupHeadsCollector extends LuceneTestCase { assertTrue(Float.isFinite(gd.score)); assertTrue(gd.score >= 0.0); } - + for (int searchIter = 0; searchIter < 100; searchIter++) { - + if (VERBOSE) { System.out.println("TEST: searchIter=" + searchIter); } - + final String searchTerm = "real" + random().nextInt(3); boolean sortByScoreOnly = random().nextBoolean(); Sort sortWithinGroup = getRandomSort(sortByScoreOnly); - AllGroupHeadsCollector allGroupHeadsCollector = createRandomCollector("group", sortWithinGroup); + AllGroupHeadsCollector allGroupHeadsCollector = + createRandomCollector("group", sortWithinGroup); s.search(new TermQuery(new Term("content", searchTerm)), allGroupHeadsCollector); - int[] expectedGroupHeads = createExpectedGroupHeads(searchTerm, groupDocs, sortWithinGroup, sortByScoreOnly, fieldIdToDocID); + int[] expectedGroupHeads = + createExpectedGroupHeads( + searchTerm, groupDocs, sortWithinGroup, sortByScoreOnly, fieldIdToDocID); int[] actualGroupHeads = allGroupHeadsCollector.retrieveGroupHeads(); // The actual group heads contains Lucene ids. Need to change them into our id value. for (int i = 0; i < actualGroupHeads.length; i++) { @@ -336,7 +363,7 @@ public class TestAllGroupHeadsCollector extends LuceneTestCase { // Allows us the easily iterate and assert the actual and expected results. Arrays.sort(expectedGroupHeads); Arrays.sort(actualGroupHeads); - + if (VERBOSE) { System.out.println("Collector: " + allGroupHeadsCollector.getClass().getSimpleName()); System.out.println("Sort within group: " + sortWithinGroup); @@ -345,41 +372,50 @@ public class TestAllGroupHeadsCollector extends LuceneTestCase { System.out.println("\n=== Expected: \n"); for (int expectedDocId : expectedGroupHeads) { GroupDoc expectedGroupDoc = groupDocs[expectedDocId]; - String expectedGroup = expectedGroupDoc.group == null ? null : expectedGroupDoc.group.utf8ToString(); + String expectedGroup = + expectedGroupDoc.group == null ? null : expectedGroupDoc.group.utf8ToString(); System.out.println( - String.format(Locale.ROOT, + String.format( + Locale.ROOT, "Group:%10s score%5f Sort1:%10s Sort2:%10s Sort3:%10s doc:%5d", - expectedGroup, expectedGroupDoc.score, expectedGroupDoc.sort1.utf8ToString(), - expectedGroupDoc.sort2.utf8ToString(), expectedGroupDoc.sort3.utf8ToString(), expectedDocId - ) - ); + expectedGroup, + expectedGroupDoc.score, + expectedGroupDoc.sort1.utf8ToString(), + expectedGroupDoc.sort2.utf8ToString(), + expectedGroupDoc.sort3.utf8ToString(), + expectedDocId)); } System.out.println("\n=== Actual: \n"); for (int actualDocId : actualGroupHeads) { GroupDoc actualGroupDoc = groupDocs[actualDocId]; - String actualGroup = actualGroupDoc.group == null ? 
null : actualGroupDoc.group.utf8ToString(); + String actualGroup = + actualGroupDoc.group == null ? null : actualGroupDoc.group.utf8ToString(); System.out.println( - String.format(Locale.ROOT, + String.format( + Locale.ROOT, "Group:%10s score%5f Sort1:%10s Sort2:%10s Sort3:%10s doc:%5d", - actualGroup, actualGroupDoc.score, actualGroupDoc.sort1.utf8ToString(), - actualGroupDoc.sort2.utf8ToString(), actualGroupDoc.sort3.utf8ToString(), actualDocId - ) - ); + actualGroup, + actualGroupDoc.score, + actualGroupDoc.sort1.utf8ToString(), + actualGroupDoc.sort2.utf8ToString(), + actualGroupDoc.sort3.utf8ToString(), + actualDocId)); } - System.out.println("\n==================================================================================="); + System.out.println( + "\n==================================================================================="); } - + assertArrayEquals(expectedGroupHeads, actualGroupHeads); } - + r.close(); dir.close(); } } - private boolean arrayContains(int[] expected, int[] actual) { - Arrays.sort(actual); // in some cases the actual docs aren't sorted by docid. This method expects that. + // in some cases the actual docs aren't sorted by docid. This method expects that. + Arrays.sort(actual); if (expected.length != actual.length) { return false; } @@ -401,9 +437,10 @@ public class TestAllGroupHeadsCollector extends LuceneTestCase { return true; } - private boolean openBitSetContains(int[] expectedDocs, Bits actual, int maxDoc) throws IOException { + private boolean openBitSetContains(int[] expectedDocs, Bits actual, int maxDoc) + throws IOException { assert actual instanceof FixedBitSet; - if (expectedDocs.length != ((FixedBitSet)actual).cardinality()) { + if (expectedDocs.length != ((FixedBitSet) actual).cardinality()) { return false; } @@ -412,7 +449,12 @@ public class TestAllGroupHeadsCollector extends LuceneTestCase { expected.set(expectedDoc); } - for (int docId = expected.nextSetBit(0); docId != DocIdSetIterator.NO_MORE_DOCS; docId = docId + 1 >= expected.length() ? DocIdSetIterator.NO_MORE_DOCS : expected.nextSetBit(docId + 1)) { + for (int docId = expected.nextSetBit(0); + docId != DocIdSetIterator.NO_MORE_DOCS; + docId = + docId + 1 >= expected.length() + ? 
DocIdSetIterator.NO_MORE_DOCS + : expected.nextSetBit(docId + 1)) { if (!actual.get(docId)) { return false; } @@ -421,7 +463,12 @@ public class TestAllGroupHeadsCollector extends LuceneTestCase { return true; } - private int[] createExpectedGroupHeads(String searchTerm, GroupDoc[] groupDocs, Sort docSort, boolean sortByScoreOnly, int[] fieldIdToDocID) { + private int[] createExpectedGroupHeads( + String searchTerm, + GroupDoc[] groupDocs, + Sort docSort, + boolean sortByScoreOnly, + int[] fieldIdToDocID) { Map> groupHeads = new HashMap<>(); for (GroupDoc groupDoc : groupDocs) { if (!groupDoc.content.startsWith(searchTerm)) { @@ -473,7 +520,8 @@ public class TestAllGroupHeadsCollector extends LuceneTestCase { return new Sort(sortFields.toArray(new SortField[sortFields.size()])); } - private Comparator getComparator(Sort sort, final boolean sortByScoreOnly, final int[] fieldIdToDocID) { + private Comparator getComparator( + Sort sort, final boolean sortByScoreOnly, final int[] fieldIdToDocID) { final SortField[] sortFields = sort.getSort(); return new Comparator() { @Override @@ -509,19 +557,22 @@ public class TestAllGroupHeadsCollector extends LuceneTestCase { }; } - @SuppressWarnings({"unchecked","rawtypes"}) + @SuppressWarnings({"unchecked", "rawtypes"}) private AllGroupHeadsCollector createRandomCollector(String groupField, Sort sortWithinGroup) { if (random().nextBoolean()) { ValueSource vs = new BytesRefFieldSource(groupField); - return AllGroupHeadsCollector.newCollector(new ValueSourceGroupSelector(vs, new HashMap<>()), sortWithinGroup); + return AllGroupHeadsCollector.newCollector( + new ValueSourceGroupSelector(vs, new HashMap<>()), sortWithinGroup); } else { - return AllGroupHeadsCollector.newCollector(new TermGroupSelector(groupField), sortWithinGroup); + return AllGroupHeadsCollector.newCollector( + new TermGroupSelector(groupField), sortWithinGroup); } } - private void addGroupField(Document doc, String groupField, String value, DocValuesType valueType) { + private void addGroupField( + Document doc, String groupField, String value, DocValuesType valueType) { Field valuesField = null; - switch(valueType) { + switch (valueType) { case BINARY: valuesField = new BinaryDocValuesField(groupField, new BytesRef(value)); break; @@ -544,7 +595,8 @@ public class TestAllGroupHeadsCollector extends LuceneTestCase { final String content; float score; - public GroupDoc(int id, BytesRef group, BytesRef sort1, BytesRef sort2, BytesRef sort3, String content) { + public GroupDoc( + int id, BytesRef group, BytesRef sort1, BytesRef sort2, BytesRef sort3, String content) { this.id = id; this.group = group; this.sort1 = sort1; @@ -552,7 +604,5 @@ public class TestAllGroupHeadsCollector extends LuceneTestCase { this.sort3 = sort3; this.content = content; } - } - } diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestAllGroupsCollector.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestAllGroupsCollector.java index ccdf3c347b6..73e7dc059fc 100644 --- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestAllGroupsCollector.java +++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestAllGroupsCollector.java @@ -17,7 +17,6 @@ package org.apache.lucene.search.grouping; import java.util.HashMap; - import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; @@ -43,10 +42,11 @@ public class TestAllGroupsCollector extends LuceneTestCase { customType.setStored(true); 
Directory dir = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter( - random(), - dir, - newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy())); + RandomIndexWriter w = + new RandomIndexWriter( + random(), + dir, + newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy())); // 0 Document doc = new Document(); @@ -124,11 +124,9 @@ public class TestAllGroupsCollector extends LuceneTestCase { private AllGroupsCollector createRandomCollector(String groupField) { if (random().nextBoolean()) { return new AllGroupsCollector<>(new TermGroupSelector(groupField)); - } - else { + } else { ValueSource vs = new BytesRefFieldSource(groupField); return new AllGroupsCollector<>(new ValueSourceGroupSelector(vs, new HashMap<>())); } } - } diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestBlockGrouping.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestBlockGrouping.java index 78acbbfaf51..e70ed27a8c7 100644 --- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestBlockGrouping.java +++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestBlockGrouping.java @@ -20,7 +20,6 @@ package org.apache.lucene.search.grouping; import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.NumericDocValuesField; @@ -65,16 +64,16 @@ public class TestBlockGrouping extends AbstractGroupingTestCase { String bookName = searcher.doc(tg.groups[i].scoreDocs[0].doc).get("book"); // The contents of each group should be equal to the results of a search for // that group alone - Query filtered = new BooleanQuery.Builder() - .add(topLevel, BooleanClause.Occur.MUST) - .add(new TermQuery(new Term("book", bookName)), BooleanClause.Occur.FILTER) - .build(); + Query filtered = + new BooleanQuery.Builder() + .add(topLevel, BooleanClause.Occur.MUST) + .add(new TermQuery(new Term("book", bookName)), BooleanClause.Occur.FILTER) + .build(); TopDocs td = searcher.search(filtered, 10); assertScoreDocsEquals(td.scoreDocs, tg.groups[i].scoreDocs); } shard.close(); - } public void testTopLevelSort() throws IOException { @@ -88,7 +87,8 @@ public class TestBlockGrouping extends AbstractGroupingTestCase { Query blockEndQuery = new TermQuery(new Term("blockEnd", "true")); GroupingSearch grouper = new GroupingSearch(blockEndQuery); grouper.setGroupDocsLimit(10); - grouper.setGroupSort(sort); // groups returned sorted by length, chapters within group sorted by relevancy + // groups returned sorted by length, chapters within group sorted by relevancy + grouper.setGroupSort(sort); Query topLevel = new TermQuery(new Term("text", "grandmother")); TopGroups tg = grouper.search(searcher, topLevel, 0, 5); @@ -96,16 +96,17 @@ public class TestBlockGrouping extends AbstractGroupingTestCase { // The sort value of the top doc in the top group should be the same as the sort value // of the top result from the same search done with no grouping TopDocs topDoc = searcher.search(topLevel, 1, sort); - assertEquals(((FieldDoc)topDoc.scoreDocs[0]).fields[0], tg.groups[0].groupSortValues[0]); + assertEquals(((FieldDoc) topDoc.scoreDocs[0]).fields[0], tg.groups[0].groupSortValues[0]); for (int i = 0; i < tg.groups.length; i++) { String bookName = searcher.doc(tg.groups[i].scoreDocs[0].doc).get("book"); // The contents of each group should be equal to the results of a search for // that group alone, 
sorted by score - Query filtered = new BooleanQuery.Builder() - .add(topLevel, BooleanClause.Occur.MUST) - .add(new TermQuery(new Term("book", bookName)), BooleanClause.Occur.FILTER) - .build(); + Query filtered = + new BooleanQuery.Builder() + .add(topLevel, BooleanClause.Occur.MUST) + .add(new TermQuery(new Term("book", bookName)), BooleanClause.Occur.FILTER) + .build(); TopDocs td = searcher.search(filtered, 10); assertScoreDocsEquals(td.scoreDocs, tg.groups[i].scoreDocs); if (i > 1) { @@ -114,7 +115,6 @@ public class TestBlockGrouping extends AbstractGroupingTestCase { } shard.close(); - } public void testWithinGroupSort() throws IOException { @@ -128,7 +128,8 @@ public class TestBlockGrouping extends AbstractGroupingTestCase { Query blockEndQuery = new TermQuery(new Term("blockEnd", "true")); GroupingSearch grouper = new GroupingSearch(blockEndQuery); grouper.setGroupDocsLimit(10); - grouper.setSortWithinGroup(sort); // groups returned sorted by relevancy, chapters within group sorted by length + // groups returned sorted by relevancy, chapters within group sorted by length + grouper.setSortWithinGroup(sort); Query topLevel = new TermQuery(new Term("text", "grandmother")); TopGroups<?> tg = grouper.search(searcher, topLevel, 0, 5); @@ -136,16 +137,17 @@ public class TestBlockGrouping extends AbstractGroupingTestCase { // We're sorting by score, so the score of the top group should be the same as the // score of the top document from the same query with no grouping TopDocs topDoc = searcher.search(topLevel, 1); - assertEquals(topDoc.scoreDocs[0].score, (float)tg.groups[0].groupSortValues[0], 0); + assertEquals(topDoc.scoreDocs[0].score, (float) tg.groups[0].groupSortValues[0], 0); for (int i = 0; i < tg.groups.length; i++) { String bookName = searcher.doc(tg.groups[i].scoreDocs[0].doc).get("book"); // The contents of each group should be equal to the results of a search for // that group alone, sorted by length - Query filtered = new BooleanQuery.Builder() - .add(topLevel, BooleanClause.Occur.MUST) - .add(new TermQuery(new Term("book", bookName)), BooleanClause.Occur.FILTER) - .build(); + Query filtered = + new BooleanQuery.Builder() + .add(topLevel, BooleanClause.Occur.MUST) + .add(new TermQuery(new Term("book", bookName)), BooleanClause.Occur.FILTER) + .build(); TopDocs td = searcher.search(filtered, 10, sort); assertFieldDocsEquals(td.scoreDocs, tg.groups[i].scoreDocs); // We're sorting by score, so the group sort value for each group should be a float, @@ -188,14 +190,15 @@ public class TestBlockGrouping extends AbstractGroupingTestCase { return block; } - private static final String[] TEXT = new String[]{ - "It was the day my grandmother exploded", - "It was the best of times, it was the worst of times", - "It was a bright cold morning in April", - "It is a truth universally acknowledged", - "I have just returned from a visit to my landlord", - "I've been here and I've been there" - }; + private static final String[] TEXT = + new String[] { + "It was the day my grandmother exploded", + "It was the best of times, it was the worst of times", + "It was a bright cold morning in April", + "It is a truth universally acknowledged", + "I have just returned from a visit to my landlord", + "I've been here and I've been there" + }; private static String randomText() { StringBuilder sb = new StringBuilder(TEXT[random().nextInt(TEXT.length)]); @@ -209,7 +212,7 @@ public class TestBlockGrouping extends AbstractGroupingTestCase { private void assertSortsBefore(GroupDocs<?> first, GroupDocs<?> second) {
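// Helper note: verifies that 'first' sorts at or before 'second' on the primary group
// sort key; the Long casts in the body match the length-based sort these tests use.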
Object[] groupSortValues = second.groupSortValues; Object[] prevSortValues = first.groupSortValues; - assertTrue(((Long)prevSortValues[0]).compareTo((Long)groupSortValues[0]) <= 0); + assertTrue(((Long) prevSortValues[0]).compareTo((Long) groupSortValues[0]) <= 0); } protected static void assertFieldDocsEquals(ScoreDoc[] expected, ScoreDoc[] actual) { @@ -221,5 +224,4 @@ public class TestBlockGrouping extends AbstractGroupingTestCase { assertArrayEquals(e.fields, a.fields); } } - } diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestDistinctValuesCollector.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestDistinctValuesCollector.java index cedbeeab26c..8ec8e883106 100644 --- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestDistinctValuesCollector.java +++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestDistinctValuesCollector.java @@ -29,7 +29,6 @@ import java.util.Locale; import java.util.Map; import java.util.Random; import java.util.Set; - import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; @@ -51,18 +50,19 @@ import org.apache.lucene.util.mutable.MutableValueStr; public class TestDistinctValuesCollector extends AbstractGroupingTestCase { - private final static NullComparator nullComparator = new NullComparator(); - + private static final NullComparator nullComparator = new NullComparator(); + private static final String GROUP_FIELD = "author"; private static final String COUNT_FIELD = "publisher"; public void testSimple() throws Exception { Random random = random(); Directory dir = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter( - random, - dir, - newIndexWriterConfig(new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + RandomIndexWriter w = + new RandomIndexWriter( + random, + dir, + newIndexWriterConfig(new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); Document doc = new Document(); addField(doc, GROUP_FIELD, "1"); addField(doc, COUNT_FIELD, "1"); @@ -120,27 +120,30 @@ public class TestDistinctValuesCollector extends AbstractGroupingTestCase { IndexSearcher indexSearcher = newSearcher(w.getReader()); w.close(); - Comparator, Comparable>> cmp = (groupCount1, groupCount2) -> { - if (groupCount1.groupValue == null) { - if (groupCount2.groupValue == null) { - return 0; - } - return -1; - } else if (groupCount2.groupValue == null) { - return 1; - } else { - return groupCount1.groupValue.compareTo(groupCount2.groupValue); - } - }; + Comparator, Comparable>> cmp = + (groupCount1, groupCount2) -> { + if (groupCount1.groupValue == null) { + if (groupCount2.groupValue == null) { + return 0; + } + return -1; + } else if (groupCount2.groupValue == null) { + return 1; + } else { + return groupCount1.groupValue.compareTo(groupCount2.groupValue); + } + }; // === Search for content:random - FirstPassGroupingCollector> firstCollector = createRandomFirstPassCollector(new Sort(), GROUP_FIELD, 10); + FirstPassGroupingCollector> firstCollector = + createRandomFirstPassCollector(new Sort(), GROUP_FIELD, 10); indexSearcher.search(new TermQuery(new Term("content", "random")), firstCollector); - DistinctValuesCollector, Comparable> distinctValuesCollector - = createDistinctCountCollector(firstCollector, COUNT_FIELD); + DistinctValuesCollector, Comparable> distinctValuesCollector = + createDistinctCountCollector(firstCollector, COUNT_FIELD); indexSearcher.search(new TermQuery(new Term("content", "random")), 
distinctValuesCollector); - List, Comparable>> gcs = distinctValuesCollector.getGroups(); + List, Comparable>> gcs = + distinctValuesCollector.getGroups(); Collections.sort(gcs, cmp); assertEquals(4, gcs.size()); @@ -193,7 +196,7 @@ public class TestDistinctValuesCollector extends AbstractGroupingTestCase { assertEquals(1, countValues.size()); compare("1", countValues.get(0)); - // === Search for content:blob + // === Search for content:blob firstCollector = createRandomFirstPassCollector(new Sort(), GROUP_FIELD, 10); indexSearcher.search(new TermQuery(new Term("content", "blob")), firstCollector); distinctValuesCollector = createDistinctCountCollector(firstCollector, COUNT_FIELD); @@ -229,33 +232,40 @@ public class TestDistinctValuesCollector extends AbstractGroupingTestCase { Sort groupSort = new Sort(new SortField("id", SortField.Type.STRING)); int topN = 1 + random.nextInt(10); - List, Comparable>> expectedResult = createExpectedResult(context, term, groupSort, topN); + List, Comparable>> + expectedResult = createExpectedResult(context, term, groupSort, topN); - FirstPassGroupingCollector> firstCollector = createRandomFirstPassCollector(groupSort, GROUP_FIELD, topN); + FirstPassGroupingCollector> firstCollector = + createRandomFirstPassCollector(groupSort, GROUP_FIELD, topN); searcher.search(new TermQuery(new Term("content", term)), firstCollector); - DistinctValuesCollector, Comparable> distinctValuesCollector - = createDistinctCountCollector(firstCollector, COUNT_FIELD); + DistinctValuesCollector, Comparable> distinctValuesCollector = + createDistinctCountCollector(firstCollector, COUNT_FIELD); searcher.search(new TermQuery(new Term("content", term)), distinctValuesCollector); @SuppressWarnings("unchecked") - List, Comparable>> actualResult = distinctValuesCollector.getGroups(); + List, Comparable>> + actualResult = distinctValuesCollector.getGroups(); if (VERBOSE) { System.out.println("Index iter=" + indexIter); System.out.println("Search iter=" + searchIter); - System.out.println("1st pass collector class name=" + firstCollector.getClass().getName()); - System.out.println("2nd pass collector class name=" + distinctValuesCollector.getClass().getName()); + System.out.println( + "1st pass collector class name=" + firstCollector.getClass().getName()); + System.out.println( + "2nd pass collector class name=" + distinctValuesCollector.getClass().getName()); System.out.println("Search term=" + term); System.out.println("1st pass groups=" + firstCollector.getTopGroups(0)); - System.out.println("Expected:"); + System.out.println("Expected:"); printGroups(expectedResult); - System.out.println("Actual:"); + System.out.println("Actual:"); printGroups(actualResult); } assertEquals(expectedResult.size(), actualResult.size()); for (int i = 0; i < expectedResult.size(); i++) { - DistinctValuesCollector.GroupCount, Comparable> expected = expectedResult.get(i); - DistinctValuesCollector.GroupCount, Comparable> actual = actualResult.get(i); + DistinctValuesCollector.GroupCount, Comparable> expected = + expectedResult.get(i); + DistinctValuesCollector.GroupCount, Comparable> actual = + actualResult.get(i); assertValues(expected.groupValue, actual.groupValue); assertEquals(expected.uniqueValues.size(), actual.uniqueValues.size()); List> expectedUniqueValues = new ArrayList<>(expected.uniqueValues); @@ -272,16 +282,18 @@ public class TestDistinctValuesCollector extends AbstractGroupingTestCase { } } - private void printGroups(List, Comparable>> results) { - for(int i=0;i, Comparable> group = 
results.get(i); + private void printGroups( + List, Comparable>> results) { + for (int i = 0; i < results.size(); i++) { + DistinctValuesCollector.GroupCount, Comparable> group = + results.get(i); Object gv = group.groupValue; if (gv instanceof BytesRef) { System.out.println(i + ": groupValue=" + ((BytesRef) gv).utf8ToString()); } else { System.out.println(i + ": groupValue=" + gv); } - for(Object o : group.uniqueValues) { + for (Object o : group.uniqueValues) { if (o instanceof BytesRef) { System.out.println(" " + ((BytesRef) o).utf8ToString()); } else { @@ -298,7 +310,7 @@ public class TestDistinctValuesCollector extends AbstractGroupingTestCase { compare(((BytesRef) expected).utf8ToString(), actual); } } - + private void compare(String expected, Object groupValue) { if (BytesRef.class.isAssignableFrom(groupValue.getClass())) { assertEquals(expected, ((BytesRef) groupValue).utf8ToString()); @@ -338,13 +350,16 @@ public class TestDistinctValuesCollector extends AbstractGroupingTestCase { doc.add(new SortedDocValuesField(field, new BytesRef(value))); } - @SuppressWarnings({"unchecked","rawtypes"}) - private , R extends Comparable> DistinctValuesCollector createDistinctCountCollector(FirstPassGroupingCollector firstPassGroupingCollector, - String countField) throws IOException { + @SuppressWarnings({"unchecked", "rawtypes"}) + private , R extends Comparable> + DistinctValuesCollector createDistinctCountCollector( + FirstPassGroupingCollector firstPassGroupingCollector, String countField) + throws IOException { Collection> searchGroups = firstPassGroupingCollector.getTopGroups(0); GroupSelector selector = firstPassGroupingCollector.getGroupSelector(); if (ValueSourceGroupSelector.class.isAssignableFrom(selector.getClass())) { - GroupSelector gs = new ValueSourceGroupSelector(new BytesRefFieldSource(countField), new HashMap<>()); + GroupSelector gs = + new ValueSourceGroupSelector(new BytesRefFieldSource(countField), new HashMap<>()); return new DistinctValuesCollector<>(selector, searchGroups, gs); } else { GroupSelector ts = new TermGroupSelector(countField); @@ -352,18 +367,26 @@ public class TestDistinctValuesCollector extends AbstractGroupingTestCase { } } - @SuppressWarnings({"unchecked","rawtypes"}) - private FirstPassGroupingCollector createRandomFirstPassCollector(Sort groupSort, String groupField, int topNGroups) throws IOException { + @SuppressWarnings({"unchecked", "rawtypes"}) + private FirstPassGroupingCollector createRandomFirstPassCollector( + Sort groupSort, String groupField, int topNGroups) throws IOException { Random random = random(); if (random.nextBoolean()) { - return (FirstPassGroupingCollector) new FirstPassGroupingCollector<>(new ValueSourceGroupSelector(new BytesRefFieldSource(groupField), new HashMap<>()), groupSort, topNGroups); + return (FirstPassGroupingCollector) + new FirstPassGroupingCollector<>( + new ValueSourceGroupSelector(new BytesRefFieldSource(groupField), new HashMap<>()), + groupSort, + topNGroups); } else { - return (FirstPassGroupingCollector) new FirstPassGroupingCollector<>(new TermGroupSelector(groupField), groupSort, topNGroups); + return (FirstPassGroupingCollector) + new FirstPassGroupingCollector<>( + new TermGroupSelector(groupField), groupSort, topNGroups); } } - @SuppressWarnings({"unchecked","rawtypes"}) - private List, Comparable>> createExpectedResult(IndexContext context, String term, Sort groupSort, int topN) { + @SuppressWarnings({"unchecked", "rawtypes"}) + private List, Comparable>> + createExpectedResult(IndexContext context, 
String term, Sort groupSort, int topN) { List result = new ArrayList(); Map> groupCounts = context.searchTermToGroupCounts.get(term); int i = 0; @@ -375,7 +398,9 @@ public class TestDistinctValuesCollector extends AbstractGroupingTestCase { for (String val : groupCounts.get(group)) { uniqueValues.add(val != null ? new BytesRef(val) : null); } - result.add(new DistinctValuesCollector.GroupCount(group != null ? new BytesRef(group) : null, uniqueValues)); + result.add( + new DistinctValuesCollector.GroupCount( + group != null ? new BytesRef(group) : null, uniqueValues)); } return result; } @@ -384,11 +409,11 @@ public class TestDistinctValuesCollector extends AbstractGroupingTestCase { Random random = random(); Directory dir = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter( - random, - dir, - newIndexWriterConfig(new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()) - ); + RandomIndexWriter w = + new RandomIndexWriter( + random, + dir, + newIndexWriterConfig(new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); int numDocs = 86 + random.nextInt(1087) * RANDOM_MULTIPLIER; String[] groupValues = new String[numDocs / 5]; @@ -399,12 +424,14 @@ public class TestDistinctValuesCollector extends AbstractGroupingTestCase { for (int i = 0; i < countValues.length; i++) { countValues[i] = generateRandomNonEmptyString(); } - + List contentStrings = new ArrayList<>(); Map>> searchTermToGroupCounts = new HashMap<>(); for (int i = 1; i <= numDocs; i++) { - String groupValue = random.nextInt(23) == 14 ? null : groupValues[random.nextInt(groupValues.length)]; - String countValue = random.nextInt(21) == 13 ? null : countValues[random.nextInt(countValues.length)]; + String groupValue = + random.nextInt(23) == 14 ? null : groupValues[random.nextInt(groupValues.length)]; + String countValue = + random.nextInt(21) == 13 ? 
null : countValues[random.nextInt(countValues.length)]; String content = "random" + random.nextInt(numDocs / 20); Map> groupToCounts = searchTermToGroupCounts.get(content); if (groupToCounts == null) { @@ -434,14 +461,28 @@ public class TestDistinctValuesCollector extends AbstractGroupingTestCase { DirectoryReader reader = w.getReader(); if (VERBOSE) { - for(int docID=0;docID>> searchTermToGroupCounts; final String[] contentStrings; - IndexContext(Directory directory, DirectoryReader indexReader, - Map>> searchTermToGroupCounts, String[] contentStrings) { + IndexContext( + Directory directory, + DirectoryReader indexReader, + Map>> searchTermToGroupCounts, + String[] contentStrings) { this.directory = directory; this.indexReader = indexReader; this.searchTermToGroupCounts = searchTermToGroupCounts; @@ -463,7 +507,7 @@ public class TestDistinctValuesCollector extends AbstractGroupingTestCase { private static class NullComparator implements Comparator> { @Override - @SuppressWarnings({"unchecked","rawtypes"}) + @SuppressWarnings({"unchecked", "rawtypes"}) public int compare(Comparable a, Comparable b) { if (a == b) { return 0; @@ -475,7 +519,5 @@ public class TestDistinctValuesCollector extends AbstractGroupingTestCase { return a.compareTo(b); } } - } - } diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestDoubleRangeFactory.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestDoubleRangeFactory.java index d6e05c33642..0e52ef24e54 100644 --- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestDoubleRangeFactory.java +++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestDoubleRangeFactory.java @@ -33,7 +33,5 @@ public class TestDoubleRangeFactory extends LuceneTestCase { assertEquals(new DoubleRange(30, 40), factory.getRange(35, scratch)); assertEquals(new DoubleRange(50, Double.MAX_VALUE), factory.getRange(50, scratch)); assertEquals(new DoubleRange(50, Double.MAX_VALUE), factory.getRange(500, scratch)); - } - } diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestDoubleRangeGroupSelector.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestDoubleRangeGroupSelector.java index 5350a04d639..ada3c04c373 100644 --- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestDoubleRangeGroupSelector.java +++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestDoubleRangeGroupSelector.java @@ -32,7 +32,7 @@ public class TestDoubleRangeGroupSelector extends BaseGroupSelectorTestCase getGroupSelector() { - return new DoubleRangeGroupSelector(DoubleValuesSource.fromDoubleField("double"), - new DoubleRangeFactory(100, 100, 900)); + return new DoubleRangeGroupSelector( + DoubleValuesSource.fromDoubleField("double"), new DoubleRangeFactory(100, 100, 900)); } @Override diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGroupFacetCollector.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGroupFacetCollector.java index fffcd69948c..8060e152311 100644 --- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGroupFacetCollector.java +++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGroupFacetCollector.java @@ -29,7 +29,6 @@ import java.util.NavigableSet; import java.util.Random; import java.util.Set; import java.util.TreeSet; - import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; @@ -48,7 +47,6 @@ import 
org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.TestUtil; - public class TestGroupFacetCollector extends AbstractGroupingTestCase { public void testSimple() throws Exception { @@ -57,10 +55,11 @@ public class TestGroupFacetCollector extends AbstractGroupingTestCase { customType.setStored(true); Directory dir = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter( - random(), - dir, - newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy())); + RandomIndexWriter w = + new RandomIndexWriter( + random(), + dir, + newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy())); boolean useDv = true; // 0 @@ -104,18 +103,19 @@ public class TestGroupFacetCollector extends AbstractGroupingTestCase { List entries; GroupFacetCollector groupedAirportFacetCollector; TermGroupFacetCollector.GroupedFacetResult airportResult; - - for (int limit : new int[] { 2, 10, 100, Integer.MAX_VALUE }) { + + for (int limit : new int[] {2, 10, 100, Integer.MAX_VALUE}) { // any of these limits is plenty for the data we have - groupedAirportFacetCollector = createRandomCollector - (useDv ? "hotel_dv" : "hotel", - useDv ? "airport_dv" : "airport", null, false); + groupedAirportFacetCollector = + createRandomCollector( + useDv ? "hotel_dv" : "hotel", useDv ? "airport_dv" : "airport", null, false); indexSearcher.search(new MatchAllDocsQuery(), groupedAirportFacetCollector); int maxOffset = 5; - airportResult = groupedAirportFacetCollector.mergeSegmentResults - (Integer.MAX_VALUE == limit ? limit : maxOffset + limit, 0, false); - + airportResult = + groupedAirportFacetCollector.mergeSegmentResults( + Integer.MAX_VALUE == limit ? limit : maxOffset + limit, 0, false); + assertEquals(3, airportResult.getTotalCount()); assertEquals(0, airportResult.getTotalMissingCount()); @@ -135,9 +135,12 @@ public class TestGroupFacetCollector extends AbstractGroupingTestCase { assertEquals(1, entries.get(0).getCount()); } - GroupFacetCollector groupedDurationFacetCollector = createRandomCollector(useDv ? "hotel_dv" : "hotel", useDv ? "duration_dv" : "duration", null, false); + GroupFacetCollector groupedDurationFacetCollector = + createRandomCollector( + useDv ? "hotel_dv" : "hotel", useDv ? "duration_dv" : "duration", null, false); indexSearcher.search(new MatchAllDocsQuery(), groupedDurationFacetCollector); - TermGroupFacetCollector.GroupedFacetResult durationResult = groupedDurationFacetCollector.mergeSegmentResults(10, 0, false); + TermGroupFacetCollector.GroupedFacetResult durationResult = + groupedDurationFacetCollector.mergeSegmentResults(10, 0, false); assertEquals(4, durationResult.getTotalCount()); assertEquals(0, durationResult.getTotalMissingCount()); @@ -181,7 +184,9 @@ public class TestGroupFacetCollector extends AbstractGroupingTestCase { indexSearcher.getIndexReader().close(); indexSearcher = newSearcher(w.getReader()); - groupedAirportFacetCollector = createRandomCollector(useDv ? "hotel_dv" : "hotel", useDv ? "airport_dv" : "airport", null, !useDv); + groupedAirportFacetCollector = + createRandomCollector( + useDv ? "hotel_dv" : "hotel", useDv ? 
"airport_dv" : "airport", null, !useDv); indexSearcher.search(new MatchAllDocsQuery(), groupedAirportFacetCollector); airportResult = groupedAirportFacetCollector.mergeSegmentResults(3, 0, true); entries = airportResult.getFacetEntries(1, 2); @@ -202,7 +207,9 @@ public class TestGroupFacetCollector extends AbstractGroupingTestCase { assertEquals(1, entries.get(1).getCount()); } - groupedDurationFacetCollector = createRandomCollector(useDv ? "hotel_dv" : "hotel", useDv ? "duration_dv" : "duration", null, false); + groupedDurationFacetCollector = + createRandomCollector( + useDv ? "hotel_dv" : "hotel", useDv ? "duration_dv" : "duration", null, false); indexSearcher.search(new MatchAllDocsQuery(), groupedDurationFacetCollector); durationResult = groupedDurationFacetCollector.mergeSegmentResults(10, 2, true); assertEquals(5, durationResult.getTotalCount()); @@ -229,7 +236,9 @@ public class TestGroupFacetCollector extends AbstractGroupingTestCase { indexSearcher.getIndexReader().close(); indexSearcher = newSearcher(w.getReader()); - groupedAirportFacetCollector = createRandomCollector(useDv ? "hotel_dv" : "hotel", useDv ? "airport_dv" : "airport", null, false); + groupedAirportFacetCollector = + createRandomCollector( + useDv ? "hotel_dv" : "hotel", useDv ? "airport_dv" : "airport", null, false); indexSearcher.search(new MatchAllDocsQuery(), groupedAirportFacetCollector); airportResult = groupedAirportFacetCollector.mergeSegmentResults(10, 0, false); entries = airportResult.getFacetEntries(0, 10); @@ -257,7 +266,9 @@ public class TestGroupFacetCollector extends AbstractGroupingTestCase { assertEquals(2, entries.get(2).getCount()); } - groupedDurationFacetCollector = createRandomCollector(useDv ? "hotel_dv" : "hotel", useDv ? "duration_dv" : "duration", "1", false); + groupedDurationFacetCollector = + createRandomCollector( + useDv ? "hotel_dv" : "hotel", useDv ? 
"duration_dv" : "duration", "1", false); indexSearcher.search(new MatchAllDocsQuery(), groupedDurationFacetCollector); durationResult = groupedDurationFacetCollector.mergeSegmentResults(10, 0, true); assertEquals(5, durationResult.getTotalCount()); @@ -281,10 +292,12 @@ public class TestGroupFacetCollector extends AbstractGroupingTestCase { customType.setStored(true); Directory dir = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter( - random(), - dir, - newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(NoMergePolicy.INSTANCE)); + RandomIndexWriter w = + new RandomIndexWriter( + random(), + dir, + newIndexWriterConfig(new MockAnalyzer(random())) + .setMergePolicy(NoMergePolicy.INSTANCE)); boolean useDv = true; // Cannot assert this since we use NoMergePolicy: @@ -343,9 +356,11 @@ public class TestGroupFacetCollector extends AbstractGroupingTestCase { w.close(); IndexSearcher indexSearcher = newSearcher(DirectoryReader.open(dir)); - GroupFacetCollector groupedAirportFacetCollector = createRandomCollector(groupField + "_dv", "airport", null, true); + GroupFacetCollector groupedAirportFacetCollector = + createRandomCollector(groupField + "_dv", "airport", null, true); indexSearcher.search(new MatchAllDocsQuery(), groupedAirportFacetCollector); - TermGroupFacetCollector.GroupedFacetResult airportResult = groupedAirportFacetCollector.mergeSegmentResults(10, 0, false); + TermGroupFacetCollector.GroupedFacetResult airportResult = + groupedAirportFacetCollector.mergeSegmentResults(10, 0, false); assertEquals(3, airportResult.getTotalCount()); assertEquals(1, airportResult.getTotalMissingCount()); @@ -385,9 +400,11 @@ public class TestGroupFacetCollector extends AbstractGroupingTestCase { int limit = random.nextInt(context.facetValues.size()); int offset = random.nextInt(context.facetValues.size() - limit); int size = offset + limit; - int minCount = random.nextBoolean() ? 0 : random.nextInt(1 + context.facetWithMostGroups / 10); + int minCount = + random.nextBoolean() ? 
0 : random.nextInt(1 + context.facetWithMostGroups / 10); boolean orderByCount = random.nextBoolean(); - String randomStr = getFromSet(context.facetValues, random.nextInt(context.facetValues.size())); + String randomStr = + getFromSet(context.facetValues, random.nextInt(context.facetValues.size())); final String facetPrefix; if (randomStr == null) { facetPrefix = null; @@ -402,13 +419,19 @@ public class TestGroupFacetCollector extends AbstractGroupingTestCase { } } - GroupedFacetResult expectedFacetResult = createExpectedFacetResult(searchTerm, context, offset, limit, minCount, orderByCount, facetPrefix); - GroupFacetCollector groupFacetCollector = createRandomCollector("group", "facet", facetPrefix, multipleFacetsPerDocument); + GroupedFacetResult expectedFacetResult = + createExpectedFacetResult( + searchTerm, context, offset, limit, minCount, orderByCount, facetPrefix); + GroupFacetCollector groupFacetCollector = + createRandomCollector("group", "facet", facetPrefix, multipleFacetsPerDocument); searcher.search(new TermQuery(new Term("content", searchTerm)), groupFacetCollector); - TermGroupFacetCollector.GroupedFacetResult actualFacetResult = groupFacetCollector.mergeSegmentResults(size, minCount, orderByCount); + TermGroupFacetCollector.GroupedFacetResult actualFacetResult = + groupFacetCollector.mergeSegmentResults(size, minCount, orderByCount); - List expectedFacetEntries = expectedFacetResult.getFacetEntries(); - List actualFacetEntries = actualFacetResult.getFacetEntries(offset, limit); + List expectedFacetEntries = + expectedFacetResult.getFacetEntries(); + List actualFacetEntries = + actualFacetResult.getFacetEntries(offset, limit); if (VERBOSE) { System.out.println("Collector: " + groupFacetCollector.getClass().getSimpleName()); @@ -431,11 +454,12 @@ public class TestGroupFacetCollector extends AbstractGroupingTestCase { int counter = 0; for (TermGroupFacetCollector.FacetEntry expectedFacetEntry : expectedFacetEntries) { System.out.println( - String.format(Locale.ROOT, + String.format( + Locale.ROOT, "%d. Expected facet value %s with count %d", - counter++, expectedFacetEntry.getValue().utf8ToString(), expectedFacetEntry.getCount() - ) - ); + counter++, + expectedFacetEntry.getValue().utf8ToString(), + expectedFacetEntry.getCount())); } System.out.println("\n=== Actual: \n"); @@ -444,23 +468,42 @@ public class TestGroupFacetCollector extends AbstractGroupingTestCase { counter = 0; for (TermGroupFacetCollector.FacetEntry actualFacetEntry : actualFacetEntries) { System.out.println( - String.format(Locale.ROOT, + String.format( + Locale.ROOT, "%d. 
Actual facet value %s with count %d", - counter++, actualFacetEntry.getValue().utf8ToString(), actualFacetEntry.getCount() - ) - ); + counter++, + actualFacetEntry.getValue().utf8ToString(), + actualFacetEntry.getCount())); } - System.out.println("\n==================================================================================="); + System.out.println( + "\n==================================================================================="); } - + assertEquals(expectedFacetResult.getTotalCount(), actualFacetResult.getTotalCount()); - assertEquals(expectedFacetResult.getTotalMissingCount(), actualFacetResult.getTotalMissingCount()); + assertEquals( + expectedFacetResult.getTotalMissingCount(), actualFacetResult.getTotalMissingCount()); assertEquals(expectedFacetEntries.size(), actualFacetEntries.size()); for (int i = 0; i < expectedFacetEntries.size(); i++) { TermGroupFacetCollector.FacetEntry expectedFacetEntry = expectedFacetEntries.get(i); TermGroupFacetCollector.FacetEntry actualFacetEntry = actualFacetEntries.get(i); - assertEquals("i=" + i + ": " + expectedFacetEntry.getValue().utf8ToString() + " != " + actualFacetEntry.getValue().utf8ToString(), expectedFacetEntry.getValue(), actualFacetEntry.getValue()); - assertEquals("i=" + i + ": " + expectedFacetEntry.getCount() + " != " + actualFacetEntry.getCount(), expectedFacetEntry.getCount(), actualFacetEntry.getCount()); + assertEquals( + "i=" + + i + + ": " + + expectedFacetEntry.getValue().utf8ToString() + + " != " + + actualFacetEntry.getValue().utf8ToString(), + expectedFacetEntry.getValue(), + actualFacetEntry.getValue()); + assertEquals( + "i=" + + i + + ": " + + expectedFacetEntry.getCount() + + " != " + + actualFacetEntry.getCount(), + expectedFacetEntry.getCount(), + actualFacetEntry.getCount()); } } @@ -469,7 +512,8 @@ public class TestGroupFacetCollector extends AbstractGroupingTestCase { } } - private IndexContext createIndexContext(boolean multipleFacetValuesPerDocument) throws IOException { + private IndexContext createIndexContext(boolean multipleFacetValuesPerDocument) + throws IOException { final Random random = random(); final int numDocs = TestUtil.nextInt(random, 138, 1145) * RANDOM_MULTIPLIER; final int numGroups = TestUtil.nextInt(random, 1, numDocs / 4); @@ -499,11 +543,8 @@ public class TestGroupFacetCollector extends AbstractGroupingTestCase { } Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter( - random, - dir, - newIndexWriterConfig(new MockAnalyzer(random)) - ); + RandomIndexWriter writer = + new RandomIndexWriter(random, dir, newIndexWriterConfig(new MockAnalyzer(random))); Document doc = new Document(); Document docNoGroup = new Document(); Document docNoFacet = new Document(); @@ -524,7 +565,8 @@ public class TestGroupFacetCollector extends AbstractGroupingTestCase { doc.add(facetFields[1]); docNoGroup.add(facetFields[1]); } else { - facetFields = multipleFacetValuesPerDocument ? new Field[2 + random.nextInt(6)] : new Field[1]; + facetFields = + multipleFacetValuesPerDocument ? 
new Field[2 + random.nextInt(6)] : new Field[1]; for (int i = 0; i < facetFields.length; i++) { facetFields[i] = new SortedSetDocValuesField("facet", new BytesRef()); doc.add(facetFields[i]); @@ -537,22 +579,23 @@ public class TestGroupFacetCollector extends AbstractGroupingTestCase { docNoFacet.add(content); docNoGroupNoFacet.add(content); - NavigableSet uniqueFacetValues = new TreeSet<>(new Comparator() { + NavigableSet uniqueFacetValues = + new TreeSet<>( + new Comparator() { - @Override - public int compare(String a, String b) { - if (a == b) { - return 0; - } else if (a == null) { - return -1; - } else if (b == null) { - return 1; - } else { - return a.compareTo(b); - } - } - - }); + @Override + public int compare(String a, String b) { + if (a == b) { + return 0; + } else if (a == null) { + return -1; + } else if (b == null) { + return 1; + } else { + return a.compareTo(b); + } + } + }); Map>> searchTermToFacetToGroups = new HashMap<>(); int facetWithMostGroups = 0; for (int i = 0; i < numDocs; i++) { @@ -604,7 +647,13 @@ public class TestGroupFacetCollector extends AbstractGroupingTestCase { } if (VERBOSE) { - System.out.println(" doc content=" + contentStr + " group=" + (groupValue == null ? "null" : groupValue) + " facetVals=" + facetVals); + System.out.println( + " doc content=" + + contentStr + + " group=" + + (groupValue == null ? "null" : groupValue) + + " facetVals=" + + facetVals); } if (groupValue != null) { @@ -630,10 +679,25 @@ public class TestGroupFacetCollector extends AbstractGroupingTestCase { DirectoryReader reader = writer.getReader(); writer.close(); - return new IndexContext(searchTermToFacetToGroups, reader, numDocs, dir, facetWithMostGroups, numGroups, contentBrs, uniqueFacetValues); + return new IndexContext( + searchTermToFacetToGroups, + reader, + numDocs, + dir, + facetWithMostGroups, + numGroups, + contentBrs, + uniqueFacetValues); } - private GroupedFacetResult createExpectedFacetResult(String searchTerm, IndexContext context, int offset, int limit, int minCount, final boolean orderByCount, String facetPrefix) { + private GroupedFacetResult createExpectedFacetResult( + String searchTerm, + IndexContext context, + int offset, + int limit, + int minCount, + final boolean orderByCount, + String facetPrefix) { Map> facetGroups = context.searchTermToFacetGroups.get(searchTerm); if (facetGroups == null) { facetGroups = new HashMap<>(); @@ -676,20 +740,22 @@ public class TestGroupFacetCollector extends AbstractGroupingTestCase { } } - Collections.sort(entries, new Comparator() { + Collections.sort( + entries, + new Comparator() { - @Override - public int compare(TermGroupFacetCollector.FacetEntry a, TermGroupFacetCollector.FacetEntry b) { - if (orderByCount) { - int cmp = b.getCount() - a.getCount(); - if (cmp != 0) { - return cmp; + @Override + public int compare( + TermGroupFacetCollector.FacetEntry a, TermGroupFacetCollector.FacetEntry b) { + if (orderByCount) { + int cmp = b.getCount() - a.getCount(); + if (cmp != 0) { + return cmp; + } + } + return a.getValue().compareTo(b.getValue()); } - } - return a.getValue().compareTo(b.getValue()); - } - - }); + }); int endOffset = offset + limit; List entriesResult; @@ -703,9 +769,11 @@ public class TestGroupFacetCollector extends AbstractGroupingTestCase { return new GroupedFacetResult(totalCount, totalMissCount, entriesResult); } - private GroupFacetCollector createRandomCollector(String groupField, String facetField, String facetPrefix, boolean multipleFacetsPerDocument) { + private GroupFacetCollector 
createRandomCollector( + String groupField, String facetField, String facetPrefix, boolean multipleFacetsPerDocument) { BytesRef facetPrefixBR = facetPrefix == null ? null : new BytesRef(facetPrefix); - return TermGroupFacetCollector.createTermGroupFacetCollector(groupField, facetField, multipleFacetsPerDocument, facetPrefixBR, random().nextInt(1024)); + return TermGroupFacetCollector.createTermGroupFacetCollector( + groupField, facetField, multipleFacetsPerDocument, facetPrefixBR, random().nextInt(1024)); } private String getFromSet(Set set, int index) { @@ -730,8 +798,15 @@ public class TestGroupFacetCollector extends AbstractGroupingTestCase { final int numGroups; final String[] contentStrings; - public IndexContext(Map>> searchTermToFacetGroups, DirectoryReader r, - int numDocs, Directory dir, int facetWithMostGroups, int numGroups, String[] contentStrings, NavigableSet facetValues) { + public IndexContext( + Map>> searchTermToFacetGroups, + DirectoryReader r, + int numDocs, + Directory dir, + int facetWithMostGroups, + int numGroups, + String[] contentStrings, + NavigableSet facetValues) { this.searchTermToFacetGroups = searchTermToFacetGroups; this.indexReader = r; this.numDocs = numDocs; @@ -749,7 +824,10 @@ public class TestGroupFacetCollector extends AbstractGroupingTestCase { final int totalMissingCount; final List facetEntries; - private GroupedFacetResult(int totalCount, int totalMissingCount, List facetEntries) { + private GroupedFacetResult( + int totalCount, + int totalMissingCount, + List facetEntries) { this.totalCount = totalCount; this.totalMissingCount = totalMissingCount; this.facetEntries = facetEntries; @@ -767,5 +845,4 @@ public class TestGroupFacetCollector extends AbstractGroupingTestCase { return facetEntries; } } - } diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java index 4ef6c02d70e..df53dff0016 100644 --- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java +++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java @@ -27,7 +27,6 @@ import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; - import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; @@ -84,10 +83,11 @@ public class TestGrouping extends LuceneTestCase { customType.setStored(true); Directory dir = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter( - random(), - dir, - newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy())); + RandomIndexWriter w = + new RandomIndexWriter( + random(), + dir, + newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy())); // 0 Document doc = new Document(); addGroupField(doc, groupField, "author1"); @@ -143,10 +143,12 @@ public class TestGrouping extends LuceneTestCase { final Sort groupSort = Sort.RELEVANCE; - final FirstPassGroupingCollector c1 = createRandomFirstPassCollector(groupField, groupSort, 10); + final FirstPassGroupingCollector c1 = + createRandomFirstPassCollector(groupField, groupSort, 10); indexSearcher.search(new TermQuery(new Term("content", "random")), c1); - final TopGroupsCollector c2 = createSecondPassCollector(c1, groupSort, Sort.RELEVANCE, 0, 5, true); + final TopGroupsCollector c2 = + createSecondPassCollector(c1, groupSort, Sort.RELEVANCE, 0, 5, true); indexSearcher.search(new TermQuery(new 
Term("content", "random")), c2); final TopGroups groups = c2.getTopGroups(0); @@ -194,49 +196,74 @@ public class TestGrouping extends LuceneTestCase { doc.add(new SortedDocValuesField(groupField, new BytesRef(value))); } - private FirstPassGroupingCollector createRandomFirstPassCollector(String groupField, Sort groupSort, int topDocs) throws IOException { + private FirstPassGroupingCollector createRandomFirstPassCollector( + String groupField, Sort groupSort, int topDocs) throws IOException { if (random().nextBoolean()) { ValueSource vs = new BytesRefFieldSource(groupField); - return new FirstPassGroupingCollector<>(new ValueSourceGroupSelector(vs, new HashMap<>()), groupSort, topDocs); + return new FirstPassGroupingCollector<>( + new ValueSourceGroupSelector(vs, new HashMap<>()), groupSort, topDocs); } else { - return new FirstPassGroupingCollector<>(new TermGroupSelector(groupField), groupSort, topDocs); + return new FirstPassGroupingCollector<>( + new TermGroupSelector(groupField), groupSort, topDocs); } } - private FirstPassGroupingCollector createFirstPassCollector(String groupField, Sort groupSort, int topDocs, FirstPassGroupingCollector firstPassGroupingCollector) throws IOException { + private FirstPassGroupingCollector createFirstPassCollector( + String groupField, + Sort groupSort, + int topDocs, + FirstPassGroupingCollector firstPassGroupingCollector) + throws IOException { GroupSelector selector = firstPassGroupingCollector.getGroupSelector(); if (TermGroupSelector.class.isAssignableFrom(selector.getClass())) { ValueSource vs = new BytesRefFieldSource(groupField); - return new FirstPassGroupingCollector<>(new ValueSourceGroupSelector(vs, new HashMap<>()), groupSort, topDocs); + return new FirstPassGroupingCollector<>( + new ValueSourceGroupSelector(vs, new HashMap<>()), groupSort, topDocs); } else { - return new FirstPassGroupingCollector<>(new TermGroupSelector(groupField), groupSort, topDocs); + return new FirstPassGroupingCollector<>( + new TermGroupSelector(groupField), groupSort, topDocs); } } - @SuppressWarnings({"unchecked","rawtypes"}) - private TopGroupsCollector createSecondPassCollector(FirstPassGroupingCollector firstPassGroupingCollector, - Sort groupSort, - Sort sortWithinGroup, - int groupOffset, - int maxDocsPerGroup, - boolean getMaxScores) throws IOException { + @SuppressWarnings({"unchecked", "rawtypes"}) + private TopGroupsCollector createSecondPassCollector( + FirstPassGroupingCollector firstPassGroupingCollector, + Sort groupSort, + Sort sortWithinGroup, + int groupOffset, + int maxDocsPerGroup, + boolean getMaxScores) + throws IOException { Collection> searchGroups = firstPassGroupingCollector.getTopGroups(groupOffset); - return new TopGroupsCollector<>(firstPassGroupingCollector.getGroupSelector(), searchGroups, groupSort, sortWithinGroup, maxDocsPerGroup, getMaxScores); + return new TopGroupsCollector<>( + firstPassGroupingCollector.getGroupSelector(), + searchGroups, + groupSort, + sortWithinGroup, + maxDocsPerGroup, + getMaxScores); } // Basically converts searchGroups from MutableValue to BytesRef if grouping by ValueSource @SuppressWarnings("unchecked") - private TopGroupsCollector createSecondPassCollector(FirstPassGroupingCollector firstPassGroupingCollector, - String groupField, - Collection> searchGroups, - Sort groupSort, - Sort sortWithinGroup, - int maxDocsPerGroup, - boolean getMaxScores) throws IOException { - if (firstPassGroupingCollector.getGroupSelector().getClass().isAssignableFrom(TermGroupSelector.class)) { - GroupSelector selector = 
(GroupSelector) firstPassGroupingCollector.getGroupSelector(); - return new TopGroupsCollector<>(selector, searchGroups, groupSort, sortWithinGroup, maxDocsPerGroup, getMaxScores); + private TopGroupsCollector createSecondPassCollector( + FirstPassGroupingCollector firstPassGroupingCollector, + String groupField, + Collection> searchGroups, + Sort groupSort, + Sort sortWithinGroup, + int maxDocsPerGroup, + boolean getMaxScores) + throws IOException { + if (firstPassGroupingCollector + .getGroupSelector() + .getClass() + .isAssignableFrom(TermGroupSelector.class)) { + GroupSelector selector = + (GroupSelector) firstPassGroupingCollector.getGroupSelector(); + return new TopGroupsCollector<>( + selector, searchGroups, groupSort, sortWithinGroup, maxDocsPerGroup, getMaxScores); } else { ValueSource vs = new BytesRefFieldSource(groupField); List> mvalSearchGroups = new ArrayList<>(searchGroups.size()); @@ -253,12 +280,13 @@ public class TestGrouping extends LuceneTestCase { mvalSearchGroups.add(sg); } ValueSourceGroupSelector selector = new ValueSourceGroupSelector(vs, new HashMap<>()); - return new TopGroupsCollector<>(selector, mvalSearchGroups, groupSort, sortWithinGroup, maxDocsPerGroup, getMaxScores); + return new TopGroupsCollector<>( + selector, mvalSearchGroups, groupSort, sortWithinGroup, maxDocsPerGroup, getMaxScores); } } - private AllGroupsCollector createAllGroupsCollector(FirstPassGroupingCollector firstPassGroupingCollector, - String groupField) { + private AllGroupsCollector createAllGroupsCollector( + FirstPassGroupingCollector firstPassGroupingCollector, String groupField) { return new AllGroupsCollector<>(firstPassGroupingCollector.getGroupSelector()); } @@ -285,15 +313,18 @@ public class TestGrouping extends LuceneTestCase { } } - private Collection> getSearchGroups(FirstPassGroupingCollector c, int groupOffset) throws IOException { + private Collection> getSearchGroups( + FirstPassGroupingCollector c, int groupOffset) throws IOException { if (TermGroupSelector.class.isAssignableFrom(c.getGroupSelector().getClass())) { @SuppressWarnings("unchecked") FirstPassGroupingCollector collector = (FirstPassGroupingCollector) c; return collector.getTopGroups(groupOffset); } else if (ValueSourceGroupSelector.class.isAssignableFrom(c.getGroupSelector().getClass())) { @SuppressWarnings("unchecked") - FirstPassGroupingCollector collector = (FirstPassGroupingCollector) c; - Collection> mutableValueGroups = collector.getTopGroups(groupOffset); + FirstPassGroupingCollector collector = + (FirstPassGroupingCollector) c; + Collection> mutableValueGroups = + collector.getTopGroups(groupOffset); if (mutableValueGroups == null) { return null; } @@ -301,7 +332,10 @@ public class TestGrouping extends LuceneTestCase { List> groups = new ArrayList<>(mutableValueGroups.size()); for (SearchGroup mutableValueGroup : mutableValueGroups) { SearchGroup sg = new SearchGroup<>(); - sg.groupValue = mutableValueGroup.groupValue.exists() ? ((MutableValueStr) mutableValueGroup.groupValue).value.get() : null; + sg.groupValue = + mutableValueGroup.groupValue.exists() + ? ((MutableValueStr) mutableValueGroup.groupValue).value.get() + : null; sg.sortValues = mutableValueGroup.sortValues; groups.add(sg); } @@ -321,12 +355,27 @@ public class TestGrouping extends LuceneTestCase { TopGroups mvalTopGroups = collector.getTopGroups(withinGroupOffset); List> groups = new ArrayList<>(mvalTopGroups.groups.length); for (GroupDocs mvalGd : mvalTopGroups.groups) { - BytesRef groupValue = mvalGd.groupValue.exists() ? 
((MutableValueStr) mvalGd.groupValue).value.get() : null; - groups.add(new GroupDocs<>(Float.NaN, mvalGd.maxScore, mvalGd.totalHits, mvalGd.scoreDocs, groupValue, mvalGd.groupSortValues)); + BytesRef groupValue = + mvalGd.groupValue.exists() ? ((MutableValueStr) mvalGd.groupValue).value.get() : null; + groups.add( + new GroupDocs<>( + Float.NaN, + mvalGd.maxScore, + mvalGd.totalHits, + mvalGd.scoreDocs, + groupValue, + mvalGd.groupSortValues)); } - // NOTE: currenlty using diamond operator on MergedIterator (without explicit Term class) causes + // NOTE: currently using diamond operator on MergedIterator (without explicit Term class) + // causes // errors on Eclipse Compiler (ecj) used for javadoc lint - return new TopGroups<BytesRef>(mvalTopGroups.groupSort, mvalTopGroups.withinGroupSort, mvalTopGroups.totalHitCount, mvalTopGroups.totalGroupedHitCount, groups.toArray(new GroupDocs[groups.size()]), Float.NaN); + return new TopGroups<BytesRef>( + mvalTopGroups.groupSort, + mvalTopGroups.withinGroupSort, + mvalTopGroups.totalHitCount, + mvalTopGroups.totalGroupedHitCount, + groups.toArray(new GroupDocs[groups.size()]), + Float.NaN); } fail(); return null; @@ -377,7 +426,7 @@ public class TestGrouping extends LuceneTestCase { return new Comparator<GroupDoc>() { @Override public int compare(GroupDoc d1, GroupDoc d2) { - for(SortField sf : sortFields) { + for (SortField sf : sortFields) { final int cmp; if (sf.getType() == SortField.Type.SCORE) { if (d1.score > d2.score) { @@ -406,11 +455,11 @@ public class TestGrouping extends LuceneTestCase { }; } - @SuppressWarnings({"unchecked","rawtypes"}) + @SuppressWarnings({"unchecked", "rawtypes"}) private Comparable<?>[] fillFields(GroupDoc d, Sort sort) { final SortField[] sortFields = sort.getSort(); final Comparable<?>[] fields = new Comparable[sortFields.length]; - for(int fieldIDX=0;fieldIDX<sortFields.length;fieldIDX++) { + for (int fieldIDX = 0; fieldIDX < sortFields.length; fieldIDX++) { final Comparable<?> c; final SortField sf = sortFields[fieldIDX]; if (sf.getType() == SortField.Type.SCORE) { @@ -436,46 +485,47 @@ } } - private TopGroups<BytesRef> slowGrouping(GroupDoc[] groupDocs, - String searchTerm, - boolean getMaxScores, - boolean doAllGroups, - Sort groupSort, - Sort docSort, - int topNGroups, - int docsPerGroup, - int groupOffset, - int docOffset) { + private TopGroups<BytesRef> slowGrouping( + GroupDoc[] groupDocs, + String searchTerm, + boolean getMaxScores, + boolean doAllGroups, + Sort groupSort, + Sort docSort, + int topNGroups, + int docsPerGroup, + int groupOffset, + int docOffset) { final Comparator<GroupDoc> groupSortComp = getComparator(groupSort); Arrays.sort(groupDocs, groupSortComp); - final HashMap<BytesRef,List<GroupDoc>> groups = new HashMap<>(); + final HashMap<BytesRef, List<GroupDoc>> groups = new HashMap<>(); final List<BytesRef> sortedGroups = new ArrayList<>(); final List<Comparable<?>[]> sortedGroupFields = new ArrayList<>(); int totalHitCount = 0; Set<BytesRef> knownGroups = new HashSet<>(); - //System.out.println("TEST: slowGrouping"); - for(GroupDoc d : groupDocs) { + // System.out.println("TEST: slowGrouping"); + for (GroupDoc d : groupDocs) { // TODO: would be better to filter by searchTerm before sorting!
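// Brute-force oracle: sort every matching GroupDoc by groupSort, bucket docs by group
// value in that order, then apply the groupOffset/docOffset paging, so the collectors'
// output can be verified against an independent implementation.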
if (!d.content.startsWith(searchTerm)) { continue; } totalHitCount++; - //System.out.println(" match id=" + d.id + " score=" + d.score); + // System.out.println(" match id=" + d.id + " score=" + d.score); if (doAllGroups) { if (!knownGroups.contains(d.group)) { knownGroups.add(d.group); - //System.out.println(" add group=" + groupToString(d.group)); + // System.out.println(" add group=" + groupToString(d.group)); } } List l = groups.get(d.group); if (l == null) { - //System.out.println(" add sortedGroup=" + groupToString(d.group)); + // System.out.println(" add sortedGroup=" + groupToString(d.group)); sortedGroups.add(d.group); sortedGroupFields.add(fillFields(d, groupSort)); l = new ArrayList<>(); @@ -492,10 +542,10 @@ public class TestGrouping extends LuceneTestCase { final int limit = Math.min(groupOffset + topNGroups, groups.size()); final Comparator docSortComp = getComparator(docSort); - @SuppressWarnings({"unchecked","rawtypes"}) - final GroupDocs[] result = new GroupDocs[limit-groupOffset]; + @SuppressWarnings({"unchecked", "rawtypes"}) + final GroupDocs[] result = new GroupDocs[limit - groupOffset]; int totalGroupedHitCount = 0; - for(int idx=groupOffset;idx < limit;idx++) { + for (int idx = groupOffset; idx < limit; idx++) { final BytesRef group = sortedGroups.get(idx); final List docs = groups.get(group); totalGroupedHitCount += docs.size(); @@ -504,41 +554,55 @@ public class TestGrouping extends LuceneTestCase { if (docs.size() > docOffset) { final int docIDXLimit = Math.min(docOffset + docsPerGroup, docs.size()); hits = new ScoreDoc[docIDXLimit - docOffset]; - for(int docIDX=docOffset; docIDX < docIDXLimit; docIDX++) { + for (int docIDX = docOffset; docIDX < docIDXLimit; docIDX++) { final GroupDoc d = docs.get(docIDX); final FieldDoc fd; fd = new FieldDoc(d.id, Float.NaN, fillFields(d, docSort)); - hits[docIDX-docOffset] = fd; + hits[docIDX - docOffset] = fd; } - } else { + } else { hits = new ScoreDoc[0]; } - result[idx-groupOffset] = new GroupDocs<>(Float.NaN, - 0.0f, - new TotalHits(docs.size(), TotalHits.Relation.EQUAL_TO), - hits, - group, - sortedGroupFields.get(idx)); + result[idx - groupOffset] = + new GroupDocs<>( + Float.NaN, + 0.0f, + new TotalHits(docs.size(), TotalHits.Relation.EQUAL_TO), + hits, + group, + sortedGroupFields.get(idx)); } if (doAllGroups) { return new TopGroups<>( - new TopGroups<>(groupSort.getSort(), docSort.getSort(), totalHitCount, totalGroupedHitCount, result, Float.NaN), - knownGroups.size() - ); + new TopGroups<>( + groupSort.getSort(), + docSort.getSort(), + totalHitCount, + totalGroupedHitCount, + result, + Float.NaN), + knownGroups.size()); } else { - return new TopGroups<>(groupSort.getSort(), docSort.getSort(), totalHitCount, totalGroupedHitCount, result, Float.NaN); + return new TopGroups<>( + groupSort.getSort(), + docSort.getSort(), + totalHitCount, + totalGroupedHitCount, + result, + Float.NaN); } } - private DirectoryReader getDocBlockReader(Directory dir, GroupDoc[] groupDocs) throws IOException { + private DirectoryReader getDocBlockReader(Directory dir, GroupDoc[] groupDocs) + throws IOException { // Coalesce by group, but in random order: Collections.shuffle(Arrays.asList(groupDocs), random()); - final Map> groupMap = new HashMap<>(); + final Map> groupMap = new HashMap<>(); final List groupValues = new ArrayList<>(); - for(GroupDoc groupDoc : groupDocs) { + for (GroupDoc groupDoc : groupDocs) { if (!groupMap.containsKey(groupDoc.group)) { groupValues.add(groupDoc.group); groupMap.put(groupDoc.group, new ArrayList()); @@ 
-546,10 +610,8 @@ public class TestGrouping extends LuceneTestCase { groupMap.get(groupDoc.group).add(groupDoc); } - RandomIndexWriter w = new RandomIndexWriter( - random(), - dir, - newIndexWriterConfig(new MockAnalyzer(random()))); + RandomIndexWriter w = + new RandomIndexWriter(random(), dir, newIndexWriterConfig(new MockAnalyzer(random()))); final List> updateDocs = new ArrayList<>(); @@ -557,11 +619,11 @@ public class TestGrouping extends LuceneTestCase { groupEndType.setIndexOptions(IndexOptions.DOCS); groupEndType.setOmitNorms(true); - //System.out.println("TEST: index groups"); - for(BytesRef group : groupValues) { + // System.out.println("TEST: index groups"); + for (BytesRef group : groupValues) { final List docs = new ArrayList<>(); - //System.out.println("TEST: group=" + (group == null ? "null" : group.utf8ToString())); - for(GroupDoc groupValue : groupMap.get(group)) { + // System.out.println("TEST: group=" + (group == null ? "null" : group.utf8ToString())); + for (GroupDoc groupValue : groupMap.get(group)) { Document doc = new Document(); docs.add(doc); if (groupValue.group != null) { @@ -574,11 +636,13 @@ public class TestGrouping extends LuceneTestCase { doc.add(new SortedDocValuesField("sort2", BytesRef.deepCopyOf(groupValue.sort2))); doc.add(new NumericDocValuesField("id", groupValue.id)); doc.add(newTextField("content", groupValue.content, Field.Store.NO)); - //System.out.println("TEST: doc content=" + groupValue.content + " group=" + (groupValue.group == null ? "null" : groupValue.group.utf8ToString()) + " sort1=" + groupValue.sort1.utf8ToString() + " id=" + groupValue.id); + // System.out.println("TEST: doc content=" + groupValue.content + " group=" + + // (groupValue.group == null ? "null" : groupValue.group.utf8ToString()) + " sort1=" + + // groupValue.sort1.utf8ToString() + " id=" + groupValue.id); } // So we can pull filter marking last doc in block: final Field groupEnd = newField("groupend", "x", groupEndType); - docs.get(docs.size()-1).add(groupEnd); + docs.get(docs.size() - 1).add(groupEnd); // Add as a doc block: w.addDocuments(docs); if (group != null && random().nextInt(7) == 4) { @@ -586,7 +650,7 @@ public class TestGrouping extends LuceneTestCase { } } - for(List docs : updateDocs) { + for (List docs : updateDocs) { // Just replaces docs w/ same docs: w.updateDocuments(new Term("group", docs.get(0).get("group")), docs); } @@ -606,27 +670,27 @@ public class TestGrouping extends LuceneTestCase { final IndexReaderContext ctx = s.getTopReaderContext(); final List leaves = ctx.leaves(); subSearchers = new ShardSearcher[leaves.size()]; - for(int searcherIDX=0;searcherIDX= 0.0); } - + // Build 2nd index, where docs are added in blocks by // group, so we can use single pass collector dirBlocks = newDirectory(); rBlocks = getDocBlockReader(dirBlocks, groupDocs); final Query lastDocInBlock = new TermQuery(new Term("groupend", "x")); - + final IndexSearcher sBlocks = newSearcher(rBlocks); // This test relies on the fact that longer fields produce lower scores sBlocks.setSimilarity(new BM25Similarity()); final ShardState shardsBlocks = new ShardState(sBlocks); - + // ReaderBlocks only increases maxDoc() vs reader, which // means a monotonic shift in scores, so we can // reliably remap them w/ Map: - final Map> scoreMap = new HashMap<>(); + final Map> scoreMap = new HashMap<>(); values = MultiDocValues.getNumericValues(rBlocks, "id"); assertNotNull(values); int[] docIDToIDBlocks = new int[rBlocks.maxDoc()]; - for(int i=0;i termScoreMap = new HashMap<>(); - 
scoreMap.put("real"+contentID, termScoreMap); - //System.out.println("term=real" + contentID + " dfold=" + s.docFreq(new Term("content", "real"+contentID)) + - //" dfnew=" + sBlocks.docFreq(new Term("content", "real"+contentID))); - final ScoreDoc[] hits = sBlocks.search(new TermQuery(new Term("content", "real"+contentID)), numDocs).scoreDocs; - for(ScoreDoc hit : hits) { + // System.out.println("fixup score2"); + for (int contentID = 0; contentID < 3; contentID++) { + // System.out.println(" term=real" + contentID); + final Map termScoreMap = new HashMap<>(); + scoreMap.put("real" + contentID, termScoreMap); + // System.out.println("term=real" + contentID + " dfold=" + s.docFreq(new Term("content", + // "real"+contentID)) + + // " dfnew=" + sBlocks.docFreq(new Term("content", "real"+contentID))); + final ScoreDoc[] hits = + sBlocks.search(new TermQuery(new Term("content", "real" + contentID)), numDocs) + .scoreDocs; + for (ScoreDoc hit : hits) { final GroupDoc gd = groupDocsByID[docIDToIDBlocks[hit.doc]]; assertTrue(gd.score2 == 0.0); gd.score2 = hit.score; assertEquals(gd.id, docIDToIDBlocks[hit.doc]); - //System.out.println(" score=" + gd.score + " score2=" + hit.score + " id=" + docIDToIDBlocks[hit.doc]); + // System.out.println(" score=" + gd.score + " score2=" + hit.score + " id=" + + // docIDToIDBlocks[hit.doc]); termScoreMap.put(gd.score, gd.score2); } } - - for(int searchIter=0;searchIter<100;searchIter++) { - + + for (int searchIter = 0; searchIter < 100; searchIter++) { + if (VERBOSE) { System.out.println("\nTEST: searchIter=" + searchIter); } - + final String searchTerm = "real" + random().nextInt(3); final boolean getMaxScores = random().nextBoolean(); final Sort groupSort = getRandomSort(); - //final Sort groupSort = new Sort(new SortField[] {new SortField("sort1", SortField.STRING), new SortField("id", SortField.INT)}); + // final Sort groupSort = new Sort(new SortField[] {new SortField("sort1", + // SortField.STRING), new SortField("id", SortField.INT)}); final Sort docSort = getRandomSort(); - + final int topNGroups = TestUtil.nextInt(random(), 1, 30); - //final int topNGroups = 10; + // final int topNGroups = 10; final int docsPerGroup = TestUtil.nextInt(random(), 1, 50); - + final int groupOffset = TestUtil.nextInt(random(), 0, (topNGroups - 1) / 2); - //final int groupOffset = 0; - + // final int groupOffset = 0; + final int docOffset = TestUtil.nextInt(random(), 0, docsPerGroup - 1); - //final int docOffset = 0; + // final int docOffset = 0; final boolean doCache = random().nextBoolean(); final boolean doAllGroups = random().nextBoolean(); if (VERBOSE) { - System.out.println("TEST: groupSort=" + groupSort + " docSort=" + docSort + " searchTerm=" + searchTerm + " dF=" + r.docFreq(new Term("content", searchTerm)) +" dFBlock=" + rBlocks.docFreq(new Term("content", searchTerm)) + " topNGroups=" + topNGroups + " groupOffset=" + groupOffset + " docOffset=" + docOffset + " doCache=" + doCache + " docsPerGroup=" + docsPerGroup + " doAllGroups=" + doAllGroups + " getMaxScores=" + getMaxScores); + System.out.println( + "TEST: groupSort=" + + groupSort + + " docSort=" + + docSort + + " searchTerm=" + + searchTerm + + " dF=" + + r.docFreq(new Term("content", searchTerm)) + + " dFBlock=" + + rBlocks.docFreq(new Term("content", searchTerm)) + + " topNGroups=" + + topNGroups + + " groupOffset=" + + groupOffset + + " docOffset=" + + docOffset + + " doCache=" + + doCache + + " docsPerGroup=" + + docsPerGroup + + " doAllGroups=" + + doAllGroups + + " getMaxScores=" + + getMaxScores); } 
- + String groupField = "group"; if (VERBOSE) { System.out.println(" groupField=" + groupField); } - final FirstPassGroupingCollector c1 = createRandomFirstPassCollector(groupField, groupSort, groupOffset+topNGroups); + final FirstPassGroupingCollector c1 = + createRandomFirstPassCollector(groupField, groupSort, groupOffset + topNGroups); final CachingCollector cCache; final Collector c; - + final AllGroupsCollector allGroupsCollector; if (doAllGroups) { allGroupsCollector = createAllGroupsCollector(c1, groupField); } else { allGroupsCollector = null; } - + final boolean useWrappingCollector = random().nextBoolean(); - + if (doCache) { final double maxCacheMB = random().nextDouble(); if (VERBOSE) { System.out.println("TEST: maxCacheMB=" + maxCacheMB); } - + if (useWrappingCollector) { if (doAllGroups) { cCache = CachingCollector.create(c1, true, maxCacheMB); @@ -892,12 +998,12 @@ public class TestGrouping extends LuceneTestCase { c = c1; } } - + // Search top reader: final Query query = new TermQuery(new Term("content", searchTerm)); - + s.search(query, c); - + if (doCache && !useWrappingCollector) { if (cCache.isCached()) { // Replay for first-pass grouping @@ -914,7 +1020,7 @@ public class TestGrouping extends LuceneTestCase { } } } - + // Get 1st pass top groups final Collection> topGroups = getSearchGroups(c1, groupOffset); final TopGroups groupsResult; @@ -924,26 +1030,50 @@ public class TestGrouping extends LuceneTestCase { System.out.println(" null"); } else { for (SearchGroup searchGroup : topGroups) { - System.out.println(" " + (searchGroup.groupValue == null ? "null" : searchGroup.groupValue) + ": " + Arrays.deepToString(searchGroup.sortValues)); + System.out.println( + " " + + (searchGroup.groupValue == null ? "null" : searchGroup.groupValue) + + ": " + + Arrays.deepToString(searchGroup.sortValues)); } } } - + // Get 1st pass top groups using shards - - final TopGroups topGroupsShards = searchShards(s, shards.subSearchers, query, groupSort, docSort, - groupOffset, topNGroups, docOffset, docsPerGroup, getMaxScores, true, true); + + final TopGroups topGroupsShards = + searchShards( + s, + shards.subSearchers, + query, + groupSort, + docSort, + groupOffset, + topNGroups, + docOffset, + docsPerGroup, + getMaxScores, + true, + true); final TopGroupsCollector c2; if (topGroups != null) { - + if (VERBOSE) { System.out.println("TEST: topGroups"); for (SearchGroup searchGroup : topGroups) { - System.out.println(" " + (searchGroup.groupValue == null ? "null" : searchGroup.groupValue.utf8ToString()) + ": " + Arrays.deepToString(searchGroup.sortValues)); + System.out.println( + " " + + (searchGroup.groupValue == null + ? 
"null" + : searchGroup.groupValue.utf8ToString()) + + ": " + + Arrays.deepToString(searchGroup.sortValues)); } } - - c2 = createSecondPassCollector(c1, groupSort, docSort, groupOffset, docOffset + docsPerGroup, getMaxScores); + + c2 = + createSecondPassCollector( + c1, groupSort, docSort, groupOffset, docOffset + docsPerGroup, getMaxScores); if (doCache) { if (cCache.isCached()) { if (VERBOSE) { @@ -959,7 +1089,7 @@ public class TestGrouping extends LuceneTestCase { } else { s.search(query, c2); } - + if (doAllGroups) { TopGroups tempTopGroups = getTopGroups(c2, docOffset); groupsResult = new TopGroups<>(tempTopGroups, allGroupsCollector.getGroupCount()); @@ -973,63 +1103,99 @@ public class TestGrouping extends LuceneTestCase { System.out.println("TEST: no results"); } } - - final TopGroups expectedGroups = slowGrouping(groupDocs, searchTerm, getMaxScores, doAllGroups, groupSort, docSort, topNGroups, docsPerGroup, groupOffset, docOffset); - + + final TopGroups expectedGroups = + slowGrouping( + groupDocs, + searchTerm, + getMaxScores, + doAllGroups, + groupSort, + docSort, + topNGroups, + docsPerGroup, + groupOffset, + docOffset); + if (VERBOSE) { if (expectedGroups == null) { System.out.println("TEST: no expected groups"); } else { - System.out.println("TEST: expected groups totalGroupedHitCount=" + expectedGroups.totalGroupedHitCount); - for(GroupDocs gd : expectedGroups.groups) { - System.out.println(" group=" + (gd.groupValue == null ? "null" : gd.groupValue) + " totalHits=" + gd.totalHits.value + " scoreDocs.len=" + gd.scoreDocs.length); - for(ScoreDoc sd : gd.scoreDocs) { + System.out.println( + "TEST: expected groups totalGroupedHitCount=" + + expectedGroups.totalGroupedHitCount); + for (GroupDocs gd : expectedGroups.groups) { + System.out.println( + " group=" + + (gd.groupValue == null ? "null" : gd.groupValue) + + " totalHits=" + + gd.totalHits.value + + " scoreDocs.len=" + + gd.scoreDocs.length); + for (ScoreDoc sd : gd.scoreDocs) { System.out.println(" id=" + sd.doc + " score=" + sd.score); } } } - + if (groupsResult == null) { System.out.println("TEST: no matched groups"); } else { - System.out.println("TEST: matched groups totalGroupedHitCount=" + groupsResult.totalGroupedHitCount); - for(GroupDocs gd : groupsResult.groups) { - System.out.println(" group=" + (gd.groupValue == null ? "null" : gd.groupValue) + " totalHits=" + gd.totalHits.value); - for(ScoreDoc sd : gd.scoreDocs) { + System.out.println( + "TEST: matched groups totalGroupedHitCount=" + groupsResult.totalGroupedHitCount); + for (GroupDocs gd : groupsResult.groups) { + System.out.println( + " group=" + + (gd.groupValue == null ? 
"null" : gd.groupValue) + + " totalHits=" + + gd.totalHits.value); + for (ScoreDoc sd : gd.scoreDocs) { System.out.println(" id=" + docIDToID[sd.doc] + " score=" + sd.score); } } - + if (searchIter == 14) { - for(int docIDX=0;docIDX tempTopGroupsBlocks = (TopGroups) c3.getTopGroups(docSort, groupOffset, docOffset, docOffset+docsPerGroup); + @SuppressWarnings({"unchecked", "rawtypes"}) + final TopGroups tempTopGroupsBlocks = + (TopGroups) + c3.getTopGroups(docSort, groupOffset, docOffset, docOffset + docsPerGroup); final TopGroups groupsResultBlocks; if (doAllGroups && tempTopGroupsBlocks != null) { - assertEquals((int) tempTopGroupsBlocks.totalGroupCount, allGroupsCollector2.getGroupCount()); - groupsResultBlocks = new TopGroups<>(tempTopGroupsBlocks, allGroupsCollector2.getGroupCount()); + assertEquals( + (int) tempTopGroupsBlocks.totalGroupCount, allGroupsCollector2.getGroupCount()); + groupsResultBlocks = + new TopGroups<>(tempTopGroupsBlocks, allGroupsCollector2.getGroupCount()); } else { groupsResultBlocks = tempTopGroupsBlocks; } - + if (VERBOSE) { if (groupsResultBlocks == null) { System.out.println("TEST: no block groups"); } else { - System.out.println("TEST: block groups totalGroupedHitCount=" + groupsResultBlocks.totalGroupedHitCount); + System.out.println( + "TEST: block groups totalGroupedHitCount=" + + groupsResultBlocks.totalGroupedHitCount); boolean first = true; - for(GroupDocs gd : groupsResultBlocks.groups) { - System.out.println(" group=" + (gd.groupValue == null ? "null" : gd.groupValue.utf8ToString()) + " totalHits=" + gd.totalHits.value); - for(ScoreDoc sd : gd.scoreDocs) { + for (GroupDocs gd : groupsResultBlocks.groups) { + System.out.println( + " group=" + + (gd.groupValue == null ? "null" : gd.groupValue.utf8ToString()) + + " totalHits=" + + gd.totalHits.value); + for (ScoreDoc sd : gd.scoreDocs) { System.out.println(" id=" + docIDToIDBlocks[sd.doc] + " score=" + sd.score); if (first) { System.out.println("explain: " + sBlocks.explain(query, sd.doc)); @@ -1072,41 +1248,56 @@ public class TestGrouping extends LuceneTestCase { } } } - + // Get shard'd block grouping result: - final TopGroups topGroupsBlockShards = searchShards(sBlocks, shardsBlocks.subSearchers, query, - groupSort, docSort, groupOffset, topNGroups, docOffset, docsPerGroup, getMaxScores, false, false); - + final TopGroups topGroupsBlockShards = + searchShards( + sBlocks, + shardsBlocks.subSearchers, + query, + groupSort, + docSort, + groupOffset, + topNGroups, + docOffset, + docsPerGroup, + getMaxScores, + false, + false); + if (expectedGroups != null) { // Fixup scores for reader2 for (GroupDocs groupDocsHits : expectedGroups.groups) { - for(ScoreDoc hit : groupDocsHits.scoreDocs) { + for (ScoreDoc hit : groupDocsHits.scoreDocs) { final GroupDoc gd = groupDocsByID[hit.doc]; assertEquals(gd.id, hit.doc); - //System.out.println("fixup score " + hit.score + " to " + gd.score2 + " vs " + gd.score); + // System.out.println("fixup score " + hit.score + " to " + gd.score2 + " vs " + + // gd.score); hit.score = gd.score2; } } - + final SortField[] sortFields = groupSort.getSort(); - final Map termScoreMap = scoreMap.get(searchTerm); - for(int groupSortIDX=0;groupSortIDX termScoreMap = scoreMap.get(searchTerm); + for (int groupSortIDX = 0; groupSortIDX < sortFields.length; groupSortIDX++) { if (sortFields[groupSortIDX].getType() == SortField.Type.SCORE) { for (GroupDocs groupDocsHits : expectedGroups.groups) { if (groupDocsHits.groupSortValues != null) { - //System.out.println("remap " + 
groupDocsHits.groupSortValues[groupSortIDX] + " to " + termScoreMap.get(groupDocsHits.groupSortValues[groupSortIDX])); - groupDocsHits.groupSortValues[groupSortIDX] = termScoreMap.get(groupDocsHits.groupSortValues[groupSortIDX]); + // System.out.println("remap " + groupDocsHits.groupSortValues[groupSortIDX] + " + // to " + termScoreMap.get(groupDocsHits.groupSortValues[groupSortIDX])); + groupDocsHits.groupSortValues[groupSortIDX] = + termScoreMap.get(groupDocsHits.groupSortValues[groupSortIDX]); assertNotNull(groupDocsHits.groupSortValues[groupSortIDX]); } } } } - + final SortField[] docSortFields = docSort.getSort(); - for(int docSortIDX=0;docSortIDX groupDocsHits : expectedGroups.groups) { - for(ScoreDoc _hit : groupDocsHits.scoreDocs) { + for (ScoreDoc _hit : groupDocsHits.scoreDocs) { FieldDoc hit = (FieldDoc) _hit; if (hit.fields != null) { hit.fields[docSortIDX] = termScoreMap.get(hit.fields[docSortIDX]); @@ -1117,40 +1308,66 @@ public class TestGrouping extends LuceneTestCase { } } } - + assertEquals(docIDToIDBlocks, expectedGroups, groupsResultBlocks, false, true, false); assertEquals(docIDToIDBlocks, expectedGroups, topGroupsBlockShards, false, false, false); } - + r.close(); dir.close(); - + rBlocks.close(); dirBlocks.close(); } } private void verifyShards(int[] docStarts, TopGroups topGroups) { - for(GroupDocs group : topGroups.groups) { - for(int hitIDX=0;hitIDX group : topGroups.groups) { + for (int hitIDX = 0; hitIDX < group.scoreDocs.length; hitIDX++) { final ScoreDoc sd = group.scoreDocs[hitIDX]; - assertEquals("doc=" + sd.doc + " wrong shard", - ReaderUtil.subIndex(sd.doc, docStarts), - sd.shardIndex); + assertEquals( + "doc=" + sd.doc + " wrong shard", + ReaderUtil.subIndex(sd.doc, docStarts), + sd.shardIndex); } } } - private TopGroups searchShards(IndexSearcher topSearcher, ShardSearcher[] subSearchers, Query query, Sort groupSort, Sort docSort, int groupOffset, int topNGroups, int docOffset, - int topNDocs, boolean getMaxScores, boolean canUseIDV, boolean preFlex) throws Exception { + private TopGroups searchShards( + IndexSearcher topSearcher, + ShardSearcher[] subSearchers, + Query query, + Sort groupSort, + Sort docSort, + int groupOffset, + int topNGroups, + int docOffset, + int topNDocs, + boolean getMaxScores, + boolean canUseIDV, + boolean preFlex) + throws Exception { - // TODO: swap in caching, all groups collector hereassertEquals(expected.totalHitCount, actual.totalHitCount); + // TODO: swap in caching, all groups collector hereassertEquals(expected.totalHitCount, + // actual.totalHitCount); // too... if (VERBOSE) { - System.out.println("TEST: " + subSearchers.length + " shards: " + Arrays.toString(subSearchers) + " canUseIDV=" + canUseIDV); + System.out.println( + "TEST: " + + subSearchers.length + + " shards: " + + Arrays.toString(subSearchers) + + " canUseIDV=" + + canUseIDV); } // Run 1st pass collector to get top groups per shard - final Weight w = topSearcher.createWeight(topSearcher.rewrite(query), groupSort.needsScores() || docSort.needsScores() || getMaxScores ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES, 1); + final Weight w = + topSearcher.createWeight( + topSearcher.rewrite(query), + groupSort.needsScores() || docSort.needsScores() || getMaxScores + ? 
ScoreMode.COMPLETE + : ScoreMode.COMPLETE_NO_SCORES, + 1); final List>> shardGroups = new ArrayList<>(); List> firstPassGroupingCollectors = new ArrayList<>(); FirstPassGroupingCollector firstPassCollector = null; @@ -1158,14 +1375,17 @@ public class TestGrouping extends LuceneTestCase { String groupField = "group"; - for(int shardIDX=0;shardIDX group : topGroups) { - System.out.println(" " + groupToString(group.groupValue) + " groupSort=" + Arrays.toString(group.sortValues)); + System.out.println( + " shard " + + shardIDX + + " s=" + + subSearchers[shardIDX] + + " totalGroupedHitCount=?" + + " " + + topGroups.size() + + " groups:"); + for (SearchGroup group : topGroups) { + System.out.println( + " " + + groupToString(group.groupValue) + + " groupSort=" + + Arrays.toString(group.sortValues)); } } shardGroups.add(topGroups); } } - final Collection> mergedTopGroups = SearchGroup.merge(shardGroups, groupOffset, topNGroups, groupSort); + final Collection> mergedTopGroups = + SearchGroup.merge(shardGroups, groupOffset, topNGroups, groupSort); if (VERBOSE) { System.out.println(" top groups merged:"); if (mergedTopGroups == null) { System.out.println(" null"); } else { System.out.println(" " + mergedTopGroups.size() + " top groups:"); - for(SearchGroup group : mergedTopGroups) { - System.out.println(" [" + groupToString(group.groupValue) + "] groupSort=" + Arrays.toString(group.sortValues)); + for (SearchGroup group : mergedTopGroups) { + System.out.println( + " [" + + groupToString(group.groupValue) + + "] groupSort=" + + Arrays.toString(group.sortValues)); } } } if (mergedTopGroups != null) { // Now 2nd pass: - @SuppressWarnings({"unchecked","rawtypes"}) + @SuppressWarnings({"unchecked", "rawtypes"}) final TopGroups[] shardTopGroups = new TopGroups[subSearchers.length]; - for(int shardIDX=0;shardIDX secondPassCollector = createSecondPassCollector(firstPassGroupingCollectors.get(shardIDX), - groupField, mergedTopGroups, groupSort, docSort, docOffset + topNDocs, getMaxScores); + for (int shardIDX = 0; shardIDX < subSearchers.length; shardIDX++) { + final TopGroupsCollector secondPassCollector = + createSecondPassCollector( + firstPassGroupingCollectors.get(shardIDX), + groupField, + mergedTopGroups, + groupSort, + docSort, + docOffset + topNDocs, + getMaxScores); subSearchers[shardIDX].search(w, secondPassCollector); shardTopGroups[shardIDX] = getTopGroups(secondPassCollector, 0); if (VERBOSE) { - System.out.println(" " + shardTopGroups[shardIDX].groups.length + " shard[" + shardIDX + "] groups:"); - for(GroupDocs group : shardTopGroups[shardIDX].groups) { - System.out.println(" [" + groupToString(group.groupValue) + "] groupSort=" + Arrays.toString(group.groupSortValues) + " numDocs=" + group.scoreDocs.length); + System.out.println( + " " + shardTopGroups[shardIDX].groups.length + " shard[" + shardIDX + "] groups:"); + for (GroupDocs group : shardTopGroups[shardIDX].groups) { + System.out.println( + " [" + + groupToString(group.groupValue) + + "] groupSort=" + + Arrays.toString(group.groupSortValues) + + " numDocs=" + + group.scoreDocs.length); } } } - TopGroups mergedGroups = TopGroups.merge(shardTopGroups, groupSort, docSort, docOffset, topNDocs, TopGroups.ScoreMergeMode.None); + TopGroups mergedGroups = + TopGroups.merge( + shardTopGroups, + groupSort, + docSort, + docOffset, + topNDocs, + TopGroups.ScoreMergeMode.None); if (VERBOSE) { System.out.println(" " + mergedGroups.groups.length + " merged groups:"); - for(GroupDocs group : mergedGroups.groups) { - System.out.println(" [" + 
groupToString(group.groupValue) + "] groupSort=" + Arrays.toString(group.groupSortValues) + " numDocs=" + group.scoreDocs.length); + for (GroupDocs group : mergedGroups.groups) { + System.out.println( + " [" + + groupToString(group.groupValue) + + "] groupSort=" + + Arrays.toString(group.groupSortValues) + + " numDocs=" + + group.scoreDocs.length); } } return mergedGroups; @@ -1228,21 +1492,39 @@ public class TestGrouping extends LuceneTestCase { } } - private void assertEquals(int[] docIDtoID, TopGroups expected, TopGroups actual, boolean verifyGroupValues, boolean verifyTotalGroupCount, boolean idvBasedImplsUsed) { + private void assertEquals( + int[] docIDtoID, + TopGroups expected, + TopGroups actual, + boolean verifyGroupValues, + boolean verifyTotalGroupCount, + boolean idvBasedImplsUsed) { if (expected == null) { assertNull(actual); return; } assertNotNull(actual); - assertEquals("expected.groups.length != actual.groups.length", expected.groups.length, actual.groups.length); - assertEquals("expected.totalHitCount != actual.totalHitCount", expected.totalHitCount, actual.totalHitCount); - assertEquals("expected.totalGroupedHitCount != actual.totalGroupedHitCount", expected.totalGroupedHitCount, actual.totalGroupedHitCount); + assertEquals( + "expected.groups.length != actual.groups.length", + expected.groups.length, + actual.groups.length); + assertEquals( + "expected.totalHitCount != actual.totalHitCount", + expected.totalHitCount, + actual.totalHitCount); + assertEquals( + "expected.totalGroupedHitCount != actual.totalGroupedHitCount", + expected.totalGroupedHitCount, + actual.totalGroupedHitCount); if (expected.totalGroupCount != null && verifyTotalGroupCount) { - assertEquals("expected.totalGroupCount != actual.totalGroupCount", expected.totalGroupCount, actual.totalGroupCount); + assertEquals( + "expected.totalGroupCount != actual.totalGroupCount", + expected.totalGroupCount, + actual.totalGroupCount); } - for(int groupIDX=0;groupIDX groups = + groupingSearch.search(indexSearcher, new TermQuery(new Term("content", "random")), 0, 10); assertEquals(7, groups.totalHitCount); assertEquals(7, groups.totalGroupedHitCount); @@ -160,13 +161,14 @@ public class TestGroupingSearch extends LuceneTestCase { Query lastDocInBlock = new TermQuery(new Term("groupend", "x")); groupingSearch = new GroupingSearch(lastDocInBlock); - groups = groupingSearch.search(indexSearcher, new TermQuery(new Term("content", "random")), 0, 10); + groups = + groupingSearch.search(indexSearcher, new TermQuery(new Term("content", "random")), 0, 10); assertEquals(7, groups.totalHitCount); assertEquals(7, groups.totalGroupedHitCount); assertEquals(4, groups.totalGroupCount.longValue()); assertEquals(4, groups.groups.length); - + indexSearcher.getIndexReader().close(); dir.close(); } @@ -201,7 +203,8 @@ public class TestGroupingSearch extends LuceneTestCase { } } - private GroupingSearch createRandomGroupingSearch(String groupField, Sort groupSort, int docsInGroup, boolean canUseIDV) { + private GroupingSearch createRandomGroupingSearch( + String groupField, Sort groupSort, int docsInGroup, boolean canUseIDV) { GroupingSearch groupingSearch; if (random().nextBoolean()) { ValueSource vs = new BytesRefFieldSource(groupField); @@ -222,10 +225,11 @@ public class TestGroupingSearch extends LuceneTestCase { public void testSetAllGroups() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter( - random(), - dir, - newIndexWriterConfig(new 
MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
     Document doc = new Document();
     doc.add(newField("group", "foo", StringField.TYPE_NOT_STORED));
     doc.add(new SortedDocValuesField("group", new BytesRef("foo")));
@@ -238,7 +242,7 @@
     gs.setAllGroups(true);
     TopGroups groups = gs.search(indexSearcher, new TermQuery(new Term("group", "foo")), 0, 10);
     assertEquals(1, groups.totalHitCount);
-    //assertEquals(1, groups.totalGroupCount.intValue());
+    // assertEquals(1, groups.totalGroupCount.intValue());
     assertEquals(1, groups.totalGroupedHitCount);
     assertEquals(1, gs.getAllMatchingGroups().size());
     indexSearcher.getIndexReader().close();
diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestLongRangeFactory.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestLongRangeFactory.java
index 0677631476e..70c1d94c039 100644
--- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestLongRangeFactory.java
+++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestLongRangeFactory.java
@@ -33,7 +33,5 @@ public class TestLongRangeFactory extends LuceneTestCase {
     assertEquals(new LongRange(30, 40), factory.getRange(35, scratch));
     assertEquals(new LongRange(50, Long.MAX_VALUE), factory.getRange(50, scratch));
     assertEquals(new LongRange(50, Long.MAX_VALUE), factory.getRange(500, scratch));
-
   }
-
 }
diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestLongRangeGroupSelector.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestLongRangeGroupSelector.java
index 7384a5653f7..083bb5cf9d6 100644
--- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestLongRangeGroupSelector.java
+++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestLongRangeGroupSelector.java
@@ -42,8 +42,8 @@ public class TestLongRangeGroupSelector extends BaseGroupSelectorTestCase<LongRange> {
 
   @Override
   protected GroupSelector<LongRange> getGroupSelector() {
-    return new LongRangeGroupSelector(LongValuesSource.fromLongField("long"),
-        new LongRangeFactory(100, 100, 900));
+    return new LongRangeGroupSelector(
+        LongValuesSource.fromLongField("long"), new LongRangeFactory(100, 100, 900));
   }
 
   @Override
diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestTermGroupSelector.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestTermGroupSelector.java
index e55fd641daf..9249d45ed01 100644
--- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestTermGroupSelector.java
+++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestTermGroupSelector.java
@@ -35,7 +35,7 @@ public class TestTermGroupSelector extends BaseGroupSelectorTestCase<BytesRef> {
   @Override
   protected void addGroupField(Document document, int id) {
     if (rarely()) {
-      return;   // missing value
+      return; // missing value
     }
     String groupValue = "group" + random().nextInt(10);
     document.add(new SortedDocValuesField("groupField", new BytesRef(groupValue)));
diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestTopGroups.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestTopGroups.java
index 6ba5ba160ab..c547525398e 100644
--- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestTopGroups.java
+++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestTopGroups.java
@@ -104,56 +104,87 @@ public class 
TestTopGroups extends LuceneTestCase { final TopGroups shard1TopGroups; { - final GroupDocs group1 = haveBlueWhale - ? createSingletonGroupDocs(blueGroupValue, new Object[] { blueWhaleSize }, 1 /* docId */, blueWhaleScore, 0 /* shardIndex */) - : createEmptyGroupDocs(blueGroupValue, new Object[] { blueWhaleSize }); + final GroupDocs group1 = + haveBlueWhale + ? createSingletonGroupDocs( + blueGroupValue, + new Object[] {blueWhaleSize}, + 1 /* docId */, + blueWhaleScore, + 0 /* shardIndex */) + : createEmptyGroupDocs(blueGroupValue, new Object[] {blueWhaleSize}); - final GroupDocs group2 = haveRedAnt - ? createSingletonGroupDocs(redGroupValue, new Object[] { redAntSize }, 2 /* docId */, redAntScore, 0 /* shardIndex */) - : createEmptyGroupDocs(redGroupValue, new Object[] { redAntSize }); + final GroupDocs group2 = + haveRedAnt + ? createSingletonGroupDocs( + redGroupValue, + new Object[] {redAntSize}, + 2 /* docId */, + redAntScore, + 0 /* shardIndex */) + : createEmptyGroupDocs(redGroupValue, new Object[] {redAntSize}); - shard1TopGroups = new TopGroups( - sort.getSort() /* groupSort */, - sort.getSort() /* withinGroupSort */, - group1.scoreDocs.length + group2.scoreDocs.length /* totalHitCount */, - group1.scoreDocs.length + group2.scoreDocs.length /* totalGroupedHitCount */, - combineGroupDocs(group1, group2) /* groups */, - (haveBlueWhale ? blueWhaleScore : (haveRedAnt ? redAntScore : Float.NaN)) /* maxScore */); + shard1TopGroups = + new TopGroups( + sort.getSort() /* groupSort */, + sort.getSort() /* withinGroupSort */, + group1.scoreDocs.length + group2.scoreDocs.length /* totalHitCount */, + group1.scoreDocs.length + group2.scoreDocs.length /* totalGroupedHitCount */, + combineGroupDocs(group1, group2) /* groups */, + (haveBlueWhale + ? blueWhaleScore + : (haveRedAnt ? redAntScore : Float.NaN)) /* maxScore */); } final TopGroups shard2TopGroups; { - final GroupDocs group1 = haveBlueDragonfly - ? createSingletonGroupDocs(blueGroupValue, new Object[] { blueDragonflySize }, 3 /* docId */, blueDragonflyScore, 1 /* shardIndex */) - : createEmptyGroupDocs(blueGroupValue, new Object[] { blueDragonflySize }); + final GroupDocs group1 = + haveBlueDragonfly + ? createSingletonGroupDocs( + blueGroupValue, + new Object[] {blueDragonflySize}, + 3 /* docId */, + blueDragonflyScore, + 1 /* shardIndex */) + : createEmptyGroupDocs(blueGroupValue, new Object[] {blueDragonflySize}); - final GroupDocs group2 = haveRedSquirrel - ? createSingletonGroupDocs(redGroupValue, new Object[] { redSquirrelSize }, 4 /* docId */, redSquirrelScore, 1 /* shardIndex */) - : createEmptyGroupDocs(redGroupValue, new Object[] { redSquirrelSize }); + final GroupDocs group2 = + haveRedSquirrel + ? createSingletonGroupDocs( + redGroupValue, + new Object[] {redSquirrelSize}, + 4 /* docId */, + redSquirrelScore, + 1 /* shardIndex */) + : createEmptyGroupDocs(redGroupValue, new Object[] {redSquirrelSize}); - shard2TopGroups = new TopGroups( - sort.getSort() /* groupSort */, - sort.getSort() /* withinGroupSort */, - group1.scoreDocs.length + group2.scoreDocs.length /* totalHitCount */, - group1.scoreDocs.length + group2.scoreDocs.length /* totalGroupedHitCount */, - combineGroupDocs(group1, group2) /* groups */, - (haveRedSquirrel ? redSquirrelScore : (haveBlueDragonfly ? 
blueDragonflyScore : Float.NaN)) /* maxScore */); + shard2TopGroups = + new TopGroups( + sort.getSort() /* groupSort */, + sort.getSort() /* withinGroupSort */, + group1.scoreDocs.length + group2.scoreDocs.length /* totalHitCount */, + group1.scoreDocs.length + group2.scoreDocs.length /* totalGroupedHitCount */, + combineGroupDocs(group1, group2) /* groups */, + (haveRedSquirrel + ? redSquirrelScore + : (haveBlueDragonfly ? blueDragonflyScore : Float.NaN)) /* maxScore */); } - final TopGroups mergedTopGroups = TopGroups.merge( - combineTopGroups(shard1TopGroups, shard2TopGroups), - sort /* groupSort */, - sort /* docSort */, - 0 /* docOffset */, - 2 /* docTopN */, - TopGroups.ScoreMergeMode.None); + final TopGroups mergedTopGroups = + TopGroups.merge( + combineTopGroups(shard1TopGroups, shard2TopGroups), + sort /* groupSort */, + sort /* docSort */, + 0 /* docOffset */, + 2 /* docTopN */, + TopGroups.ScoreMergeMode.None); assertNotNull(mergedTopGroups); final int expectedCount = - (haveBlueWhale ? 1 : 0) + - (haveRedAnt ? 1 : 0) + - (haveBlueDragonfly ? 1 : 0) + - (haveRedSquirrel ? 1 : 0); + (haveBlueWhale ? 1 : 0) + + (haveRedAnt ? 1 : 0) + + (haveBlueDragonfly ? 1 : 0) + + (haveRedSquirrel ? 1 : 0); assertEquals(expectedCount, mergedTopGroups.totalHitCount); assertEquals(expectedCount, mergedTopGroups.totalGroupedHitCount); @@ -173,11 +204,13 @@ public class TestTopGroups extends LuceneTestCase { } final float expectedMaxScore = - (haveBlueWhale ? blueWhaleScore - : (haveRedSquirrel ? redSquirrelScore - : (haveBlueDragonfly ? blueDragonflyScore - : (haveRedAnt ? redAntScore - : Float.NaN)))); + (haveBlueWhale + ? blueWhaleScore + : (haveRedSquirrel + ? redSquirrelScore + : (haveBlueDragonfly + ? blueDragonflyScore + : (haveRedAnt ? redAntScore : Float.NaN)))); checkMaxScore(expectedMaxScore, mergedTopGroups.maxScore); } @@ -191,41 +224,43 @@ public class TestTopGroups extends LuceneTestCase { // helper methods - private static GroupDocs createEmptyGroupDocs(String groupValue, Object[] groupSortValues) { - return new GroupDocs( + private static GroupDocs createEmptyGroupDocs( + String groupValue, Object[] groupSortValues) { + return new GroupDocs( Float.NaN /* score */, Float.NaN /* maxScore */, new TotalHits(0, TotalHits.Relation.EQUAL_TO), new ScoreDoc[0], groupValue, groupSortValues); - } + } - private static GroupDocs createSingletonGroupDocs(String groupValue, Object[] groupSortValues, - int docId, float docScore, int shardIndex) { - return new GroupDocs( + private static GroupDocs createSingletonGroupDocs( + String groupValue, Object[] groupSortValues, int docId, float docScore, int shardIndex) { + return new GroupDocs( Float.NaN /* score */, docScore /* maxScore */, new TotalHits(1, TotalHits.Relation.EQUAL_TO), - new ScoreDoc[] { new ScoreDoc(docId, docScore, shardIndex) }, + new ScoreDoc[] {new ScoreDoc(docId, docScore, shardIndex)}, groupValue, groupSortValues); - } + } - private static GroupDocs[] combineGroupDocs(GroupDocs group0, GroupDocs group1) { - @SuppressWarnings({"unchecked","rawtypes"}) + private static GroupDocs[] combineGroupDocs( + GroupDocs group0, GroupDocs group1) { + @SuppressWarnings({"unchecked", "rawtypes"}) final GroupDocs[] groups = new GroupDocs[2]; groups[0] = group0; groups[1] = group1; return groups; } - private static TopGroups[] combineTopGroups(TopGroups group0, TopGroups group1) { - @SuppressWarnings({"unchecked","rawtypes"}) + private static TopGroups[] combineTopGroups( + TopGroups group0, TopGroups group1) { + @SuppressWarnings({"unchecked", 
"rawtypes"}) final TopGroups[] groups = new TopGroups[2]; groups[0] = group0; groups[1] = group1; return groups; } - } diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestValueSourceGroupSelector.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestValueSourceGroupSelector.java index 9e652b643b4..0c621694c3e 100644 --- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestValueSourceGroupSelector.java +++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestValueSourceGroupSelector.java @@ -18,7 +18,6 @@ package org.apache.lucene.search.grouping; import java.util.HashMap; - import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.SortedDocValuesField; diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/AbstractHandler.java b/lucene/luke/src/java/org/apache/lucene/luke/app/AbstractHandler.java index bca88f18632..5abd9d931fd 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/AbstractHandler.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/AbstractHandler.java @@ -20,7 +20,6 @@ package org.apache.lucene.luke.app; import java.lang.invoke.MethodHandles; import java.util.ArrayList; import java.util.List; - import org.apache.logging.log4j.Logger; import org.apache.lucene.luke.util.LoggerFactory; @@ -45,5 +44,4 @@ public abstract class AbstractHandler { } protected abstract void notifyOne(T observer); - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/DirectoryHandler.java b/lucene/luke/src/java/org/apache/lucene/luke/app/DirectoryHandler.java index ec4e7e5d23a..3d2de05c60f 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/DirectoryHandler.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/DirectoryHandler.java @@ -19,7 +19,6 @@ package org.apache.lucene.luke.app; import java.io.IOException; import java.util.Objects; - import org.apache.lucene.luke.app.desktop.util.MessageUtils; import org.apache.lucene.luke.models.LukeException; import org.apache.lucene.luke.models.util.IndexUtils; @@ -60,7 +59,8 @@ public final class DirectoryHandler extends AbstractHandler { try { dir = IndexUtils.openDirectory(indexPath, dirImpl); } catch (IOException e) { - throw new LukeException(MessageUtils.getLocalizedMessage("openindex.message.index_path_invalid", indexPath), e); + throw new LukeException( + MessageUtils.getLocalizedMessage("openindex.message.index_path_invalid", indexPath), e); } state = new LukeStateImpl(); @@ -108,5 +108,4 @@ public final class DirectoryHandler extends AbstractHandler { return dir; } } - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/DirectoryObserver.java b/lucene/luke/src/java/org/apache/lucene/luke/app/DirectoryObserver.java index 64371150f87..83afae0d538 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/DirectoryObserver.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/DirectoryObserver.java @@ -23,5 +23,4 @@ public interface DirectoryObserver extends Observer { void openDirectory(LukeState state); void closeDirectory(); - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/IndexHandler.java b/lucene/luke/src/java/org/apache/lucene/luke/app/IndexHandler.java index 885c34a2e8b..f3fc635872b 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/IndexHandler.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/IndexHandler.java @@ -19,7 +19,6 @@ package org.apache.lucene.luke.app; import java.lang.invoke.MethodHandles; import 
java.util.Objects; - import org.apache.logging.log4j.Logger; import org.apache.lucene.index.IndexReader; import org.apache.lucene.luke.app.desktop.util.MessageUtils; @@ -57,7 +56,12 @@ public final class IndexHandler extends AbstractHandler { open(indexPath, dirImpl, false, false, false); } - public void open(String indexPath, String dirImpl, boolean readOnly, boolean useCompound, boolean keepAllCommits) { + public void open( + String indexPath, + String dirImpl, + boolean readOnly, + boolean useCompound, + boolean keepAllCommits) { Objects.requireNonNull(indexPath); if (indexOpened()) { @@ -69,7 +73,8 @@ public final class IndexHandler extends AbstractHandler { reader = IndexUtils.openIndex(indexPath, dirImpl); } catch (Exception e) { log.error("Error opening index", e); - throw new LukeException(MessageUtils.getLocalizedMessage("openindex.message.index_path_invalid", indexPath), e); + throw new LukeException( + MessageUtils.getLocalizedMessage("openindex.message.index_path_invalid", indexPath), e); } state = new LukeStateImpl(); @@ -96,7 +101,12 @@ public final class IndexHandler extends AbstractHandler { public void reOpen() { close(); - open(state.getIndexPath(), state.getDirImpl(), state.readOnly(), state.useCompound(), state.keepAllCommits()); + open( + state.getIndexPath(), + state.getDirImpl(), + state.readOnly(), + state.useCompound(), + state.keepAllCommits()); } public LukeState getState() { diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/IndexObserver.java b/lucene/luke/src/java/org/apache/lucene/luke/app/IndexObserver.java index 599b1090c4d..21265aeb41f 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/IndexObserver.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/IndexObserver.java @@ -23,5 +23,4 @@ public interface IndexObserver extends Observer { void openIndex(LukeState state); void closeIndex(); - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/LukeState.java b/lucene/luke/src/java/org/apache/lucene/luke/app/LukeState.java index 33ca829bca5..492efd31944 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/LukeState.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/LukeState.java @@ -21,9 +21,7 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.store.Directory; -/** - * Holder for current index/directory. - */ +/** Holder for current index/directory. 
*/ public interface LukeState { String getIndexPath(); @@ -53,5 +51,4 @@ public interface LukeState { default boolean hasDirectoryReader() { return getIndexReader() instanceof DirectoryReader; } - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/Observer.java b/lucene/luke/src/java/org/apache/lucene/luke/app/Observer.java index 290865b8986..16d35a7d53f 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/Observer.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/Observer.java @@ -18,5 +18,4 @@ package org.apache.lucene.luke.app; /** Marker interface for observers */ -public interface Observer { -} +public interface Observer {} diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/LukeMain.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/LukeMain.java index fae52f29abd..78c52544091 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/LukeMain.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/LukeMain.java @@ -17,13 +17,14 @@ package org.apache.lucene.luke.app.desktop; -import javax.swing.JFrame; -import javax.swing.UIManager; +import static org.apache.lucene.luke.app.desktop.util.ExceptionHandler.handle; + import java.awt.GraphicsEnvironment; import java.io.IOException; import java.lang.invoke.MethodHandles; import java.nio.file.FileSystems; - +import javax.swing.JFrame; +import javax.swing.UIManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.luke.app.desktop.components.LukeWindowProvider; import org.apache.lucene.luke.app.desktop.components.dialog.menubar.OpenIndexDialogFactory; @@ -32,20 +33,22 @@ import org.apache.lucene.luke.app.desktop.util.FontUtils; import org.apache.lucene.luke.app.desktop.util.MessageUtils; import org.apache.lucene.luke.util.LoggerFactory; -import static org.apache.lucene.luke.app.desktop.util.ExceptionHandler.handle; - /** Entry class for desktop Luke */ public class LukeMain { - public static final String LOG_FILE = System.getProperty("user.home") + - FileSystems.getDefault().getSeparator() + ".luke.d" + - FileSystems.getDefault().getSeparator() + "luke.log"; + public static final String LOG_FILE = + System.getProperty("user.home") + + FileSystems.getDefault().getSeparator() + + ".luke.d" + + FileSystems.getDefault().getSeparator() + + "luke.log"; static { LoggerFactory.initGuiLogging(LOG_FILE); } + private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); - + private static JFrame frame; public static JFrame getOwnerFrame() { @@ -55,9 +58,7 @@ public class LukeMain { private static void createAndShowGUI() { // uncaught error handler MessageBroker messageBroker = MessageBroker.getInstance(); - Thread.setDefaultUncaughtExceptionHandler((thread, cause) -> - handle(cause, messageBroker) - ); + Thread.setDefaultUncaughtExceptionHandler((thread, cause) -> handle(cause, messageBroker)); try { frame = new LukeWindowProvider().get(); @@ -68,9 +69,12 @@ public class LukeMain { // show open index dialog OpenIndexDialogFactory openIndexDialogFactory = OpenIndexDialogFactory.getInstance(); - new DialogOpener<>(openIndexDialogFactory).open(MessageUtils.getLocalizedMessage("openindex.dialog.title"), 600, 420, - (factory) -> { - }); + new DialogOpener<>(openIndexDialogFactory) + .open( + MessageUtils.getLocalizedMessage("openindex.dialog.title"), + 600, + 420, + (factory) -> {}); } catch (IOException e) { messageBroker.showUnknownErrorMessage(); log.error("Cannot initialize components.", e); @@ -79,7 +83,8 @@ public class 
LukeMain { public static void main(String[] args) throws Exception { String lookAndFeelClassName = UIManager.getSystemLookAndFeelClassName(); - if (!lookAndFeelClassName.contains("AquaLookAndFeel") && !lookAndFeelClassName.contains("PlasticXPLookAndFeel")) { + if (!lookAndFeelClassName.contains("AquaLookAndFeel") + && !lookAndFeelClassName.contains("PlasticXPLookAndFeel")) { // may be running on linux platform lookAndFeelClassName = "javax.swing.plaf.metal.MetalLookAndFeel"; } @@ -89,6 +94,5 @@ public class LukeMain { genv.registerFont(FontUtils.createElegantIconFont()); javax.swing.SwingUtilities.invokeLater(LukeMain::createAndShowGUI); - } } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/MessageBroker.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/MessageBroker.java index 9609a2f56ef..56c791bbc1c 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/MessageBroker.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/MessageBroker.java @@ -61,5 +61,4 @@ public class MessageBroker { void clearStatusMessage(); } - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/Preferences.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/Preferences.java index b0df6607403..982e4d858d5 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/Preferences.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/Preferences.java @@ -38,7 +38,13 @@ public interface Preferences { boolean isKeepAllCommits(); - void setIndexOpenerPrefs(boolean readOnly, String dirImpl, boolean noReader, boolean useCompound, boolean keepAllCommits) throws IOException; + void setIndexOpenerPrefs( + boolean readOnly, + String dirImpl, + boolean noReader, + boolean useCompound, + boolean keepAllCommits) + throws IOException; ColorTheme getColorTheme(); diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/PreferencesFactory.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/PreferencesFactory.java index 2502553297f..b043e8f1043 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/PreferencesFactory.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/PreferencesFactory.java @@ -24,11 +24,10 @@ public class PreferencesFactory { private static Preferences prefs; - public synchronized static Preferences getInstance() throws IOException { + public static synchronized Preferences getInstance() throws IOException { if (prefs == null) { prefs = new PreferencesImpl(); } return prefs; } - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/PreferencesImpl.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/PreferencesImpl.java index ebf78c5a57b..47d63313195 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/PreferencesImpl.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/PreferencesImpl.java @@ -23,7 +23,6 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; import java.util.List; - import org.apache.lucene.luke.app.desktop.util.inifile.IniFile; import org.apache.lucene.luke.app.desktop.util.inifile.SimpleIniFile; import org.apache.lucene.store.FSDirectory; @@ -31,14 +30,14 @@ import org.apache.lucene.store.FSDirectory; /** Default implementation of {@link Preferences} */ public final class PreferencesImpl implements Preferences { - private static final String CONFIG_DIR = System.getProperty("user.home") + FileSystems.getDefault().getSeparator() + ".luke.d"; + private 
static final String CONFIG_DIR = + System.getProperty("user.home") + FileSystems.getDefault().getSeparator() + ".luke.d"; private static final String INIT_FILE = "luke.ini"; private static final String HISTORY_FILE = "history"; private static final int MAX_HISTORY = 10; private final IniFile ini = new SimpleIniFile(); - private final List history = new ArrayList<>(); public PreferencesImpl() throws IOException { @@ -61,7 +60,6 @@ public final class PreferencesImpl implements Preferences { List allHistory = Files.readAllLines(histFile); history.addAll(allHistory.subList(0, Math.min(MAX_HISTORY, allHistory.size()))); } - } public List getHistory() { @@ -128,7 +126,13 @@ public final class PreferencesImpl implements Preferences { } @Override - public void setIndexOpenerPrefs(boolean readOnly, String dirImpl, boolean noReader, boolean useCompound, boolean keepAllCommits) throws IOException { + public void setIndexOpenerPrefs( + boolean readOnly, + String dirImpl, + boolean noReader, + boolean useCompound, + boolean keepAllCommits) + throws IOException { ini.put("opener", "readOnly", readOnly); ini.put("opener", "dirImpl", dirImpl); ini.put("opener", "noReader", noReader); diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/AnalysisPanelProvider.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/AnalysisPanelProvider.java index 864dfcd57ff..b0314378585 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/AnalysisPanelProvider.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/AnalysisPanelProvider.java @@ -17,16 +17,6 @@ package org.apache.lucene.luke.app.desktop.components; -import javax.swing.BorderFactory; -import javax.swing.ButtonGroup; -import javax.swing.JButton; -import javax.swing.JCheckBox; -import javax.swing.JLabel; -import javax.swing.JPanel; -import javax.swing.JRadioButton; -import javax.swing.JScrollPane; -import javax.swing.JSplitPane; -import javax.swing.JTextArea; import java.awt.BorderLayout; import java.awt.Color; import java.awt.FlowLayout; @@ -39,7 +29,16 @@ import java.io.IOException; import java.util.Objects; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; - +import javax.swing.BorderFactory; +import javax.swing.ButtonGroup; +import javax.swing.JButton; +import javax.swing.JCheckBox; +import javax.swing.JLabel; +import javax.swing.JPanel; +import javax.swing.JRadioButton; +import javax.swing.JScrollPane; +import javax.swing.JSplitPane; +import javax.swing.JTextArea; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.custom.CustomAnalyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; @@ -126,16 +125,21 @@ public final class AnalysisPanelProvider implements AnalysisTabOperator { operatorRegistry.register(AnalysisTabOperator.class, this); - operatorRegistry.get(PresetAnalyzerPanelOperator.class).ifPresent(operator -> { - // Scanning all Analyzer types will take time... - ExecutorService executorService = - Executors.newFixedThreadPool(1, new NamedThreadFactory("load-preset-analyzer-types")); - executorService.execute(() -> { - operator.setPresetAnalyzers(analysisModel.getPresetAnalyzerTypes()); - operator.setSelectedAnalyzer(analysisModel.currentAnalyzer().getClass()); - }); - executorService.shutdown(); - }); + operatorRegistry + .get(PresetAnalyzerPanelOperator.class) + .ifPresent( + operator -> { + // Scanning all Analyzer types will take time... 
+ ExecutorService executorService = + Executors.newFixedThreadPool( + 1, new NamedThreadFactory("load-preset-analyzer-types")); + executorService.execute( + () -> { + operator.setPresetAnalyzers(analysisModel.getPresetAnalyzerTypes()); + operator.setSelectedAnalyzer(analysisModel.currentAnalyzer().getClass()); + }); + executorService.shutdown(); + }); } public JPanel get() { @@ -143,7 +147,8 @@ public final class AnalysisPanelProvider implements AnalysisTabOperator { panel.setOpaque(false); panel.setBorder(BorderFactory.createLineBorder(Color.gray)); - JSplitPane splitPane = new JSplitPane(JSplitPane.VERTICAL_SPLIT, initUpperPanel(), initLowerPanel()); + JSplitPane splitPane = + new JSplitPane(JSplitPane.VERTICAL_SPLIT, initUpperPanel(), initLowerPanel()); splitPane.setOpaque(false); splitPane.setDividerLocation(320); panel.add(splitPane); @@ -194,16 +199,18 @@ public final class AnalysisPanelProvider implements AnalysisTabOperator { JPanel analyzerName = new JPanel(new FlowLayout(FlowLayout.LEADING, 10, 2)); analyzerName.setOpaque(false); - analyzerName.add(new JLabel(MessageUtils.getLocalizedMessage("analysis.label.selected_analyzer"))); + analyzerName.add( + new JLabel(MessageUtils.getLocalizedMessage("analysis.label.selected_analyzer"))); analyzerNameLbl.setText(analysisModel.currentAnalyzer().getClass().getName()); analyzerName.add(analyzerNameLbl); showChainLbl.setText(MessageUtils.getLocalizedMessage("analysis.label.show_chain")); - showChainLbl.addMouseListener(new MouseAdapter() { - @Override - public void mouseClicked(MouseEvent e) { - listeners.showAnalysisChain(e); - } - }); + showChainLbl.addMouseListener( + new MouseAdapter() { + @Override + public void mouseClicked(MouseEvent e) { + listeners.showAnalysisChain(e); + } + }); showChainLbl.setVisible(analysisModel.currentAnalyzer() instanceof CustomAnalyzer); analyzerName.add(FontUtils.toLinkText(showChainLbl)); inner1.add(analyzerName, BorderLayout.PAGE_START); @@ -217,8 +224,10 @@ public final class AnalysisPanelProvider implements AnalysisTabOperator { inputArea.setText(MessageUtils.getLocalizedMessage("analysis.textarea.prompt")); input.add(new JScrollPane(inputArea)); - JButton executeBtn = new JButton(FontUtils.elegantIconHtml("", - MessageUtils.getLocalizedMessage("analysis.button.test"))); + JButton executeBtn = + new JButton( + FontUtils.elegantIconHtml( + "", MessageUtils.getLocalizedMessage("analysis.button.test"))); executeBtn.setFont(StyleConstants.FONT_BUTTON_LARGE); executeBtn.setMargin(new Insets(3, 3, 3, 3)); executeBtn.addActionListener(listeners::executeAnalysis); @@ -233,13 +242,16 @@ public final class AnalysisPanelProvider implements AnalysisTabOperator { JButton clearBtn = new JButton(MessageUtils.getLocalizedMessage("button.clear")); clearBtn.setFont(StyleConstants.FONT_BUTTON_LARGE); clearBtn.setMargin(new Insets(5, 5, 5, 5)); - clearBtn.addActionListener(e -> { - inputArea.setText(""); - operatorRegistry.get(SimpleAnalyzeResultPanelOperator.class).ifPresent( - SimpleAnalyzeResultPanelOperator::clearTable); - operatorRegistry.get(StepByStepAnalyzeResultPanelOperator.class).ifPresent( - StepByStepAnalyzeResultPanelOperator::clearTable); - }); + clearBtn.addActionListener( + e -> { + inputArea.setText(""); + operatorRegistry + .get(SimpleAnalyzeResultPanelOperator.class) + .ifPresent(SimpleAnalyzeResultPanelOperator::clearTable); + operatorRegistry + .get(StepByStepAnalyzeResultPanelOperator.class) + .ifPresent(StepByStepAnalyzeResultPanelOperator::clearTable); + }); input.add(clearBtn); 
inner1.add(input, BorderLayout.CENTER); @@ -259,20 +271,26 @@ public final class AnalysisPanelProvider implements AnalysisTabOperator { mainPanel.remove(custom); mainPanel.add(preset, BorderLayout.CENTER); - operatorRegistry.get(PresetAnalyzerPanelOperator.class).ifPresent(operator -> { - operator.setPresetAnalyzers(analysisModel.getPresetAnalyzerTypes()); - operator.setSelectedAnalyzer(analysisModel.currentAnalyzer().getClass()); - }); + operatorRegistry + .get(PresetAnalyzerPanelOperator.class) + .ifPresent( + operator -> { + operator.setPresetAnalyzers(analysisModel.getPresetAnalyzerTypes()); + operator.setSelectedAnalyzer(analysisModel.currentAnalyzer().getClass()); + }); stepByStepCB.setSelected(false); stepByStepCB.setVisible(false); } else if (command.equalsIgnoreCase(TYPE_CUSTOM)) { mainPanel.remove(preset); mainPanel.add(custom, BorderLayout.CENTER); - operatorRegistry.get(CustomAnalyzerPanelOperator.class).ifPresent(operator -> { - operator.setAnalysisModel(analysisModel); - operator.resetAnalysisComponents(); - }); + operatorRegistry + .get(CustomAnalyzerPanelOperator.class) + .ifPresent( + operator -> { + operator.setAnalysisModel(analysisModel); + operator.resetAnalysisComponents(); + }); stepByStepCB.setVisible(true); } mainPanel.setVisible(false); @@ -282,16 +300,20 @@ public final class AnalysisPanelProvider implements AnalysisTabOperator { void executeAnalysis() { String text = inputArea.getText(); if (Objects.isNull(text) || text.isEmpty()) { - messageBroker.showStatusMessage(MessageUtils.getLocalizedMessage("analysis.message.empry_input")); + messageBroker.showStatusMessage( + MessageUtils.getLocalizedMessage("analysis.message.empry_input")); } lowerPanel.remove(stepByStepResult); lowerPanel.add(simpleResult, BorderLayout.CENTER); - operatorRegistry.get(SimpleAnalyzeResultPanelOperator.class).ifPresent(operator -> { - operator.setAnalysisModel(analysisModel); - operator.executeAnalysis(text); - }); + operatorRegistry + .get(SimpleAnalyzeResultPanelOperator.class) + .ifPresent( + operator -> { + operator.setAnalysisModel(analysisModel); + operator.executeAnalysis(text); + }); lowerPanel.setVisible(false); lowerPanel.setVisible(true); @@ -300,14 +322,18 @@ public final class AnalysisPanelProvider implements AnalysisTabOperator { void executeAnalysisStepByStep() { String text = inputArea.getText(); if (Objects.isNull(text) || text.isEmpty()) { - messageBroker.showStatusMessage(MessageUtils.getLocalizedMessage("analysis.message.empry_input")); + messageBroker.showStatusMessage( + MessageUtils.getLocalizedMessage("analysis.message.empry_input")); } lowerPanel.remove(simpleResult); lowerPanel.add(stepByStepResult, BorderLayout.CENTER); - operatorRegistry.get(StepByStepAnalyzeResultPanelOperator.class).ifPresent(operator -> { - operator.setAnalysisModel(analysisModel); - operator.executeAnalysisStepByStep(text); - }); + operatorRegistry + .get(StepByStepAnalyzeResultPanelOperator.class) + .ifPresent( + operator -> { + operator.setAnalysisModel(analysisModel); + operator.executeAnalysisStepByStep(text); + }); lowerPanel.setVisible(false); lowerPanel.setVisible(true); @@ -316,10 +342,14 @@ public final class AnalysisPanelProvider implements AnalysisTabOperator { void showAnalysisChainDialog() { if (getCurrentAnalyzer() instanceof CustomAnalyzer) { CustomAnalyzer analyzer = (CustomAnalyzer) getCurrentAnalyzer(); - new DialogOpener<>(analysisChainDialogFactory).open("Analysis chain", 600, 320, - (factory) -> { - factory.setAnalyzer(analyzer); - }); + new 
DialogOpener<>(analysisChainDialogFactory) + .open( + "Analysis chain", + 600, + 320, + (factory) -> { + factory.setAnalyzer(analyzer); + }); } } @@ -328,12 +358,15 @@ public final class AnalysisPanelProvider implements AnalysisTabOperator { analysisModel.createAnalyzerFromClassName(analyzerType); analyzerNameLbl.setText(analysisModel.currentAnalyzer().getClass().getName()); showChainLbl.setVisible(false); - operatorRegistry.get(AnalyzerTabOperator.class).ifPresent(operator -> - operator.setAnalyzer(analysisModel.currentAnalyzer())); - operatorRegistry.get(MLTTabOperator.class).ifPresent(operator -> - operator.setAnalyzer(analysisModel.currentAnalyzer())); - operatorRegistry.get(AddDocumentDialogOperator.class).ifPresent(operator -> - operator.setAnalyzer(analysisModel.currentAnalyzer())); + operatorRegistry + .get(AnalyzerTabOperator.class) + .ifPresent(operator -> operator.setAnalyzer(analysisModel.currentAnalyzer())); + operatorRegistry + .get(MLTTabOperator.class) + .ifPresent(operator -> operator.setAnalyzer(analysisModel.currentAnalyzer())); + operatorRegistry + .get(AddDocumentDialogOperator.class) + .ifPresent(operator -> operator.setAnalyzer(analysisModel.currentAnalyzer())); } @Override @@ -341,12 +374,15 @@ public final class AnalysisPanelProvider implements AnalysisTabOperator { analysisModel.buildCustomAnalyzer(config); analyzerNameLbl.setText(analysisModel.currentAnalyzer().getClass().getName()); showChainLbl.setVisible(true); - operatorRegistry.get(AnalyzerTabOperator.class).ifPresent(operator -> - operator.setAnalyzer(analysisModel.currentAnalyzer())); - operatorRegistry.get(MLTTabOperator.class).ifPresent(operator -> - operator.setAnalyzer(analysisModel.currentAnalyzer())); - operatorRegistry.get(AddDocumentDialogOperator.class).ifPresent(operator -> - operator.setAnalyzer(analysisModel.currentAnalyzer())); + operatorRegistry + .get(AnalyzerTabOperator.class) + .ifPresent(operator -> operator.setAnalyzer(analysisModel.currentAnalyzer())); + operatorRegistry + .get(MLTTabOperator.class) + .ifPresent(operator -> operator.setAnalyzer(analysisModel.currentAnalyzer())); + operatorRegistry + .get(AddDocumentDialogOperator.class) + .ifPresent(operator -> operator.setAnalyzer(analysisModel.currentAnalyzer())); } @Override @@ -372,9 +408,6 @@ public final class AnalysisPanelProvider implements AnalysisTabOperator { } } - void executeAnalysisStepByStep(ActionEvent e) { - } + void executeAnalysisStepByStep(ActionEvent e) {} } - } - diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/AnalysisTabOperator.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/AnalysisTabOperator.java index 555f1c0245c..1ad7d5cf57a 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/AnalysisTabOperator.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/AnalysisTabOperator.java @@ -28,6 +28,4 @@ public interface AnalysisTabOperator extends ComponentOperatorRegistry.Component void setAnalyzerByCustomConfiguration(CustomAnalyzerConfig config); Analyzer getCurrentAnalyzer(); - } - diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/CommitsPanelProvider.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/CommitsPanelProvider.java index d06abcc0789..8d55855377c 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/CommitsPanelProvider.java +++ 
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/CommitsPanelProvider.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/CommitsPanelProvider.java
index d06abcc0789..8d55855377c 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/CommitsPanelProvider.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/CommitsPanelProvider.java
@@ -17,6 +17,18 @@ package org.apache.lucene.luke.app.desktop.components;
 
+import java.awt.BorderLayout;
+import java.awt.Color;
+import java.awt.FlowLayout;
+import java.awt.GridBagConstraints;
+import java.awt.GridBagLayout;
+import java.awt.GridLayout;
+import java.awt.event.ActionEvent;
+import java.awt.event.MouseAdapter;
+import java.awt.event.MouseEvent;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
 import javax.swing.BorderFactory;
 import javax.swing.BoxLayout;
 import javax.swing.ButtonGroup;
@@ -32,19 +44,6 @@ import javax.swing.JSplitPane;
 import javax.swing.JTable;
 import javax.swing.JTextArea;
 import javax.swing.ListSelectionModel;
-import java.awt.BorderLayout;
-import java.awt.Color;
-import java.awt.FlowLayout;
-import java.awt.GridBagConstraints;
-import java.awt.GridBagLayout;
-import java.awt.GridLayout;
-import java.awt.event.ActionEvent;
-import java.awt.event.MouseAdapter;
-import java.awt.event.MouseEvent;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.luke.app.DirectoryHandler;
 import org.apache.lucene.luke.app.DirectoryObserver;
@@ -100,7 +99,8 @@ public final class CommitsPanelProvider {
     panel.setOpaque(false);
     panel.setBorder(BorderFactory.createLineBorder(Color.gray));
 
-    JSplitPane splitPane = new JSplitPane(JSplitPane.VERTICAL_SPLIT, initUpperPanel(), initLowerPanel());
+    JSplitPane splitPane =
+        new JSplitPane(JSplitPane.VERTICAL_SPLIT, initUpperPanel(), initLowerPanel());
     splitPane.setOpaque(false);
     splitPane.setBorder(BorderFactory.createEmptyBorder());
     splitPane.setDividerLocation(120);
@@ -162,7 +162,11 @@ public final class CommitsPanelProvider {
     userDataTA.setLineWrap(true);
     userDataTA.setWrapStyleWord(true);
     userDataTA.setEditable(false);
-    JScrollPane userDataScroll = new JScrollPane(userDataTA, JScrollPane.VERTICAL_SCROLLBAR_AS_NEEDED, JScrollPane.HORIZONTAL_SCROLLBAR_NEVER);
+    JScrollPane userDataScroll =
+        new JScrollPane(
+            userDataTA,
+            JScrollPane.VERTICAL_SCROLLBAR_AS_NEEDED,
+            JScrollPane.HORIZONTAL_SCROLLBAR_NEVER);
     c1.gridx = 1;
     c1.gridy = 2;
     c1.weightx = 0.5;
@@ -179,7 +183,8 @@ public final class CommitsPanelProvider {
     panel.setOpaque(false);
     panel.setBorder(BorderFactory.createEmptyBorder(3, 3, 3, 3));
 
-    JSplitPane splitPane = new JSplitPane(JSplitPane.HORIZONTAL_SPLIT, initFilesPanel(), initSegmentsPanel());
+    JSplitPane splitPane =
+        new JSplitPane(JSplitPane.HORIZONTAL_SPLIT, initFilesPanel(), initSegmentsPanel());
     splitPane.setOpaque(false);
     splitPane.setBorder(BorderFactory.createEmptyBorder());
     splitPane.setDividerLocation(300);
@@ -197,7 +202,12 @@ public final class CommitsPanelProvider {
     header.add(new JLabel(MessageUtils.getLocalizedMessage("commits.label.files")));
     panel.add(header, BorderLayout.PAGE_START);
 
-    TableUtils.setupTable(filesTable, ListSelectionModel.SINGLE_SELECTION, new FilesTableModel(), null, FilesTableModel.Column.FILENAME.getColumnWidth());
+    TableUtils.setupTable(
+        filesTable,
+        ListSelectionModel.SINGLE_SELECTION,
+        new FilesTableModel(),
+        null,
+        FilesTableModel.Column.FILENAME.getColumnWidth());
     panel.add(new JScrollPane(filesTable), BorderLayout.CENTER);
 
     return panel;
@@ -213,7 +223,10 @@ public final class CommitsPanelProvider {
     segments.add(new JLabel(MessageUtils.getLocalizedMessage("commits.label.segments")));
     panel.add(segments);
 
-    TableUtils.setupTable(segmentsTable, ListSelectionModel.SINGLE_SELECTION, new SegmentsTableModel(),
+    TableUtils.setupTable(
+        segmentsTable,
+        ListSelectionModel.SINGLE_SELECTION,
+        new SegmentsTableModel(),
         new MouseAdapter() {
           @Override
           public void mouseClicked(MouseEvent e) {
@@ -241,12 +254,13 @@ public final class CommitsPanelProvider {
     diagRB.setSelected(true);
     diagRB.setEnabled(false);
     diagRB.setOpaque(false);
-    diagRB.addMouseListener(new MouseAdapter() {
-      @Override
-      public void mouseClicked(MouseEvent e) {
-        listeners.showSegmentDetails(e);
-      }
-    });
+    diagRB.addMouseListener(
+        new MouseAdapter() {
+          @Override
+          public void mouseClicked(MouseEvent e) {
+            listeners.showSegmentDetails(e);
+          }
+        });
     buttons.add(diagRB);
 
     attrRB.setText("Attributes");
@@ -254,12 +268,13 @@ public final class CommitsPanelProvider {
     attrRB.setSelected(false);
     attrRB.setEnabled(false);
     attrRB.setOpaque(false);
-    attrRB.addMouseListener(new MouseAdapter() {
-      @Override
-      public void mouseClicked(MouseEvent e) {
-        listeners.showSegmentDetails(e);
-      }
-    });
+    attrRB.addMouseListener(
+        new MouseAdapter() {
+          @Override
+          public void mouseClicked(MouseEvent e) {
+            listeners.showSegmentDetails(e);
+          }
+        });
     buttons.add(attrRB);
 
     codecRB.setText("Codec");
@@ -267,12 +282,13 @@ public final class CommitsPanelProvider {
     codecRB.setSelected(false);
     codecRB.setEnabled(false);
     codecRB.setOpaque(false);
-    codecRB.addMouseListener(new MouseAdapter() {
-      @Override
-      public void mouseClicked(MouseEvent e) {
-        listeners.showSegmentDetails(e);
-      }
-    });
+    codecRB.addMouseListener(
+        new MouseAdapter() {
+          @Override
+          public void mouseClicked(MouseEvent e) {
+            listeners.showSegmentDetails(e);
+          }
+        });
     buttons.add(codecRB);
 
     rbGroup.add(diagRB);
@@ -296,30 +312,55 @@ public final class CommitsPanelProvider {
     segDetailList.setModel(new DefaultListModel<>());
 
     long commitGen = (long) commitGenCombo.getSelectedItem();
-    commitsModel.getCommit(commitGen).ifPresent(commit -> {
-      deletedLbl.setText(String.valueOf(commit.isDeleted()));
-      segCntLbl.setText(String.valueOf(commit.getSegCount()));
-      userDataTA.setText(commit.getUserData());
-    });
+    commitsModel
+        .getCommit(commitGen)
+        .ifPresent(
+            commit -> {
+              deletedLbl.setText(String.valueOf(commit.isDeleted()));
+              segCntLbl.setText(String.valueOf(commit.getSegCount()));
+              userDataTA.setText(commit.getUserData());
+            });
 
     filesTable.setModel(new FilesTableModel(commitsModel.getFiles(commitGen)));
     filesTable.setShowGrid(true);
-    filesTable.getColumnModel().getColumn(FilesTableModel.Column.FILENAME.getIndex()).setPreferredWidth(FilesTableModel.Column.FILENAME.getColumnWidth());
+    filesTable
+        .getColumnModel()
+        .getColumn(FilesTableModel.Column.FILENAME.getIndex())
+        .setPreferredWidth(FilesTableModel.Column.FILENAME.getColumnWidth());
 
     segmentsTable.setModel(new SegmentsTableModel(commitsModel.getSegments(commitGen)));
     segmentsTable.setShowGrid(true);
-    segmentsTable.getColumnModel().getColumn(SegmentsTableModel.Column.NAME.getIndex()).setPreferredWidth(SegmentsTableModel.Column.NAME.getColumnWidth());
-    segmentsTable.getColumnModel().getColumn(SegmentsTableModel.Column.MAXDOCS.getIndex()).setPreferredWidth(SegmentsTableModel.Column.MAXDOCS.getColumnWidth());
-    segmentsTable.getColumnModel().getColumn(SegmentsTableModel.Column.DELS.getIndex()).setPreferredWidth(SegmentsTableModel.Column.DELS.getColumnWidth());
-    segmentsTable.getColumnModel().getColumn(SegmentsTableModel.Column.DELGEN.getIndex()).setPreferredWidth(SegmentsTableModel.Column.DELGEN.getColumnWidth());
-    segmentsTable.getColumnModel().getColumn(SegmentsTableModel.Column.VERSION.getIndex()).setPreferredWidth(SegmentsTableModel.Column.VERSION.getColumnWidth());
-    segmentsTable.getColumnModel().getColumn(SegmentsTableModel.Column.CODEC.getIndex()).setPreferredWidth(SegmentsTableModel.Column.CODEC.getColumnWidth());
+    segmentsTable
+        .getColumnModel()
+        .getColumn(SegmentsTableModel.Column.NAME.getIndex())
+        .setPreferredWidth(SegmentsTableModel.Column.NAME.getColumnWidth());
+    segmentsTable
+        .getColumnModel()
+        .getColumn(SegmentsTableModel.Column.MAXDOCS.getIndex())
+        .setPreferredWidth(SegmentsTableModel.Column.MAXDOCS.getColumnWidth());
+    segmentsTable
+        .getColumnModel()
+        .getColumn(SegmentsTableModel.Column.DELS.getIndex())
+        .setPreferredWidth(SegmentsTableModel.Column.DELS.getColumnWidth());
+    segmentsTable
+        .getColumnModel()
+        .getColumn(SegmentsTableModel.Column.DELGEN.getIndex())
+        .setPreferredWidth(SegmentsTableModel.Column.DELGEN.getColumnWidth());
+    segmentsTable
+        .getColumnModel()
+        .getColumn(SegmentsTableModel.Column.VERSION.getIndex())
+        .setPreferredWidth(SegmentsTableModel.Column.VERSION.getColumnWidth());
+    segmentsTable
+        .getColumnModel()
+        .getColumn(SegmentsTableModel.Column.CODEC.getIndex())
+        .setPreferredWidth(SegmentsTableModel.Column.CODEC.getColumnWidth());
   }
 
   private void showSegmentDetails() {
     int selectedRow = segmentsTable.getSelectedRow();
-    if (commitGenCombo.getSelectedItem() == null ||
-        selectedRow < 0 || selectedRow >= segmentsTable.getRowCount()) {
+    if (commitGenCombo.getSelectedItem() == null
+        || selectedRow < 0
+        || selectedRow >= segmentsTable.getRowCount()) {
       return;
     }
 
@@ -328,7 +369,8 @@ public final class CommitsPanelProvider {
     codecRB.setEnabled(true);
 
     long commitGen = (long) commitGenCombo.getSelectedItem();
-    String segName = (String) segmentsTable.getValueAt(selectedRow, SegmentsTableModel.Column.NAME.getIndex());
+    String segName =
+        (String) segmentsTable.getValueAt(selectedRow, SegmentsTableModel.Column.NAME.getIndex());
     ActionCommand command = ActionCommand.valueOf(rbGroup.getSelection().getActionCommand());
 
     final DefaultListModel<String> detailsModel = new DefaultListModel<>();
@@ -344,27 +386,30 @@ public final class CommitsPanelProvider {
             .forEach(detailsModel::addElement);
         break;
       case CODEC:
-        commitsModel.getSegmentCodec(commitGen, segName).ifPresent(codec -> {
-          Map<String, String> map = new HashMap<>();
-          map.put("Codec name", codec.getName());
-          map.put("Codec class name", codec.getClass().getName());
-          map.put("Compound format", codec.compoundFormat().getClass().getName());
-          map.put("DocValues format", codec.docValuesFormat().getClass().getName());
-          map.put("FieldInfos format", codec.fieldInfosFormat().getClass().getName());
-          map.put("LiveDocs format", codec.liveDocsFormat().getClass().getName());
-          map.put("Norms format", codec.normsFormat().getClass().getName());
-          map.put("Points format", codec.pointsFormat().getClass().getName());
-          map.put("Postings format", codec.postingsFormat().getClass().getName());
-          map.put("SegmentInfo format", codec.segmentInfoFormat().getClass().getName());
-          map.put("StoredFields format", codec.storedFieldsFormat().getClass().getName());
-          map.put("TermVectors format", codec.termVectorsFormat().getClass().getName());
-          map.entrySet().stream()
-              .map(entry -> entry.getKey() + " = " + entry.getValue()).forEach(detailsModel::addElement);
-        });
+        commitsModel
+            .getSegmentCodec(commitGen, segName)
+            .ifPresent(
+                codec -> {
+                  Map<String, String> map = new HashMap<>();
+                  map.put("Codec name", codec.getName());
+                  map.put("Codec class name", codec.getClass().getName());
+                  map.put("Compound format", codec.compoundFormat().getClass().getName());
+                  map.put("DocValues format", codec.docValuesFormat().getClass().getName());
+                  map.put("FieldInfos format", codec.fieldInfosFormat().getClass().getName());
+                  map.put("LiveDocs format", codec.liveDocsFormat().getClass().getName());
+                  map.put("Norms format", codec.normsFormat().getClass().getName());
+                  map.put("Points format", codec.pointsFormat().getClass().getName());
+                  map.put("Postings format", codec.postingsFormat().getClass().getName());
+                  map.put("SegmentInfo format", codec.segmentInfoFormat().getClass().getName());
+                  map.put("StoredFields format", codec.storedFieldsFormat().getClass().getName());
+                  map.put("TermVectors format", codec.termVectorsFormat().getClass().getName());
+                  map.entrySet().stream()
+                      .map(entry -> entry.getKey() + " = " + entry.getValue())
+                      .forEach(detailsModel::addElement);
+                });
         break;
     }
     segDetailList.setModel(detailsModel);
-
   }
 
   private class ListenerFunctions {
@@ -376,7 +421,6 @@ public final class CommitsPanelProvider {
     void showSegmentDetails(MouseEvent e) {
       CommitsPanelProvider.this.showSegmentDetails();
     }
-
   }
 
   private class Observer implements IndexObserver, DirectoryObserver {
@@ -425,8 +469,17 @@ public final class CommitsPanelProvider {
     deletedLbl.setText("");
     segCntLbl.setText("");
     userDataTA.setText("");
-    TableUtils.setupTable(filesTable, ListSelectionModel.SINGLE_SELECTION, new FilesTableModel(), null, FilesTableModel.Column.FILENAME.getColumnWidth());
-    TableUtils.setupTable(segmentsTable, ListSelectionModel.SINGLE_SELECTION, new SegmentsTableModel(), null,
+    TableUtils.setupTable(
+        filesTable,
+        ListSelectionModel.SINGLE_SELECTION,
+        new FilesTableModel(),
+        null,
+        FilesTableModel.Column.FILENAME.getColumnWidth());
+    TableUtils.setupTable(
+        segmentsTable,
+        ListSelectionModel.SINGLE_SELECTION,
+        new SegmentsTableModel(),
+        null,
         SegmentsTableModel.Column.NAME.getColumnWidth(),
         SegmentsTableModel.Column.MAXDOCS.getColumnWidth(),
         SegmentsTableModel.Column.DELS.getColumnWidth(),
@@ -441,13 +494,14 @@ public final class CommitsPanelProvider {
   }
 
   enum ActionCommand {
-    DIAGNOSTICS, ATTRIBUTES, CODEC;
+    DIAGNOSTICS,
+    ATTRIBUTES,
+    CODEC;
   }
 
   static final class FilesTableModel extends TableModelBase {
 
     enum Column implements TableColumnInfo {
-
       FILENAME("Filename", 0, String.class, 200),
       SIZE("Size", 1, String.class, Integer.MAX_VALUE);
 
@@ -506,7 +560,6 @@ public final class CommitsPanelProvider {
   static final class SegmentsTableModel extends TableModelBase {
 
     enum Column implements TableColumnInfo {
-
       NAME("Name", 0, String.class, 60),
       MAXDOCS("Max docs", 1, Integer.class, 60),
       DELS("Dels", 2, Integer.class, 60),
@@ -572,4 +625,3 @@ public final class CommitsPanelProvider {
     }
   }
 }
-
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/ComponentOperatorRegistry.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/ComponentOperatorRegistry.java
index 0d9c99b0ec7..40eb7350120 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/ComponentOperatorRegistry.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/ComponentOperatorRegistry.java
@@ -44,7 +44,5 @@ public class ComponentOperatorRegistry {
   }
 
   /** marker interface for operators */
-  public interface ComponentOperator {
-  }
-
+  public interface ComponentOperator {}
 }
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/DocumentsPanelProvider.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/DocumentsPanelProvider.java
index e9daece4db4..85ca168c379 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/DocumentsPanelProvider.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/DocumentsPanelProvider.java
@@ -17,26 +17,6 @@ package org.apache.lucene.luke.app.desktop.components;
 
-import javax.swing.BorderFactory;
-import javax.swing.BoxLayout;
-import javax.swing.JButton;
-import javax.swing.JComboBox;
-import javax.swing.JComponent;
-import javax.swing.JLabel;
-import javax.swing.JList;
-import javax.swing.JMenuItem;
-import javax.swing.JPanel;
-import javax.swing.JPopupMenu;
-import javax.swing.JScrollPane;
-import javax.swing.JSpinner;
-import javax.swing.JSplitPane;
-import javax.swing.JTable;
-import javax.swing.JTextField;
-import javax.swing.ListSelectionModel;
-import javax.swing.SpinnerModel;
-import javax.swing.SpinnerNumberModel;
-import javax.swing.event.ChangeEvent;
-import javax.swing.table.TableCellRenderer;
 import java.awt.BorderLayout;
 import java.awt.Color;
 import java.awt.Dimension;
@@ -57,7 +37,26 @@ import java.math.BigInteger;
 import java.util.List;
 import java.util.Objects;
 import java.util.Optional;
-
+import javax.swing.BorderFactory;
+import javax.swing.BoxLayout;
+import javax.swing.JButton;
+import javax.swing.JComboBox;
+import javax.swing.JComponent;
+import javax.swing.JLabel;
+import javax.swing.JList;
+import javax.swing.JMenuItem;
+import javax.swing.JPanel;
+import javax.swing.JPopupMenu;
+import javax.swing.JScrollPane;
+import javax.swing.JSpinner;
+import javax.swing.JSplitPane;
+import javax.swing.JTable;
+import javax.swing.JTextField;
+import javax.swing.ListSelectionModel;
+import javax.swing.SpinnerModel;
+import javax.swing.SpinnerNumberModel;
+import javax.swing.event.ChangeEvent;
+import javax.swing.table.TableCellRenderer;
 import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.Term;
@@ -152,26 +151,30 @@ public final class DocumentsPanelProvider implements DocumentsTabOperator {
     this.dvDialogFactory = DocValuesDialogFactory.getInstance();
     this.valueDialogFactory = StoredValueDialogFactory.getInstance();
     HelpDialogFactory helpDialogFactory = HelpDialogFactory.getInstance();
-    this.tableHeaderRenderer = new HelpHeaderRenderer(
-        "About Flags", "Format: IdfpoNPSB#txxVDtxxxxTx/x",
-        createFlagsHelpDialog(), helpDialogFactory);
+    this.tableHeaderRenderer =
+        new HelpHeaderRenderer(
+            "About Flags",
+            "Format: IdfpoNPSB#txxVDtxxxxTx/x",
+            createFlagsHelpDialog(),
+            helpDialogFactory);
 
     IndexHandler.getInstance().addObserver(new Observer());
     operatorRegistry.register(DocumentsTabOperator.class, this);
   }
 
   private JComponent createFlagsHelpDialog() {
-    String[] values = new String[]{
-        "I - index options(docs, frequencies, positions, offsets)",
-        "N - norms",
-        "P - payloads",
-        "S - stored",
-        "B - binary stored values",
-        "#txx - numeric stored values(type, precision)",
-        "V - term vectors",
-        "Dtxxxxx - doc values(type)",
-        "Tx/x - point values(num bytes/dimension)"
-    };
+    String[] values =
+        new String[] {
+          "I - index options(docs, frequencies, positions, offsets)",
+          "N - norms",
+          "P - payloads",
+          "S - stored",
+          "B - binary stored values",
+          "#txx - numeric stored values(type, precision)",
+          "V - term vectors",
+          "Dtxxxxx - doc values(type)",
+          "Tx/x - point values(num bytes/dimension)"
+        };
     JList<String> list = new JList<>(values);
     return new JScrollPane(list);
   }
@@ -181,7 +184,8 @@ public final class DocumentsPanelProvider implements DocumentsTabOperator {
     panel.setOpaque(false);
     panel.setBorder(BorderFactory.createLineBorder(Color.gray));
 
-    JSplitPane splitPane = new JSplitPane(JSplitPane.VERTICAL_SPLIT, initUpperPanel(), initLowerPanel());
+    JSplitPane splitPane =
+        new JSplitPane(JSplitPane.VERTICAL_SPLIT, initUpperPanel(), initLowerPanel());
     splitPane.setOpaque(false);
     splitPane.setDividerLocation(0.4);
     panel.add(splitPane);
@@ -238,7 +242,9 @@ public final class DocumentsPanelProvider implements DocumentsTabOperator {
     c.gridwidth = 2;
     center.add(fieldsCombo, c);
 
-    firstTermBtn.setText(FontUtils.elegantIconHtml("8", MessageUtils.getLocalizedMessage("documents.button.first_term")));
+    firstTermBtn.setText(
+        FontUtils.elegantIconHtml(
+            "8", MessageUtils.getLocalizedMessage("documents.button.first_term")));
     firstTermBtn.setMaximumSize(new Dimension(80, 30));
     firstTermBtn.addActionListener(listeners::showFirstTerm);
     c.gridx = 0;
@@ -272,7 +278,8 @@ public final class DocumentsPanelProvider implements DocumentsTabOperator {
     JPanel footer = new JPanel(new FlowLayout(FlowLayout.LEADING, 20, 5));
     footer.setOpaque(false);
-    JLabel hintLbl = new JLabel(MessageUtils.getLocalizedMessage("documents.label.browse_terms_hint"));
+    JLabel hintLbl =
+        new JLabel(MessageUtils.getLocalizedMessage("documents.label.browse_terms_hint"));
     footer.add(hintLbl);
     panel.add(footer, BorderLayout.PAGE_END);
 
@@ -289,7 +296,8 @@ public final class DocumentsPanelProvider implements DocumentsTabOperator {
     GridBagConstraints c = new GridBagConstraints();
     c.fill = GridBagConstraints.BOTH;
 
-    JLabel label = new JLabel(MessageUtils.getLocalizedMessage("documents.label.browse_doc_by_term"));
+    JLabel label =
+        new JLabel(MessageUtils.getLocalizedMessage("documents.label.browse_doc_by_term"));
     c.gridx = 0;
     c.gridy = 0;
     c.weightx = 0.0;
@@ -308,7 +316,9 @@ public final class DocumentsPanelProvider implements DocumentsTabOperator {
     c.insets = new Insets(5, 5, 5, 5);
     center.add(selectedTermTF, c);
 
-    firstTermDocBtn.setText(FontUtils.elegantIconHtml("8", MessageUtils.getLocalizedMessage("documents.button.first_termdoc")));
+    firstTermDocBtn.setText(
+        FontUtils.elegantIconHtml(
+            "8", MessageUtils.getLocalizedMessage("documents.button.first_termdoc")));
     firstTermDocBtn.addActionListener(listeners::showFirstTermDoc);
     c.gridx = 0;
     c.gridy = 2;
@@ -343,8 +353,14 @@ public final class DocumentsPanelProvider implements DocumentsTabOperator {
     c.insets = new Insets(5, 5, 5, 5);
     center.add(termDocsNumLbl, c);
 
-    TableUtils.setupTable(posTable, ListSelectionModel.SINGLE_SELECTION, new PosTableModel(), null,
-        PosTableModel.Column.POSITION.getColumnWidth(), PosTableModel.Column.OFFSETS.getColumnWidth(), PosTableModel.Column.PAYLOAD.getColumnWidth());
+    TableUtils.setupTable(
+        posTable,
+        ListSelectionModel.SINGLE_SELECTION,
+        new PosTableModel(),
+        null,
+        PosTableModel.Column.POSITION.getColumnWidth(),
+        PosTableModel.Column.OFFSETS.getColumnWidth(),
+        PosTableModel.Column.PAYLOAD.getColumnWidth());
     JScrollPane scrollPane = new JScrollPane(posTable);
     scrollPane.setMinimumSize(new Dimension(100, 100));
     c.gridx = 0;
@@ -370,17 +386,23 @@ public final class DocumentsPanelProvider implements DocumentsTabOperator {
     JPanel browseDocsNote1 = new JPanel(new FlowLayout(FlowLayout.LEADING));
     browseDocsNote1.setOpaque(false);
-    browseDocsNote1.add(new JLabel(MessageUtils.getLocalizedMessage("documents.label.doc_table_note1")));
+    browseDocsNote1.add(
+        new JLabel(MessageUtils.getLocalizedMessage("documents.label.doc_table_note1")));
     browseDocsPanel.add(browseDocsNote1);
 
     JPanel browseDocsNote2 = new JPanel(new FlowLayout(FlowLayout.LEADING));
     browseDocsNote2.setOpaque(false);
-    browseDocsNote2.add(new JLabel(MessageUtils.getLocalizedMessage("documents.label.doc_table_note2")));
+    browseDocsNote2.add(
+        new JLabel(MessageUtils.getLocalizedMessage("documents.label.doc_table_note2")));
     browseDocsPanel.add(browseDocsNote2);
 
     panel.add(browseDocsPanel, BorderLayout.PAGE_START);
 
-    TableUtils.setupTable(documentTable, ListSelectionModel.MULTIPLE_INTERVAL_SELECTION, new DocumentsTableModel(), new MouseAdapter() {
+    TableUtils.setupTable(
+        documentTable,
+        ListSelectionModel.MULTIPLE_INTERVAL_SELECTION,
+        new DocumentsTableModel(),
+        new MouseAdapter() {
           @Override
           public void mouseClicked(MouseEvent e) {
             listeners.showDocumentContextMenu(e);
@@ -394,7 +416,10 @@ public final class DocumentsPanelProvider implements DocumentsTabOperator {
     flagsHeader.setOpaque(false);
     flagsHeader.add(new JLabel("Flags"));
     flagsHeader.add(new JLabel("Help"));
-    documentTable.getColumnModel().getColumn(DocumentsTableModel.Column.FLAGS.getIndex()).setHeaderValue(flagsHeader);
+    documentTable
+        .getColumnModel()
+        .getColumn(DocumentsTableModel.Column.FLAGS.getIndex())
+        .setHeaderValue(flagsHeader);
 
     JScrollPane scrollPane = new JScrollPane(documentTable);
     scrollPane.getHorizontalScrollBar().setAutoscrolls(false);
@@ -410,7 +435,10 @@ public final class DocumentsPanelProvider implements DocumentsTabOperator {
     JPanel left = new JPanel(new FlowLayout(FlowLayout.LEADING, 10, 2));
     left.setOpaque(false);
-    JLabel label = new JLabel(FontUtils.elegantIconHtml("h", MessageUtils.getLocalizedMessage("documents.label.browse_doc_by_idx")));
+    JLabel label =
+        new JLabel(
+            FontUtils.elegantIconHtml(
+                "h", MessageUtils.getLocalizedMessage("documents.label.browse_doc_by_idx")));
     label.setHorizontalTextPosition(JLabel.LEFT);
     left.add(label);
     docNumSpnr.setPreferredSize(new Dimension(100, 25));
@@ -422,15 +450,21 @@ public final class DocumentsPanelProvider implements DocumentsTabOperator {
     JPanel right = new JPanel(new FlowLayout(FlowLayout.TRAILING));
     right.setOpaque(false);
 
-    copyDocValuesBtn.setText(FontUtils.elegantIconHtml("", MessageUtils.getLocalizedMessage("documents.buttont.copy_values")));
+    copyDocValuesBtn.setText(
+        FontUtils.elegantIconHtml(
+            "", MessageUtils.getLocalizedMessage("documents.buttont.copy_values")));
     copyDocValuesBtn.setMargin(new Insets(5, 0, 5, 0));
     copyDocValuesBtn.addActionListener(listeners::copySelectedOrAllStoredValues);
     right.add(copyDocValuesBtn);
-    mltBtn.setText(FontUtils.elegantIconHtml("", MessageUtils.getLocalizedMessage("documents.button.mlt")));
+    mltBtn.setText(
+        FontUtils.elegantIconHtml(
+            "", MessageUtils.getLocalizedMessage("documents.button.mlt")));
     mltBtn.setMargin(new Insets(5, 0, 5, 0));
     mltBtn.addActionListener(listeners::mltSearch);
     right.add(mltBtn);
-    addDocBtn.setText(FontUtils.elegantIconHtml("Y", MessageUtils.getLocalizedMessage("documents.button.add")));
+    addDocBtn.setText(
+        FontUtils.elegantIconHtml(
+            "Y", MessageUtils.getLocalizedMessage("documents.button.add")));
     addDocBtn.setMargin(new Insets(5, 0, 5, 0));
     addDocBtn.addActionListener(listeners::showAddDocumentDialog);
     right.add(addDocBtn);
@@ -441,22 +475,26 @@ public final class DocumentsPanelProvider implements DocumentsTabOperator {
   private void setUpDocumentContextMenu() {
     // show term vector
-    JMenuItem item1 = new JMenuItem(MessageUtils.getLocalizedMessage("documents.doctable.menu.item1"));
+    JMenuItem item1 =
+        new JMenuItem(MessageUtils.getLocalizedMessage("documents.doctable.menu.item1"));
     item1.addActionListener(listeners::showTermVectorDialog);
     documentContextMenu.add(item1);
 
     // show doc values
-    JMenuItem item2 = new JMenuItem(MessageUtils.getLocalizedMessage("documents.doctable.menu.item2"));
+    JMenuItem item2 =
+        new JMenuItem(MessageUtils.getLocalizedMessage("documents.doctable.menu.item2"));
     item2.addActionListener(listeners::showDocValuesDialog);
     documentContextMenu.add(item2);
 
     // show stored value
-    JMenuItem item3 = new JMenuItem(MessageUtils.getLocalizedMessage("documents.doctable.menu.item3"));
+    JMenuItem item3 =
+        new JMenuItem(MessageUtils.getLocalizedMessage("documents.doctable.menu.item3"));
     item3.addActionListener(listeners::showStoredValueDialog);
     documentContextMenu.add(item3);
 
     // copy stored value to clipboard
-    JMenuItem item4 = new JMenuItem(MessageUtils.getLocalizedMessage("documents.doctable.menu.item4"));
+    JMenuItem item4 =
+        new JMenuItem(MessageUtils.getLocalizedMessage("documents.doctable.menu.item4"));
     item4.addActionListener(listeners::copyStoredValue);
     documentContextMenu.add(item4);
   }
@@ -466,7 +504,8 @@ public final class DocumentsPanelProvider implements DocumentsTabOperator {
   private void showFirstTerm() {
     String fieldName = (String) fieldsCombo.getSelectedItem();
     if (fieldName == null || fieldName.length() == 0) {
-      messageBroker.showStatusMessage(MessageUtils.getLocalizedMessage("documents.field.message.not_selected"));
+      messageBroker.showStatusMessage(
+          MessageUtils.getLocalizedMessage("documents.field.message.not_selected"));
       return;
     }
@@ -542,9 +581,12 @@ public final class DocumentsPanelProvider implements DocumentsTabOperator {
     messageBroker.clearStatusMessage();
   }
 
-
   private void clearPosTable() {
-    TableUtils.setupTable(posTable, ListSelectionModel.SINGLE_SELECTION, new PosTableModel(), null,
+    TableUtils.setupTable(
+        posTable,
+        ListSelectionModel.SINGLE_SELECTION,
+        new PosTableModel(),
+        null,
         PosTableModel.Column.POSITION.getColumnWidth(),
         PosTableModel.Column.OFFSETS.getColumnWidth(),
         PosTableModel.Column.PAYLOAD.getColumnWidth());
@@ -555,7 +597,8 @@ public final class DocumentsPanelProvider implements DocumentsTabOperator {
     int docid = documentsModel.firstTermDoc().orElse(-1);
     if (docid < 0) {
       nextTermDocBtn.setEnabled(false);
-      messageBroker.showStatusMessage(MessageUtils.getLocalizedMessage("documents.termdocs.message.not_available"));
+      messageBroker.showStatusMessage(
+          MessageUtils.getLocalizedMessage("documents.termdocs.message.not_available"));
       return;
     }
     termDocIdxTF.setText(String.valueOf(1));
@@ -563,9 +606,18 @@ public final class DocumentsPanelProvider implements DocumentsTabOperator {
     List postings = documentsModel.getTermPositions();
     posTable.setModel(new PosTableModel(postings));
-    posTable.getColumnModel().getColumn(PosTableModel.Column.POSITION.getIndex()).setPreferredWidth(PosTableModel.Column.POSITION.getColumnWidth());
-    posTable.getColumnModel().getColumn(PosTableModel.Column.OFFSETS.getIndex()).setPreferredWidth(PosTableModel.Column.OFFSETS.getColumnWidth());
-    posTable.getColumnModel().getColumn(PosTableModel.Column.PAYLOAD.getIndex()).setPreferredWidth(PosTableModel.Column.PAYLOAD.getColumnWidth());
+    posTable
+        .getColumnModel()
+        .getColumn(PosTableModel.Column.POSITION.getIndex())
+        .setPreferredWidth(PosTableModel.Column.POSITION.getColumnWidth());
+    posTable
+        .getColumnModel()
+        .getColumn(PosTableModel.Column.OFFSETS.getIndex())
+        .setPreferredWidth(PosTableModel.Column.OFFSETS.getColumnWidth());
+    posTable
+        .getColumnModel()
+        .getColumn(PosTableModel.Column.PAYLOAD.getIndex())
+        .setPreferredWidth(PosTableModel.Column.PAYLOAD.getColumnWidth());
 
     nextTermDocBtn.setEnabled(true);
     messageBroker.clearStatusMessage();
@@ -575,7 +627,8 @@ public final class DocumentsPanelProvider implements DocumentsTabOperator {
     int docid = documentsModel.nextTermDoc().orElse(-1);
     if (docid < 0) {
       nextTermDocBtn.setEnabled(false);
-      messageBroker.showStatusMessage(MessageUtils.getLocalizedMessage("documents.termdocs.message.not_available"));
+      messageBroker.showStatusMessage(
+          MessageUtils.getLocalizedMessage("documents.termdocs.message.not_available"));
       return;
     }
     int curIdx = Integer.parseInt(termDocIdxTF.getText());
@@ -596,75 +649,121 @@ public final class DocumentsPanelProvider implements DocumentsTabOperator {
   private void mltSearch() {
     int docNum = (int) docNumSpnr.getValue();
-    operatorRegistry.get(SearchTabOperator.class).ifPresent(operator -> {
-      operator.mltSearch(docNum);
-      tabSwitcher.switchTab(TabbedPaneProvider.Tab.SEARCH);
-    });
+    operatorRegistry
+        .get(SearchTabOperator.class)
+        .ifPresent(
+            operator -> {
+              operator.mltSearch(docNum);
+              tabSwitcher.switchTab(TabbedPaneProvider.Tab.SEARCH);
+            });
   }
 
   private void showAddDocumentDialog() {
-    new DialogOpener<>(addDocDialogFactory).open("Add document", 600, 500,
-        (factory) -> {
-        });
+    new DialogOpener<>(addDocDialogFactory).open("Add document", 600, 500, (factory) -> {});
   }
 
   private void showTermVectorDialog() {
     int docid = (Integer) docNumSpnr.getValue();
-    String field = (String) documentTable.getModel().getValueAt(documentTable.getSelectedRow(), DocumentsTableModel.Column.FIELD.getIndex());
+    String field =
+        (String)
+            documentTable
+                .getModel()
+                .getValueAt(
+                    documentTable.getSelectedRow(), DocumentsTableModel.Column.FIELD.getIndex());
     List tvEntries = documentsModel.getTermVectors(docid, field);
     if (tvEntries.isEmpty()) {
-      messageBroker.showStatusMessage(MessageUtils.getLocalizedMessage("documents.termvector.message.not_available", field, docid));
+      messageBroker.showStatusMessage(
+          MessageUtils.getLocalizedMessage(
+              "documents.termvector.message.not_available", field, docid));
       return;
     }
 
-    new DialogOpener<>(tvDialogFactory).open(
-        "Term Vector", 600, 400,
-        (factory) -> {
-          factory.setField(field);
-          factory.setTvEntries(tvEntries);
-        });
+    new DialogOpener<>(tvDialogFactory)
+        .open(
+            "Term Vector",
+            600,
+            400,
+            (factory) -> {
+              factory.setField(field);
+              factory.setTvEntries(tvEntries);
+            });
     messageBroker.clearStatusMessage();
   }
 
   private void showDocValuesDialog() {
     int docid = (Integer) docNumSpnr.getValue();
-    String field = (String) documentTable.getModel().getValueAt(documentTable.getSelectedRow(), DocumentsTableModel.Column.FIELD.getIndex());
+    String field =
+        (String)
+            documentTable
+                .getModel()
+                .getValueAt(
+                    documentTable.getSelectedRow(), DocumentsTableModel.Column.FIELD.getIndex());
     Optional docValues = documentsModel.getDocValues(docid, field);
     if (docValues.isPresent()) {
-      new DialogOpener<>(dvDialogFactory).open(
-          "Doc Values", 400, 300,
-          (factory) -> {
-            factory.setValue(field, docValues.get());
-          });
+      new DialogOpener<>(dvDialogFactory)
+          .open(
+              "Doc Values",
+              400,
+              300,
+              (factory) -> {
+                factory.setValue(field, docValues.get());
+              });
       messageBroker.clearStatusMessage();
     } else {
-      messageBroker.showStatusMessage(MessageUtils.getLocalizedMessage("documents.docvalues.message.not_available", field, docid));
+      messageBroker.showStatusMessage(
+          MessageUtils.getLocalizedMessage(
+              "documents.docvalues.message.not_available", field, docid));
    }
   }
 
   private void showStoredValueDialog() {
     int docid = (Integer) docNumSpnr.getValue();
-    String field = (String) documentTable.getModel().getValueAt(documentTable.getSelectedRow(), DocumentsTableModel.Column.FIELD.getIndex());
-    String value = (String) documentTable.getModel().getValueAt(documentTable.getSelectedRow(), DocumentsTableModel.Column.VALUE.getIndex());
+    String field =
+        (String)
+            documentTable
+                .getModel()
+                .getValueAt(
+                    documentTable.getSelectedRow(), DocumentsTableModel.Column.FIELD.getIndex());
+    String value =
+        (String)
+            documentTable
+                .getModel()
+                .getValueAt(
+                    documentTable.getSelectedRow(), DocumentsTableModel.Column.VALUE.getIndex());
     if (Objects.isNull(value)) {
-      messageBroker.showStatusMessage(MessageUtils.getLocalizedMessage("documents.stored.message.not_availabe", field, docid));
+      messageBroker.showStatusMessage(
+          MessageUtils.getLocalizedMessage("documents.stored.message.not_availabe", field, docid));
      return;
    }
-    new DialogOpener<>(valueDialogFactory).open(
-        "Stored Value", 400, 300,
-        (factory) -> {
-          factory.setField(field);
-          factory.setValue(value);
-        });
+    new DialogOpener<>(valueDialogFactory)
+        .open(
+            "Stored Value",
+            400,
+            300,
+            (factory) -> {
+              factory.setField(field);
+              factory.setValue(value);
+            });
     messageBroker.clearStatusMessage();
   }
 
   private void copyStoredValue() {
     int docid = (Integer) docNumSpnr.getValue();
-    String field = (String) documentTable.getModel().getValueAt(documentTable.getSelectedRow(), DocumentsTableModel.Column.FIELD.getIndex());
-    String value = (String) documentTable.getModel().getValueAt(documentTable.getSelectedRow(), DocumentsTableModel.Column.VALUE.getIndex());
+    String field =
+        (String)
+            documentTable
+                .getModel()
+                .getValueAt(
+                    documentTable.getSelectedRow(), DocumentsTableModel.Column.FIELD.getIndex());
+    String value =
+        (String)
+            documentTable
+                .getModel()
+                .getValueAt(
+                    documentTable.getSelectedRow(), DocumentsTableModel.Column.VALUE.getIndex());
     if (Objects.isNull(value)) {
-      messageBroker.showStatusMessage(MessageUtils.getLocalizedMessage("documents.stored.message.not_availabe", field, docid));
+      messageBroker.showStatusMessage(
+          MessageUtils.getLocalizedMessage("documents.stored.message.not_availabe", field, docid));
       return;
     }
     Clipboard clipboard = Toolkit.getDefaultToolkit().getSystemClipboard();
@@ -688,7 +787,9 @@ public final class DocumentsPanelProvider implements DocumentsTabOperator {
   private StringSelection copyAllValues() {
     StringBuilder sb = new StringBuilder();
     for (int i = 0; i < documentTable.getRowCount(); i++) {
-      String value = (String) documentTable.getModel().getValueAt(i, DocumentsTableModel.Column.VALUE.getIndex());
+      String value =
+          (String)
+              documentTable.getModel().getValueAt(i, DocumentsTableModel.Column.VALUE.getIndex());
       if (Objects.nonNull(value)) {
         sb.append((i == 0) ? value : System.lineSeparator() + value);
       }
@@ -700,7 +801,11 @@ public final class DocumentsPanelProvider implements DocumentsTabOperator {
     StringBuilder sb = new StringBuilder();
     boolean isFirst = true;
     for (int rowIndex : documentTable.getSelectedRows()) {
-      String value = (String) documentTable.getModel().getValueAt(rowIndex, DocumentsTableModel.Column.VALUE.getIndex());
+      String value =
+          (String)
+              documentTable
+                  .getModel()
+                  .getValueAt(rowIndex, DocumentsTableModel.Column.VALUE.getIndex());
       if (Objects.nonNull(value)) {
         sb.append(isFirst ? value : System.lineSeparator() + value);
         isFirst = false;
@@ -727,7 +832,6 @@ public final class DocumentsPanelProvider implements DocumentsTabOperator {
   public void displayDoc(int docid) {
     showDoc(docid);
   }
-  ;
 
   private void showDoc(int docid) {
@@ -736,13 +840,34 @@ public final class DocumentsPanelProvider implements DocumentsTabOperator {
     List doc = documentsModel.getDocumentFields(docid);
     documentTable.setModel(new DocumentsTableModel(doc));
     documentTable.setFont(StyleConstants.FONT_MONOSPACE_LARGE);
-    documentTable.getColumnModel().getColumn(DocumentsTableModel.Column.FIELD.getIndex()).setPreferredWidth(DocumentsTableModel.Column.FIELD.getColumnWidth());
-    documentTable.getColumnModel().getColumn(DocumentsTableModel.Column.FLAGS.getIndex()).setMinWidth(DocumentsTableModel.Column.FLAGS.getColumnWidth());
-    documentTable.getColumnModel().getColumn(DocumentsTableModel.Column.FLAGS.getIndex()).setMaxWidth(DocumentsTableModel.Column.FIELD.getColumnWidth());
-    documentTable.getColumnModel().getColumn(DocumentsTableModel.Column.NORM.getIndex()).setMinWidth(DocumentsTableModel.Column.NORM.getColumnWidth());
-    documentTable.getColumnModel().getColumn(DocumentsTableModel.Column.NORM.getIndex()).setMaxWidth(DocumentsTableModel.Column.NORM.getColumnWidth());
-    documentTable.getColumnModel().getColumn(DocumentsTableModel.Column.VALUE.getIndex()).setPreferredWidth(DocumentsTableModel.Column.VALUE.getColumnWidth());
-    documentTable.getColumnModel().getColumn(DocumentsTableModel.Column.FLAGS.getIndex()).setHeaderRenderer(tableHeaderRenderer);
+    documentTable
+        .getColumnModel()
+        .getColumn(DocumentsTableModel.Column.FIELD.getIndex())
+        .setPreferredWidth(DocumentsTableModel.Column.FIELD.getColumnWidth());
+    documentTable
+        .getColumnModel()
+        .getColumn(DocumentsTableModel.Column.FLAGS.getIndex())
+        .setMinWidth(DocumentsTableModel.Column.FLAGS.getColumnWidth());
+    documentTable
+        .getColumnModel()
+        .getColumn(DocumentsTableModel.Column.FLAGS.getIndex())
+        .setMaxWidth(DocumentsTableModel.Column.FIELD.getColumnWidth());
+    documentTable
+        .getColumnModel()
+        .getColumn(DocumentsTableModel.Column.NORM.getIndex())
+        .setMinWidth(DocumentsTableModel.Column.NORM.getColumnWidth());
+    documentTable
+        .getColumnModel()
+        .getColumn(DocumentsTableModel.Column.NORM.getIndex())
+        .setMaxWidth(DocumentsTableModel.Column.NORM.getColumnWidth());
+    documentTable
+        .getColumnModel()
+        .getColumn(DocumentsTableModel.Column.VALUE.getIndex())
+        .setPreferredWidth(DocumentsTableModel.Column.VALUE.getColumnWidth());
+    documentTable
+        .getColumnModel()
+        .getColumn(DocumentsTableModel.Column.FLAGS.getIndex())
+        .setHeaderRenderer(tableHeaderRenderer);
 
     messageBroker.clearStatusMessage();
   }
@@ -810,7 +935,6 @@ public final class DocumentsPanelProvider implements DocumentsTabOperator {
     void copySelectedOrAllStoredValues(ActionEvent e) {
       DocumentsPanelProvider.this.copySelectedOrAllStoredValues();
     }
-
   }
 
   private class Observer implements IndexObserver {
@@ -854,7 +978,6 @@ public final class DocumentsPanelProvider implements DocumentsTabOperator {
   static final class PosTableModel extends TableModelBase {
 
     enum Column implements TableColumnInfo {
-
       POSITION("Position", 0, Integer.class, 80),
       OFFSETS("Offsets", 1, String.class, 120),
       PAYLOAD("Payload", 2, String.class, 300);
@@ -912,7 +1035,7 @@ public final class DocumentsPanelProvider implements DocumentsTabOperator {
           payload = BytesRefUtils.decode(p.getPayload());
         }
 
-        data[i] = new Object[]{position, offset, payload};
+        data[i] = new Object[] {position, offset, payload};
       }
     }
@@ -983,7 +1106,7 @@ public final class DocumentsPanelProvider implements DocumentsTabOperator {
         } else if (docField.getBinaryValue() != null) {
           value = String.valueOf(docField.getBinaryValue());
         }
-        data[i] = new Object[]{field, flags, norm, value};
+        data[i] = new Object[] {field, flags, norm, value};
       }
     }
@@ -1110,6 +1233,4 @@ public final class DocumentsPanelProvider implements DocumentsTabOperator {
       return Column.values();
     }
   }
-
 }
-
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/DocumentsTabOperator.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/DocumentsTabOperator.java
index a0618da1f0a..8983740bb74 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/DocumentsTabOperator.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/DocumentsTabOperator.java
@@ -28,4 +28,4 @@ public interface DocumentsTabOperator extends ComponentOperatorRegistry.Componen
   void seekNextTerm();
 
   void showFirstTermDoc();
-}
\ No newline at end of file
+}
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/LogsPanelProvider.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/LogsPanelProvider.java
index 1d27cea9ff3..1f6bcf11d72 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/LogsPanelProvider.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/LogsPanelProvider.java
@@ -17,14 +17,13 @@ package org.apache.lucene.luke.app.desktop.components;
 
+import java.awt.BorderLayout;
+import java.awt.FlowLayout;
 import javax.swing.BorderFactory;
 import javax.swing.JLabel;
 import javax.swing.JPanel;
 import javax.swing.JScrollPane;
 import javax.swing.JTextArea;
-import java.awt.BorderLayout;
-import java.awt.FlowLayout;
-
 import org.apache.lucene.luke.app.desktop.LukeMain;
 import org.apache.lucene.luke.app.desktop.util.MessageUtils;
@@ -54,5 +53,4 @@ public final class LogsPanelProvider {
     panel.add(new JScrollPane(logTextArea), BorderLayout.CENTER);
     return panel;
   }
-
 }
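The import churn at the top of each file above follows a single rule: imports end up in one ASCII-sorted block (java.*, then javax.*, then org.*) with the old blank separator lines removed, which is why every file shows its java.awt and java.util imports moving ahead of javax.swing. A compilable sketch of the resulting order (assuming google-java-format's import sorting; the classes were chosen only for illustration):

    import java.awt.BorderLayout;
    import java.util.List;
    import javax.swing.JPanel;
    import org.apache.lucene.util.Version;

    public class ImportOrderSketch {
      public static void main(String[] args) {
        // Reference the types so the imports above are used.
        System.out.println(List.of(BorderLayout.class, JPanel.class, Version.class));
      }
    }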
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/LukeWindowProvider.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/LukeWindowProvider.java
index faf5c1c1e27..bbea489e937 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/LukeWindowProvider.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/LukeWindowProvider.java
@@ -17,14 +17,6 @@ package org.apache.lucene.luke.app.desktop.components;
 
-import javax.swing.BorderFactory;
-import javax.swing.JFrame;
-import javax.swing.JLabel;
-import javax.swing.JMenuBar;
-import javax.swing.JPanel;
-import javax.swing.JTabbedPane;
-import javax.swing.JTextArea;
-import javax.swing.WindowConstants;
 import java.awt.BorderLayout;
 import java.awt.Color;
 import java.awt.Dimension;
@@ -33,7 +25,14 @@ import java.awt.GridBagConstraints;
 import java.awt.GridBagLayout;
 import java.awt.GridLayout;
 import java.io.IOException;
-
+import javax.swing.BorderFactory;
+import javax.swing.JFrame;
+import javax.swing.JLabel;
+import javax.swing.JMenuBar;
+import javax.swing.JPanel;
+import javax.swing.JTabbedPane;
+import javax.swing.JTextArea;
+import javax.swing.WindowConstants;
 import org.apache.lucene.luke.app.DirectoryHandler;
 import org.apache.lucene.luke.app.DirectoryObserver;
 import org.apache.lucene.luke.app.IndexHandler;
@@ -51,7 +50,8 @@ import org.apache.lucene.util.Version;
 
 /** Provider of the root window */
 public final class LukeWindowProvider implements LukeWindowOperator {
-  private static final String WINDOW_TITLE = MessageUtils.getLocalizedMessage("window.title") + " - v" + Version.LATEST.toString();
+  private static final String WINDOW_TITLE =
+      MessageUtils.getLocalizedMessage("window.title") + " - v" + Version.LATEST.toString();
 
   private final Preferences prefs;
 
@@ -149,7 +149,6 @@ public final class LukeWindowProvider implements LukeWindowOperator {
     multiIcon.setVisible(false);
     iconPanel.add(multiIcon);
 
-
     readOnlyIcon.setText(FontUtils.elegantIconHtml(""));
     readOnlyIcon.setToolTipText(MessageUtils.getLocalizedMessage("tooltip.read_only"));
     readOnlyIcon.setVisible(false);
@@ -206,9 +205,11 @@ public final class LukeWindowProvider implements LukeWindowOperator {
     noReaderIcon.setVisible(false);
 
     if (state.readOnly()) {
-      messageBroker.showStatusMessage(MessageUtils.getLocalizedMessage("message.index_opened_ro"));
+      messageBroker.showStatusMessage(
+          MessageUtils.getLocalizedMessage("message.index_opened_ro"));
     } else if (!state.hasDirectoryReader()) {
-      messageBroker.showStatusMessage(MessageUtils.getLocalizedMessage("message.index_opened_multi"));
+      messageBroker.showStatusMessage(
+          MessageUtils.getLocalizedMessage("message.index_opened_multi"));
     } else {
       messageBroker.showStatusMessage(MessageUtils.getLocalizedMessage("message.index_opened"));
     }
@@ -222,7 +223,6 @@ public final class LukeWindowProvider implements LukeWindowOperator {
 
     messageBroker.showStatusMessage(MessageUtils.getLocalizedMessage("message.index_closed"));
   }
-
  }
 
  private class MessageReceiverImpl implements MessageBroker.MessageReceiver {
@@ -242,9 +242,6 @@ public final class LukeWindowProvider implements LukeWindowOperator {
       messageLbl.setText("");
     }
 
-    private MessageReceiverImpl() {
-    }
-
+    private MessageReceiverImpl() {}
   }
-
 }
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/MenuBarProvider.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/MenuBarProvider.java
index 90b2d4fb585..1c9526feae8 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/MenuBarProvider.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/MenuBarProvider.java
@@ -17,12 +17,11 @@ package org.apache.lucene.luke.app.desktop.components;
 
+import java.awt.event.ActionEvent;
+import java.io.IOException;
 import javax.swing.JMenu;
 import javax.swing.JMenuBar;
 import javax.swing.JMenuItem;
-import java.awt.event.ActionEvent;
-import java.io.IOException;
-
 import org.apache.lucene.luke.app.DirectoryHandler;
 import org.apache.lucene.luke.app.DirectoryObserver;
 import org.apache.lucene.luke.app.IndexHandler;
@@ -135,7 +134,6 @@ public final class MenuBarProvider {
     createIndexMItem.addActionListener(listeners::showCreateIndexDialog);
     fileMenu.add(createIndexMItem);
 
-
     closeIndexMItem.setText(MessageUtils.getLocalizedMessage("menu.item.close_index"));
     closeIndexMItem.setEnabled(false);
     closeIndexMItem.addActionListener(listeners::closeIndex);
@@ -197,13 +195,21 @@ public final class MenuBarProvider {
   private class ListenerFunctions {
 
     void showOpenIndexDialog(ActionEvent e) {
-      new DialogOpener<>(openIndexDialogFactory).open(MessageUtils.getLocalizedMessage("openindex.dialog.title"), 600, 420,
-          (factory) -> {});
+      new DialogOpener<>(openIndexDialogFactory)
+          .open(
+              MessageUtils.getLocalizedMessage("openindex.dialog.title"),
+              600,
+              420,
+              (factory) -> {});
     }
 
     void showCreateIndexDialog(ActionEvent e) {
-      new DialogOpener<>(createIndexDialogFactory).open(MessageUtils.getLocalizedMessage("createindex.dialog.title"), 600, 360,
-          (factory) -> {});
+      new DialogOpener<>(createIndexDialogFactory)
+          .open(
+              MessageUtils.getLocalizedMessage("createindex.dialog.title"),
+              600,
+              360,
+              (factory) -> {});
     }
 
     void reopenIndex(ActionEvent e) {
@@ -233,7 +239,9 @@ public final class MenuBarProvider {
     private void changeTheme(Preferences.ColorTheme theme) {
       try {
         prefs.setColorTheme(theme);
-        operatorRegistry.get(LukeWindowOperator.class).ifPresent(operator -> operator.setColorTheme(theme));
+        operatorRegistry
+            .get(LukeWindowOperator.class)
+            .ifPresent(operator -> operator.setColorTheme(theme));
       } catch (IOException e) {
         throw new LukeException("Failed to set color theme : " + theme.name(), e);
       }
@@ -250,30 +258,22 @@ public final class MenuBarProvider {
     }
 
     void showOptimizeIndexDialog(ActionEvent e) {
-      new DialogOpener<>(optimizeIndexDialogFactory).open("Optimize index", 600, 600,
-          factory -> {
-          });
+      new DialogOpener<>(optimizeIndexDialogFactory)
+          .open("Optimize index", 600, 600, factory -> {});
     }
 
     void showCheckIndexDialog(ActionEvent e) {
-      new DialogOpener<>(checkIndexDialogFactory).open("Check index", 600, 600,
-          factory -> {
-          });
+      new DialogOpener<>(checkIndexDialogFactory).open("Check index", 600, 600, factory -> {});
     }
 
     void showAboutDialog(ActionEvent e) {
       final String title = "About Luke v" + Version.LATEST.toString();
-      new DialogOpener<>(aboutDialogFactory).open(title, 800, 480,
-          factory -> {
-          });
+      new DialogOpener<>(aboutDialogFactory).open(title, 800, 480, factory -> {});
     }
 
     void showExportTermsDialog(ActionEvent e) {
-      new DialogOpener<>(exportTermsDialogFactory).open("Export terms", 600, 450,
-          factory -> {
-          });
+      new DialogOpener<>(exportTermsDialogFactory).open("Export terms", 600, 450, factory -> {});
     }
-
   }
 
   private class Observer implements IndexObserver, DirectoryObserver {
@@ -317,6 +317,5 @@ public final class MenuBarProvider {
       checkIndexMItem.setEnabled(false);
       exportTermsMItem.setEnabled(false);
     }
-
   }
 }
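The MenuBarProvider hunks above show the other half of the wrapping rule: when a call's arguments cannot share one line, every argument gets its own line, while an empty callback collapses to factory -> {}. A sketch with a hypothetical open(...) signature (DialogOpener's real API is not shown in this patch, so the stand-in below is an assumption):

    import java.util.function.Consumer;

    public class ArgWrapSketch {
      // Hypothetical stand-in for a dialog opener: title, width, height, initializer.
      static void open(String title, int width, int height, Consumer<Object> initializer) {
        initializer.accept(new Object());
      }

      public static void main(String[] args) {
        // Fits on one line, so the empty lambda stays inline:
        open("Check index", 600, 600, factory -> {});
        // Too long for one line: one argument per line at a +4 indent.
        open(
            "A dialog title long enough that the formatter has to wrap the whole call",
            600,
            420,
            factory -> {});
      }
    }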
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/OverviewPanelProvider.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/OverviewPanelProvider.java
index c85e93bcd7c..00a704b6cbe 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/OverviewPanelProvider.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/OverviewPanelProvider.java
@@ -17,6 +17,20 @@ package org.apache.lucene.luke.app.desktop.components;
 
+import java.awt.BorderLayout;
+import java.awt.Color;
+import java.awt.Dimension;
+import java.awt.FlowLayout;
+import java.awt.GridBagConstraints;
+import java.awt.GridBagLayout;
+import java.awt.GridLayout;
+import java.awt.Insets;
+import java.awt.event.ActionEvent;
+import java.awt.event.MouseAdapter;
+import java.awt.event.MouseEvent;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
 import javax.swing.BorderFactory;
 import javax.swing.BoxLayout;
 import javax.swing.JButton;
@@ -33,21 +47,6 @@ import javax.swing.ListSelectionModel;
 import javax.swing.SpinnerNumberModel;
 import javax.swing.table.DefaultTableCellRenderer;
 import javax.swing.table.TableRowSorter;
-import java.awt.BorderLayout;
-import java.awt.Color;
-import java.awt.Dimension;
-import java.awt.FlowLayout;
-import java.awt.GridBagConstraints;
-import java.awt.GridBagLayout;
-import java.awt.GridLayout;
-import java.awt.Insets;
-import java.awt.event.ActionEvent;
-import java.awt.event.MouseAdapter;
-import java.awt.event.MouseEvent;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-
 import org.apache.lucene.luke.app.IndexHandler;
 import org.apache.lucene.luke.app.IndexObserver;
 import org.apache.lucene.luke.app.LukeState;
@@ -127,7 +126,8 @@ public final class OverviewPanelProvider {
     panel.setLayout(new GridLayout(1, 1));
     panel.setBorder(BorderFactory.createLineBorder(Color.gray));
 
-    JSplitPane splitPane = new JSplitPane(JSplitPane.VERTICAL_SPLIT, initUpperPanel(), initLowerPanel());
+    JSplitPane splitPane =
+        new JSplitPane(JSplitPane.VERTICAL_SPLIT, initUpperPanel(), initLowerPanel());
     splitPane.setDividerLocation(0.4);
     splitPane.setOpaque(false);
     panel.add(splitPane);
@@ -149,7 +149,8 @@ public final class OverviewPanelProvider {
 
     c.gridx = GRIDX_DESC;
     c.weightx = WEIGHTX_DESC;
-    panel.add(new JLabel(MessageUtils.getLocalizedMessage("overview.label.index_path"), JLabel.RIGHT), c);
+    panel.add(
+        new JLabel(MessageUtils.getLocalizedMessage("overview.label.index_path"), JLabel.RIGHT), c);
 
     c.gridx = GRIDX_VAL;
     c.weightx = WEIGHTX_VAL;
@@ -159,7 +160,8 @@ public final class OverviewPanelProvider {
     c.gridx = GRIDX_DESC;
     c.gridy += 1;
     c.weightx = WEIGHTX_DESC;
-    panel.add(new JLabel(MessageUtils.getLocalizedMessage("overview.label.num_fields"), JLabel.RIGHT), c);
+    panel.add(
+        new JLabel(MessageUtils.getLocalizedMessage("overview.label.num_fields"), JLabel.RIGHT), c);
 
     c.gridx = GRIDX_VAL;
     c.weightx = WEIGHTX_VAL;
@@ -169,7 +171,8 @@ public final class OverviewPanelProvider {
     c.gridx = GRIDX_DESC;
     c.gridy += 1;
     c.weightx = WEIGHTX_DESC;
-    panel.add(new JLabel(MessageUtils.getLocalizedMessage("overview.label.num_docs"), JLabel.RIGHT), c);
+    panel.add(
+        new JLabel(MessageUtils.getLocalizedMessage("overview.label.num_docs"), JLabel.RIGHT), c);
 
     c.gridx = GRIDX_VAL;
     c.weightx = WEIGHTX_VAL;
@@ -179,7 +182,8 @@ public final class OverviewPanelProvider {
     c.gridx = GRIDX_DESC;
     c.gridy += 1;
     c.weightx = WEIGHTX_DESC;
-    panel.add(new JLabel(MessageUtils.getLocalizedMessage("overview.label.num_terms"), JLabel.RIGHT), c);
+    panel.add(
+        new JLabel(MessageUtils.getLocalizedMessage("overview.label.num_terms"), JLabel.RIGHT), c);
 
     c.gridx = GRIDX_VAL;
     c.weightx = WEIGHTX_VAL;
@@ -189,7 +193,8 @@ public final class OverviewPanelProvider {
     c.gridx = GRIDX_DESC;
     c.gridy += 1;
     c.weightx = WEIGHTX_DESC;
-    panel.add(new JLabel(MessageUtils.getLocalizedMessage("overview.label.del_opt"), JLabel.RIGHT), c);
+    panel.add(
+        new JLabel(MessageUtils.getLocalizedMessage("overview.label.del_opt"), JLabel.RIGHT), c);
 
     c.gridx = GRIDX_VAL;
     c.weightx = WEIGHTX_VAL;
@@ -199,7 +204,9 @@ public final class OverviewPanelProvider {
     c.gridx = GRIDX_DESC;
     c.gridy += 1;
     c.weightx = WEIGHTX_DESC;
-    panel.add(new JLabel(MessageUtils.getLocalizedMessage("overview.label.index_version"), JLabel.RIGHT), c);
+    panel.add(
+        new JLabel(MessageUtils.getLocalizedMessage("overview.label.index_version"), JLabel.RIGHT),
+        c);
 
     c.gridx = GRIDX_VAL;
     c.weightx = WEIGHTX_VAL;
@@ -209,7 +216,9 @@ public final class OverviewPanelProvider {
     c.gridx = GRIDX_DESC;
     c.gridy += 1;
     c.weightx = WEIGHTX_DESC;
-    panel.add(new JLabel(MessageUtils.getLocalizedMessage("overview.label.index_format"), JLabel.RIGHT), c);
+    panel.add(
+        new JLabel(MessageUtils.getLocalizedMessage("overview.label.index_format"), JLabel.RIGHT),
+        c);
 
     c.gridx = GRIDX_VAL;
     c.weightx = WEIGHTX_VAL;
@@ -219,7 +228,8 @@ public final class OverviewPanelProvider {
     c.gridx = GRIDX_DESC;
     c.gridy += 1;
     c.weightx = WEIGHTX_DESC;
-    panel.add(new JLabel(MessageUtils.getLocalizedMessage("overview.label.dir_impl"), JLabel.RIGHT), c);
+    panel.add(
+        new JLabel(MessageUtils.getLocalizedMessage("overview.label.dir_impl"), JLabel.RIGHT), c);
 
     c.gridx = GRIDX_VAL;
     c.weightx = WEIGHTX_VAL;
@@ -229,7 +239,9 @@ public final class OverviewPanelProvider {
     c.gridx = GRIDX_DESC;
     c.gridy += 1;
     c.weightx = WEIGHTX_DESC;
-    panel.add(new JLabel(MessageUtils.getLocalizedMessage("overview.label.commit_point"), JLabel.RIGHT), c);
+    panel.add(
+        new JLabel(MessageUtils.getLocalizedMessage("overview.label.commit_point"), JLabel.RIGHT),
+        c);
 
     c.gridx = GRIDX_VAL;
     c.weightx = WEIGHTX_VAL;
@@ -239,7 +251,10 @@ public final class OverviewPanelProvider {
     c.gridx = GRIDX_DESC;
     c.gridy += 1;
     c.weightx = WEIGHTX_DESC;
-    panel.add(new JLabel(MessageUtils.getLocalizedMessage("overview.label.commit_userdata"), JLabel.RIGHT), c);
+    panel.add(
+        new JLabel(
+            MessageUtils.getLocalizedMessage("overview.label.commit_userdata"), JLabel.RIGHT),
+        c);
 
     c.gridx = GRIDX_VAL;
     c.weightx = WEIGHTX_VAL;
@@ -257,7 +272,8 @@ public final class OverviewPanelProvider {
     label.setBorder(BorderFactory.createEmptyBorder(5, 10, 5, 10));
     panel.add(label, BorderLayout.PAGE_START);
 
-    JSplitPane splitPane = new JSplitPane(JSplitPane.HORIZONTAL_SPLIT, initTermCountsPanel(), initTopTermsPanel());
+    JSplitPane splitPane =
+        new JSplitPane(JSplitPane.HORIZONTAL_SPLIT, initTermCountsPanel(), initTopTermsPanel());
     splitPane.setOpaque(false);
     splitPane.setDividerLocation(320);
     splitPane.setBorder(BorderFactory.createEmptyBorder(10, 10, 10, 10));
@@ -274,13 +290,18 @@ public final class OverviewPanelProvider {
     label.setBorder(BorderFactory.createEmptyBorder(0, 0, 5, 0));
     panel.add(label, BorderLayout.PAGE_START);
 
-    TableUtils.setupTable(termCountsTable, ListSelectionModel.SINGLE_SELECTION, new TermCountsTableModel(),
+    TableUtils.setupTable(
+        termCountsTable,
+        ListSelectionModel.SINGLE_SELECTION,
+        new TermCountsTableModel(),
         new MouseAdapter() {
           @Override
           public void mouseClicked(MouseEvent e) {
             listeners.selectField(e);
           }
-        }, TermCountsTableModel.Column.NAME.getColumnWidth(), TermCountsTableModel.Column.TERM_COUNT.getColumnWidth());
+        },
+        TermCountsTableModel.Column.NAME.getColumnWidth(),
+        TermCountsTableModel.Column.TERM_COUNT.getColumnWidth());
 
     JScrollPane scrollPane = new JScrollPane(termCountsTable);
     panel.add(scrollPane, BorderLayout.CENTER);
@@ -344,13 +365,18 @@ public final class OverviewPanelProvider {
     label.setBorder(BorderFactory.createEmptyBorder(0, 0, 5, 0));
     termsPanel.add(label, BorderLayout.PAGE_START);
 
-    TableUtils.setupTable(topTermsTable, ListSelectionModel.SINGLE_SELECTION, new TopTermsTableModel(),
+    TableUtils.setupTable(
+        topTermsTable,
+        ListSelectionModel.SINGLE_SELECTION,
+        new TopTermsTableModel(),
         new MouseAdapter() {
           @Override
           public void mouseClicked(MouseEvent e) {
             listeners.showTopTermsContextMenu(e);
           }
-        }, TopTermsTableModel.Column.RANK.getColumnWidth(), TopTermsTableModel.Column.FREQ.getColumnWidth());
+        },
+        TopTermsTableModel.Column.RANK.getColumnWidth(),
+        TopTermsTableModel.Column.FREQ.getColumnWidth());
 
     JScrollPane scrollPane = new JScrollPane(topTermsTable);
     termsPanel.add(scrollPane, BorderLayout.CENTER);
@@ -364,11 +390,13 @@ public final class OverviewPanelProvider {
   }
 
   private void setUpTopTermsContextMenu() {
-    JMenuItem item1 = new JMenuItem(MessageUtils.getLocalizedMessage("overview.toptermtable.menu.item1"));
+    JMenuItem item1 =
+        new JMenuItem(MessageUtils.getLocalizedMessage("overview.toptermtable.menu.item1"));
     item1.addActionListener(listeners::browseByTerm);
     topTermsContextMenu.add(item1);
 
-    JMenuItem item2 = new JMenuItem(MessageUtils.getLocalizedMessage("overview.toptermtable.menu.item2"));
+    JMenuItem item2 =
+        new JMenuItem(MessageUtils.getLocalizedMessage("overview.toptermtable.menu.item2"));
     item2.addActionListener(listeners::searchByTerm);
     topTermsContextMenu.add(item2);
   }
@@ -388,27 +416,39 @@ public final class OverviewPanelProvider {
     // update top terms table
     topTermsTable.setModel(new TopTermsTableModel(termStats, numTerms));
-    topTermsTable.getColumnModel().getColumn(TopTermsTableModel.Column.RANK.getIndex()).setMaxWidth(TopTermsTableModel.Column.RANK.getColumnWidth());
-    topTermsTable.getColumnModel().getColumn(TopTermsTableModel.Column.FREQ.getIndex()).setMaxWidth(TopTermsTableModel.Column.FREQ.getColumnWidth());
+    topTermsTable
+        .getColumnModel()
+        .getColumn(TopTermsTableModel.Column.RANK.getIndex())
+        .setMaxWidth(TopTermsTableModel.Column.RANK.getColumnWidth());
+    topTermsTable
+        .getColumnModel()
+        .getColumn(TopTermsTableModel.Column.FREQ.getIndex())
+        .setMaxWidth(TopTermsTableModel.Column.FREQ.getColumnWidth());
 
     messageBroker.clearStatusMessage();
   }
 
   private void browseByTerm() {
     String field = getSelectedField();
     String term = getSelectedTerm();
-    operatorRegistry.get(DocumentsTabOperator.class).ifPresent(operator -> {
-      operator.browseTerm(field, term);
-      tabSwitcher.switchTab(TabbedPaneProvider.Tab.DOCUMENTS);
-    });
+    operatorRegistry
+        .get(DocumentsTabOperator.class)
+        .ifPresent(
+            operator -> {
+              operator.browseTerm(field, term);
+              tabSwitcher.switchTab(TabbedPaneProvider.Tab.DOCUMENTS);
+            });
   }
 
   private void searchByTerm() {
     String field = getSelectedField();
     String term = getSelectedTerm();
-    operatorRegistry.get(SearchTabOperator.class).ifPresent(operator -> {
-      operator.searchByTerm(field, term);
-      tabSwitcher.switchTab(TabbedPaneProvider.Tab.SEARCH);
-    });
+    operatorRegistry
+        .get(SearchTabOperator.class)
+        .ifPresent(
+            operator -> {
+              operator.searchByTerm(field, term);
+              tabSwitcher.switchTab(TabbedPaneProvider.Tab.SEARCH);
+            });
   }
 
   private String getSelectedField() {
@@ -419,7 +459,8 @@ public final class OverviewPanelProvider {
     if (row < 0 || row >= termCountsTable.getRowCount()) {
       throw new IllegalStateException("Field is not selected.");
     }
-    return (String) termCountsTable.getModel().getValueAt(row, TermCountsTableModel.Column.NAME.getIndex());
+    return (String)
+        termCountsTable.getModel().getValueAt(row, TermCountsTableModel.Column.NAME.getIndex());
   }
 
   private String getSelectedTerm() {
@@ -427,7 +468,8 @@ public final class OverviewPanelProvider {
     if (rowTerm < 0 || rowTerm >= topTermsTable.getRowCount()) {
       throw new IllegalStateException("Term is not selected.");
     }
-    return (String) topTermsTable.getModel().getValueAt(rowTerm, TopTermsTableModel.Column.TEXT.getIndex());
+    return (String)
+        topTermsTable.getModel().getValueAt(rowTerm, TopTermsTableModel.Column.TEXT.getIndex());
   }
 
   private class ListenerFunctions {
@@ -457,7 +499,6 @@ public final class OverviewPanelProvider {
     void searchByTerm(ActionEvent e) {
       OverviewPanelProvider.this.searchByTerm();
     }
-
   }
 
   private class Observer implements IndexObserver {
@@ -471,7 +512,10 @@ public final class OverviewPanelProvider {
       numFieldsLbl.setText(Integer.toString(overviewModel.getNumFields()));
       numDocsLbl.setText(Integer.toString(overviewModel.getNumDocuments()));
       numTermsLbl.setText(Long.toString(overviewModel.getNumTerms()));
-      String del = overviewModel.hasDeletions() ? String.format(Locale.ENGLISH, "Yes (%d)", overviewModel.getNumDeletedDocs()) : "No";
+      String del =
+          overviewModel.hasDeletions()
+              ? String.format(Locale.ENGLISH, "Yes (%d)", overviewModel.getNumDeletedDocs())
+              : "No";
       String opt = overviewModel.isOptimized().map(b -> b ? "Yes" : "No").orElse("?");
       delOptLbl.setText(del + " / " + opt);
       indexVerLbl.setText(overviewModel.getIndexVersion().map(v -> Long.toString(v)).orElse("?"));
@@ -485,16 +529,31 @@ public final class OverviewPanelProvider {
       long numTerms = overviewModel.getNumTerms();
       termCountsTable.setModel(new TermCountsTableModel(numTerms, termCounts));
       termCountsTable.setRowSorter(new TableRowSorter<>(termCountsTable.getModel()));
-      termCountsTable.getColumnModel().getColumn(TermCountsTableModel.Column.NAME.getIndex()).setMaxWidth(TermCountsTableModel.Column.NAME.getColumnWidth());
-      termCountsTable.getColumnModel().getColumn(TermCountsTableModel.Column.TERM_COUNT.getIndex()).setMaxWidth(TermCountsTableModel.Column.TERM_COUNT.getColumnWidth());
+      termCountsTable
+          .getColumnModel()
+          .getColumn(TermCountsTableModel.Column.NAME.getIndex())
+          .setMaxWidth(TermCountsTableModel.Column.NAME.getColumnWidth());
+      termCountsTable
+          .getColumnModel()
+          .getColumn(TermCountsTableModel.Column.TERM_COUNT.getIndex())
+          .setMaxWidth(TermCountsTableModel.Column.TERM_COUNT.getColumnWidth());
 
       DefaultTableCellRenderer rightRenderer = new DefaultTableCellRenderer();
       rightRenderer.setHorizontalAlignment(JLabel.RIGHT);
-      termCountsTable.getColumnModel().getColumn(TermCountsTableModel.Column.RATIO.getIndex()).setCellRenderer(rightRenderer);
+      termCountsTable
+          .getColumnModel()
+          .getColumn(TermCountsTableModel.Column.RATIO.getIndex())
+          .setCellRenderer(rightRenderer);
 
       // top terms table
       topTermsTable.setSelectionMode(ListSelectionModel.SINGLE_SELECTION);
-      topTermsTable.getColumnModel().getColumn(TopTermsTableModel.Column.RANK.getIndex()).setMaxWidth(TopTermsTableModel.Column.RANK.getColumnWidth());
-      topTermsTable.getColumnModel().getColumn(TopTermsTableModel.Column.FREQ.getIndex()).setMaxWidth(TopTermsTableModel.Column.FREQ.getColumnWidth());
+      topTermsTable
+          .getColumnModel()
+          .getColumn(TopTermsTableModel.Column.RANK.getIndex())
+          .setMaxWidth(TopTermsTableModel.Column.RANK.getColumnWidth());
+      topTermsTable
+          .getColumnModel()
+          .getColumn(TopTermsTableModel.Column.FREQ.getIndex())
+          .setMaxWidth(TopTermsTableModel.Column.FREQ.getColumnWidth());
       topTermsTable.getColumnModel().setColumnMargin(StyleConstants.TABLE_COLUMN_MARGIN_DEFAULT);
     }
@@ -518,13 +577,11 @@ public final class OverviewPanelProvider {
       termCountsTable.setModel(new TermCountsTableModel());
       topTermsTable.setModel(new TopTermsTableModel());
     }
-
   }
 
   static final class TermCountsTableModel extends TableModelBase {
 
     enum Column implements TableColumnInfo {
-
       NAME("Name", 0, String.class, 150),
       TERM_COUNT("Term count", 1, Long.class, 100),
       RATIO("%", 2, String.class, Integer.MAX_VALUE);
@@ -572,7 +629,10 @@ public final class OverviewPanelProvider {
       for (Map.Entry<String, Long> e : termCounts.entrySet()) {
         String term = e.getKey();
         Long count = e.getValue();
-        data[i++] = new Object[]{term, count, String.format(Locale.ENGLISH, "%.2f %%", count / numTerms * 100)};
+        data[i++] =
+            new Object[] {
+              term, count, String.format(Locale.ENGLISH, "%.2f %%", count / numTerms * 100)
+            };
       }
     }
@@ -632,7 +692,7 @@ public final class OverviewPanelProvider {
       int rank = i + 1;
       int freq = termStats.get(i).getDocFreq();
       String termText = termStats.get(i).getDecodedTermText();
-      data[i] = new Object[]{rank, freq, termText};
+      data[i] = new Object[] {rank, freq, termText};
     }
   }
JScrollPane.HORIZONTAL_SCROLLBAR_NEVER), c); + panel.add( + new JScrollPane( + queryStringTA, + JScrollPane.VERTICAL_SCROLLBAR_AS_NEEDED, + JScrollPane.HORIZONTAL_SCROLLBAR_NEVER), + c); JLabel labelPQ = new JLabel(MessageUtils.getLocalizedMessage("search.label.parsed")); c.gridx = 0; @@ -283,7 +289,9 @@ public final class SearchPanelProvider implements SearchTabOperator { c.insets = new Insets(2, 0, 2, 2); panel.add(new JScrollPane(parsedQueryTA), c); - parseBtn.setText(FontUtils.elegantIconHtml("", MessageUtils.getLocalizedMessage("search.button.parse"))); + parseBtn.setText( + FontUtils.elegantIconHtml( + "", MessageUtils.getLocalizedMessage("search.button.parse"))); parseBtn.setFont(StyleConstants.FONT_BUTTON_LARGE); parseBtn.setMargin(new Insets(3, 0, 3, 0)); parseBtn.addActionListener(listeners::execParse); @@ -303,7 +311,9 @@ public final class SearchPanelProvider implements SearchTabOperator { c.insets = new Insets(5, 0, 0, 2); panel.add(rewriteCB, c); - searchBtn.setText(FontUtils.elegantIconHtml("U", MessageUtils.getLocalizedMessage("search.button.search"))); + searchBtn.setText( + FontUtils.elegantIconHtml( + "U", MessageUtils.getLocalizedMessage("search.button.search"))); searchBtn.setFont(StyleConstants.FONT_BUTTON_LARGE); searchBtn.setMargin(new Insets(3, 0, 3, 0)); searchBtn.addActionListener(listeners::execSearch); @@ -323,7 +333,9 @@ public final class SearchPanelProvider implements SearchTabOperator { c.insets = new Insets(5, 0, 0, 2); panel.add(exactHitsCntCB, c); - mltBtn.setText(FontUtils.elegantIconHtml("", MessageUtils.getLocalizedMessage("search.button.mlt"))); + mltBtn.setText( + FontUtils.elegantIconHtml( + "", MessageUtils.getLocalizedMessage("search.button.mlt"))); mltBtn.setFont(StyleConstants.FONT_BUTTON_LARGE); mltBtn.setMargin(new Insets(3, 0, 3, 0)); mltBtn.addActionListener(listeners::execMLTSearch); @@ -366,7 +378,10 @@ public final class SearchPanelProvider implements SearchTabOperator { JPanel panel = new JPanel(new GridLayout(1, 2)); panel.setOpaque(false); - JLabel label = new JLabel(FontUtils.elegantIconHtml("", MessageUtils.getLocalizedMessage("search.label.results"))); + JLabel label = + new JLabel( + FontUtils.elegantIconHtml( + "", MessageUtils.getLocalizedMessage("search.label.results"))); label.setHorizontalTextPosition(JLabel.LEFT); label.setBorder(BorderFactory.createEmptyBorder(2, 0, 2, 0)); panel.add(label); @@ -407,7 +422,9 @@ public final class SearchPanelProvider implements SearchTabOperator { sep.setPreferredSize(new Dimension(5, 1)); resultsInfo.add(sep); - delBtn.setText(FontUtils.elegantIconHtml("", MessageUtils.getLocalizedMessage("search.button.del_all"))); + delBtn.setText( + FontUtils.elegantIconHtml( + "", MessageUtils.getLocalizedMessage("search.button.del_all"))); delBtn.setMargin(new Insets(5, 0, 5, 0)); delBtn.setEnabled(false); delBtn.addActionListener(listeners::confirmDeletion); @@ -427,7 +444,10 @@ public final class SearchPanelProvider implements SearchTabOperator { note.add(new JLabel(MessageUtils.getLocalizedMessage("search.label.results.note"))); panel.add(note, BorderLayout.PAGE_START); - TableUtils.setupTable(resultsTable, ListSelectionModel.SINGLE_SELECTION, new SearchResultsTableModel(), + TableUtils.setupTable( + resultsTable, + ListSelectionModel.SINGLE_SELECTION, + new SearchResultsTableModel(), new MouseAdapter() { @Override public void mousePressed(MouseEvent e) { @@ -456,10 +476,10 @@ public final class SearchPanelProvider implements SearchTabOperator { tabbedPane.setEnabledAt(Tab.QPARSER.index(), 
false); tabbedPane.setEnabledAt(Tab.ANALYZER.index(), false); tabbedPane.setEnabledAt(Tab.SIMILARITY.index(), false); - if (tabbedPane.getSelectedIndex() == Tab.QPARSER.index() || - tabbedPane.getSelectedIndex() == Tab.ANALYZER.index() || - tabbedPane.getSelectedIndex() == Tab.SIMILARITY.index() || - tabbedPane.getSelectedIndex() == Tab.MLT.index()) { + if (tabbedPane.getSelectedIndex() == Tab.QPARSER.index() + || tabbedPane.getSelectedIndex() == Tab.ANALYZER.index() + || tabbedPane.getSelectedIndex() == Tab.SIMILARITY.index() + || tabbedPane.getSelectedIndex() == Tab.MLT.index()) { tabbedPane.setSelectedIndex(Tab.SORT.index()); } parseBtn.setEnabled(false); @@ -489,24 +509,34 @@ public final class SearchPanelProvider implements SearchTabOperator { } String[] tmp = queryStringTA.getText().split(":"); if (tmp.length < 2) { - throw new LukeException(String.format(Locale.ENGLISH, "Invalid query [ %s ]", queryStringTA.getText())); + throw new LukeException( + String.format(Locale.ENGLISH, "Invalid query [ %s ]", queryStringTA.getText())); } query = new TermQuery(new Term(tmp[0].trim(), tmp[1].trim())); } else { query = parse(false); } - SimilarityConfig simConfig = operatorRegistry.get(SimilarityTabOperator.class) - .map(SimilarityTabOperator::getConfig) - .orElse(new SimilarityConfig.Builder().build()); - Sort sort = operatorRegistry.get(SortTabOperator.class) - .map(SortTabOperator::getSort) - .orElse(null); - Set fieldsToLoad = operatorRegistry.get(FieldValuesTabOperator.class) - .map(FieldValuesTabOperator::getFieldsToLoad) - .orElse(Collections.emptySet()); - SearchResults results = searchModel.search(query, simConfig, sort, fieldsToLoad, DEFAULT_PAGE_SIZE, exactHitsCntCB.isSelected()); + SimilarityConfig simConfig = + operatorRegistry + .get(SimilarityTabOperator.class) + .map(SimilarityTabOperator::getConfig) + .orElse(new SimilarityConfig.Builder().build()); + Sort sort = + operatorRegistry.get(SortTabOperator.class).map(SortTabOperator::getSort).orElse(null); + Set fieldsToLoad = + operatorRegistry + .get(FieldValuesTabOperator.class) + .map(FieldValuesTabOperator::getFieldsToLoad) + .orElse(Collections.emptySet()); + SearchResults results = + searchModel.search( + query, simConfig, sort, fieldsToLoad, DEFAULT_PAGE_SIZE, exactHitsCntCB.isSelected()); - TableUtils.setupTable(resultsTable, ListSelectionModel.SINGLE_SELECTION, new SearchResultsTableModel(), null, + TableUtils.setupTable( + resultsTable, + ListSelectionModel.SINGLE_SELECTION, + new SearchResultsTableModel(), + null, SearchResultsTableModel.Column.DOCID.getColumnWidth(), SearchResultsTableModel.Column.SCORE.getColumnWidth()); populateResults(results); @@ -529,19 +559,31 @@ public final class SearchPanelProvider implements SearchTabOperator { throw new LukeException("Doc num is not set."); } int docNum = (int) mltDocFTF.getValue(); - MLTConfig mltConfig = operatorRegistry.get(MLTTabOperator.class) - .map(MLTTabOperator::getConfig) - .orElse(new MLTConfig.Builder().build()); - Analyzer analyzer = operatorRegistry.get(AnalysisTabOperator.class) - .map(AnalysisTabOperator::getCurrentAnalyzer) - .orElse(new StandardAnalyzer()); + MLTConfig mltConfig = + operatorRegistry + .get(MLTTabOperator.class) + .map(MLTTabOperator::getConfig) + .orElse(new MLTConfig.Builder().build()); + Analyzer analyzer = + operatorRegistry + .get(AnalysisTabOperator.class) + .map(AnalysisTabOperator::getCurrentAnalyzer) + .orElse(new StandardAnalyzer()); Query query = searchModel.mltQuery(docNum, mltConfig, analyzer); - Set fieldsToLoad = 
operatorRegistry.get(FieldValuesTabOperator.class) - .map(FieldValuesTabOperator::getFieldsToLoad) - .orElse(Collections.emptySet()); - SearchResults results = searchModel.search(query, new SimilarityConfig.Builder().build(), fieldsToLoad, DEFAULT_PAGE_SIZE, false); + Set fieldsToLoad = + operatorRegistry + .get(FieldValuesTabOperator.class) + .map(FieldValuesTabOperator::getFieldsToLoad) + .orElse(Collections.emptySet()); + SearchResults results = + searchModel.search( + query, new SimilarityConfig.Builder().build(), fieldsToLoad, DEFAULT_PAGE_SIZE, false); - TableUtils.setupTable(resultsTable, ListSelectionModel.SINGLE_SELECTION, new SearchResultsTableModel(), null, + TableUtils.setupTable( + resultsTable, + ListSelectionModel.SINGLE_SELECTION, + new SearchResultsTableModel(), + null, SearchResultsTableModel.Column.DOCID.getColumnWidth(), SearchResultsTableModel.Column.SCORE.getColumnWidth()); populateResults(results); @@ -550,16 +592,23 @@ public final class SearchPanelProvider implements SearchTabOperator { } private Query parse(boolean rewrite) { - String expr = StringUtils.isNullOrEmpty(queryStringTA.getText()) ? "*:*" : queryStringTA.getText(); - String df = operatorRegistry.get(QueryParserTabOperator.class) - .map(QueryParserTabOperator::getDefaultField) - .orElse(""); - QueryParserConfig config = operatorRegistry.get(QueryParserTabOperator.class) - .map(QueryParserTabOperator::getConfig) - .orElse(new QueryParserConfig.Builder().build()); - Analyzer analyzer = operatorRegistry.get(AnalysisTabOperator.class) - .map(AnalysisTabOperator::getCurrentAnalyzer) - .orElse(new StandardAnalyzer()); + String expr = + StringUtils.isNullOrEmpty(queryStringTA.getText()) ? "*:*" : queryStringTA.getText(); + String df = + operatorRegistry + .get(QueryParserTabOperator.class) + .map(QueryParserTabOperator::getDefaultField) + .orElse(""); + QueryParserConfig config = + operatorRegistry + .get(QueryParserTabOperator.class) + .map(QueryParserTabOperator::getConfig) + .orElse(new QueryParserConfig.Builder().build()); + Analyzer analyzer = + operatorRegistry + .get(AnalysisTabOperator.class) + .map(AnalysisTabOperator::getCurrentAnalyzer) + .orElse(new StandardAnalyzer()); return searchModel.parseQuery(expr, df, analyzer, config, rewrite); } @@ -570,16 +619,27 @@ public final class SearchPanelProvider implements SearchTabOperator { endLbl.setText(String.valueOf(res.getOffset() + res.size())); prevBtn.setEnabled(res.getOffset() > 0); - nextBtn.setEnabled(res.getTotalHits().relation == TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO || res.getTotalHits().value > res.getOffset() + res.size()); + nextBtn.setEnabled( + res.getTotalHits().relation == TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO + || res.getTotalHits().value > res.getOffset() + res.size()); if (!indexHandler.getState().readOnly() && indexHandler.getState().hasDirectoryReader()) { delBtn.setEnabled(true); } resultsTable.setModel(new SearchResultsTableModel(res)); - resultsTable.getColumnModel().getColumn(SearchResultsTableModel.Column.DOCID.getIndex()).setPreferredWidth(SearchResultsTableModel.Column.DOCID.getColumnWidth()); - resultsTable.getColumnModel().getColumn(SearchResultsTableModel.Column.SCORE.getIndex()).setPreferredWidth(SearchResultsTableModel.Column.SCORE.getColumnWidth()); - resultsTable.getColumnModel().getColumn(SearchResultsTableModel.Column.VALUE.getIndex()).setPreferredWidth(SearchResultsTableModel.Column.VALUE.getColumnWidth()); + resultsTable + .getColumnModel() + .getColumn(SearchResultsTableModel.Column.DOCID.getIndex()) + 
.setPreferredWidth(SearchResultsTableModel.Column.DOCID.getColumnWidth()); + resultsTable + .getColumnModel() + .getColumn(SearchResultsTableModel.Column.SCORE.getIndex()) + .setPreferredWidth(SearchResultsTableModel.Column.SCORE.getColumnWidth()); + resultsTable + .getColumnModel() + .getColumn(SearchResultsTableModel.Column.VALUE.getIndex()) + .setPreferredWidth(SearchResultsTableModel.Column.VALUE.getColumnWidth()); } else { startLbl.setText("0"); endLbl.setText("0"); @@ -590,10 +650,15 @@ public final class SearchPanelProvider implements SearchTabOperator { } private void confirmDeletion() { - new DialogOpener<>(confirmDialogFactory).open("Confirm Deletion", 400, 200, (factory) -> { - factory.setMessage(MessageUtils.getLocalizedMessage("search.message.delete_confirm")); - factory.setCallback(this::deleteDocs); - }); + new DialogOpener<>(confirmDialogFactory) + .open( + "Confirm Deletion", + 400, + 200, + (factory) -> { + factory.setMessage(MessageUtils.getLocalizedMessage("search.message.delete_confirm")); + factory.setCallback(this::deleteDocs); + }); } private void deleteDocs() { @@ -601,7 +666,8 @@ public final class SearchPanelProvider implements SearchTabOperator { if (query != null) { toolsModel.deleteDocuments(query); indexHandler.reOpen(); - messageBroker.showStatusMessage(MessageUtils.getLocalizedMessage("search.message.delete_success", query.toString())); + messageBroker.showStatusMessage( + MessageUtils.getLocalizedMessage("search.message.delete_success", query.toString())); } delBtn.setEnabled(false); } @@ -610,25 +676,47 @@ public final class SearchPanelProvider implements SearchTabOperator { JPopupMenu popup = new JPopupMenu(); // show explanation - JMenuItem item1 = new JMenuItem(MessageUtils.getLocalizedMessage("search.results.menu.explain")); - item1.addActionListener(e -> { - int docid = (int) resultsTable.getModel().getValueAt(resultsTable.getSelectedRow(), SearchResultsTableModel.Column.DOCID.getIndex()); - Explanation explanation = searchModel.explain(parse(false), docid); - new DialogOpener<>(explainDialogProvider).open("Explanation", 600, 400, - (factory) -> { - factory.setDocid(docid); - factory.setExplanation(explanation); - }); - }); + JMenuItem item1 = + new JMenuItem(MessageUtils.getLocalizedMessage("search.results.menu.explain")); + item1.addActionListener( + e -> { + int docid = + (int) + resultsTable + .getModel() + .getValueAt( + resultsTable.getSelectedRow(), + SearchResultsTableModel.Column.DOCID.getIndex()); + Explanation explanation = searchModel.explain(parse(false), docid); + new DialogOpener<>(explainDialogProvider) + .open( + "Explanation", + 600, + 400, + (factory) -> { + factory.setDocid(docid); + factory.setExplanation(explanation); + }); + }); popup.add(item1); // show all fields - JMenuItem item2 = new JMenuItem(MessageUtils.getLocalizedMessage("search.results.menu.showdoc")); - item2.addActionListener(e -> { - int docid = (int) resultsTable.getModel().getValueAt(resultsTable.getSelectedRow(), SearchResultsTableModel.Column.DOCID.getIndex()); - operatorRegistry.get(DocumentsTabOperator.class).ifPresent(operator -> operator.displayDoc(docid)); - tabSwitcher.switchTab(TabbedPaneProvider.Tab.DOCUMENTS); - }); + JMenuItem item2 = + new JMenuItem(MessageUtils.getLocalizedMessage("search.results.menu.showdoc")); + item2.addActionListener( + e -> { + int docid = + (int) + resultsTable + .getModel() + .getValueAt( + resultsTable.getSelectedRow(), + SearchResultsTableModel.Column.DOCID.getIndex()); + operatorRegistry + 
.get(DocumentsTabOperator.class) + .ifPresent(operator -> operator.displayDoc(docid)); + tabSwitcher.switchTab(TabbedPaneProvider.Tab.DOCUMENTS); + }); popup.add(item2); return popup; @@ -691,11 +779,12 @@ public final class SearchPanelProvider implements SearchTabOperator { void showContextMenuInResultsTable(MouseEvent e) { if (e.getClickCount() == 2 && !e.isConsumed()) { - SearchPanelProvider.this.setupResultsContextMenuPopup().show(e.getComponent(), e.getX(), e.getY()); + SearchPanelProvider.this + .setupResultsContextMenuPopup() + .show(e.getComponent(), e.getX(), e.getY()); setupResultsContextMenuPopup().show(e.getComponent(), e.getX(), e.getY()); } } - } private class Observer implements IndexObserver { @@ -703,21 +792,35 @@ public final class SearchPanelProvider implements SearchTabOperator { @Override public void openIndex(LukeState state) { searchModel = searchFactory.newInstance(state.getIndexReader()); - toolsModel = toolsFactory.newInstance(state.getIndexReader(), state.useCompound(), state.keepAllCommits()); - operatorRegistry.get(QueryParserTabOperator.class).ifPresent(operator -> { - operator.setSearchableFields(searchModel.getSearchableFieldNames()); - operator.setRangeSearchableFields(searchModel.getRangeSearchableFieldNames()); - }); - operatorRegistry.get(SortTabOperator.class).ifPresent(operator -> { - operator.setSearchModel(searchModel); - operator.setSortableFields(searchModel.getSortableFieldNames()); - }); - operatorRegistry.get(FieldValuesTabOperator.class).ifPresent(operator -> { - operator.setFields(searchModel.getFieldNames()); - }); - operatorRegistry.get(MLTTabOperator.class).ifPresent(operator -> { - operator.setFields(searchModel.getFieldNames()); - }); + toolsModel = + toolsFactory.newInstance( + state.getIndexReader(), state.useCompound(), state.keepAllCommits()); + operatorRegistry + .get(QueryParserTabOperator.class) + .ifPresent( + operator -> { + operator.setSearchableFields(searchModel.getSearchableFieldNames()); + operator.setRangeSearchableFields(searchModel.getRangeSearchableFieldNames()); + }); + operatorRegistry + .get(SortTabOperator.class) + .ifPresent( + operator -> { + operator.setSearchModel(searchModel); + operator.setSortableFields(searchModel.getSortableFieldNames()); + }); + operatorRegistry + .get(FieldValuesTabOperator.class) + .ifPresent( + operator -> { + operator.setFields(searchModel.getFieldNames()); + }); + operatorRegistry + .get(MLTTabOperator.class) + .ifPresent( + operator -> { + operator.setFields(searchModel.getFieldNames()); + }); queryStringTA.setText("*:*"); parsedQueryTA.setText(""); @@ -742,16 +845,24 @@ public final class SearchPanelProvider implements SearchTabOperator { nextBtn.setEnabled(false); prevBtn.setEnabled(false); delBtn.setEnabled(false); - TableUtils.setupTable(resultsTable, ListSelectionModel.SINGLE_SELECTION, new SearchResultsTableModel(), null, + TableUtils.setupTable( + resultsTable, + ListSelectionModel.SINGLE_SELECTION, + new SearchResultsTableModel(), + null, SearchResultsTableModel.Column.DOCID.getColumnWidth(), SearchResultsTableModel.Column.SCORE.getColumnWidth()); } - } /** tabs in the Search panel */ public enum Tab { - QPARSER(0), ANALYZER(1), SIMILARITY(2), SORT(3), VALUES(4), MLT(5); + QPARSER(0), + ANALYZER(1), + SIMILARITY(2), + SORT(3), + VALUES(4), + MLT(5); private int tabIdx; @@ -764,7 +875,8 @@ public final class SearchPanelProvider implements SearchTabOperator { } } - static final class SearchResultsTableModel extends TableModelBase { + static final class SearchResultsTableModel + 
extends TableModelBase { enum Column implements TableColumnInfo { DOCID("Doc ID", 0, Integer.class, 50), @@ -818,10 +930,14 @@ public final class SearchPanelProvider implements SearchTabOperator { } else { data[i][Column.SCORE.getIndex()] = 1.0f; } - List concatValues = doc.getFieldValues().entrySet().stream().map(e -> { - String v = String.join(",", Arrays.asList(e.getValue())); - return e.getKey() + "=" + v + ";"; - }).collect(Collectors.toList()); + List concatValues = + doc.getFieldValues().entrySet().stream() + .map( + e -> { + String v = String.join(",", Arrays.asList(e.getValue())); + return e.getKey() + "=" + v + ";"; + }) + .collect(Collectors.toList()); data[i][Column.VALUE.getIndex()] = String.join(" ", concatValues); } } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/TabSwitcherProxy.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/TabSwitcherProxy.java index 42f2194c5ee..99f308b949a 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/TabSwitcherProxy.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/TabSwitcherProxy.java @@ -45,5 +45,4 @@ public class TabSwitcherProxy { public interface TabSwitcher { void switchTab(TabbedPaneProvider.Tab tab); } - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/TabbedPaneProvider.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/TabbedPaneProvider.java index c5fd73a0f68..1bf5d11354f 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/TabbedPaneProvider.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/TabbedPaneProvider.java @@ -17,11 +17,10 @@ package org.apache.lucene.luke.app.desktop.components; +import java.io.IOException; import javax.swing.JPanel; import javax.swing.JTabbedPane; import javax.swing.JTextArea; -import java.io.IOException; - import org.apache.lucene.luke.app.DirectoryHandler; import org.apache.lucene.luke.app.DirectoryObserver; import org.apache.lucene.luke.app.IndexHandler; @@ -121,7 +120,11 @@ public final class TabbedPaneProvider implements TabSwitcherProxy.TabSwitcher { /** tabs in the main frame */ public enum Tab { - OVERVIEW(0), DOCUMENTS(1), SEARCH(2), ANALYZER(3), COMMITS(4); + OVERVIEW(0), + DOCUMENTS(1), + SEARCH(2), + ANALYZER(3), + COMMITS(4); private int tabIdx; @@ -133,5 +136,4 @@ public final class TabbedPaneProvider implements TabSwitcherProxy.TabSwitcher { return tabIdx; } } - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/TableColumnInfo.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/TableColumnInfo.java index 63cdbb10700..0f9224e3fa4 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/TableColumnInfo.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/TableColumnInfo.java @@ -29,5 +29,4 @@ public interface TableColumnInfo { default int getColumnWidth() { return 0; } - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/TableModelBase.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/TableModelBase.java index f8ef21a41ef..419503bb0dc 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/TableModelBase.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/TableModelBase.java @@ -17,12 +17,14 @@ package org.apache.lucene.luke.app.desktop.components; -import 
javax.swing.table.AbstractTableModel; import java.util.Map; - +import javax.swing.table.AbstractTableModel; import org.apache.lucene.luke.app.desktop.util.TableUtils; -/** Base table model that stores table's meta data and content. This also provides some default implementation of the {@link javax.swing.table.TableModel} interface. */ +/** + * Base table model that stores table's meta data and content. This also provides some default + * implementation of the {@link javax.swing.table.TableModel} interface. + */ public abstract class TableModelBase extends AbstractTableModel { private final Map columnMap = TableUtils.columnMap(columnInfos()); @@ -67,7 +69,6 @@ public abstract class TableModelBase extends Abstract return Object.class; } - @Override public Object getValueAt(int rowIndex, int columnIndex) { return data[rowIndex][columnIndex]; diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/ConfirmDialogFactory.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/ConfirmDialogFactory.java index d5465984edf..c7534bfbb67 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/ConfirmDialogFactory.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/ConfirmDialogFactory.java @@ -17,11 +17,6 @@ package org.apache.lucene.luke.app.desktop.components.dialog; -import javax.swing.BorderFactory; -import javax.swing.JButton; -import javax.swing.JDialog; -import javax.swing.JLabel; -import javax.swing.JPanel; import java.awt.BorderLayout; import java.awt.Color; import java.awt.Dialog; @@ -31,7 +26,11 @@ import java.awt.Font; import java.awt.GridLayout; import java.awt.Window; import java.io.IOException; - +import javax.swing.BorderFactory; +import javax.swing.JButton; +import javax.swing.JDialog; +import javax.swing.JLabel; +import javax.swing.JPanel; import org.apache.lucene.luke.app.desktop.Preferences; import org.apache.lucene.luke.app.desktop.PreferencesFactory; import org.apache.lucene.luke.app.desktop.util.DialogOpener; @@ -52,7 +51,7 @@ public final class ConfirmDialogFactory implements DialogOpener.DialogFactory { private Callable callback; - public synchronized static ConfirmDialogFactory getInstance() throws IOException { + public static synchronized ConfirmDialogFactory getInstance() throws IOException { if (instance == null) { instance = new ConfirmDialogFactory(); } @@ -103,10 +102,11 @@ public final class ConfirmDialogFactory implements DialogOpener.DialogFactory { JPanel footer = new JPanel(new FlowLayout(FlowLayout.TRAILING)); footer.setOpaque(false); JButton okBtn = new JButton(MessageUtils.getLocalizedMessage("button.ok")); - okBtn.addActionListener(e -> { - callback.call(); - dialog.dispose(); - }); + okBtn.addActionListener( + e -> { + callback.call(); + dialog.dispose(); + }); footer.add(okBtn); JButton closeBtn = new JButton(MessageUtils.getLocalizedMessage("button.close")); closeBtn.addActionListener(e -> dialog.dispose()); @@ -115,5 +115,4 @@ public final class ConfirmDialogFactory implements DialogOpener.DialogFactory { return panel; } - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/HelpDialogFactory.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/HelpDialogFactory.java index b9bcf9d2f78..88cb386f574 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/HelpDialogFactory.java +++ 
b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/HelpDialogFactory.java @@ -17,12 +17,6 @@ package org.apache.lucene.luke.app.desktop.components.dialog; -import javax.swing.BorderFactory; -import javax.swing.JButton; -import javax.swing.JComponent; -import javax.swing.JDialog; -import javax.swing.JLabel; -import javax.swing.JPanel; import java.awt.BorderLayout; import java.awt.Dialog; import java.awt.Dimension; @@ -30,7 +24,12 @@ import java.awt.FlowLayout; import java.awt.GridLayout; import java.awt.Window; import java.io.IOException; - +import javax.swing.BorderFactory; +import javax.swing.JButton; +import javax.swing.JComponent; +import javax.swing.JDialog; +import javax.swing.JLabel; +import javax.swing.JPanel; import org.apache.lucene.luke.app.desktop.Preferences; import org.apache.lucene.luke.app.desktop.PreferencesFactory; import org.apache.lucene.luke.app.desktop.util.DialogOpener; @@ -49,7 +48,7 @@ public final class HelpDialogFactory implements DialogOpener.DialogFactory { private JComponent helpContent; - public synchronized static HelpDialogFactory getInstance() throws IOException { + public static synchronized HelpDialogFactory getInstance() throws IOException { if (instance == null) { instance = new HelpDialogFactory(); } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/AnalysisChainDialogFactory.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/AnalysisChainDialogFactory.java index 6a79e322d37..1e032169fcd 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/AnalysisChainDialogFactory.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/AnalysisChainDialogFactory.java @@ -17,14 +17,6 @@ package org.apache.lucene.luke.app.desktop.components.dialog.analysis; -import javax.swing.BorderFactory; -import javax.swing.JButton; -import javax.swing.JDialog; -import javax.swing.JLabel; -import javax.swing.JList; -import javax.swing.JPanel; -import javax.swing.JScrollPane; -import javax.swing.JTextField; import java.awt.BorderLayout; import java.awt.Color; import java.awt.Dialog; @@ -35,11 +27,18 @@ import java.awt.GridBagLayout; import java.awt.Insets; import java.awt.Window; import java.io.IOException; - -import org.apache.lucene.analysis.custom.CustomAnalyzer; +import javax.swing.BorderFactory; +import javax.swing.JButton; +import javax.swing.JDialog; +import javax.swing.JLabel; +import javax.swing.JList; +import javax.swing.JPanel; +import javax.swing.JScrollPane; +import javax.swing.JTextField; import org.apache.lucene.analysis.CharFilterFactory; import org.apache.lucene.analysis.TokenFilterFactory; import org.apache.lucene.analysis.TokenizerFactory; +import org.apache.lucene.analysis.custom.CustomAnalyzer; import org.apache.lucene.luke.app.desktop.Preferences; import org.apache.lucene.luke.app.desktop.PreferencesFactory; import org.apache.lucene.luke.app.desktop.util.DialogOpener; @@ -56,7 +55,7 @@ public class AnalysisChainDialogFactory implements DialogOpener.DialogFactory { private CustomAnalyzer analyzer; - public synchronized static AnalysisChainDialogFactory getInstance() throws IOException { + public static synchronized AnalysisChainDialogFactory getInstance() throws IOException { if (instance == null) { instance = new AnalysisChainDialogFactory(); } @@ -110,11 +109,16 @@ public class AnalysisChainDialogFactory implements DialogOpener.DialogFactory { c.gridy = 0; c.weightx = 0.1; 
     c.weighty = 0.5;
-    panel.add(new JLabel(MessageUtils.getLocalizedMessage("analysis.dialog.chain.label.charfilters")), c);
+    panel.add(
+        new JLabel(MessageUtils.getLocalizedMessage("analysis.dialog.chain.label.charfilters")), c);

-    String[] charFilters = analyzer.getCharFilterFactories().stream().map(f -> CharFilterFactory.findSPIName(f.getClass())).toArray(String[]::new);
+    String[] charFilters =
+        analyzer.getCharFilterFactories().stream()
+            .map(f -> CharFilterFactory.findSPIName(f.getClass()))
+            .toArray(String[]::new);
     JList<String> charFilterList = new JList<>(charFilters);
-    charFilterList.setVisibleRowCount(charFilters.length == 0 ? 1 : Math.min(charFilters.length, 5));
+    charFilterList.setVisibleRowCount(
+        charFilters.length == 0 ? 1 : Math.min(charFilters.length, 5));
     c.gridx = 1;
     c.gridy = 0;
     c.weightx = 0.5;
@@ -125,7 +129,8 @@ public class AnalysisChainDialogFactory implements DialogOpener.DialogFactory {
     c.gridy = 1;
     c.weightx = 0.1;
     c.weighty = 0.1;
-    panel.add(new JLabel(MessageUtils.getLocalizedMessage("analysis.dialog.chain.label.tokenizer")), c);
+    panel.add(
+        new JLabel(MessageUtils.getLocalizedMessage("analysis.dialog.chain.label.tokenizer")), c);

     String tokenizer = TokenizerFactory.findSPIName(analyzer.getTokenizerFactory().getClass());
     JTextField tokenizerTF = new JTextField(tokenizer);
@@ -143,11 +148,17 @@ public class AnalysisChainDialogFactory implements DialogOpener.DialogFactory {
     c.gridy = 2;
     c.weightx = 0.1;
     c.weighty = 0.5;
-    panel.add(new JLabel(MessageUtils.getLocalizedMessage("analysis.dialog.chain.label.tokenfilters")), c);
+    panel.add(
+        new JLabel(MessageUtils.getLocalizedMessage("analysis.dialog.chain.label.tokenfilters")),
+        c);

-    String[] tokenFilters = analyzer.getTokenFilterFactories().stream().map(f -> TokenFilterFactory.findSPIName(f.getClass())).toArray(String[]::new);
+    String[] tokenFilters =
+        analyzer.getTokenFilterFactories().stream()
+            .map(f -> TokenFilterFactory.findSPIName(f.getClass()))
+            .toArray(String[]::new);
     JList<String> tokenFilterList = new JList<>(tokenFilters);
-    tokenFilterList.setVisibleRowCount(tokenFilters.length == 0 ? 1 : Math.min(tokenFilters.length, 5));
+    tokenFilterList.setVisibleRowCount(
+        tokenFilters.length == 0 ? 1 : Math.min(tokenFilters.length, 5));
     tokenFilterList.setMinimumSize(new Dimension(300, 25));
     c.gridx = 1;
     c.gridy = 2;
@@ -157,5 +168,4 @@ public class AnalysisChainDialogFactory implements DialogOpener.DialogFactory {

     return panel;
   }
-
 }
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/EditFiltersDialogFactory.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/EditFiltersDialogFactory.java
index 5a964d68397..004e0ab635f 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/EditFiltersDialogFactory.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/EditFiltersDialogFactory.java
@@ -17,15 +17,6 @@

 package org.apache.lucene.luke.app.desktop.components.dialog.analysis;

-import javax.swing.BorderFactory;
-import javax.swing.JButton;
-import javax.swing.JDialog;
-import javax.swing.JLabel;
-import javax.swing.JPanel;
-import javax.swing.JScrollPane;
-import javax.swing.JTable;
-import javax.swing.ListSelectionModel;
-import javax.swing.table.TableCellRenderer;
 import java.awt.BorderLayout;
 import java.awt.Component;
 import java.awt.Dialog;
@@ -39,7 +30,15 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
-
+import javax.swing.BorderFactory;
+import javax.swing.JButton;
+import javax.swing.JDialog;
+import javax.swing.JLabel;
+import javax.swing.JPanel;
+import javax.swing.JScrollPane;
+import javax.swing.JTable;
+import javax.swing.ListSelectionModel;
+import javax.swing.table.TableCellRenderer;
 import org.apache.lucene.luke.app.desktop.Preferences;
 import org.apache.lucene.luke.app.desktop.PreferencesFactory;
 import org.apache.lucene.luke.app.desktop.components.ComponentOperatorRegistry;
@@ -78,7 +77,7 @@ public final class EditFiltersDialogFactory implements DialogOpener.DialogFactor

   private EditFiltersMode mode;

-  public synchronized static EditFiltersDialogFactory getInstance() throws IOException {
+  public static synchronized EditFiltersDialogFactory getInstance() throws IOException {
     if (instance == null) {
       instance = new EditFiltersDialogFactory();
     }
@@ -124,37 +123,49 @@ public final class EditFiltersDialogFactory implements DialogOpener.DialogFactor
     header.add(targetLbl);
     panel.add(header, BorderLayout.PAGE_START);

-    TableUtils.setupTable(filtersTable, ListSelectionModel.SINGLE_SELECTION, new FiltersTableModel(selectedFilters), tableListener,
+    TableUtils.setupTable(
+        filtersTable,
+        ListSelectionModel.SINGLE_SELECTION,
+        new FiltersTableModel(selectedFilters),
+        tableListener,
         FiltersTableModel.Column.DELETE.getColumnWidth(),
         FiltersTableModel.Column.ORDER.getColumnWidth());
     filtersTable.setShowGrid(true);
-    filtersTable.getColumnModel().getColumn(FiltersTableModel.Column.TYPE.getIndex()).setCellRenderer(new TypeCellRenderer());
+    filtersTable
+        .getColumnModel()
+        .getColumn(FiltersTableModel.Column.TYPE.getIndex())
+        .setCellRenderer(new TypeCellRenderer());
     panel.add(new JScrollPane(filtersTable), BorderLayout.CENTER);

     JPanel footer = new JPanel(new FlowLayout(FlowLayout.TRAILING, 10, 5));
     footer.setOpaque(false);
     JButton okBtn = new JButton(MessageUtils.getLocalizedMessage("button.ok"));
-    okBtn.addActionListener(e -> {
-      List<Integer> deletedIndexes = new ArrayList<>();
-      for (int i = 0; i < filtersTable.getRowCount(); i++) {
-        boolean deleted = (boolean) filtersTable.getValueAt(i, FiltersTableModel.Column.DELETE.getIndex());
-        if (deleted) {
-          deletedIndexes.add(i);
-        }
-      }
-      operatorRegistry.get(CustomAnalyzerPanelOperator.class).ifPresent(operator -> {
-        switch (mode) {
-          case CHARFILTER:
-            operator.updateCharFilters(deletedIndexes);
-            break;
-          case TOKENFILTER:
-            operator.updateTokenFilters(deletedIndexes);
-            break;
-        }
-      });
-      callback.call();
-      dialog.dispose();
-    });
+    okBtn.addActionListener(
+        e -> {
+          List<Integer> deletedIndexes = new ArrayList<>();
+          for (int i = 0; i < filtersTable.getRowCount(); i++) {
+            boolean deleted =
+                (boolean) filtersTable.getValueAt(i, FiltersTableModel.Column.DELETE.getIndex());
+            if (deleted) {
+              deletedIndexes.add(i);
+            }
+          }
+          operatorRegistry
+              .get(CustomAnalyzerPanelOperator.class)
+              .ifPresent(
+                  operator -> {
+                    switch (mode) {
+                      case CHARFILTER:
+                        operator.updateCharFilters(deletedIndexes);
+                        break;
+                      case TOKENFILTER:
+                        operator.updateTokenFilters(deletedIndexes);
+                        break;
+                    }
+                  });
+          callback.call();
+          dialog.dispose();
+        });
     footer.add(okBtn);
     JButton cancelBtn = new JButton(MessageUtils.getLocalizedMessage("button.cancel"));
     cancelBtn.addActionListener(e -> dialog.dispose());
@@ -188,28 +199,48 @@ public final class EditFiltersDialogFactory implements DialogOpener.DialogFactor

     private void showEditParamsCharFilterDialog(int selectedIndex) {
       int targetIndex = filtersTable.getSelectedRow();
-      String selectedItem = (String) filtersTable.getValueAt(selectedIndex, FiltersTableModel.Column.TYPE.getIndex());
-      Map<String, String> params = operatorRegistry.get(CustomAnalyzerPanelOperator.class).map(operator -> operator.getCharFilterParams(targetIndex)).orElse(Collections.emptyMap());
-      new DialogOpener<>(editParamsDialogFactory).open(dialog, MessageUtils.getLocalizedMessage("analysis.dialog.title.char_filter_params"), 400, 300,
-          factory -> {
-            factory.setMode(EditParamsMode.CHARFILTER);
-            factory.setTargetIndex(targetIndex);
-            factory.setTarget(selectedItem);
-            factory.setParams(params);
-          });
+      String selectedItem =
+          (String) filtersTable.getValueAt(selectedIndex, FiltersTableModel.Column.TYPE.getIndex());
+      Map<String, String> params =
+          operatorRegistry
+              .get(CustomAnalyzerPanelOperator.class)
+              .map(operator -> operator.getCharFilterParams(targetIndex))
+              .orElse(Collections.emptyMap());
+      new DialogOpener<>(editParamsDialogFactory)
+          .open(
+              dialog,
+              MessageUtils.getLocalizedMessage("analysis.dialog.title.char_filter_params"),
+              400,
+              300,
+              factory -> {
+                factory.setMode(EditParamsMode.CHARFILTER);
+                factory.setTargetIndex(targetIndex);
+                factory.setTarget(selectedItem);
+                factory.setParams(params);
+              });
     }

     private void showEditParamsTokenFilterDialog(int selectedIndex) {
       int targetIndex = filtersTable.getSelectedRow();
-      String selectedItem = (String) filtersTable.getValueAt(selectedIndex, FiltersTableModel.Column.TYPE.getIndex());
-      Map<String, String> params = operatorRegistry.get(CustomAnalyzerPanelOperator.class).map(operator -> operator.getTokenFilterParams(targetIndex)).orElse(Collections.emptyMap());
-      new DialogOpener<>(editParamsDialogFactory).open(dialog, MessageUtils.getLocalizedMessage("analysis.dialog.title.char_filter_params"), 400, 300,
-          factory -> {
-            factory.setMode(EditParamsMode.TOKENFILTER);
-            factory.setTargetIndex(targetIndex);
-            factory.setTarget(selectedItem);
-            factory.setParams(params);
-          });
+      String selectedItem =
+          (String) filtersTable.getValueAt(selectedIndex, FiltersTableModel.Column.TYPE.getIndex());
+      Map<String, String> params =
+          operatorRegistry
+              .get(CustomAnalyzerPanelOperator.class)
+              .map(operator -> operator.getTokenFilterParams(targetIndex))
+              .orElse(Collections.emptyMap());
+      new DialogOpener<>(editParamsDialogFactory)
+          .open(
+              dialog,
+              MessageUtils.getLocalizedMessage("analysis.dialog.title.char_filter_params"),
+              400,
+              300,
+              factory -> {
+                factory.setMode(EditParamsMode.TOKENFILTER);
+                factory.setTargetIndex(targetIndex);
+                factory.setTarget(selectedItem);
+                factory.setParams(params);
+              });
     }
   }
@@ -292,12 +323,11 @@ public final class EditFiltersDialogFactory implements DialogOpener.DialogFactor
   static final class TypeCellRenderer implements TableCellRenderer {

     @Override
-    public Component getTableCellRendererComponent(JTable table, Object value, boolean isSelected, boolean hasFocus, int row, int column) {
+    public Component getTableCellRendererComponent(
+        JTable table, Object value, boolean isSelected, boolean hasFocus, int row, int column) {
       String[] tmp = ((String) value).split("\\.");
       String type = tmp[tmp.length - 1];
       return new JLabel(type);
     }
-
   }
-
 }
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/EditFiltersMode.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/EditFiltersMode.java
index d5edd8b505e..9bb691adb12 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/EditFiltersMode.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/EditFiltersMode.java
@@ -19,5 +19,6 @@ package org.apache.lucene.luke.app.desktop.components.dialog.analysis;

 /** Edit filters mode */
 public enum EditFiltersMode {
-  CHARFILTER, TOKENFILTER;
+  CHARFILTER,
+  TOKENFILTER;
 }
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/EditParamsDialogFactory.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/EditParamsDialogFactory.java
index f9a30da8cd2..f67576c0a33 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/EditParamsDialogFactory.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/EditParamsDialogFactory.java
@@ -17,14 +17,6 @@

 package org.apache.lucene.luke.app.desktop.components.dialog.analysis;

-import javax.swing.BorderFactory;
-import javax.swing.JButton;
-import javax.swing.JDialog;
-import javax.swing.JLabel;
-import javax.swing.JPanel;
-import javax.swing.JScrollPane;
-import javax.swing.JTable;
-import javax.swing.ListSelectionModel;
 import java.awt.BorderLayout;
 import java.awt.Dialog;
 import java.awt.Dimension;
@@ -36,7 +28,14 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
-
+import javax.swing.BorderFactory;
+import javax.swing.JButton;
+import javax.swing.JDialog;
+import javax.swing.JLabel;
+import javax.swing.JPanel;
+import javax.swing.JScrollPane;
+import javax.swing.JTable;
+import javax.swing.ListSelectionModel;
 import org.apache.lucene.luke.app.desktop.Preferences;
 import org.apache.lucene.luke.app.desktop.PreferencesFactory;
 import org.apache.lucene.luke.app.desktop.components.ComponentOperatorRegistry;
@@ -71,7 +70,7 @@ public final class EditParamsDialogFactory implements DialogOpener.DialogFactory

   private Callable callback;

-  public synchronized static EditParamsDialogFactory getInstance() throws IOException {
+  public static synchronized EditParamsDialogFactory getInstance() throws IOException {
     if (instance == null) {
       instance = new EditParamsDialogFactory();
     }
@@ -126,7 +125,11 @@ public final class EditParamsDialogFactory implements DialogOpener.DialogFactory
     header.add(targetLbl);
     panel.add(header, BorderLayout.PAGE_START);

-    TableUtils.setupTable(paramsTable, ListSelectionModel.SINGLE_SELECTION, new ParamsTableModel(params), null,
+    TableUtils.setupTable(
+        paramsTable,
+        ListSelectionModel.SINGLE_SELECTION,
+        new ParamsTableModel(params),
+        null,
         ParamsTableModel.Column.DELETE.getColumnWidth(),
         ParamsTableModel.Column.NAME.getColumnWidth());
     paramsTable.setShowGrid(true);
@@ -135,28 +138,37 @@ public final class EditParamsDialogFactory implements DialogOpener.DialogFactory
     JPanel footer = new JPanel(new FlowLayout(FlowLayout.TRAILING, 10, 5));
     footer.setOpaque(false);
     JButton okBtn = new JButton(MessageUtils.getLocalizedMessage("button.ok"));
-    okBtn.addActionListener(e -> {
-      Map<String, String> params = new HashMap<>();
-      for (int i = 0; i < paramsTable.getRowCount(); i++) {
-        boolean deleted = (boolean) paramsTable.getValueAt(i, ParamsTableModel.Column.DELETE.getIndex());
-        String name = (String) paramsTable.getValueAt(i, ParamsTableModel.Column.NAME.getIndex());
-        String value = (String) paramsTable.getValueAt(i, ParamsTableModel.Column.VALUE.getIndex());
-        if (deleted || Objects.isNull(name) || name.equals("") || Objects.isNull(value) || value.equals("")) {
-          continue;
-        }
-        params.put(name, value);
-      }
-      updateTargetParams(params);
-      callback.call();
-      this.params.clear();
-      dialog.dispose();
-    });
+    okBtn.addActionListener(
+        e -> {
+          Map<String, String> params = new HashMap<>();
+          for (int i = 0; i < paramsTable.getRowCount(); i++) {
+            boolean deleted =
+                (boolean) paramsTable.getValueAt(i, ParamsTableModel.Column.DELETE.getIndex());
+            String name =
+                (String) paramsTable.getValueAt(i, ParamsTableModel.Column.NAME.getIndex());
+            String value =
+                (String) paramsTable.getValueAt(i, ParamsTableModel.Column.VALUE.getIndex());
+            if (deleted
+                || Objects.isNull(name)
+                || name.equals("")
+                || Objects.isNull(value)
+                || value.equals("")) {
+              continue;
+            }
+            params.put(name, value);
+          }
+          updateTargetParams(params);
+          callback.call();
+          this.params.clear();
+          dialog.dispose();
+        });
     footer.add(okBtn);
     JButton cancelBtn = new JButton(MessageUtils.getLocalizedMessage("button.cancel"));
-    cancelBtn.addActionListener(e -> {
-      this.params.clear();
-      dialog.dispose();
-    });
+    cancelBtn.addActionListener(
+        e -> {
+          this.params.clear();
+          dialog.dispose();
+        });
     footer.add(cancelBtn);
     panel.add(footer, BorderLayout.PAGE_END);
@@ -164,19 +176,22 @@ public final class EditParamsDialogFactory implements DialogOpener.DialogFactory
   }

   private void updateTargetParams(Map<String, String> params) {
-    operatorRegistry.get(CustomAnalyzerPanelOperator.class).ifPresent(operator -> {
-      switch (mode) {
-        case CHARFILTER:
-          operator.updateCharFilterParams(targetIndex, params);
-          break;
-        case TOKENIZER:
-          operator.updateTokenizerParams(params);
-          break;
-        case TOKENFILTER:
-          operator.updateTokenFilterParams(targetIndex, params);
-          break;
-      }
-    });
+    operatorRegistry
+        .get(CustomAnalyzerPanelOperator.class)
+        .ifPresent(
+            operator -> {
+              switch (mode) {
+                case CHARFILTER:
+                  operator.updateCharFilterParams(targetIndex, params);
+                  break;
+                case TOKENIZER:
+                  operator.updateTokenizerParams(params);
+                  break;
+                case TOKENFILTER:
+                  operator.updateTokenFilterParams(targetIndex, params);
+                  break;
+              }
+            });
   }

   static final class ParamsTableModel extends TableModelBase<ParamsTableModel.Column> {
@@ -217,7 +232,6 @@ public final class EditParamsDialogFactory implements DialogOpener.DialogFactory
       public int getColumnWidth() {
         return width;
       }
-
     }

     private static final int PARAM_SIZE = 20;
@@ -249,6 +263,4 @@ public final class EditParamsDialogFactory implements DialogOpener.DialogFactory
       return Column.values();
     }
   }
-
 }
-
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/EditParamsMode.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/EditParamsMode.java
index 8e76879dc22..686d5f1c4e8 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/EditParamsMode.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/EditParamsMode.java
@@ -19,5 +19,7 @@ package org.apache.lucene.luke.app.desktop.components.dialog.analysis;

 /** Edit parameters mode */
 public enum EditParamsMode {
-  CHARFILTER, TOKENIZER, TOKENFILTER;
+  CHARFILTER,
+  TOKENIZER,
+  TOKENFILTER;
 }
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/TokenAttributeDialogFactory.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/TokenAttributeDialogFactory.java
index 4112699754f..46399d00b58 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/TokenAttributeDialogFactory.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/TokenAttributeDialogFactory.java
@@ -17,14 +17,6 @@

 package org.apache.lucene.luke.app.desktop.components.dialog.analysis;

-import javax.swing.BorderFactory;
-import javax.swing.JButton;
-import javax.swing.JDialog;
-import javax.swing.JLabel;
-import javax.swing.JPanel;
-import javax.swing.JScrollPane;
-import javax.swing.JTable;
-import javax.swing.ListSelectionModel;
 import java.awt.BorderLayout;
 import java.awt.Dialog;
 import java.awt.Dimension;
@@ -33,7 +25,14 @@ import java.awt.Window;
 import java.io.IOException;
 import java.util.List;
 import java.util.stream.Collectors;
-
+import javax.swing.BorderFactory;
+import javax.swing.JButton;
+import javax.swing.JDialog;
+import javax.swing.JLabel;
+import javax.swing.JPanel;
+import javax.swing.JScrollPane;
+import javax.swing.JTable;
+import javax.swing.ListSelectionModel;
 import org.apache.lucene.luke.app.desktop.Preferences;
 import org.apache.lucene.luke.app.desktop.PreferencesFactory;
 import org.apache.lucene.luke.app.desktop.components.TableColumnInfo;
@@ -58,7 +57,7 @@ public final class TokenAttributeDialogFactory implements DialogOpener.DialogFac

   private List attributes;

-  public synchronized static TokenAttributeDialogFactory getInstance() throws IOException {
+  public static synchronized TokenAttributeDialogFactory getInstance() throws IOException {
     if (instance == null) {
       instance = new TokenAttributeDialogFactory();
     }
@@ -98,10 +97,18 @@ public final class TokenAttributeDialogFactory implements DialogOpener.DialogFac
     header.add(new JLabel(term));
     panel.add(header, BorderLayout.PAGE_START);

-    List<TokenAttValue> attrValues = attributes.stream()
-        .flatMap(att -> att.getAttValues().entrySet().stream().map(e -> TokenAttValue.of(att.getAttClass(), e.getKey(), e.getValue())))
-        .collect(Collectors.toList());
-    TableUtils.setupTable(attributesTable, ListSelectionModel.SINGLE_SELECTION, new AttributeTableModel(attrValues), null);
+    List<TokenAttValue> attrValues =
+        attributes.stream()
+            .flatMap(
+                att ->
+                    att.getAttValues().entrySet().stream()
+                        .map(e -> TokenAttValue.of(att.getAttClass(), e.getKey(), e.getValue())))
+            .collect(Collectors.toList());
+    TableUtils.setupTable(
+        attributesTable,
+        ListSelectionModel.SINGLE_SELECTION,
+        new AttributeTableModel(attrValues),
+        null);
     panel.add(new JScrollPane(attributesTable), BorderLayout.CENTER);

     JPanel footer = new JPanel(new FlowLayout(FlowLayout.TRAILING));
@@ -117,7 +124,6 @@ public final class TokenAttributeDialogFactory implements DialogOpener.DialogFac
   static final class AttributeTableModel extends TableModelBase<AttributeTableModel.Column> {

     enum Column implements TableColumnInfo {
-
       ATTR("Attribute", 0, String.class),
       NAME("Name", 1, String.class),
       VALUE("Value", 2, String.class);
@@ -177,8 +183,7 @@ public final class TokenAttributeDialogFactory implements DialogOpener.DialogFac
       return attValue;
     }

-    private TokenAttValue() {
-    }
+    private TokenAttValue() {}

     String getAttClass() {
       return attClass;
@@ -192,5 +197,4 @@ public final class TokenAttributeDialogFactory implements DialogOpener.DialogFac
       return value;
     }
   }
-
 }
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/package-info.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/package-info.java
index bd3419bd66f..4c71d8bf9fc 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/package-info.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/package-info.java
@@ -16,4 +16,4 @@
  */

 /** Dialogs used in the Analysis tab */
-package org.apache.lucene.luke.app.desktop.components.dialog.analysis;
\ No newline at end of file
+package org.apache.lucene.luke.app.desktop.components.dialog.analysis;
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/AddDocumentDialogFactory.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/AddDocumentDialogFactory.java
index 906f80c95b3..535dcafece6 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/AddDocumentDialogFactory.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/AddDocumentDialogFactory.java
@@ -17,22 +17,6 @@

 package org.apache.lucene.luke.app.desktop.components.dialog.documents;

-import javax.swing.BorderFactory;
-import javax.swing.BoxLayout;
-import javax.swing.DefaultCellEditor;
-import javax.swing.JButton;
-import javax.swing.JComboBox;
-import javax.swing.JComponent;
-import javax.swing.JDialog;
-import javax.swing.JLabel;
-import javax.swing.JPanel;
-import javax.swing.JScrollPane;
-import javax.swing.JTable;
-import javax.swing.JTextArea;
-import javax.swing.ListSelectionModel;
-import javax.swing.UIManager;
-import javax.swing.table.JTableHeader;
-import javax.swing.table.TableCellRenderer;
 import java.awt.BorderLayout;
 import java.awt.Color;
 import java.awt.Component;
@@ -51,7 +35,22 @@ import java.lang.reflect.Constructor;
 import java.util.List;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
-
+import javax.swing.BorderFactory;
+import javax.swing.BoxLayout;
+import javax.swing.DefaultCellEditor;
+import javax.swing.JButton;
+import javax.swing.JComboBox;
+import javax.swing.JComponent;
+import javax.swing.JDialog;
+import javax.swing.JLabel;
+import javax.swing.JPanel;
+import javax.swing.JScrollPane;
+import javax.swing.JTable;
+import javax.swing.JTextArea;
+import javax.swing.ListSelectionModel;
+import javax.swing.UIManager;
+import javax.swing.table.JTableHeader;
+import javax.swing.table.TableCellRenderer;
 import org.apache.logging.log4j.Logger;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
@@ -98,13 +97,14 @@ import org.apache.lucene.luke.util.LoggerFactory;
 import org.apache.lucene.util.BytesRef;

 /** Factory of add document dialog */
-public final class AddDocumentDialogFactory implements DialogOpener.DialogFactory, AddDocumentDialogOperator {
+public final class AddDocumentDialogFactory
+    implements DialogOpener.DialogFactory, AddDocumentDialogOperator {

   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

   private static AddDocumentDialogFactory instance;

-  private final static int ROW_COUNT = 50;
+  private static final int ROW_COUNT = 50;

   private final Preferences prefs;
@@ -136,11 +136,11 @@ public final class AddDocumentDialogFactory implements DialogOpener.DialogFactor
   private JDialog dialog;

-  public synchronized static AddDocumentDialogFactory getInstance() throws IOException {
+  public static synchronized AddDocumentDialogFactory getInstance() throws IOException {
     if (instance == null) {
       instance = new AddDocumentDialogFactory();
     }
-    return instance;
+    return instance;
   }
@@ -150,7 +150,10 @@ public final class AddDocumentDialogFactory implements DialogOpener.DialogFactor
     this.operatorRegistry = ComponentOperatorRegistry.getInstance();
     this.indexOptionsDialogFactory = IndexOptionsDialogFactory.getInstance();
     this.helpDialogFactory = HelpDialogFactory.getInstance();
-    this.newFieldList = IntStream.range(0, ROW_COUNT).mapToObj(i -> NewField.newInstance()).collect(Collectors.toList());
+    this.newFieldList =
+        IntStream.range(0, ROW_COUNT)
+            .mapToObj(i -> NewField.newInstance())
+            .collect(Collectors.toList());

     operatorRegistry.register(AddDocumentDialogOperator.class, this);
     indexHandler.addObserver(new Observer());
@@ -204,14 +207,16 @@ public final class AddDocumentDialogFactory implements DialogOpener.DialogFactor
     analyzerHeader.setOpaque(false);
     analyzerHeader.add(new JLabel(MessageUtils.getLocalizedMessage("add_document.label.analyzer")));
     analyzerHeader.add(analyzerNameLbl);
-    JLabel changeLbl = new JLabel(MessageUtils.getLocalizedMessage("add_document.hyperlink.change"));
-    changeLbl.addMouseListener(new MouseAdapter() {
-      @Override
-      public void mouseClicked(MouseEvent e) {
-        dialog.dispose();
-        tabSwitcher.switchTab(TabbedPaneProvider.Tab.ANALYZER);
-      }
-    });
+    JLabel changeLbl =
+        new JLabel(MessageUtils.getLocalizedMessage("add_document.hyperlink.change"));
+    changeLbl.addMouseListener(
+        new MouseAdapter() {
+          @Override
+          public void mouseClicked(MouseEvent e) {
+            dialog.dispose();
+            tabSwitcher.switchTab(TabbedPaneProvider.Tab.ANALYZER);
+          }
+        });
     analyzerHeader.add(FontUtils.toLinkText(changeLbl));
     panel.add(analyzerHeader);
@@ -245,20 +250,46 @@ public final class AddDocumentDialogFactory implements DialogOpener.DialogFactor

   private JTable fieldsTable() {
     JTable fieldsTable = new JTable();
-    TableUtils.setupTable(fieldsTable, ListSelectionModel.SINGLE_SELECTION, new FieldsTableModel(newFieldList), null, 30, 150, 120, 80);
+    TableUtils.setupTable(
+        fieldsTable,
+        ListSelectionModel.SINGLE_SELECTION,
+        new FieldsTableModel(newFieldList),
+        null,
+        30,
+        150,
+        120,
+        80);
     fieldsTable.setShowGrid(true);
     JComboBox<Class<? extends Field>> typesCombo = new JComboBox<>(presetFieldClasses);
-    typesCombo.setRenderer((list, value, index, isSelected, cellHasFocus) -> new JLabel(value.getSimpleName()));
-    fieldsTable.getColumnModel().getColumn(FieldsTableModel.Column.TYPE.getIndex()).setCellEditor(new DefaultCellEditor(typesCombo));
+    typesCombo.setRenderer(
+        (list, value, index, isSelected, cellHasFocus) -> new JLabel(value.getSimpleName()));
+    fieldsTable
+        .getColumnModel()
+        .getColumn(FieldsTableModel.Column.TYPE.getIndex())
+        .setCellEditor(new DefaultCellEditor(typesCombo));
     for (int i = 0; i < fieldsTable.getModel().getRowCount(); i++) {
-      fieldsTable.getModel().setValueAt(TextField.class, i, FieldsTableModel.Column.TYPE.getIndex());
+      fieldsTable
+          .getModel()
+          .setValueAt(TextField.class, i, FieldsTableModel.Column.TYPE.getIndex());
     }
-    fieldsTable.getColumnModel().getColumn(FieldsTableModel.Column.TYPE.getIndex()).setHeaderRenderer(
-        new HelpHeaderRenderer(
-            "About Type", "Select Field Class:",
-            createTypeHelpDialog(), helpDialogFactory, dialog));
-    fieldsTable.getColumnModel().getColumn(FieldsTableModel.Column.TYPE.getIndex()).setCellRenderer(new TypeCellRenderer());
-    fieldsTable.getColumnModel().getColumn(FieldsTableModel.Column.OPTIONS.getIndex()).setCellRenderer(new OptionsCellRenderer(dialog, indexOptionsDialogFactory, newFieldList));
+    fieldsTable
+        .getColumnModel()
+        .getColumn(FieldsTableModel.Column.TYPE.getIndex())
+        .setHeaderRenderer(
+            new HelpHeaderRenderer(
+                "About Type",
+                "Select Field Class:",
+                createTypeHelpDialog(),
+                helpDialogFactory,
+                dialog));
+    fieldsTable
+        .getColumnModel()
+        .getColumn(FieldsTableModel.Column.TYPE.getIndex())
+        .setCellRenderer(new TypeCellRenderer());
+    fieldsTable
+        .getColumnModel()
+        .getColumn(FieldsTableModel.Column.OPTIONS.getIndex())
+        .setCellRenderer(new OptionsCellRenderer(dialog, indexOptionsDialogFactory, newFieldList));
     return fieldsTable;
   }
@@ -271,28 +302,30 @@ public final class AddDocumentDialogFactory implements DialogOpener.DialogFactor
     JPanel header = new JPanel();
     header.setOpaque(false);
     header.setLayout(new BoxLayout(header, BoxLayout.PAGE_AXIS));
-    String[] typeList = new String[]{
-        "TextField",
-        "StringField",
-        "IntPoint",
-        "LongPoint",
-        "FloatPoint",
-        "DoublePoint",
-        "SortedDocValuesField",
-        "SortedSetDocValuesField",
-        "NumericDocValuesField",
-        "SortedNumericDocValuesField",
-        "StoredField",
-        "Field"
-    };
+    String[] typeList =
+        new String[] {
+          "TextField",
+          "StringField",
+          "IntPoint",
+          "LongPoint",
+          "FloatPoint",
+          "DoublePoint",
+          "SortedDocValuesField",
+          "SortedSetDocValuesField",
+          "NumericDocValuesField",
+          "SortedNumericDocValuesField",
+          "StoredField",
+          "Field"
+        };
     JPanel wrapper1 = new JPanel(new FlowLayout(FlowLayout.LEADING));
     wrapper1.setOpaque(false);
     JComboBox<String> typeCombo = new JComboBox<>(typeList);
     typeCombo.setSelectedItem(typeList[0]);
-    typeCombo.addActionListener(e -> {
-      String selected = (String) typeCombo.getSelectedItem();
-      descTA.setText(MessageUtils.getLocalizedMessage("help.fieldtype." + selected));
-    });
+    typeCombo.addActionListener(
+        e -> {
+          String selected = (String) typeCombo.getSelectedItem();
+          descTA.setText(MessageUtils.getLocalizedMessage("help.fieldtype." + selected));
+        });
     wrapper1.add(typeCombo);
     header.add(wrapper1);
     JPanel wrapper2 = new JPanel(new FlowLayout(FlowLayout.LEADING));
@@ -325,13 +358,21 @@ public final class AddDocumentDialogFactory implements DialogOpener.DialogFactor
   }

   @SuppressWarnings({"unchecked", "rawtypes"})
-  private final Class<? extends Field>[] presetFieldClasses = new Class[]{
-      TextField.class, StringField.class,
-      IntPoint.class, LongPoint.class, FloatPoint.class, DoublePoint.class,
-      SortedDocValuesField.class, SortedSetDocValuesField.class,
-      NumericDocValuesField.class, SortedNumericDocValuesField.class,
-      StoredField.class, Field.class
-  };
+  private final Class<? extends Field>[] presetFieldClasses =
+      new Class[] {
+        TextField.class,
+        StringField.class,
+        IntPoint.class,
+        LongPoint.class,
+        FloatPoint.class,
+        DoublePoint.class,
+        SortedDocValuesField.class,
+        SortedSetDocValuesField.class,
+        NumericDocValuesField.class,
+        SortedNumericDocValuesField.class,
+        StoredField.class,
+        Field.class
+      };

   @Override
   public void setAnalyzer(Analyzer analyzer) {
@@ -341,11 +382,12 @@ public final class AddDocumentDialogFactory implements DialogOpener.DialogFactor
   private class ListenerFunctions {

     void addDocument(ActionEvent e) {
-      List<NewField> validFields = newFieldList.stream()
-          .filter(nf -> !nf.isDeleted())
-          .filter(nf -> !StringUtils.isNullOrEmpty(nf.getName()))
-          .filter(nf -> !StringUtils.isNullOrEmpty(nf.getValue()))
-          .collect(Collectors.toList());
+      List<NewField> validFields =
+          newFieldList.stream()
+              .filter(nf -> !nf.isDeleted())
+              .filter(nf -> !StringUtils.isNullOrEmpty(nf.getName()))
+              .filter(nf -> !StringUtils.isNullOrEmpty(nf.getValue()))
+              .collect(Collectors.toList());
       if (validFields.isEmpty()) {
         infoTA.setText("Please add one or more fields. Name and Value are both required.");
         return;
@@ -391,12 +433,12 @@ public final class AddDocumentDialogFactory implements DialogOpener.DialogFactor
         constr = nf.getType().getConstructor(String.class, double[].class);
         double[] values = NumericUtils.convertToDoubleArray(nf.getValue(), false);
         return constr.newInstance(nf.getName(), values);
-      } else if (nf.getType().equals(SortedDocValuesField.class) ||
-          nf.getType().equals(SortedSetDocValuesField.class)) {
+      } else if (nf.getType().equals(SortedDocValuesField.class)
+          || nf.getType().equals(SortedSetDocValuesField.class)) {
         constr = nf.getType().getConstructor(String.class, BytesRef.class);
         return constr.newInstance(nf.getName(), new BytesRef(nf.getValue()));
-      } else if (nf.getType().equals(NumericDocValuesField.class) ||
-          nf.getType().equals(SortedNumericDocValuesField.class)) {
+      } else if (nf.getType().equals(NumericDocValuesField.class)
+          || nf.getType().equals(SortedNumericDocValuesField.class)) {
         constr = nf.getType().getConstructor(String.class, long.class);
         long value = NumericUtils.tryConvertToLongValue(nf.getValue());
         return constr.newInstance(nf.getName(), value);
@@ -414,12 +456,16 @@ public final class AddDocumentDialogFactory implements DialogOpener.DialogFactor

     private void addDocument(Document doc) {
       try {
-        Analyzer analyzer = operatorRegistry.get(AnalysisTabOperator.class)
-            .map(AnalysisTabOperator::getCurrentAnalyzer)
-            .orElse(new StandardAnalyzer());
+        Analyzer analyzer =
+            operatorRegistry
+                .get(AnalysisTabOperator.class)
+                .map(AnalysisTabOperator::getCurrentAnalyzer)
+                .orElse(new StandardAnalyzer());
         toolsModel.addDocument(doc, analyzer);
         indexHandler.reOpen();
-        operatorRegistry.get(DocumentsTabOperator.class).ifPresent(DocumentsTabOperator::displayLatestDoc);
+        operatorRegistry
+            .get(DocumentsTabOperator.class)
+
.ifPresent(DocumentsTabOperator::displayLatestDoc); tabSwitcher.switchTab(TabbedPaneProvider.Tab.DOCUMENTS); infoTA.setText(MessageUtils.getLocalizedMessage("add_document.message.success")); addBtn.setEnabled(false); @@ -432,14 +478,15 @@ public final class AddDocumentDialogFactory implements DialogOpener.DialogFactor throw new LukeException(e.getMessage(), e); } } - } private class Observer implements IndexObserver { @Override public void openIndex(LukeState state) { - toolsModel = toolsFactory.newInstance(state.getIndexReader(), state.useCompound(), state.keepAllCommits()); + toolsModel = + toolsFactory.newInstance( + state.getIndexReader(), state.useCompound(), state.keepAllCommits()); } @Override @@ -481,7 +528,6 @@ public final class AddDocumentDialogFactory implements DialogOpener.DialogFactor public Class getType() { return type; } - } private final List newFieldList; @@ -533,7 +579,8 @@ public final class AddDocumentDialogFactory implements DialogOpener.DialogFactor @SuppressWarnings("unchecked") @Override - public Component getTableCellRendererComponent(JTable table, Object value, boolean isSelected, boolean hasFocus, int row, int column) { + public Component getTableCellRendererComponent( + JTable table, Object value, boolean isSelected, boolean hasFocus, int row, int column) { String simpleName = ((Class) value).getSimpleName(); return new JLabel(simpleName); } @@ -551,7 +598,10 @@ public final class AddDocumentDialogFactory implements DialogOpener.DialogFactor private JTable table; - public OptionsCellRenderer(JDialog dialog, IndexOptionsDialogFactory indexOptionsDialogFactory, List newFieldList) { + public OptionsCellRenderer( + JDialog dialog, + IndexOptionsDialogFactory indexOptionsDialogFactory, + List newFieldList) { this.dialog = dialog; this.indexOptionsDialogFactory = indexOptionsDialogFactory; this.newFieldList = newFieldList; @@ -559,7 +609,8 @@ public final class AddDocumentDialogFactory implements DialogOpener.DialogFactor @Override @SuppressWarnings("unchecked") - public Component getTableCellRendererComponent(JTable table, Object value, boolean isSelected, boolean hasFocus, int row, int column) { + public Component getTableCellRendererComponent( + JTable table, Object value, boolean isSelected, boolean hasFocus, int row, int column) { if (table != null && this.table != table) { this.table = table; final JTableHeader header = table.getTableHeader(); @@ -569,25 +620,30 @@ public final class AddDocumentDialogFactory implements DialogOpener.DialogFactor panel.add(new JLabel(value.toString())); JLabel optionsLbl = new JLabel("options"); - table.addMouseListener(new MouseAdapter() { - @Override - public void mouseClicked(MouseEvent e) { - int row = table.rowAtPoint(e.getPoint()); - int col = table.columnAtPoint(e.getPoint()); - if (row >= 0 && col == FieldsTableModel.Column.OPTIONS.getIndex()) { - String title = "Index options for:"; - new DialogOpener<>(indexOptionsDialogFactory).open(dialog, title, 500, 500, - (factory) -> { - factory.setNewField(newFieldList.get(row)); - }); - } - } - }); + table.addMouseListener( + new MouseAdapter() { + @Override + public void mouseClicked(MouseEvent e) { + int row = table.rowAtPoint(e.getPoint()); + int col = table.columnAtPoint(e.getPoint()); + if (row >= 0 && col == FieldsTableModel.Column.OPTIONS.getIndex()) { + String title = "Index options for:"; + new DialogOpener<>(indexOptionsDialogFactory) + .open( + dialog, + title, + 500, + 500, + (factory) -> { + factory.setNewField(newFieldList.get(row)); + }); + } + } + }); 
panel.add(FontUtils.toLinkText(optionsLbl)); } } return panel; } - } } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/AddDocumentDialogOperator.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/AddDocumentDialogOperator.java index 2c29d6fd5db..caedc682986 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/AddDocumentDialogOperator.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/AddDocumentDialogOperator.java @@ -24,4 +24,3 @@ import org.apache.lucene.luke.app.desktop.components.ComponentOperatorRegistry; public interface AddDocumentDialogOperator extends ComponentOperatorRegistry.ComponentOperator { void setAnalyzer(Analyzer analyzer); } - diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/DocValuesDialogFactory.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/DocValuesDialogFactory.java index 7bea476a606..754d58ef99f 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/DocValuesDialogFactory.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/DocValuesDialogFactory.java @@ -17,18 +17,6 @@ package org.apache.lucene.luke.app.desktop.components.dialog.documents; -import javax.swing.BorderFactory; -import javax.swing.BoxLayout; -import javax.swing.DefaultComboBoxModel; -import javax.swing.DefaultListModel; -import javax.swing.JButton; -import javax.swing.JComboBox; -import javax.swing.JDialog; -import javax.swing.JLabel; -import javax.swing.JList; -import javax.swing.JPanel; -import javax.swing.JScrollPane; -import javax.swing.ListSelectionModel; import java.awt.BorderLayout; import java.awt.Dialog; import java.awt.Dimension; @@ -44,7 +32,18 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Objects; - +import javax.swing.BorderFactory; +import javax.swing.BoxLayout; +import javax.swing.DefaultComboBoxModel; +import javax.swing.DefaultListModel; +import javax.swing.JButton; +import javax.swing.JComboBox; +import javax.swing.JDialog; +import javax.swing.JLabel; +import javax.swing.JList; +import javax.swing.JPanel; +import javax.swing.JScrollPane; +import javax.swing.ListSelectionModel; import org.apache.lucene.luke.app.desktop.Preferences; import org.apache.lucene.luke.app.desktop.PreferencesFactory; import org.apache.lucene.luke.app.desktop.util.DialogOpener; @@ -73,7 +72,7 @@ public final class DocValuesDialogFactory implements DialogOpener.DialogFactory private DocValues docValues; - public synchronized static DocValuesDialogFactory getInstance() throws IOException { + public static synchronized DocValuesDialogFactory getInstance() throws IOException { if (instance == null) { instance = new DocValuesDialogFactory(); } @@ -91,14 +90,10 @@ public final class DocValuesDialogFactory implements DialogOpener.DialogFactory DefaultListModel values = new DefaultListModel<>(); if (docValues.getValues().size() > 0) { decodersCombo.setEnabled(false); - docValues.getValues().stream() - .map(BytesRefUtils::decode) - .forEach(values::addElement); + docValues.getValues().stream().map(BytesRefUtils::decode).forEach(values::addElement); } else if (docValues.getNumericValues().size() > 0) { decodersCombo.setEnabled(true); - docValues.getNumericValues().stream() - .map(String::valueOf) - 
.forEach(values::addElement); + docValues.getNumericValues().stream().map(String::valueOf).forEach(values::addElement); } valueList.setModel(values); @@ -138,7 +133,8 @@ public final class DocValuesDialogFactory implements DialogOpener.DialogFactory JPanel fieldHeader = new JPanel(new FlowLayout(FlowLayout.LEADING, 3, 3)); fieldHeader.setOpaque(false); - fieldHeader.add(new JLabel(MessageUtils.getLocalizedMessage("documents.docvalues.label.doc_values"))); + fieldHeader.add( + new JLabel(MessageUtils.getLocalizedMessage("documents.docvalues.label.doc_values"))); fieldHeader.add(new JLabel(field)); header.add(fieldHeader); @@ -151,7 +147,8 @@ public final class DocValuesDialogFactory implements DialogOpener.DialogFactory JPanel decodeHeader = new JPanel(new FlowLayout(FlowLayout.TRAILING, 3, 3)); decodeHeader.setOpaque(false); decodeHeader.add(new JLabel("decoded as")); - String[] decoders = Arrays.stream(Decoder.values()).map(Decoder::toString).toArray(String[]::new); + String[] decoders = + Arrays.stream(Decoder.values()).map(Decoder::toString).toArray(String[]::new); decodersCombo.setModel(new DefaultComboBoxModel<>(decoders)); decodersCombo.setSelectedItem(Decoder.LONG.toString()); decodersCombo.addActionListener(listeners::selectDecoder); @@ -171,13 +168,9 @@ public final class DocValuesDialogFactory implements DialogOpener.DialogFactory DefaultListModel values = new DefaultListModel<>(); if (docValues.getValues().size() > 0) { - docValues.getValues().stream() - .map(BytesRefUtils::decode) - .forEach(values::addElement); + docValues.getValues().stream().map(BytesRefUtils::decode).forEach(values::addElement); } else { - docValues.getNumericValues().stream() - .map(String::valueOf) - .forEach(values::addElement); + docValues.getNumericValues().stream().map(String::valueOf).forEach(values::addElement); } valueList.setModel(values); @@ -188,7 +181,9 @@ public final class DocValuesDialogFactory implements DialogOpener.DialogFactory JPanel footer = new JPanel(new FlowLayout(FlowLayout.TRAILING, 5, 5)); footer.setOpaque(false); - JButton copyBtn = new JButton(FontUtils.elegantIconHtml("", MessageUtils.getLocalizedMessage("button.copy"))); + JButton copyBtn = + new JButton( + FontUtils.elegantIconHtml("", MessageUtils.getLocalizedMessage("button.copy"))); copyBtn.setMargin(new Insets(3, 0, 3, 0)); copyBtn.addActionListener(listeners::copyValues); footer.add(copyBtn); @@ -214,9 +209,7 @@ public final class DocValuesDialogFactory implements DialogOpener.DialogFactory DefaultListModel values = new DefaultListModel<>(); switch (decoder) { case LONG: - docValues.getNumericValues().stream() - .map(String::valueOf) - .forEach(values::addElement); + docValues.getNumericValues().stream().map(String::valueOf).forEach(values::addElement); break; case FLOAT: docValues.getNumericValues().stream() @@ -266,11 +259,11 @@ public final class DocValuesDialogFactory implements DialogOpener.DialogFactory } } - /** doc value decoders */ public enum Decoder { - - LONG("long"), FLOAT("float"), DOUBLE("double"); + LONG("long"), + FLOAT("float"), + DOUBLE("double"); private final String label; @@ -292,5 +285,4 @@ public final class DocValuesDialogFactory implements DialogOpener.DialogFactory throw new IllegalArgumentException("No such decoder: " + label); } } - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/IndexOptionsDialogFactory.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/IndexOptionsDialogFactory.java index 
5366e2d4e00..bed5fbc05b5 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/IndexOptionsDialogFactory.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/IndexOptionsDialogFactory.java @@ -17,6 +17,13 @@ package org.apache.lucene.luke.app.desktop.components.dialog.documents; +import java.awt.Dialog; +import java.awt.Dimension; +import java.awt.FlowLayout; +import java.awt.Insets; +import java.awt.Window; +import java.io.IOException; +import java.util.Arrays; import javax.swing.BorderFactory; import javax.swing.BoxLayout; import javax.swing.JButton; @@ -27,14 +34,6 @@ import javax.swing.JLabel; import javax.swing.JPanel; import javax.swing.JSeparator; import javax.swing.JTextField; -import java.awt.Dialog; -import java.awt.Dimension; -import java.awt.FlowLayout; -import java.awt.Insets; -import java.awt.Window; -import java.io.IOException; -import java.util.Arrays; - import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; import org.apache.lucene.document.StringField; @@ -80,7 +79,7 @@ public final class IndexOptionsDialogFactory implements DialogOpener.DialogFacto private NewField nf; - public synchronized static IndexOptionsDialogFactory getInstance() throws IOException { + public static synchronized IndexOptionsDialogFactory getInstance() throws IOException { if (instance == null) { instance = new IndexOptionsDialogFactory(); } @@ -155,7 +154,8 @@ public final class IndexOptionsDialogFactory implements DialogOpener.DialogFacto JPanel inner2 = new JPanel(new FlowLayout(FlowLayout.LEADING, 10, 1)); inner2.setOpaque(false); - JLabel idxOptLbl = new JLabel(MessageUtils.getLocalizedMessage("idx_options.label.index_options")); + JLabel idxOptLbl = + new JLabel(MessageUtils.getLocalizedMessage("idx_options.label.index_options")); inner2.add(idxOptLbl); inner2.add(idxOptCombo); panel.add(inner2); @@ -249,9 +249,9 @@ public final class IndexOptionsDialogFactory implements DialogOpener.DialogFacto dimCountTF.setText(String.valueOf(fieldType.pointDimensionCount())); dimNumBytesTF.setText(String.valueOf(fieldType.pointNumBytes())); - if (nf.getType().equals(org.apache.lucene.document.TextField.class) || - nf.getType().equals(StringField.class) || - nf.getType().equals(Field.class)) { + if (nf.getType().equals(org.apache.lucene.document.TextField.class) + || nf.getType().equals(StringField.class) + || nf.getType().equals(Field.class)) { storedCB.setEnabled(true); } else { storedCB.setEnabled(false); @@ -304,5 +304,4 @@ public final class IndexOptionsDialogFactory implements DialogOpener.DialogFacto private static String[] availableDocValuesType() { return Arrays.stream(DocValuesType.values()).map(DocValuesType::name).toArray(String[]::new); } - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/StoredValueDialogFactory.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/StoredValueDialogFactory.java index bd179f7e7ad..a919c37dd97 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/StoredValueDialogFactory.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/StoredValueDialogFactory.java @@ -17,13 +17,6 @@ package org.apache.lucene.luke.app.desktop.components.dialog.documents; -import javax.swing.BorderFactory; -import javax.swing.JButton; -import javax.swing.JDialog; -import javax.swing.JLabel; -import 
javax.swing.JPanel; -import javax.swing.JScrollPane; -import javax.swing.JTextArea; import java.awt.BorderLayout; import java.awt.Color; import java.awt.Dialog; @@ -36,7 +29,13 @@ import java.awt.datatransfer.Clipboard; import java.awt.datatransfer.StringSelection; import java.io.IOException; import java.util.Objects; - +import javax.swing.BorderFactory; +import javax.swing.JButton; +import javax.swing.JDialog; +import javax.swing.JLabel; +import javax.swing.JPanel; +import javax.swing.JScrollPane; +import javax.swing.JTextArea; import org.apache.lucene.luke.app.desktop.Preferences; import org.apache.lucene.luke.app.desktop.PreferencesFactory; import org.apache.lucene.luke.app.desktop.util.DialogOpener; @@ -56,7 +55,7 @@ public final class StoredValueDialogFactory implements DialogOpener.DialogFactor private String value; - public synchronized static StoredValueDialogFactory getInstance() throws IOException { + public static synchronized StoredValueDialogFactory getInstance() throws IOException { if (instance == null) { instance = new StoredValueDialogFactory(); } @@ -110,13 +109,16 @@ public final class StoredValueDialogFactory implements DialogOpener.DialogFactor JPanel footer = new JPanel(new FlowLayout(FlowLayout.TRAILING, 5, 5)); footer.setOpaque(false); - JButton copyBtn = new JButton(FontUtils.elegantIconHtml("", MessageUtils.getLocalizedMessage("button.copy"))); + JButton copyBtn = + new JButton( + FontUtils.elegantIconHtml("", MessageUtils.getLocalizedMessage("button.copy"))); copyBtn.setMargin(new Insets(3, 3, 3, 3)); - copyBtn.addActionListener(e -> { - Clipboard clipboard = Toolkit.getDefaultToolkit().getSystemClipboard(); - StringSelection selection = new StringSelection(value); - clipboard.setContents(selection, null); - }); + copyBtn.addActionListener( + e -> { + Clipboard clipboard = Toolkit.getDefaultToolkit().getSystemClipboard(); + StringSelection selection = new StringSelection(value); + clipboard.setContents(selection, null); + }); footer.add(copyBtn); JButton closeBtn = new JButton(MessageUtils.getLocalizedMessage("button.close")); @@ -127,6 +129,4 @@ public final class StoredValueDialogFactory implements DialogOpener.DialogFactor return panel; } - - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/TermVectorDialogFactory.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/TermVectorDialogFactory.java index 2e7da587af4..8095c2c62b1 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/TermVectorDialogFactory.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/TermVectorDialogFactory.java @@ -17,14 +17,6 @@ package org.apache.lucene.luke.app.desktop.components.dialog.documents; -import javax.swing.BorderFactory; -import javax.swing.JButton; -import javax.swing.JDialog; -import javax.swing.JLabel; -import javax.swing.JPanel; -import javax.swing.JScrollPane; -import javax.swing.JTable; -import javax.swing.ListSelectionModel; import java.awt.BorderLayout; import java.awt.Dialog; import java.awt.Dimension; @@ -35,7 +27,14 @@ import java.io.IOException; import java.util.List; import java.util.Objects; import java.util.stream.Collectors; - +import javax.swing.BorderFactory; +import javax.swing.JButton; +import javax.swing.JDialog; +import javax.swing.JLabel; +import javax.swing.JPanel; +import javax.swing.JScrollPane; +import javax.swing.JTable; +import javax.swing.ListSelectionModel; import 
org.apache.lucene.luke.app.desktop.Preferences; import org.apache.lucene.luke.app.desktop.PreferencesFactory; import org.apache.lucene.luke.app.desktop.components.TableColumnInfo; @@ -58,7 +57,7 @@ public final class TermVectorDialogFactory implements DialogOpener.DialogFactory private List tvEntries; - public synchronized static TermVectorDialogFactory getInstance() throws IOException { + public static synchronized TermVectorDialogFactory getInstance() throws IOException { if (instance == null) { instance = new TermVectorDialogFactory(); } @@ -98,12 +97,20 @@ public final class TermVectorDialogFactory implements DialogOpener.DialogFactory JPanel header = new JPanel(new FlowLayout(FlowLayout.LEADING, 5, 5)); header.setOpaque(false); - header.add(new JLabel(MessageUtils.getLocalizedMessage("documents.termvector.label.term_vector"))); + header.add( + new JLabel(MessageUtils.getLocalizedMessage("documents.termvector.label.term_vector"))); header.add(new JLabel(field)); panel.add(header, BorderLayout.PAGE_START); JTable tvTable = new JTable(); - TableUtils.setupTable(tvTable, ListSelectionModel.SINGLE_SELECTION, new TermVectorTableModel(tvEntries), null, 100, 50, 100); + TableUtils.setupTable( + tvTable, + ListSelectionModel.SINGLE_SELECTION, + new TermVectorTableModel(tvEntries), + null, + 100, + 50, + 100); JScrollPane scrollPane = new JScrollPane(tvTable); panel.add(scrollPane, BorderLayout.CENTER); @@ -121,7 +128,6 @@ public final class TermVectorDialogFactory implements DialogOpener.DialogFactory static final class TermVectorTableModel extends TableModelBase { enum Column implements TableColumnInfo { - TERM("Term", 0, String.class), FREQ("Freq", 1, Long.class), POSITIONS("Positions", 2, String.class), @@ -165,20 +171,27 @@ public final class TermVectorDialogFactory implements DialogOpener.DialogFactory String termText = entry.getTermText(); long freq = tvEntries.get(i).getFreq(); - String positions = String.join(",", - entry.getPositions().stream() - .map(pos -> Integer.toString(pos.getPosition())) - .collect(Collectors.toList())); - String offsets = String.join(",", - entry.getPositions().stream() - .filter(pos -> pos.getStartOffset().isPresent() && pos.getEndOffset().isPresent()) - .map(pos -> Integer.toString(pos.getStartOffset().orElse(-1)) + "-" + Integer.toString(pos.getEndOffset().orElse(-1))) - .collect(Collectors.toList()) - ); + String positions = + String.join( + ",", + entry.getPositions().stream() + .map(pos -> Integer.toString(pos.getPosition())) + .collect(Collectors.toList())); + String offsets = + String.join( + ",", + entry.getPositions().stream() + .filter( + pos -> pos.getStartOffset().isPresent() && pos.getEndOffset().isPresent()) + .map( + pos -> + Integer.toString(pos.getStartOffset().orElse(-1)) + + "-" + + Integer.toString(pos.getEndOffset().orElse(-1))) + .collect(Collectors.toList())); - data[i] = new Object[]{termText, freq, positions, offsets}; + data[i] = new Object[] {termText, freq, positions, offsets}; } - } @Override diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/package-info.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/package-info.java index 9c641f99469..38754396c7f 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/package-info.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/package-info.java @@ -16,4 +16,4 @@ */ /** Dialogs used in the Documents tab */ -package 
org.apache.lucene.luke.app.desktop.components.dialog.documents; \ No newline at end of file +package org.apache.lucene.luke.app.desktop.components.dialog.documents; diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/menubar/AboutDialogFactory.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/menubar/AboutDialogFactory.java index e9d9c9731a6..0ac47777b13 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/menubar/AboutDialogFactory.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/menubar/AboutDialogFactory.java @@ -17,18 +17,6 @@ package org.apache.lucene.luke.app.desktop.components.dialog.menubar; -import javax.swing.BorderFactory; -import javax.swing.BoxLayout; -import javax.swing.JButton; -import javax.swing.JDialog; -import javax.swing.JEditorPane; -import javax.swing.JLabel; -import javax.swing.JPanel; -import javax.swing.JScrollPane; -import javax.swing.ScrollPaneConstants; -import javax.swing.SwingUtilities; -import javax.swing.event.HyperlinkEvent; -import javax.swing.event.HyperlinkListener; import java.awt.BorderLayout; import java.awt.Color; import java.awt.Desktop; @@ -42,7 +30,18 @@ import java.awt.Window; import java.io.IOException; import java.net.URISyntaxException; import java.util.Objects; - +import javax.swing.BorderFactory; +import javax.swing.BoxLayout; +import javax.swing.JButton; +import javax.swing.JDialog; +import javax.swing.JEditorPane; +import javax.swing.JLabel; +import javax.swing.JPanel; +import javax.swing.JScrollPane; +import javax.swing.ScrollPaneConstants; +import javax.swing.SwingUtilities; +import javax.swing.event.HyperlinkEvent; +import javax.swing.event.HyperlinkListener; import org.apache.lucene.LucenePackage; import org.apache.lucene.luke.app.desktop.Preferences; import org.apache.lucene.luke.app.desktop.PreferencesFactory; @@ -62,7 +61,7 @@ public final class AboutDialogFactory implements DialogOpener.DialogFactory { private JDialog dialog; - public synchronized static AboutDialogFactory getInstance() throws IOException { + public static synchronized AboutDialogFactory getInstance() throws IOException { if (instance == null) { instance = new AboutDialogFactory(); } @@ -143,12 +142,17 @@ public final class AboutDialogFactory implements DialogOpener.DialogFactory { editorPane.setText(LICENSE_NOTICE); editorPane.setEditable(false); editorPane.addHyperlinkListener(hyperlinkListener); - JScrollPane scrollPane = new JScrollPane(editorPane, ScrollPaneConstants.VERTICAL_SCROLLBAR_AS_NEEDED, ScrollPaneConstants.HORIZONTAL_SCROLLBAR_NEVER); + JScrollPane scrollPane = + new JScrollPane( + editorPane, + ScrollPaneConstants.VERTICAL_SCROLLBAR_AS_NEEDED, + ScrollPaneConstants.HORIZONTAL_SCROLLBAR_NEVER); scrollPane.setBorder(BorderFactory.createLineBorder(Color.gray)); - SwingUtilities.invokeLater(() -> { - // Set the scroll bar position to top - scrollPane.getVerticalScrollBar().setValue(0); - }); + SwingUtilities.invokeLater( + () -> { + // Set the scroll bar position to top + scrollPane.getVerticalScrollBar().setValue(0); + }); return scrollPane; } @@ -164,37 +168,37 @@ public final class AboutDialogFactory implements DialogOpener.DialogFactory { return panel; } - private static final String LUCENE_IMPLEMENTATION_VERSION = LucenePackage.get().getImplementationVersion(); + private static final String LUCENE_IMPLEMENTATION_VERSION = + LucenePackage.get().getImplementationVersion(); private static final String LICENSE_NOTICE = 
- "

[Implementation Version]

" + - "

" + (Objects.nonNull(LUCENE_IMPLEMENTATION_VERSION) ? LUCENE_IMPLEMENTATION_VERSION : "") + "

" + - "

[License]

" + - "

Luke is distributed under Apache License Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0) " + - "and includes The Elegant Icon Font (https://www.elegantthemes.com/blog/resources/elegant-icon-font) " + - "licensed under MIT (https://opensource.org/licenses/MIT)

" + - "

[Brief history]

" + - "
    " + - "
  • The original author is Andrzej Bialecki
  • " + - "
  • The project has been mavenized by Neil Ireson
  • " + - "
  • The project has been ported to Lucene trunk (marked as 5.0 at the time) by Dmitry Kan\n
  • " + - "
  • The project has been back-ported to Lucene 4.3 by sonarname
  • " + - "
  • There are updates to the (non-mavenized) project done by tarzanek
  • " + - "
  • The UI and core components has been re-implemented on top of Swing by Tomoko Uchida
  • " + - "
" - ; - - - private static final HyperlinkListener hyperlinkListener = e -> { - if (e.getEventType() == HyperlinkEvent.EventType.ACTIVATED) - if (Desktop.isDesktopSupported()) { - try { - Desktop.getDesktop().browse(e.getURL().toURI()); - } catch (IOException | URISyntaxException ex) { - throw new LukeException(ex.getMessage(), ex); - } - } - }; - + "

[Implementation Version]

" + + "

" + + (Objects.nonNull(LUCENE_IMPLEMENTATION_VERSION) ? LUCENE_IMPLEMENTATION_VERSION : "") + + "

" + + "

[License]

" + + "

Luke is distributed under Apache License Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0) " + + "and includes The Elegant Icon Font (https://www.elegantthemes.com/blog/resources/elegant-icon-font) " + + "licensed under MIT (https://opensource.org/licenses/MIT)

" + + "

[Brief history]

" + + "
    " + + "
  • The original author is Andrzej Bialecki
  • " + + "
  • The project has been mavenized by Neil Ireson
  • " + + "
  • The project has been ported to Lucene trunk (marked as 5.0 at the time) by Dmitry Kan\n
  • " + + "
  • The project has been back-ported to Lucene 4.3 by sonarname
  • " + + "
  • There are updates to the (non-mavenized) project done by tarzanek
  • " + + "
  • The UI and core components has been re-implemented on top of Swing by Tomoko Uchida
  • " + + "
"; + private static final HyperlinkListener hyperlinkListener = + e -> { + if (e.getEventType() == HyperlinkEvent.EventType.ACTIVATED) + if (Desktop.isDesktopSupported()) { + try { + Desktop.getDesktop().browse(e.getURL().toURI()); + } catch (IOException | URISyntaxException ex) { + throw new LukeException(ex.getMessage(), ex); + } + } + }; } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/menubar/CheckIndexDialogFactory.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/menubar/CheckIndexDialogFactory.java index 17c13076d1b..ba058c59b79 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/menubar/CheckIndexDialogFactory.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/menubar/CheckIndexDialogFactory.java @@ -17,16 +17,6 @@ package org.apache.lucene.luke.app.desktop.components.dialog.menubar; -import javax.swing.BorderFactory; -import javax.swing.BoxLayout; -import javax.swing.JButton; -import javax.swing.JDialog; -import javax.swing.JLabel; -import javax.swing.JPanel; -import javax.swing.JScrollPane; -import javax.swing.JSeparator; -import javax.swing.JTextArea; -import javax.swing.SwingWorker; import java.awt.BorderLayout; import java.awt.Dialog; import java.awt.Dimension; @@ -39,7 +29,16 @@ import java.io.IOException; import java.lang.invoke.MethodHandles; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; - +import javax.swing.BorderFactory; +import javax.swing.BoxLayout; +import javax.swing.JButton; +import javax.swing.JDialog; +import javax.swing.JLabel; +import javax.swing.JPanel; +import javax.swing.JScrollPane; +import javax.swing.JSeparator; +import javax.swing.JTextArea; +import javax.swing.SwingWorker; import org.apache.logging.log4j.Logger; import org.apache.lucene.index.CheckIndex; import org.apache.lucene.luke.app.DirectoryHandler; @@ -95,7 +94,7 @@ public final class CheckIndexDialogFactory implements DialogOpener.DialogFactory private final ListenerFunctions listeners = new ListenerFunctions(); - public synchronized static CheckIndexDialogFactory getInstance() throws IOException { + public static synchronized CheckIndexDialogFactory getInstance() throws IOException { if (instance == null) { instance = new CheckIndexDialogFactory(); } @@ -115,7 +114,9 @@ public final class CheckIndexDialogFactory implements DialogOpener.DialogFactory } private void initialize() { - repairBtn.setText(FontUtils.elegantIconHtml("", MessageUtils.getLocalizedMessage("checkidx.button.fix"))); + repairBtn.setText( + FontUtils.elegantIconHtml( + "", MessageUtils.getLocalizedMessage("checkidx.button.fix"))); repairBtn.setFont(StyleConstants.FONT_BUTTON_LARGE); repairBtn.setMargin(new Insets(3, 3, 3, 3)); repairBtn.setEnabled(false); @@ -126,7 +127,6 @@ public final class CheckIndexDialogFactory implements DialogOpener.DialogFactory logArea.setEditable(false); } - @Override public JDialog create(Window owner, String title, int width, int height) { dialog = new JDialog(owner, title, Dialog.ModalityType.APPLICATION_MODAL); @@ -171,7 +171,10 @@ public final class CheckIndexDialogFactory implements DialogOpener.DialogFactory JPanel execButtons = new JPanel(new FlowLayout(FlowLayout.TRAILING)); execButtons.setOpaque(false); - JButton checkBtn = new JButton(FontUtils.elegantIconHtml("", MessageUtils.getLocalizedMessage("checkidx.button.check"))); + JButton checkBtn = + new JButton( + FontUtils.elegantIconHtml( + "", 
MessageUtils.getLocalizedMessage("checkidx.button.check"))); checkBtn.setFont(StyleConstants.FONT_BUTTON_LARGE); checkBtn.setMargin(new Insets(3, 0, 3, 0)); checkBtn.addActionListener(listeners::checkIndex); @@ -199,7 +202,8 @@ public final class CheckIndexDialogFactory implements DialogOpener.DialogFactory repair.setOpaque(false); repair.add(repairBtn); - JTextArea warnArea = new JTextArea(MessageUtils.getLocalizedMessage("checkidx.label.warn"), 3, 30); + JTextArea warnArea = + new JTextArea(MessageUtils.getLocalizedMessage("checkidx.label.warn"), 3, 30); warnArea.setLineWrap(true); warnArea.setEditable(false); warnArea.setBorder(BorderFactory.createEmptyBorder(5, 5, 5, 5)); @@ -234,7 +238,9 @@ public final class CheckIndexDialogFactory implements DialogOpener.DialogFactory @Override public void openIndex(LukeState state) { lukeState = state; - toolsModel = indexToolsFactory.newInstance(state.getIndexReader(), state.useCompound(), state.keepAllCommits()); + toolsModel = + indexToolsFactory.newInstance( + state.getIndexReader(), state.useCompound(), state.keepAllCommits()); } @Override @@ -261,46 +267,48 @@ public final class CheckIndexDialogFactory implements DialogOpener.DialogFactory private class ListenerFunctions { void checkIndex(ActionEvent e) { - ExecutorService executor = Executors.newFixedThreadPool(1, new NamedThreadFactory("check-index-dialog-check")); + ExecutorService executor = + Executors.newFixedThreadPool(1, new NamedThreadFactory("check-index-dialog-check")); - SwingWorker task = new SwingWorker() { + SwingWorker task = + new SwingWorker() { - @Override - protected CheckIndex.Status doInBackground() { - setProgress(0); - statusLbl.setText("Running..."); - indicatorLbl.setVisible(true); - TextAreaPrintStream ps; - try { - ps = new TextAreaPrintStream(logArea); - CheckIndex.Status status = toolsModel.checkIndex(ps); - ps.flush(); - return status; - } catch (Exception e) { - statusLbl.setText(MessageUtils.getLocalizedMessage("message.error.unknown")); - throw e; - } finally { - setProgress(100); - } - } - - @Override - protected void done() { - try { - CheckIndex.Status st = get(); - resultLbl.setText(createResultsMessage(st)); - indicatorLbl.setVisible(false); - statusLbl.setText("Done"); - if (!st.clean) { - repairBtn.setEnabled(true); + @Override + protected CheckIndex.Status doInBackground() { + setProgress(0); + statusLbl.setText("Running..."); + indicatorLbl.setVisible(true); + TextAreaPrintStream ps; + try { + ps = new TextAreaPrintStream(logArea); + CheckIndex.Status status = toolsModel.checkIndex(ps); + ps.flush(); + return status; + } catch (Exception e) { + statusLbl.setText(MessageUtils.getLocalizedMessage("message.error.unknown")); + throw e; + } finally { + setProgress(100); + } } - status = st; - } catch (Exception e) { - log.error("Error checking index", e); - statusLbl.setText(MessageUtils.getLocalizedMessage("message.error.unknown")); - } - } - }; + + @Override + protected void done() { + try { + CheckIndex.Status st = get(); + resultLbl.setText(createResultsMessage(st)); + indicatorLbl.setVisible(false); + statusLbl.setText("Done"); + if (!st.clean) { + repairBtn.setEnabled(true); + } + status = st; + } catch (Exception e) { + log.error("Error checking index", e); + statusLbl.setText(MessageUtils.getLocalizedMessage("message.error.unknown")); + } + } + }; executor.submit(task); executor.shutdown(); @@ -337,44 +345,45 @@ public final class CheckIndexDialogFactory implements DialogOpener.DialogFactory return; } - ExecutorService executor = 
Executors.newFixedThreadPool(1, new NamedThreadFactory("check-index-dialog-repair")); + ExecutorService executor = + Executors.newFixedThreadPool(1, new NamedThreadFactory("check-index-dialog-repair")); - SwingWorker task = new SwingWorker() { + SwingWorker task = + new SwingWorker() { - @Override - protected CheckIndex.Status doInBackground() { - setProgress(0); - statusLbl.setText("Running..."); - indicatorLbl.setVisible(true); - logArea.setText(""); - TextAreaPrintStream ps; - try { - ps = new TextAreaPrintStream(logArea); - toolsModel.repairIndex(status, ps); - statusLbl.setText("Done"); - ps.flush(); - return status; - } catch (Exception e) { - statusLbl.setText(MessageUtils.getLocalizedMessage("message.error.unknown")); - throw e; - } finally { - setProgress(100); - } - } + @Override + protected CheckIndex.Status doInBackground() { + setProgress(0); + statusLbl.setText("Running..."); + indicatorLbl.setVisible(true); + logArea.setText(""); + TextAreaPrintStream ps; + try { + ps = new TextAreaPrintStream(logArea); + toolsModel.repairIndex(status, ps); + statusLbl.setText("Done"); + ps.flush(); + return status; + } catch (Exception e) { + statusLbl.setText(MessageUtils.getLocalizedMessage("message.error.unknown")); + throw e; + } finally { + setProgress(100); + } + } - @Override - protected void done() { - indexHandler.open(lukeState.getIndexPath(), lukeState.getDirImpl()); - logArea.append("Repairing index done."); - resultLbl.setText(""); - indicatorLbl.setVisible(false); - repairBtn.setEnabled(false); - } - }; + @Override + protected void done() { + indexHandler.open(lukeState.getIndexPath(), lukeState.getDirImpl()); + logArea.append("Repairing index done."); + resultLbl.setText(""); + indicatorLbl.setVisible(false); + repairBtn.setEnabled(false); + } + }; executor.submit(task); executor.shutdown(); } } - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/menubar/CreateIndexDialogFactory.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/menubar/CreateIndexDialogFactory.java index 03c6262af7c..e6bdc78ee61 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/menubar/CreateIndexDialogFactory.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/menubar/CreateIndexDialogFactory.java @@ -17,18 +17,6 @@ package org.apache.lucene.luke.app.desktop.components.dialog.menubar; -import javax.swing.BorderFactory; -import javax.swing.BoxLayout; -import javax.swing.JButton; -import javax.swing.JDialog; -import javax.swing.JFileChooser; -import javax.swing.JLabel; -import javax.swing.JOptionPane; -import javax.swing.JPanel; -import javax.swing.JSeparator; -import javax.swing.JTextArea; -import javax.swing.JTextField; -import javax.swing.SwingWorker; import java.awt.BorderLayout; import java.awt.Dialog; import java.awt.Dimension; @@ -47,7 +35,18 @@ import java.nio.file.SimpleFileVisitor; import java.nio.file.attribute.BasicFileAttributes; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; - +import javax.swing.BorderFactory; +import javax.swing.BoxLayout; +import javax.swing.JButton; +import javax.swing.JDialog; +import javax.swing.JFileChooser; +import javax.swing.JLabel; +import javax.swing.JOptionPane; +import javax.swing.JPanel; +import javax.swing.JSeparator; +import javax.swing.JTextArea; +import javax.swing.JTextField; +import javax.swing.SwingWorker; import org.apache.logging.log4j.Logger; import 
org.apache.lucene.luke.app.IndexHandler; import org.apache.lucene.luke.app.desktop.Preferences; @@ -99,14 +98,14 @@ public class CreateIndexDialogFactory implements DialogOpener.DialogFactory { private JDialog dialog; - public synchronized static CreateIndexDialogFactory getInstance() throws IOException { + public static synchronized CreateIndexDialogFactory getInstance() throws IOException { if (instance == null) { instance = new CreateIndexDialogFactory(); } return instance; } - private CreateIndexDialogFactory() throws IOException { + private CreateIndexDialogFactory() throws IOException { this.prefs = PreferencesFactory.getInstance(); this.indexHandler = IndexHandler.getInstance(); initialize(); @@ -117,7 +116,8 @@ public class CreateIndexDialogFactory implements DialogOpener.DialogFactory { locationTF.setText(System.getProperty("user.home")); locationTF.setEditable(false); - browseBtn.setText(FontUtils.elegantIconHtml("n", MessageUtils.getLocalizedMessage("button.browse"))); + browseBtn.setText( + FontUtils.elegantIconHtml("n", MessageUtils.getLocalizedMessage("button.browse"))); browseBtn.setFont(StyleConstants.FONT_BUTTON_LARGE); browseBtn.setPreferredSize(new Dimension(120, 30)); browseBtn.addActionListener(listeners::browseLocationDirectory); @@ -131,7 +131,8 @@ public class CreateIndexDialogFactory implements DialogOpener.DialogFactory { clearBtn.setPreferredSize(new Dimension(70, 30)); clearBtn.addActionListener(listeners::clearDataDir); - dataBrowseBtn.setText(FontUtils.elegantIconHtml("n", MessageUtils.getLocalizedMessage("button.browse"))); + dataBrowseBtn.setText( + FontUtils.elegantIconHtml("n", MessageUtils.getLocalizedMessage("button.browse"))); dataBrowseBtn.setFont(StyleConstants.FONT_BUTTON_LARGE); dataBrowseBtn.setPreferredSize(new Dimension(100, 30)); dataBrowseBtn.addActionListener(listeners::browseDataDirectory); @@ -205,7 +206,8 @@ public class CreateIndexDialogFactory implements DialogOpener.DialogFactory { name.add(nameLbl); description.add(name); - JTextArea descTA1 = new JTextArea(MessageUtils.getLocalizedMessage("createindex.textarea.data_help1")); + JTextArea descTA1 = + new JTextArea(MessageUtils.getLocalizedMessage("createindex.textarea.data_help1")); descTA1.setPreferredSize(new Dimension(550, 20)); descTA1.setBorder(BorderFactory.createEmptyBorder(2, 10, 10, 5)); descTA1.setOpaque(false); @@ -215,11 +217,14 @@ public class CreateIndexDialogFactory implements DialogOpener.DialogFactory { JPanel link = new JPanel(new FlowLayout(FlowLayout.LEADING, 10, 1)); link.setOpaque(false); - JLabel linkLbl = FontUtils.toLinkText(new URLLabel(MessageUtils.getLocalizedMessage("createindex.label.data_link"))); + JLabel linkLbl = + FontUtils.toLinkText( + new URLLabel(MessageUtils.getLocalizedMessage("createindex.label.data_link"))); link.add(linkLbl); description.add(link); - JTextArea descTA2 = new JTextArea(MessageUtils.getLocalizedMessage("createindex.textarea.data_help2")); + JTextArea descTA2 = + new JTextArea(MessageUtils.getLocalizedMessage("createindex.textarea.data_help2")); descTA2.setPreferredSize(new Dimension(550, 50)); descTA2.setBorder(BorderFactory.createEmptyBorder(10, 10, 10, 5)); descTA2.setOpaque(false); @@ -279,66 +284,73 @@ public class CreateIndexDialogFactory implements DialogOpener.DialogFactory { Path path = Paths.get(locationTF.getText(), dirnameTF.getText()); if (Files.exists(path)) { String message = "The directory " + path.toAbsolutePath().toString() + " already exists."; - JOptionPane.showMessageDialog(dialog, message, "Empty index path", 
JOptionPane.ERROR_MESSAGE); + JOptionPane.showMessageDialog( + dialog, message, "Empty index path", JOptionPane.ERROR_MESSAGE); } else { // create new index asynchronously - ExecutorService executor = Executors.newFixedThreadPool(1, new NamedThreadFactory("create-index-dialog")); + ExecutorService executor = + Executors.newFixedThreadPool(1, new NamedThreadFactory("create-index-dialog")); - SwingWorker<Void, Void> task = new SwingWorker<Void, Void>() { + SwingWorker<Void, Void> task = + new SwingWorker<Void, Void>() { - @Override - protected Void doInBackground() throws Exception { - setProgress(0); - indicatorLbl.setVisible(true); - createBtn.setEnabled(false); + @Override + protected Void doInBackground() throws Exception { + setProgress(0); + indicatorLbl.setVisible(true); + createBtn.setEnabled(false); - try { - Directory dir = FSDirectory.open(path); - IndexTools toolsModel = new IndexToolsFactory().newInstance(dir); + try { + Directory dir = FSDirectory.open(path); + IndexTools toolsModel = new IndexToolsFactory().newInstance(dir); - if (dataDirTF.getText().isEmpty()) { - // without sample documents - toolsModel.createNewIndex(); - } else { - // with sample documents - Path dataPath = Paths.get(dataDirTF.getText()); - toolsModel.createNewIndex(dataPath.toAbsolutePath().toString()); - } - - indexHandler.open(path.toAbsolutePath().toString(), null, false, false, false); - prefs.addHistory(path.toAbsolutePath().toString()); - - dirnameTF.setText(""); - closeDialog(); - } catch (Exception ex) { - // cleanup - try { - Files.walkFileTree(path, new SimpleFileVisitor<Path>() { - @Override - public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { - Files.delete(file); - return FileVisitResult.CONTINUE; + if (dataDirTF.getText().isEmpty()) { + // without sample documents + toolsModel.createNewIndex(); + } else { + // with sample documents + Path dataPath = Paths.get(dataDirTF.getText()); + toolsModel.createNewIndex(dataPath.toAbsolutePath().toString()); } - }); - Files.deleteIfExists(path); - } catch (IOException ex2) { + + indexHandler.open(path.toAbsolutePath().toString(), null, false, false, false); + prefs.addHistory(path.toAbsolutePath().toString()); + + dirnameTF.setText(""); + closeDialog(); + } catch (Exception ex) { + // cleanup + try { + Files.walkFileTree( + path, + new SimpleFileVisitor<Path>() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) + throws IOException { + Files.delete(file); + return FileVisitResult.CONTINUE; + } + }); + Files.deleteIfExists(path); + } catch (IOException ex2) { + } + + log.error("Cannot create index", ex); + String message = "See Logs tab or log file for more details."; + JOptionPane.showMessageDialog( + dialog, message, "Cannot create index", JOptionPane.ERROR_MESSAGE); + } finally { + setProgress(100); + } + return null; } - log.error("Cannot create index", ex); - String message = "See Logs tab or log file for more details."; - JOptionPane.showMessageDialog(dialog, message, "Cannot create index", JOptionPane.ERROR_MESSAGE); - } finally { - setProgress(100); - } - return null; - } - - @Override - protected void done() { - indicatorLbl.setVisible(false); - createBtn.setEnabled(true); - } - }; + @Override + protected void done() { + indicatorLbl.setVisible(false); + createBtn.setEnabled(true); + } + }; executor.submit(task); executor.shutdown(); diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/menubar/ExportTermsDialogFactory.java
b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/menubar/ExportTermsDialogFactory.java index c091bea5c47..87377e3bac8 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/menubar/ExportTermsDialogFactory.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/menubar/ExportTermsDialogFactory.java @@ -17,16 +17,6 @@ package org.apache.lucene.luke.app.desktop.components.dialog.menubar; -import javax.swing.BorderFactory; -import javax.swing.BoxLayout; -import javax.swing.JButton; -import javax.swing.JComboBox; -import javax.swing.JDialog; -import javax.swing.JFileChooser; -import javax.swing.JLabel; -import javax.swing.JPanel; -import javax.swing.JTextField; -import javax.swing.SwingWorker; import java.awt.Color; import java.awt.Dialog; import java.awt.Dimension; @@ -42,7 +32,16 @@ import java.util.Arrays; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.stream.Stream; - +import javax.swing.BorderFactory; +import javax.swing.BoxLayout; +import javax.swing.JButton; +import javax.swing.JComboBox; +import javax.swing.JDialog; +import javax.swing.JFileChooser; +import javax.swing.JLabel; +import javax.swing.JPanel; +import javax.swing.JTextField; +import javax.swing.SwingWorker; import org.apache.logging.log4j.Logger; import org.apache.lucene.luke.app.IndexHandler; import org.apache.lucene.luke.app.IndexObserver; @@ -61,9 +60,7 @@ import org.apache.lucene.luke.util.LoggerFactory; import org.apache.lucene.util.NamedThreadFactory; import org.apache.lucene.util.SuppressForbidden; -/** - * Factory of export terms dialog - */ +/** Factory of export terms dialog */ public final class ExportTermsDialogFactory implements DialogOpener.DialogFactory { private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); @@ -94,7 +91,7 @@ public final class ExportTermsDialogFactor private String selectedDelimiter; - public synchronized static ExportTermsDialogFactory getInstance() throws IOException { + public static synchronized ExportTermsDialogFactory getInstance() throws IOException { if (instance == null) { instance = new ExportTermsDialogFactory(); } @@ -105,8 +102,9 @@ public final class ExportTermsDialogFactor this.prefs = PreferencesFactory.getInstance(); this.indexHandler = IndexHandler.getInstance(); indexHandler.addObserver(new Observer()); - Stream.of(Delimiter.values()).forEachOrdered(delimiterVal -> delimiterCombo.addItem(delimiterVal.getDescription())); - delimiterCombo.setSelectedItem(Delimiter.COMMA.getDescription());//Set default delimiter + Stream.of(Delimiter.values()) + .forEachOrdered(delimiterVal -> delimiterCombo.addItem(delimiterVal.getDescription())); + delimiterCombo.setSelectedItem(Delimiter.COMMA.getDescription()); // Set default delimiter } @Override @@ -230,57 +228,68 @@ public final class ExportTermsDialogFactor } void export(ActionEvent e) { - ExecutorService executor = Executors.newSingleThreadExecutor(new NamedThreadFactory("export-terms-dialog")); + ExecutorService executor = + Executors.newSingleThreadExecutor(new NamedThreadFactory("export-terms-dialog")); - SwingWorker<Void, Void> task = new SwingWorker<Void, Void>() { + SwingWorker<Void, Void> task = + new SwingWorker<Void, Void>() { - String filename; + String filename; - @Override - protected Void doInBackground() { - setProgress(0); - statusLbl.setText("Exporting..."); -
indicatorLbl.setVisible(true); - String field = (String) fieldCombo.getSelectedItem(); - selectedDelimiter = Delimiter.getSelectedDelimiterValue((String) delimiterCombo.getSelectedItem()); + @Override + protected Void doInBackground() { + setProgress(0); + statusLbl.setText("Exporting..."); + indicatorLbl.setVisible(true); + String field = (String) fieldCombo.getSelectedItem(); + selectedDelimiter = + Delimiter.getSelectedDelimiterValue((String) delimiterCombo.getSelectedItem()); - String directory = destDir.getText(); - try { - filename = toolsModel.exportTerms(directory, field, selectedDelimiter); - } catch (LukeException e) { - log.error("Error while exporting terms from field {}", field, e); - statusLbl.setText(MessageUtils.getLocalizedMessage("export.terms.label.error", e.getMessage())); - } catch (Exception e) { - log.error("Error while exporting terms from field {}", field, e); - statusLbl.setText(MessageUtils.getLocalizedMessage("message.error.unknown")); - throw e; - } finally { - setProgress(100); - } - return null; - } + String directory = destDir.getText(); + try { + filename = toolsModel.exportTerms(directory, field, selectedDelimiter); + } catch (LukeException e) { + log.error("Error while exporting terms from field {}", field, e); + statusLbl.setText( + MessageUtils.getLocalizedMessage("export.terms.label.error", e.getMessage())); + } catch (Exception e) { + log.error("Error while exporting terms from field {}", field, e); + statusLbl.setText(MessageUtils.getLocalizedMessage("message.error.unknown")); + throw e; + } finally { + setProgress(100); + } + return null; + } - @Override - protected void done() { - indicatorLbl.setVisible(false); - if (filename != null) { - statusLbl.setText(MessageUtils.getLocalizedMessage("export.terms.label.success", filename, "[term]" + selectedDelimiter + "[doc frequency]")); - } - } - }; + @Override + protected void done() { + indicatorLbl.setVisible(false); + if (filename != null) { + statusLbl.setText( + MessageUtils.getLocalizedMessage( + "export.terms.label.success", + filename, + "[term]" + selectedDelimiter + "[doc frequency]")); + } + } + }; executor.submit(task); executor.shutdown(); } - } private class Observer implements IndexObserver { @Override public void openIndex(LukeState state) { - toolsModel = indexToolsFactory.newInstance(state.getIndexReader(), state.useCompound(), state.keepAllCommits()); - IndexUtils.getFieldNames(state.getIndexReader()).stream().sorted().forEach(fieldCombo::addItem); + toolsModel = + indexToolsFactory.newInstance( + state.getIndexReader(), state.useCompound(), state.keepAllCommits()); + IndexUtils.getFieldNames(state.getIndexReader()).stream() + .sorted() + .forEach(fieldCombo::addItem); } @Override @@ -288,14 +297,13 @@ public final class ExportTermsDialogFactory implements DialogOpener.DialogFactor fieldCombo.removeAllItems(); toolsModel = null; } - } - /** - * Delimiters that can be selected - */ + /** Delimiters that can be selected */ private enum Delimiter { - COMMA("Comma", ","), WHITESPACE("Whitespace", " "), TAB("Tab", "\t"); + COMMA("Comma", ","), + WHITESPACE("Whitespace", " "), + TAB("Tab", "\t"); private final String description; private final String separator; @@ -321,5 +329,4 @@ public final class ExportTermsDialogFactory implements DialogOpener.DialogFactor .getSeparator(); } } - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/menubar/OpenIndexDialogFactory.java 
b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/menubar/OpenIndexDialogFactory.java index 7dfe3c8e26d..8ed2bd825af 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/menubar/OpenIndexDialogFactory.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/menubar/OpenIndexDialogFactory.java @@ -17,19 +17,6 @@ package org.apache.lucene.luke.app.desktop.components.dialog.menubar; -import javax.swing.BorderFactory; -import javax.swing.BoxLayout; -import javax.swing.ButtonGroup; -import javax.swing.JButton; -import javax.swing.JCheckBox; -import javax.swing.JComboBox; -import javax.swing.JDialog; -import javax.swing.JFileChooser; -import javax.swing.JLabel; -import javax.swing.JOptionPane; -import javax.swing.JPanel; -import javax.swing.JRadioButton; -import javax.swing.JSeparator; import java.awt.Dialog; import java.awt.Dimension; import java.awt.FlowLayout; @@ -48,7 +35,19 @@ import java.util.Set; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.stream.Collectors; - +import javax.swing.BorderFactory; +import javax.swing.BoxLayout; +import javax.swing.ButtonGroup; +import javax.swing.JButton; +import javax.swing.JCheckBox; +import javax.swing.JComboBox; +import javax.swing.JDialog; +import javax.swing.JFileChooser; +import javax.swing.JLabel; +import javax.swing.JOptionPane; +import javax.swing.JPanel; +import javax.swing.JRadioButton; +import javax.swing.JSeparator; import org.apache.logging.log4j.Logger; import org.apache.lucene.luke.app.DirectoryHandler; import org.apache.lucene.luke.app.IndexHandler; @@ -98,7 +97,7 @@ public final class OpenIndexDialogFactory implements DialogOpener.DialogFactory private JDialog dialog; - public synchronized static OpenIndexDialogFactory getInstance() throws IOException { + public static synchronized OpenIndexDialogFactory getInstance() throws IOException { if (instance == null) { instance = new OpenIndexDialogFactory(); } @@ -115,7 +114,8 @@ public final class OpenIndexDialogFactory implements DialogOpener.DialogFactory private void initialize() { idxPathCombo.setPreferredSize(new Dimension(360, 40)); - browseBtn.setText(FontUtils.elegantIconHtml("n", MessageUtils.getLocalizedMessage("button.browse"))); + browseBtn.setText( + FontUtils.elegantIconHtml("n", MessageUtils.getLocalizedMessage("button.browse"))); browseBtn.setFont(StyleConstants.FONT_BUTTON_LARGE); browseBtn.setPreferredSize(new Dimension(120, 40)); browseBtn.addActionListener(listeners::browseDirectory); @@ -126,12 +126,14 @@ public final class OpenIndexDialogFactory implements DialogOpener.DialogFactory readOnlyCB.setOpaque(false); // Scanning all Directory types will take time... 
- ExecutorService executorService = Executors.newFixedThreadPool(1, new NamedThreadFactory("load-directory-types")); - executorService.execute(() -> { - for (String clazzName : supportedDirImpls()) { - dirImplCombo.addItem(clazzName); - } - }); + ExecutorService executorService = + Executors.newFixedThreadPool(1, new NamedThreadFactory("load-directory-types")); + executorService.execute( + () -> { + for (String clazzName : supportedDirImpls()) { + dirImplCombo.addItem(clazzName); + } + }); executorService.shutdown(); dirImplCombo.setPreferredSize(new Dimension(350, 30)); dirImplCombo.setSelectedItem(prefs.getDirImpl()); @@ -144,14 +146,14 @@ public final class OpenIndexDialogFactory implements DialogOpener.DialogFactory useCompoundCB.setSelected(prefs.isUseCompound()); useCompoundCB.setOpaque(false); - keepLastCommitRB.setText(MessageUtils.getLocalizedMessage("openindex.radio.keep_only_last_commit")); + keepLastCommitRB.setText( + MessageUtils.getLocalizedMessage("openindex.radio.keep_only_last_commit")); keepLastCommitRB.setSelected(!prefs.isKeepAllCommits()); keepLastCommitRB.setOpaque(false); keepAllCommitsRB.setText(MessageUtils.getLocalizedMessage("openindex.radio.keep_all_commits")); keepAllCommitsRB.setSelected(prefs.isKeepAllCommits()); keepAllCommitsRB.setOpaque(false); - } @Override @@ -334,24 +336,32 @@ public final class OpenIndexDialogFactory implements DialogOpener.DialogFactory String selectedPath = (String) idxPathCombo.getSelectedItem(); String dirImplClazz = (String) dirImplCombo.getSelectedItem(); if (selectedPath == null || selectedPath.length() == 0) { - String message = MessageUtils.getLocalizedMessage("openindex.message.index_path_not_selected"); - JOptionPane.showMessageDialog(dialog, message, "Empty index path", JOptionPane.ERROR_MESSAGE); + String message = + MessageUtils.getLocalizedMessage("openindex.message.index_path_not_selected"); + JOptionPane.showMessageDialog( + dialog, message, "Empty index path", JOptionPane.ERROR_MESSAGE); } else if (isNoReader()) { directoryHandler.open(selectedPath, dirImplClazz); addHistory(selectedPath); } else { - indexHandler.open(selectedPath, dirImplClazz, isReadOnly(), useCompound(), keepAllCommits()); + indexHandler.open( + selectedPath, dirImplClazz, isReadOnly(), useCompound(), keepAllCommits()); addHistory(selectedPath); } prefs.setIndexOpenerPrefs( - isReadOnly(), dirImplClazz, - isNoReader(), useCompound(), keepAllCommits()); + isReadOnly(), dirImplClazz, isNoReader(), useCompound(), keepAllCommits()); closeDialog(); } catch (LukeException ex) { - String message = ex.getMessage() + System.lineSeparator() + "See Logs tab or log file for more details."; - JOptionPane.showMessageDialog(dialog, message, "Invalid index path", JOptionPane.ERROR_MESSAGE); + String message = + ex.getMessage() + System.lineSeparator() + "See Logs tab or log file for more details."; + JOptionPane.showMessageDialog( + dialog, message, "Invalid index path", JOptionPane.ERROR_MESSAGE); } catch (Throwable cause) { - JOptionPane.showMessageDialog(dialog, MessageUtils.getLocalizedMessage("message.error.unknown"), "Unknown Error", JOptionPane.ERROR_MESSAGE); + JOptionPane.showMessageDialog( + dialog, + MessageUtils.getLocalizedMessage("message.error.unknown"), + "Unknown Error", + JOptionPane.ERROR_MESSAGE); log.error("Error opening index or directory", cause); } } @@ -379,7 +389,5 @@ public final class OpenIndexDialogFactory implements DialogOpener.DialogFactory private void addHistory(String indexPath) throws IOException { prefs.addHistory(indexPath); 
}
-
 }
-
 }
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/menubar/OptimizeIndexDialogFactory.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/menubar/OptimizeIndexDialogFactory.java
index 947525c8278..238961321b6 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/menubar/OptimizeIndexDialogFactory.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/menubar/OptimizeIndexDialogFactory.java
@@ -17,6 +17,18 @@
 package org.apache.lucene.luke.app.desktop.components.dialog.menubar;
 
+import java.awt.BorderLayout;
+import java.awt.Dialog;
+import java.awt.Dimension;
+import java.awt.FlowLayout;
+import java.awt.GridLayout;
+import java.awt.Insets;
+import java.awt.Window;
+import java.awt.event.ActionEvent;
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
 import javax.swing.BorderFactory;
 import javax.swing.BoxLayout;
 import javax.swing.JButton;
@@ -30,19 +42,6 @@ import javax.swing.JSpinner;
 import javax.swing.JTextArea;
 import javax.swing.SpinnerNumberModel;
 import javax.swing.SwingWorker;
-import java.awt.BorderLayout;
-import java.awt.Dialog;
-import java.awt.Dimension;
-import java.awt.FlowLayout;
-import java.awt.GridLayout;
-import java.awt.Insets;
-import java.awt.Window;
-import java.awt.event.ActionEvent;
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-
 import org.apache.logging.log4j.Logger;
 import org.apache.lucene.luke.app.IndexHandler;
 import org.apache.lucene.luke.app.IndexObserver;
@@ -89,7 +88,7 @@ public final class OptimizeIndexDialogFact
 
   private IndexTools toolsModel;
 
-  public synchronized static OptimizeIndexDialogFactory getInstance() throws IOException {
+  public static synchronized OptimizeIndexDialogFactory getInstance() throws IOException {
     if (instance == null) {
       instance = new OptimizeIndexDialogFactory();
     }
@@ -165,7 +164,10 @@ public final class OptimizeIndexDialogFact
     JPanel execButtons = new JPanel(new FlowLayout(FlowLayout.TRAILING));
     execButtons.setOpaque(false);
-    JButton optimizeBtn = new JButton(FontUtils.elegantIconHtml("", MessageUtils.getLocalizedMessage("optimize.button.optimize")));
+    JButton optimizeBtn =
+        new JButton(
+            FontUtils.elegantIconHtml(
+                "", MessageUtils.getLocalizedMessage("optimize.button.optimize")));
     optimizeBtn.setFont(StyleConstants.FONT_BUTTON_LARGE);
     optimizeBtn.setMargin(new Insets(3, 0, 3, 0));
     optimizeBtn.addActionListener(listeners::optimize);
@@ -206,55 +208,56 @@ public final class OptimizeIndexDialogFact
   private class ListenerFunctions {
 
     void optimize(ActionEvent e) {
-      ExecutorService executor = Executors.newFixedThreadPool(1, new NamedThreadFactory("optimize-index-dialog"));
+      ExecutorService executor =
+          Executors.newFixedThreadPool(1, new NamedThreadFactory("optimize-index-dialog"));
 
-      SwingWorker<Void, Void> task = new SwingWorker<Void, Void>() {
+      SwingWorker<Void, Void> task =
+          new SwingWorker<Void, Void>() {
 
-        @Override
-        protected Void doInBackground() {
-          setProgress(0);
-          statusLbl.setText("Running...");
-          indicatorLbl.setVisible(true);
-          TextAreaPrintStream ps;
-          try {
-            ps = new TextAreaPrintStream(logArea);
-            toolsModel.optimize(expungeCB.isSelected(), (int) maxSegSpnr.getValue(), ps);
-            ps.flush();
- } catch (Exception e) { - statusLbl.setText(MessageUtils.getLocalizedMessage("message.error.unknown")); - throw e; - } finally { - setProgress(100); - } - return null; - } + @Override + protected Void doInBackground() { + setProgress(0); + statusLbl.setText("Running..."); + indicatorLbl.setVisible(true); + TextAreaPrintStream ps; + try { + ps = new TextAreaPrintStream(logArea); + toolsModel.optimize(expungeCB.isSelected(), (int) maxSegSpnr.getValue(), ps); + ps.flush(); + } catch (Exception e) { + statusLbl.setText(MessageUtils.getLocalizedMessage("message.error.unknown")); + throw e; + } finally { + setProgress(100); + } + return null; + } - @Override - protected void done() { - indicatorLbl.setVisible(false); - statusLbl.setText("Done"); - indexHandler.reOpen(); - } - }; + @Override + protected void done() { + indicatorLbl.setVisible(false); + statusLbl.setText("Done"); + indexHandler.reOpen(); + } + }; executor.submit(task); executor.shutdown(); } - } private class Observer implements IndexObserver { @Override public void openIndex(LukeState state) { - toolsModel = indexToolsFactory.newInstance(state.getIndexReader(), state.useCompound(), state.keepAllCommits()); + toolsModel = + indexToolsFactory.newInstance( + state.getIndexReader(), state.useCompound(), state.keepAllCommits()); } @Override public void closeIndex() { toolsModel = null; } - } - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/menubar/package-info.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/menubar/package-info.java index 72a2d3fc7d5..f108adef33d 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/menubar/package-info.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/menubar/package-info.java @@ -16,4 +16,4 @@ */ /** Dialogs used in the menu bar */ -package org.apache.lucene.luke.app.desktop.components.dialog.menubar; \ No newline at end of file +package org.apache.lucene.luke.app.desktop.components.dialog.menubar; diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/package-info.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/package-info.java index 44ad40b04fd..06ffb2fbf48 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/package-info.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/package-info.java @@ -16,4 +16,4 @@ */ /** Dialogs */ -package org.apache.lucene.luke.app.desktop.components.dialog; \ No newline at end of file +package org.apache.lucene.luke.app.desktop.components.dialog; diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/search/ExplainDialogFactory.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/search/ExplainDialogFactory.java index 66d558d2866..470d85e0a5d 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/search/ExplainDialogFactory.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/search/ExplainDialogFactory.java @@ -17,15 +17,6 @@ package org.apache.lucene.luke.app.desktop.components.dialog.search; -import javax.swing.BorderFactory; -import javax.swing.JButton; -import javax.swing.JDialog; -import javax.swing.JLabel; -import javax.swing.JPanel; -import javax.swing.JScrollPane; -import javax.swing.JTree; -import javax.swing.tree.DefaultMutableTreeNode; -import 
javax.swing.tree.DefaultTreeCellRenderer; import java.awt.BorderLayout; import java.awt.Dialog; import java.awt.Dimension; @@ -39,7 +30,15 @@ import java.awt.datatransfer.StringSelection; import java.io.IOException; import java.util.Objects; import java.util.stream.IntStream; - +import javax.swing.BorderFactory; +import javax.swing.JButton; +import javax.swing.JDialog; +import javax.swing.JLabel; +import javax.swing.JPanel; +import javax.swing.JScrollPane; +import javax.swing.JTree; +import javax.swing.tree.DefaultMutableTreeNode; +import javax.swing.tree.DefaultTreeCellRenderer; import org.apache.lucene.luke.app.desktop.Preferences; import org.apache.lucene.luke.app.desktop.PreferencesFactory; import org.apache.lucene.luke.app.desktop.util.DialogOpener; @@ -60,11 +59,11 @@ public final class ExplainDialogFactory implements DialogOpener.DialogFactory { private Explanation explanation; - public synchronized static ExplainDialogFactory getInstance() throws IOException { + public static synchronized ExplainDialogFactory getInstance() throws IOException { if (instance == null) { instance = new ExplainDialogFactory(); } - return instance; + return instance; } private ExplainDialogFactory() throws IOException { @@ -112,13 +111,16 @@ public final class ExplainDialogFactory implements DialogOpener.DialogFactory { JPanel footer = new JPanel(new FlowLayout(FlowLayout.TRAILING, 5, 5)); footer.setOpaque(false); - JButton copyBtn = new JButton(FontUtils.elegantIconHtml("", MessageUtils.getLocalizedMessage("button.copy"))); + JButton copyBtn = + new JButton( + FontUtils.elegantIconHtml("", MessageUtils.getLocalizedMessage("button.copy"))); copyBtn.setMargin(new Insets(3, 3, 3, 3)); - copyBtn.addActionListener(e -> { - Clipboard clipboard = Toolkit.getDefaultToolkit().getSystemClipboard(); - StringSelection selection = new StringSelection(explanationToString()); - clipboard.setContents(selection, null); - }); + copyBtn.addActionListener( + e -> { + Clipboard clipboard = Toolkit.getDefaultToolkit().getSystemClipboard(); + StringSelection selection = new StringSelection(explanationToString()); + clipboard.setContents(selection, null); + }); footer.add(copyBtn); JButton closeBtn = new JButton(MessageUtils.getLocalizedMessage("button.close")); diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/search/package-info.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/search/package-info.java index 7af5fb1f80b..ce6d3b1656e 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/search/package-info.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/search/package-info.java @@ -16,4 +16,4 @@ */ /** Dialogs used in the Search tab */ -package org.apache.lucene.luke.app.desktop.components.dialog.search; \ No newline at end of file +package org.apache.lucene.luke.app.desktop.components.dialog.search; diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/CustomAnalyzerPanelOperator.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/CustomAnalyzerPanelOperator.java index 54451beaae2..abcdb1b4759 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/CustomAnalyzerPanelOperator.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/CustomAnalyzerPanelOperator.java @@ -19,7 +19,6 @@ package 
org.apache.lucene.luke.app.desktop.components.fragments.analysis;
 
 import java.util.List;
 import java.util.Map;
-
 import org.apache.lucene.luke.app.desktop.components.ComponentOperatorRegistry;
 import org.apache.lucene.luke.models.analysis.Analysis;
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/CustomAnalyzerPanelProvider.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/CustomAnalyzerPanelProvider.java
index 42c7c4dce33..54b6b69eb48 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/CustomAnalyzerPanelProvider.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/CustomAnalyzerPanelProvider.java
@@ -17,17 +17,6 @@
 package org.apache.lucene.luke.app.desktop.components.fragments.analysis;
 
-import javax.swing.BorderFactory;
-import javax.swing.DefaultComboBoxModel;
-import javax.swing.JButton;
-import javax.swing.JComboBox;
-import javax.swing.JFileChooser;
-import javax.swing.JLabel;
-import javax.swing.JList;
-import javax.swing.JPanel;
-import javax.swing.JScrollPane;
-import javax.swing.JSeparator;
-import javax.swing.JTextField;
 import java.awt.BorderLayout;
 import java.awt.Color;
 import java.awt.Dimension;
@@ -52,7 +41,17 @@ import java.util.Map;
 import java.util.Objects;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
-
+import javax.swing.BorderFactory;
+import javax.swing.DefaultComboBoxModel;
+import javax.swing.JButton;
+import javax.swing.JComboBox;
+import javax.swing.JFileChooser;
+import javax.swing.JLabel;
+import javax.swing.JList;
+import javax.swing.JPanel;
+import javax.swing.JScrollPane;
+import javax.swing.JSeparator;
+import javax.swing.JTextField;
 import org.apache.lucene.luke.app.desktop.MessageBroker;
 import org.apache.lucene.luke.app.desktop.components.AnalysisTabOperator;
 import org.apache.lucene.luke.app.desktop.components.ComponentOperatorRegistry;
@@ -91,7 +90,7 @@ public final class CustomAnalyzerPanelProvider implements CustomAnalyzerPanelOpe
 
   private final JLabel loadJarLbl = new JLabel();
 
-  private final JList<String> selectedCfList = new JList<>(new String[]{});
+  private final JList<String> selectedCfList = new JList<>(new String[] {});
 
   private final JButton cfEditBtn = new JButton();
 
@@ -103,7 +102,7 @@ public final class CustomAnalyzerPanelProvider implements CustomAnalyzerPanelOpe
 
   private final JComboBox<String> tokFactoryCombo = new JComboBox<>();
 
-  private final JList<String> selectedTfList = new JList<>(new String[]{});
+  private final JList<String> selectedTfList = new JList<>(new String[] {});
 
   private final JButton tfEditBtn = new JButton();
 
@@ -156,23 +155,28 @@ public final class CustomAnalyzerPanelProvider implements CustomAnalyzerPanelOpe
     confDirTF.setColumns(30);
     confDirTF.setPreferredSize(new Dimension(200, 30));
     panel.add(confDirTF);
-    confDirBtn.setText(FontUtils.elegantIconHtml("n", MessageUtils.getLocalizedMessage("analysis.button.browse")));
+    confDirBtn.setText(
+        FontUtils.elegantIconHtml(
+            "n", MessageUtils.getLocalizedMessage("analysis.button.browse")));
     confDirBtn.setFont(StyleConstants.FONT_BUTTON_LARGE);
     confDirBtn.setMargin(new Insets(3, 3, 3, 3));
     confDirBtn.addActionListener(listeners::chooseConfigDir);
     panel.add(confDirBtn);
 
-    buildBtn.setText(FontUtils.elegantIconHtml("", MessageUtils.getLocalizedMessage("analysis.button.build_analyzser")));
+    buildBtn.setText(
+        FontUtils.elegantIconHtml(
+            "", 
MessageUtils.getLocalizedMessage("analysis.button.build_analyzser"))); buildBtn.setFont(StyleConstants.FONT_BUTTON_LARGE); buildBtn.setMargin(new Insets(3, 3, 3, 3)); buildBtn.addActionListener(listeners::buildAnalyzer); panel.add(buildBtn); loadJarLbl.setText(MessageUtils.getLocalizedMessage("analysis.hyperlink.load_jars")); - loadJarLbl.addMouseListener(new MouseAdapter() { - @Override - public void mouseClicked(MouseEvent e) { - listeners.loadExternalJars(e); - } - }); + loadJarLbl.addMouseListener( + new MouseAdapter() { + @Override + public void mouseClicked(MouseEvent e) { + listeners.loadExternalJars(e); + } + }); panel.add(FontUtils.toLinkText(loadJarLbl)); return panel; @@ -202,7 +206,8 @@ public final class CustomAnalyzerPanelProvider implements CustomAnalyzerPanelOpe sepc.gridwidth = GridBagConstraints.REMAINDER; // char filters - JLabel cfLbl = new JLabel(MessageUtils.getLocalizedMessage("analysis_custom.label.charfilters")); + JLabel cfLbl = + new JLabel(MessageUtils.getLocalizedMessage("analysis_custom.label.charfilters")); cfLbl.setBorder(BorderFactory.createEmptyBorder(3, 10, 3, 3)); c.gridx = 0; c.gridy = 0; @@ -234,7 +239,9 @@ public final class CustomAnalyzerPanelProvider implements CustomAnalyzerPanelOpe c.anchor = GridBagConstraints.LINE_END; panel.add(selectedPanel, c); - cfEditBtn.setText(FontUtils.elegantIconHtml("j", MessageUtils.getLocalizedMessage("analysis_custom.label.edit"))); + cfEditBtn.setText( + FontUtils.elegantIconHtml( + "j", MessageUtils.getLocalizedMessage("analysis_custom.label.edit"))); cfEditBtn.setMargin(new Insets(2, 4, 2, 4)); cfEditBtn.setEnabled(false); cfEditBtn.addActionListener(listeners::editCharFilters); @@ -248,7 +255,10 @@ public final class CustomAnalyzerPanelProvider implements CustomAnalyzerPanelOpe c.anchor = GridBagConstraints.CENTER; panel.add(cfEditBtn, c); - JLabel cfAddLabel = new JLabel(FontUtils.elegantIconHtml("L", MessageUtils.getLocalizedMessage("analysis_custom.label.add"))); + JLabel cfAddLabel = + new JLabel( + FontUtils.elegantIconHtml( + "L", MessageUtils.getLocalizedMessage("analysis_custom.label.add"))); cfAddLabel.setHorizontalAlignment(JLabel.LEFT); c.fill = GridBagConstraints.HORIZONTAL; c.gridx = 1; @@ -276,7 +286,8 @@ public final class CustomAnalyzerPanelProvider implements CustomAnalyzerPanelOpe panel.add(new JSeparator(JSeparator.HORIZONTAL), sepc); // tokenizer - JLabel tokLabel = new JLabel(MessageUtils.getLocalizedMessage("analysis_custom.label.tokenizer")); + JLabel tokLabel = + new JLabel(MessageUtils.getLocalizedMessage("analysis_custom.label.tokenizer")); tokLabel.setBorder(BorderFactory.createEmptyBorder(3, 10, 3, 3)); c.gridx = 0; c.gridy = 4; @@ -310,7 +321,9 @@ public final class CustomAnalyzerPanelProvider implements CustomAnalyzerPanelOpe c.anchor = GridBagConstraints.LINE_END; panel.add(selectedTokTF, c); - tokEditBtn.setText(FontUtils.elegantIconHtml("j", MessageUtils.getLocalizedMessage("analysis_custom.label.edit"))); + tokEditBtn.setText( + FontUtils.elegantIconHtml( + "j", MessageUtils.getLocalizedMessage("analysis_custom.label.edit"))); tokEditBtn.setMargin(new Insets(2, 4, 2, 4)); tokEditBtn.addActionListener(listeners::editTokenizer); c.fill = GridBagConstraints.NONE; @@ -323,7 +336,10 @@ public final class CustomAnalyzerPanelProvider implements CustomAnalyzerPanelOpe c.anchor = GridBagConstraints.CENTER; panel.add(tokEditBtn, c); - JLabel setTokLabel = new JLabel(FontUtils.elegantIconHtml("", MessageUtils.getLocalizedMessage("analysis_custom.label.set"))); + JLabel setTokLabel = + new 
JLabel(
+            FontUtils.elegantIconHtml(
+                "", MessageUtils.getLocalizedMessage("analysis_custom.label.set")));
     setTokLabel.setHorizontalAlignment(JLabel.LEFT);
     c.fill = GridBagConstraints.HORIZONTAL;
     c.gridx = 1;
@@ -351,7 +367,8 @@ public final class CustomAnalyzerPanelProvider implements CustomAnalyzerPanelOpe
     panel.add(new JSeparator(JSeparator.HORIZONTAL), sepc);
 
     // token filters
-    JLabel tfLbl = new JLabel(MessageUtils.getLocalizedMessage("analysis_custom.label.tokenfilters"));
+    JLabel tfLbl =
+        new JLabel(MessageUtils.getLocalizedMessage("analysis_custom.label.tokenfilters"));
     tfLbl.setBorder(BorderFactory.createEmptyBorder(3, 10, 3, 3));
     c.gridx = 0;
     c.gridy = 8;
@@ -383,7 +400,9 @@ public final class CustomAnalyzerPanelProvider implements CustomAnalyzerPanelOpe
     c.anchor = GridBagConstraints.LINE_END;
     panel.add(selectedTfPanel, c);
 
-    tfEditBtn.setText(FontUtils.elegantIconHtml("j", MessageUtils.getLocalizedMessage("analysis_custom.label.edit")));
+    tfEditBtn.setText(
+        FontUtils.elegantIconHtml(
+            "j", MessageUtils.getLocalizedMessage("analysis_custom.label.edit")));
     tfEditBtn.setMargin(new Insets(2, 4, 2, 4));
     tfEditBtn.setEnabled(false);
     tfEditBtn.addActionListener(listeners::editTokenFilters);
@@ -397,7 +416,10 @@ public final class CustomAnalyzerPanelProvider implements CustomAnalyzerPanelOpe
     c.anchor = GridBagConstraints.CENTER;
     panel.add(tfEditBtn, c);
 
-    JLabel tfAddLabel = new JLabel(FontUtils.elegantIconHtml("L", MessageUtils.getLocalizedMessage("analysis_custom.label.add")));
+    JLabel tfAddLabel =
+        new JLabel(
+            FontUtils.elegantIconHtml(
+                "L", MessageUtils.getLocalizedMessage("analysis_custom.label.add")));
     tfAddLabel.setHorizontalAlignment(JLabel.LEFT);
     c.fill = GridBagConstraints.HORIZONTAL;
     c.gridx = 1;
@@ -442,15 +464,15 @@ public final class CustomAnalyzerPanelProvider implements CustomAnalyzerPanelOpe
     int ret = fileChooser.showOpenDialog(containerPanel);
     if (ret == JFileChooser.APPROVE_OPTION) {
       File[] files = fileChooser.getSelectedFiles();
-      analysisModel.addExternalJars(Arrays.stream(files).map(File::getAbsolutePath).collect(Collectors.toList()));
-      operatorRegistry.get(CustomAnalyzerPanelOperator.class).ifPresent(operator ->
-          operator.resetAnalysisComponents()
-      );
+      analysisModel.addExternalJars(
+          Arrays.stream(files).map(File::getAbsolutePath).collect(Collectors.toList()));
+      operatorRegistry
+          .get(CustomAnalyzerPanelOperator.class)
+          .ifPresent(operator -> operator.resetAnalysisComponents());
       messageBroker.showStatusMessage("External jars were added.");
     }
   }
-
   private void buildAnalyzer() {
     List<String> charFilters = ListUtils.getAllItems(selectedCfList);
     assert charFilters.size() == cfParamsList.size();
@@ -461,24 +483,26 @@ public final class CustomAnalyzerPanelProvider implements CustomAnalyzerPanelOpe
     String tokenizerName = selectedTokTF.getText();
     CustomAnalyzerConfig.Builder builder =
         new CustomAnalyzerConfig.Builder(tokenizerName, tokParams).configDir(confDirTF.getText());
-    IntStream.range(0, charFilters.size()).forEach(i ->
-        builder.addCharFilterConfig(charFilters.get(i), cfParamsList.get(i))
-    );
-    IntStream.range(0, tokenFilters.size()).forEach(i ->
-        builder.addTokenFilterConfig(tokenFilters.get(i), tfParamsList.get(i))
-    );
+    IntStream.range(0, charFilters.size())
+        .forEach(i -> builder.addCharFilterConfig(charFilters.get(i), cfParamsList.get(i)));
+    IntStream.range(0, tokenFilters.size())
+        .forEach(i -> builder.addTokenFilterConfig(tokenFilters.get(i), tfParamsList.get(i)));
     CustomAnalyzerConfig config = builder.build();
-
operatorRegistry.get(AnalysisTabOperator.class).ifPresent(operator -> { - operator.setAnalyzerByCustomConfiguration(config); - messageBroker.showStatusMessage(MessageUtils.getLocalizedMessage("analysis.message.build_success")); - buildBtn.setEnabled(false); - }); - + operatorRegistry + .get(AnalysisTabOperator.class) + .ifPresent( + operator -> { + operator.setAnalyzerByCustomConfiguration(config); + messageBroker.showStatusMessage( + MessageUtils.getLocalizedMessage("analysis.message.build_success")); + buildBtn.setEnabled(false); + }); } private void addCharFilter() { - if (Objects.isNull(cfFactoryCombo.getSelectedItem()) || cfFactoryCombo.getSelectedItem() == "") { + if (Objects.isNull(cfFactoryCombo.getSelectedItem()) + || cfFactoryCombo.getSelectedItem() == "") { return; } @@ -490,8 +514,12 @@ public final class CustomAnalyzerPanelProvider implements CustomAnalyzerPanelOpe assert selectedCfList.getModel().getSize() == cfParamsList.size(); - showEditParamsDialog(MessageUtils.getLocalizedMessage("analysis.dialog.title.char_filter_params"), - EditParamsMode.CHARFILTER, targetIndex, selectedItem, cfParamsList.get(cfParamsList.size() - 1), + showEditParamsDialog( + MessageUtils.getLocalizedMessage("analysis.dialog.title.char_filter_params"), + EditParamsMode.CHARFILTER, + targetIndex, + selectedItem, + cfParamsList.get(cfParamsList.size() - 1), () -> { selectedCfList.setModel(new DefaultComboBoxModel<>(updatedList.toArray(new String[0]))); cfFactoryCombo.setSelectedItem(""); @@ -501,13 +529,18 @@ public final class CustomAnalyzerPanelProvider implements CustomAnalyzerPanelOpe } private void setTokenizer() { - if (Objects.isNull(tokFactoryCombo.getSelectedItem()) || tokFactoryCombo.getSelectedItem() == "") { + if (Objects.isNull(tokFactoryCombo.getSelectedItem()) + || tokFactoryCombo.getSelectedItem() == "") { return; } String selectedItem = (String) tokFactoryCombo.getSelectedItem(); - showEditParamsDialog(MessageUtils.getLocalizedMessage("analysis.dialog.title.tokenizer_params"), - EditParamsMode.TOKENIZER, -1, selectedItem, Collections.emptyMap(), + showEditParamsDialog( + MessageUtils.getLocalizedMessage("analysis.dialog.title.tokenizer_params"), + EditParamsMode.TOKENIZER, + -1, + selectedItem, + Collections.emptyMap(), () -> { selectedTokTF.setText(selectedItem); tokFactoryCombo.setSelectedItem(""); @@ -516,7 +549,8 @@ public final class CustomAnalyzerPanelProvider implements CustomAnalyzerPanelOpe } private void addTokenFilter() { - if (Objects.isNull(tfFactoryCombo.getSelectedItem()) || tfFactoryCombo.getSelectedItem() == "") { + if (Objects.isNull(tfFactoryCombo.getSelectedItem()) + || tfFactoryCombo.getSelectedItem() == "") { return; } @@ -528,30 +562,47 @@ public final class CustomAnalyzerPanelProvider implements CustomAnalyzerPanelOpe assert selectedTfList.getModel().getSize() == tfParamsList.size(); - showEditParamsDialog(MessageUtils.getLocalizedMessage("analysis.dialog.title.token_filter_params"), - EditParamsMode.TOKENFILTER, targetIndex, selectedItem, tfParamsList.get(tfParamsList.size() - 1), + showEditParamsDialog( + MessageUtils.getLocalizedMessage("analysis.dialog.title.token_filter_params"), + EditParamsMode.TOKENFILTER, + targetIndex, + selectedItem, + tfParamsList.get(tfParamsList.size() - 1), () -> { - selectedTfList.setModel(new DefaultComboBoxModel<>(updatedList.toArray(new String[updatedList.size()]))); + selectedTfList.setModel( + new DefaultComboBoxModel<>(updatedList.toArray(new String[updatedList.size()]))); tfFactoryCombo.setSelectedItem(""); 
tfEditBtn.setEnabled(true);
           buildBtn.setEnabled(true);
         });
   }
 
-  private void showEditParamsDialog(String title, EditParamsMode mode, int targetIndex, String selectedItem, Map<String, String> params, Callable<Void> callback) {
-    new DialogOpener<>(editParamsDialogFactory).open(title, 400, 300,
-        (factory) -> {
-          factory.setMode(mode);
-          factory.setTargetIndex(targetIndex);
-          factory.setTarget(selectedItem);
-          factory.setParams(params);
-          factory.setCallback(callback);
-        });
+  private void showEditParamsDialog(
+      String title,
+      EditParamsMode mode,
+      int targetIndex,
+      String selectedItem,
+      Map<String, String> params,
+      Callable<Void> callback) {
+    new DialogOpener<>(editParamsDialogFactory)
+        .open(
+            title,
+            400,
+            300,
+            (factory) -> {
+              factory.setMode(mode);
+              factory.setTargetIndex(targetIndex);
+              factory.setTarget(selectedItem);
+              factory.setParams(params);
+              factory.setCallback(callback);
+            });
   }
 
   private void editCharFilters() {
     List<String> filters = ListUtils.getAllItems(selectedCfList);
-    showEditFiltersDialog(EditFiltersMode.CHARFILTER, filters,
+    showEditFiltersDialog(
+        EditFiltersMode.CHARFILTER,
+        filters,
         () -> {
           cfEditBtn.setEnabled(selectedCfList.getModel().getSize() > 0);
           buildBtn.setEnabled(true);
@@ -560,31 +611,44 @@ public final class CustomAnalyzerPanelProvider implements CustomAnalyzerPanelOpe
 
   private void editTokenizer() {
     String selectedItem = selectedTokTF.getText();
-    showEditParamsDialog(MessageUtils.getLocalizedMessage("analysis.dialog.title.tokenizer_params"),
-        EditParamsMode.TOKENIZER, -1, selectedItem, tokParams, () -> {
+    showEditParamsDialog(
+        MessageUtils.getLocalizedMessage("analysis.dialog.title.tokenizer_params"),
+        EditParamsMode.TOKENIZER,
+        -1,
+        selectedItem,
+        tokParams,
+        () -> {
           buildBtn.setEnabled(true);
         });
   }
 
   private void editTokenFilters() {
     List<String> filters = ListUtils.getAllItems(selectedTfList);
-    showEditFiltersDialog(EditFiltersMode.TOKENFILTER, filters,
+    showEditFiltersDialog(
+        EditFiltersMode.TOKENFILTER,
+        filters,
         () -> {
           tfEditBtn.setEnabled(selectedTfList.getModel().getSize() > 0);
           buildBtn.setEnabled(true);
         });
   }
 
-  private void showEditFiltersDialog(EditFiltersMode mode, List<String> selectedFilters, Callable<Void> callback) {
-    String title = (mode == EditFiltersMode.CHARFILTER) ?
-        MessageUtils.getLocalizedMessage("analysis.dialog.title.selected_char_filter") :
-        MessageUtils.getLocalizedMessage("analysis.dialog.title.selected_token_filter");
-    new DialogOpener<>(editFiltersDialogFactory).open(title, 400, 300,
-        (factory) -> {
-          factory.setMode(mode);
-          factory.setSelectedFilters(selectedFilters);
-          factory.setCallback(callback);
-        });
+  private void showEditFiltersDialog(
+      EditFiltersMode mode, List<String> selectedFilters, Callable<Void> callback) {
+    String title =
+        (mode == EditFiltersMode.CHARFILTER)
+            ? 
MessageUtils.getLocalizedMessage("analysis.dialog.title.selected_char_filter")
+            : MessageUtils.getLocalizedMessage("analysis.dialog.title.selected_token_filter");
+    new DialogOpener<>(editFiltersDialogFactory)
+        .open(
+            title,
+            400,
+            300,
+            (factory) -> {
+              factory.setMode(mode);
+              factory.setSelectedFilters(selectedFilters);
+              factory.setCallback(callback);
+            });
   }
 
   @Override
@@ -620,7 +684,8 @@ public final class CustomAnalyzerPanelProvider implements CustomAnalyzerPanelOpe
     Collection<String> tokenFilters = analysisModel.getAvailableTokenFilters();
     String[] tokenFilterNames = new String[tokenFilters.size() + 1];
     tokenFilterNames[0] = "";
-    System.arraycopy(tokenFilters.toArray(new String[0]), 0, tokenFilterNames, 1, tokenFilters.size());
+    System.arraycopy(
+        tokenFilters.toArray(new String[0]), 0, tokenFilterNames, 1, tokenFilters.size());
     tfFactoryCombo.setModel(new DefaultComboBoxModel<>(tokenFilterNames));
   }
 
@@ -628,16 +693,18 @@ public final class CustomAnalyzerPanelProvider implements CustomAnalyzerPanelOpe
   public void updateCharFilters(List<Integer> deletedIndexes) {
     // update filters
     List<String> filters = ListUtils.getAllItems(selectedCfList);
-    String[] updatedFilters = IntStream.range(0, filters.size())
-        .filter(i -> !deletedIndexes.contains(i))
-        .mapToObj(filters::get)
-        .toArray(String[]::new);
+    String[] updatedFilters =
+        IntStream.range(0, filters.size())
+            .filter(i -> !deletedIndexes.contains(i))
+            .mapToObj(filters::get)
+            .toArray(String[]::new);
     selectedCfList.setModel(new DefaultComboBoxModel<>(updatedFilters));
     // update parameters map for each filter
-    List<Map<String, String>> updatedParamList = IntStream.range(0, cfParamsList.size())
-        .filter(i -> !deletedIndexes.contains(i))
-        .mapToObj(cfParamsList::get)
-        .collect(Collectors.toList());
+    List<Map<String, String>> updatedParamList =
+        IntStream.range(0, cfParamsList.size())
+            .filter(i -> !deletedIndexes.contains(i))
+            .mapToObj(cfParamsList::get)
+            .collect(Collectors.toList());
     cfParamsList.clear();
     cfParamsList.addAll(updatedParamList);
     assert selectedCfList.getModel().getSize() == cfParamsList.size();
@@ -647,16 +714,18 @@ public final class CustomAnalyzerPanelProvider implements CustomAnalyzerPanelOpe
   public void updateTokenFilters(List<Integer> deletedIndexes) {
     // update filters
     List<String> filters = ListUtils.getAllItems(selectedTfList);
-    String[] updatedFilters = IntStream.range(0, filters.size())
-        .filter(i -> !deletedIndexes.contains(i))
-        .mapToObj(filters::get)
-        .toArray(String[]::new);
+    String[] updatedFilters =
+        IntStream.range(0, filters.size())
+            .filter(i -> !deletedIndexes.contains(i))
+            .mapToObj(filters::get)
+            .toArray(String[]::new);
     selectedTfList.setModel(new DefaultComboBoxModel<>(updatedFilters));
     // update parameters map for each filter
-    List<Map<String, String>> updatedParamList = IntStream.range(0, tfParamsList.size())
-        .filter(i -> !deletedIndexes.contains(i))
-        .mapToObj(tfParamsList::get)
-        .collect(Collectors.toList());
+    List<Map<String, String>> updatedParamList =
+        IntStream.range(0, tfParamsList.size())
+            .filter(i -> !deletedIndexes.contains(i))
+            .mapToObj(tfParamsList::get)
+            .collect(Collectors.toList());
     tfParamsList.clear();
     tfParamsList.addAll(updatedParamList);
     assert selectedTfList.getModel().getSize() == tfParamsList.size();
@@ -745,7 +814,5 @@ public final class CustomAnalyzerPanelProvider implements CustomAnalyzerPanelOpe
     void editTokenFilters(ActionEvent e) {
       CustomAnalyzerPanelProvider.this.editTokenFilters();
     }
-
   }
-
 }
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/PresetAnalyzerPanelOperator.java 
b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/PresetAnalyzerPanelOperator.java index 856de6357e1..3e42a94a44e 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/PresetAnalyzerPanelOperator.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/PresetAnalyzerPanelOperator.java @@ -18,7 +18,6 @@ package org.apache.lucene.luke.app.desktop.components.fragments.analysis; import java.util.Collection; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.luke.app.desktop.components.ComponentOperatorRegistry; diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/PresetAnalyzerPanelProvider.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/PresetAnalyzerPanelProvider.java index f8210821a3a..95824e4cdca 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/PresetAnalyzerPanelProvider.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/PresetAnalyzerPanelProvider.java @@ -17,18 +17,17 @@ package org.apache.lucene.luke.app.desktop.components.fragments.analysis; +import java.awt.BorderLayout; +import java.awt.Dimension; +import java.awt.FlowLayout; +import java.awt.event.ActionEvent; +import java.util.Collection; import javax.swing.BorderFactory; import javax.swing.ComboBoxModel; import javax.swing.DefaultComboBoxModel; import javax.swing.JComboBox; import javax.swing.JLabel; import javax.swing.JPanel; -import java.awt.BorderLayout; -import java.awt.Dimension; -import java.awt.FlowLayout; -import java.awt.event.ActionEvent; -import java.util.Collection; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.luke.app.desktop.components.AnalysisTabOperator; import org.apache.lucene.luke.app.desktop.components.ComponentOperatorRegistry; @@ -86,11 +85,10 @@ public final class PresetAnalyzerPanelProvider implements PresetAnalyzerPanelOpe private class ListenerFunctions { void setAnalyzer(ActionEvent e) { - operatorRegistry.get(AnalysisTabOperator.class).ifPresent(operator -> - operator.setAnalyzerByType((String) analyzersCB.getSelectedItem()) - ); + operatorRegistry + .get(AnalysisTabOperator.class) + .ifPresent( + operator -> operator.setAnalyzerByType((String) analyzersCB.getSelectedItem())); } - } - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/SimpleAnalyzeResultPanelOperator.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/SimpleAnalyzeResultPanelOperator.java index 5641479cbc3..3e9702c7720 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/SimpleAnalyzeResultPanelOperator.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/SimpleAnalyzeResultPanelOperator.java @@ -17,12 +17,12 @@ package org.apache.lucene.luke.app.desktop.components.fragments.analysis; - import org.apache.lucene.luke.app.desktop.components.ComponentOperatorRegistry; import org.apache.lucene.luke.models.analysis.Analysis; /** Operator of the simple analyze result panel */ -public interface SimpleAnalyzeResultPanelOperator extends ComponentOperatorRegistry.ComponentOperator { +public interface SimpleAnalyzeResultPanelOperator + extends ComponentOperatorRegistry.ComponentOperator { void setAnalysisModel(Analysis 
analysisModel);
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/SimpleAnalyzeResultPanelProvider.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/SimpleAnalyzeResultPanelProvider.java
index 5e0c077dd69..63e3daa1bc1 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/SimpleAnalyzeResultPanelProvider.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/SimpleAnalyzeResultPanelProvider.java
@@ -17,19 +17,17 @@
 package org.apache.lucene.luke.app.desktop.components.fragments.analysis;
 
-import javax.swing.JLabel;
-import javax.swing.JPanel;
-import javax.swing.JScrollPane;
-import javax.swing.JTable;
-import javax.swing.ListSelectionModel;
-
 import java.awt.BorderLayout;
 import java.awt.FlowLayout;
 import java.awt.event.MouseAdapter;
 import java.awt.event.MouseEvent;
 import java.util.List;
 import java.util.stream.Collectors;
-
+import javax.swing.JLabel;
+import javax.swing.JPanel;
+import javax.swing.JScrollPane;
+import javax.swing.JTable;
+import javax.swing.ListSelectionModel;
 import org.apache.lucene.luke.app.desktop.components.ComponentOperatorRegistry;
 import org.apache.lucene.luke.app.desktop.components.TableColumnInfo;
 import org.apache.lucene.luke.app.desktop.components.TableModelBase;
@@ -69,7 +67,10 @@ public class SimpleAnalyzeResultPanelProvider implements SimpleAnalyzeResultPane
     hint.add(new JLabel(MessageUtils.getLocalizedMessage("analysis.hint.show_attributes")));
     panel.add(hint, BorderLayout.PAGE_START);
 
-    TableUtils.setupTable(tokensTable, ListSelectionModel.SINGLE_SELECTION, new TokensTableModel(),
+    TableUtils.setupTable(
+        tokensTable,
+        ListSelectionModel.SINGLE_SELECTION,
+        new TokensTableModel(),
         new MouseAdapter() {
           @Override
           public void mouseClicked(MouseEvent e) {
@@ -93,15 +94,22 @@ public class SimpleAnalyzeResultPanelProvider implements SimpleAnalyzeResultPane
     tokens = analysisModel.analyze(text);
     tokensTable.setModel(new TokensTableModel(tokens));
     tokensTable.setShowGrid(true);
-    tokensTable.getColumnModel().getColumn(TokensTableModel.Column.TERM.getIndex())
+    tokensTable
+        .getColumnModel()
+        .getColumn(TokensTableModel.Column.TERM.getIndex())
         .setPreferredWidth(TokensTableModel.Column.TERM.getColumnWidth());
-    tokensTable.getColumnModel().getColumn(TokensTableModel.Column.ATTR.getIndex())
+    tokensTable
+        .getColumnModel()
+        .getColumn(TokensTableModel.Column.ATTR.getIndex())
         .setPreferredWidth(TokensTableModel.Column.ATTR.getColumnWidth());
   }
 
   @Override
   public void clearTable() {
-    TableUtils.setupTable(tokensTable, ListSelectionModel.SINGLE_SELECTION, new TokensTableModel(),
+    TableUtils.setupTable(
+        tokensTable,
+        ListSelectionModel.SINGLE_SELECTION,
+        new TokensTableModel(),
         null,
         TokensTableModel.Column.TERM.getColumnWidth(),
         TokensTableModel.Column.ATTR.getColumnWidth());
@@ -110,11 +118,15 @@ public class SimpleAnalyzeResultPanelProvider implements SimpleAnalyzeResultPane
   private void showAttributeValues(int selectedIndex) {
     String term = tokens.get(selectedIndex).getTerm();
     List<Analysis.TokenAttribute> attributes = tokens.get(selectedIndex).getAttributes();
-    new DialogOpener<>(tokenAttrDialogFactory).open("Token Attributes", 650, 400,
-        factory -> {
-          factory.setTerm(term);
-          factory.setAttributes(attributes);
-        });
+    new DialogOpener<>(tokenAttrDialogFactory)
+        .open(
+            "Token Attributes",
+            650,
+            400,
+            factory -> {
+              factory.setTerm(term);
+              factory.setAttributes(attributes);
+            });
   }
 
   private 
class ListenerFunctions {
@@ -180,10 +192,13 @@ public class SimpleAnalyzeResultPanelProvider implements SimpleAnalyzeResultPane
       for (int i = 0; i < tokens.size(); i++) {
         Analysis.Token token = tokens.get(i);
         data[i][Column.TERM.getIndex()] = token.getTerm();
-        List<String> attValues = token.getAttributes().stream()
-            .flatMap(att -> att.getAttValues().entrySet().stream()
-                .map(e -> e.getKey() + "=" + e.getValue()))
-            .collect(Collectors.toList());
+        List<String> attValues =
+            token.getAttributes().stream()
+                .flatMap(
+                    att ->
+                        att.getAttValues().entrySet().stream()
+                            .map(e -> e.getKey() + "=" + e.getValue()))
+                .collect(Collectors.toList());
         data[i][Column.ATTR.getIndex()] = String.join(",", attValues);
       }
     }
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/StepByStepAnalyzeResultPanelOperator.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/StepByStepAnalyzeResultPanelOperator.java
index 2311e59a0d9..20f189134ea 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/StepByStepAnalyzeResultPanelOperator.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/StepByStepAnalyzeResultPanelOperator.java
@@ -21,7 +21,8 @@ import org.apache.lucene.luke.app.desktop.components.ComponentOperatorRegistry;
 import org.apache.lucene.luke.models.analysis.Analysis;
 
 /** Operator of the Step by step analyze result panel */
-public interface StepByStepAnalyzeResultPanelOperator extends ComponentOperatorRegistry.ComponentOperator {
+public interface StepByStepAnalyzeResultPanelOperator
+    extends ComponentOperatorRegistry.ComponentOperator {
 
   void setAnalysisModel(Analysis analysisModel);
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/StepByStepAnalyzeResultPanelProvider.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/StepByStepAnalyzeResultPanelProvider.java
index 2ef696b430f..ccae64d3b66 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/StepByStepAnalyzeResultPanelProvider.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/StepByStepAnalyzeResultPanelProvider.java
@@ -17,14 +17,6 @@
 package org.apache.lucene.luke.app.desktop.components.fragments.analysis;
 
-import javax.swing.JLabel;
-import javax.swing.JPanel;
-import javax.swing.JScrollPane;
-import javax.swing.JSplitPane;
-import javax.swing.JTable;
-import javax.swing.ListSelectionModel;
-import javax.swing.table.AbstractTableModel;
-
 import java.awt.BorderLayout;
 import java.awt.Dimension;
 import java.awt.FlowLayout;
@@ -33,7 +25,13 @@ import java.awt.event.MouseEvent;
 import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
-
+import javax.swing.JLabel;
+import javax.swing.JPanel;
+import javax.swing.JScrollPane;
+import javax.swing.JSplitPane;
+import javax.swing.JTable;
+import javax.swing.ListSelectionModel;
+import javax.swing.table.AbstractTableModel;
 import org.apache.lucene.luke.app.desktop.components.ComponentOperatorRegistry;
 import org.apache.lucene.luke.app.desktop.components.TableColumnInfo;
 import org.apache.lucene.luke.app.desktop.components.TableModelBase;
@@ -76,17 +74,27 @@ public class StepByStepAnalyzeResultPanelProvider implements StepByStepAnalyzeRe
     JPanel hint = new JPanel(new FlowLayout(FlowLayout.LEADING));
     hint.setOpaque(false);
-    hint.add(new 
JLabel(MessageUtils.getLocalizedMessage("analysis.hint.show_attributes_step_by_step"))); + hint.add( + new JLabel(MessageUtils.getLocalizedMessage("analysis.hint.show_attributes_step_by_step"))); panel.add(hint, BorderLayout.PAGE_START); - TableUtils.setupTable(charfilterTextsRowHeader, ListSelectionModel.SINGLE_SELECTION, new RowHeaderTableModel(), + TableUtils.setupTable( + charfilterTextsRowHeader, + ListSelectionModel.SINGLE_SELECTION, + new RowHeaderTableModel(), null); - TableUtils.setupTable(charfilterTextsTable, ListSelectionModel.SINGLE_SELECTION, new CharfilterTextTableModel(), + TableUtils.setupTable( + charfilterTextsTable, + ListSelectionModel.SINGLE_SELECTION, + new CharfilterTextTableModel(), null); - TableUtils.setupTable(namedTokensRowHeader, ListSelectionModel.SINGLE_SELECTION, new RowHeaderTableModel(), - null); - TableUtils.setupTable(namedTokensTable, ListSelectionModel.SINGLE_SELECTION, new NamedTokensTableModel(), + TableUtils.setupTable( + namedTokensRowHeader, ListSelectionModel.SINGLE_SELECTION, new RowHeaderTableModel(), null); + TableUtils.setupTable( + namedTokensTable, + ListSelectionModel.SINGLE_SELECTION, + new NamedTokensTableModel(), new MouseAdapter() { @Override public void mouseClicked(MouseEvent e) { @@ -94,7 +102,11 @@ public class StepByStepAnalyzeResultPanelProvider implements StepByStepAnalyzeRe } }); namedTokensTable.setColumnSelectionAllowed(true); - JSplitPane inner = new JSplitPane(JSplitPane.VERTICAL_SPLIT, initResultScroll(charfilterTextsTable, charfilterTextsRowHeader), initResultScroll(namedTokensTable, namedTokensRowHeader)); + JSplitPane inner = + new JSplitPane( + JSplitPane.VERTICAL_SPLIT, + initResultScroll(charfilterTextsTable, charfilterTextsRowHeader), + initResultScroll(namedTokensTable, namedTokensRowHeader)); inner.setDividerLocation(60); panel.add(inner, BorderLayout.CENTER); @@ -110,7 +122,6 @@ public class StepByStepAnalyzeResultPanelProvider implements StepByStepAnalyzeRe return scroll; } - @Override public void setAnalysisModel(Analysis analysisModel) { this.analysisModel = analysisModel; @@ -119,11 +130,13 @@ public class StepByStepAnalyzeResultPanelProvider implements StepByStepAnalyzeRe @Override public void executeAnalysisStepByStep(String text) { result = analysisModel.analyzeStepByStep(text); - RowHeaderTableModel charfilterTextsHeaderModel = new RowHeaderTableModel(result.getCharfilteredTexts()); + RowHeaderTableModel charfilterTextsHeaderModel = + new RowHeaderTableModel(result.getCharfilteredTexts()); charfilterTextsRowHeader.setModel(charfilterTextsHeaderModel); charfilterTextsRowHeader.setShowGrid(true); - CharfilterTextTableModel charfilterTextTableModel = new CharfilterTextTableModel(result.getCharfilteredTexts()); + CharfilterTextTableModel charfilterTextTableModel = + new CharfilterTextTableModel(result.getCharfilteredTexts()); charfilterTextsTable.setModel(charfilterTextTableModel); charfilterTextsTable.setShowGrid(true); @@ -135,36 +148,48 @@ public class StepByStepAnalyzeResultPanelProvider implements StepByStepAnalyzeRe namedTokensTable.setModel(tableModel); namedTokensTable.setShowGrid(true); for (int i = 0; i < tableModel.getColumnCount(); i++) { - namedTokensTable.getColumnModel().getColumn(i).setPreferredWidth(tableModel.getColumnWidth(i)); + namedTokensTable + .getColumnModel() + .getColumn(i) + .setPreferredWidth(tableModel.getColumnWidth(i)); } } @Override public void clearTable() { - TableUtils.setupTable(charfilterTextsRowHeader, ListSelectionModel.SINGLE_SELECTION, new RowHeaderTableModel(), + 
TableUtils.setupTable(charfilterTextsRowHeader, ListSelectionModel.SINGLE_SELECTION, new RowHeaderTableModel(),
+    TableUtils.setupTable(
+        charfilterTextsRowHeader,
+        ListSelectionModel.SINGLE_SELECTION,
+        new RowHeaderTableModel(),
         null);
-    TableUtils.setupTable(charfilterTextsTable, ListSelectionModel.SINGLE_SELECTION, new CharfilterTextTableModel(),
+    TableUtils.setupTable(
+        charfilterTextsTable,
+        ListSelectionModel.SINGLE_SELECTION,
+        new CharfilterTextTableModel(),
         null);
-    TableUtils.setupTable(namedTokensRowHeader, ListSelectionModel.SINGLE_SELECTION, new RowHeaderTableModel(),
-        null);
-    TableUtils.setupTable(namedTokensTable, ListSelectionModel.SINGLE_SELECTION, new NamedTokensTableModel(),
-        null);
+    TableUtils.setupTable(
+        namedTokensRowHeader, ListSelectionModel.SINGLE_SELECTION, new RowHeaderTableModel(), null);
+    TableUtils.setupTable(
+        namedTokensTable, ListSelectionModel.SINGLE_SELECTION, new NamedTokensTableModel(), null);
   }
 
   private void showAttributeValues(int rowIndex, int columnIndex) {
-    Analysis.NamedTokens namedTokens =
-        this.result.getNamedTokens().get(rowIndex);
+    Analysis.NamedTokens namedTokens = this.result.getNamedTokens().get(rowIndex);
     List<Analysis.Token> tokens = namedTokens.getTokens();
     if (rowIndex <= tokens.size()) {
       String term = "\"" + tokens.get(columnIndex).getTerm() + "\" BY " + namedTokens.getName();
       List<Analysis.TokenAttribute> attributes = tokens.get(columnIndex).getAttributes();
-      new DialogOpener<>(tokenAttrDialogFactory).open("Token Attributes", 650, 400,
-          factory -> {
-            factory.setTerm(term);
-            factory.setAttributes(attributes);
-          });
+      new DialogOpener<>(tokenAttrDialogFactory)
+          .open(
+              "Token Attributes",
+              650,
+              400,
+              factory -> {
+                factory.setTerm(term);
+                factory.setAttributes(attributes);
+              });
     }
   }
@@ -184,7 +209,7 @@ public class StepByStepAnalyzeResultPanelProvider implements StepByStepAnalyzeRe
     }
   }
 
-  /** Table model for row header (display charfilter/tokenizer/filter name) */
+  /** Table model for row header (display charfilter/tokenizer/filter name) */
   private static class RowHeaderTableModel extends TableModelBase<RowHeaderTableModel.Column> {
 
     enum Column implements TableColumnInfo {
@@ -241,7 +266,8 @@ public class StepByStepAnalyzeResultPanelProvider implements StepByStepAnalyzeRe
   }
 
   /** Table model for charfilter result */
-  private static class CharfilterTextTableModel extends TableModelBase<CharfilterTextTableModel.Column> {
+  private static class CharfilterTextTableModel
+      extends TableModelBase<CharfilterTextTableModel.Column> {
 
     enum Column implements TableColumnInfo {
       TEXT("Text", 0, String.class, 1000);
@@ -338,7 +364,6 @@ public class StepByStepAnalyzeResultPanelProvider implements StepByStepAnalyzeRe
 
     private final Object[][] data;
 
-
     NamedTokensTableModel() {
       this.data = new Object[0][0];
     }
@@ -381,7 +406,6 @@ public class StepByStepAnalyzeResultPanelProvider implements StepByStepAnalyzeRe
       return columnMap.size();
     }
 
-
     @Override
     public String getColumnName(int colIndex) {
       if (columnMap.containsKey(colIndex)) {
@@ -411,5 +435,4 @@ public class StepByStepAnalyzeResultPanelProvider implements StepByStepAnalyzeRe
   private static String shortenName(String name) {
     return name.substring(name.lastIndexOf('.') + 1);
   }
-
 }
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/package-info.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/package-info.java
index 20cbe7b84f5..35e492cb51d 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/package-info.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/package-info.java
@@ -16,4 +16,4 @@
  */
 
 /** UI parts embedded in the Analysis tab */
-package 
org.apache.lucene.luke.app.desktop.components.fragments.analysis; \ No newline at end of file +package org.apache.lucene.luke.app.desktop.components.fragments.analysis; diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/package-info.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/package-info.java index 382d73aaf69..bd57f896b3d 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/package-info.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/package-info.java @@ -16,4 +16,4 @@ */ /** UI parts embedded in tabs */ -package org.apache.lucene.luke.app.desktop.components.fragments; \ No newline at end of file +package org.apache.lucene.luke.app.desktop.components.fragments; diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/AnalyzerPaneProvider.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/AnalyzerPaneProvider.java index 9f74a4df323..75c7c2728f2 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/AnalyzerPaneProvider.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/AnalyzerPaneProvider.java @@ -17,15 +17,6 @@ package org.apache.lucene.luke.app.desktop.components.fragments.search; -import javax.swing.BorderFactory; -import javax.swing.BoxLayout; -import javax.swing.DefaultListModel; -import javax.swing.JLabel; -import javax.swing.JList; -import javax.swing.JPanel; -import javax.swing.JScrollPane; -import javax.swing.JSeparator; -import javax.swing.JTextField; import java.awt.BorderLayout; import java.awt.Color; import java.awt.Dimension; @@ -35,7 +26,15 @@ import java.awt.GridBagLayout; import java.awt.Insets; import java.awt.event.MouseAdapter; import java.awt.event.MouseEvent; - +import javax.swing.BorderFactory; +import javax.swing.BoxLayout; +import javax.swing.DefaultListModel; +import javax.swing.JLabel; +import javax.swing.JList; +import javax.swing.JPanel; +import javax.swing.JScrollPane; +import javax.swing.JSeparator; +import javax.swing.JTextField; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.custom.CustomAnalyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; @@ -90,13 +89,15 @@ public final class AnalyzerPaneProvider implements AnalyzerTabOperator { panel.add(analyzerNameLbl); - JLabel changeLbl = new JLabel(MessageUtils.getLocalizedMessage("search_analyzer.hyperlink.change")); - changeLbl.addMouseListener(new MouseAdapter() { - @Override - public void mouseClicked(MouseEvent e) { - tabSwitcher.switchTab(TabbedPaneProvider.Tab.ANALYZER); - } - }); + JLabel changeLbl = + new JLabel(MessageUtils.getLocalizedMessage("search_analyzer.hyperlink.change")); + changeLbl.addMouseListener( + new MouseAdapter() { + @Override + public void mouseClicked(MouseEvent e) { + tabSwitcher.switchTab(TabbedPaneProvider.Tab.ANALYZER); + } + }); panel.add(FontUtils.toLinkText(changeLbl)); return panel; @@ -122,7 +123,8 @@ public final class AnalyzerPaneProvider implements AnalyzerTabOperator { c.gridx = 0; c.gridy = 0; c.weightx = 0.1; - center.add(new JLabel(MessageUtils.getLocalizedMessage("search_analyzer.label.charfilters")), c); + center.add( + new JLabel(MessageUtils.getLocalizedMessage("search_analyzer.label.charfilters")), c); charFilterList.setVisibleRowCount(3); JScrollPane charFilterSP = new JScrollPane(charFilterList); 
@@ -147,7 +149,8 @@ public final class AnalyzerPaneProvider implements AnalyzerTabOperator {
     c.gridx = 0;
     c.gridy = 2;
     c.weightx = 0.1;
-    center.add(new JLabel(MessageUtils.getLocalizedMessage("search_analyzer.label.tokenfilters")), c);
+    center.add(
+        new JLabel(MessageUtils.getLocalizedMessage("search_analyzer.label.tokenfilters")), c);
     tokenFilterList.setVisibleRowCount(3);
     JScrollPane tokenFilterSP = new JScrollPane(tokenFilterList);
@@ -195,6 +198,4 @@ public final class AnalyzerPaneProvider implements AnalyzerTabOperator {
       tokenFilterList.setBackground(Color.lightGray);
     }
   }
-
-
 }
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/AnalyzerTabOperator.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/AnalyzerTabOperator.java
index 55aec09566f..193a7a3df1c 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/AnalyzerTabOperator.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/AnalyzerTabOperator.java
@@ -24,4 +24,3 @@ import org.apache.lucene.luke.app.desktop.components.ComponentOperatorRegistry;
 public interface AnalyzerTabOperator extends ComponentOperatorRegistry.ComponentOperator {
   void setAnalyzer(Analyzer analyzer);
 }
-
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/FieldValuesPaneProvider.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/FieldValuesPaneProvider.java
index 1217bf90329..24371d13745 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/FieldValuesPaneProvider.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/FieldValuesPaneProvider.java
@@ -17,6 +17,12 @@
 package org.apache.lucene.luke.app.desktop.components.fragments.search;
+import java.awt.BorderLayout;
+import java.awt.GridLayout;
+import java.awt.event.ActionEvent;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Set;
 import javax.swing.BorderFactory;
 import javax.swing.BoxLayout;
 import javax.swing.JCheckBox;
@@ -26,13 +32,6 @@ import javax.swing.JScrollPane;
 import javax.swing.JTable;
 import javax.swing.ListSelectionModel;
 import javax.swing.event.TableModelEvent;
-import java.awt.BorderLayout;
-import java.awt.GridLayout;
-import java.awt.event.ActionEvent;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.Set;
-
 import org.apache.lucene.luke.app.desktop.components.ComponentOperatorRegistry;
 import org.apache.lucene.luke.app.desktop.components.TableColumnInfo;
 import org.apache.lucene.luke.app.desktop.components.TableModelBase;
@@ -81,7 +80,11 @@ public final class FieldValuesPaneProvider implements FieldValuesTabOperator {
     header.add(loadAllCB);
     panel.add(header, BorderLayout.PAGE_START);
-    TableUtils.setupTable(fieldsTable, ListSelectionModel.SINGLE_SELECTION, new FieldsTableModel(), null,
+    TableUtils.setupTable(
+        fieldsTable,
+        ListSelectionModel.SINGLE_SELECTION,
+        new FieldsTableModel(),
+        null,
         FieldsTableModel.Column.LOAD.getColumnWidth());
     fieldsTable.setShowGrid(true);
     fieldsTable.setPreferredScrollableViewportSize(fieldsTable.getPreferredSize());
@@ -93,8 +96,14 @@ public final class FieldValuesPaneProvider implements FieldValuesTabOperator {
   @Override
   public void setFields(Collection<String> fields) {
     fieldsTable.setModel(new FieldsTableModel(fields));
-    fieldsTable.getColumnModel().getColumn(FieldsTableModel.Column.LOAD.getIndex()).setMinWidth(FieldsTableModel.Column.LOAD.getColumnWidth());
-    fieldsTable.getColumnModel().getColumn(FieldsTableModel.Column.LOAD.getIndex()).setMaxWidth(FieldsTableModel.Column.LOAD.getColumnWidth());
+    fieldsTable
+        .getColumnModel()
+        .getColumn(FieldsTableModel.Column.LOAD.getIndex())
+        .setMinWidth(FieldsTableModel.Column.LOAD.getColumnWidth());
+    fieldsTable
+        .getColumnModel()
+        .getColumn(FieldsTableModel.Column.LOAD.getIndex())
+        .setMaxWidth(FieldsTableModel.Column.LOAD.getColumnWidth());
     fieldsTable.getModel().addTableModelListener(listners::tableDataChenged);
   }
@@ -102,9 +111,11 @@ public final class FieldValuesPaneProvider implements FieldValuesTabOperator {
   public Set<String> getFieldsToLoad() {
     Set<String> fieldsToLoad = new HashSet<>();
     for (int row = 0; row < fieldsTable.getRowCount(); row++) {
-      boolean loaded = (boolean) fieldsTable.getValueAt(row, FieldsTableModel.Column.LOAD.getIndex());
+      boolean loaded =
+          (boolean) fieldsTable.getValueAt(row, FieldsTableModel.Column.LOAD.getIndex());
       if (loaded) {
-        fieldsToLoad.add((String) fieldsTable.getValueAt(row, FieldsTableModel.Column.FIELD.getIndex()));
+        fieldsToLoad.add(
+            (String) fieldsTable.getValueAt(row, FieldsTableModel.Column.FIELD.getIndex()));
       }
     }
     return fieldsToLoad;
@@ -203,4 +214,4 @@ public final class FieldValuesPaneProvider implements FieldValuesTabOperator {
     return Column.values();
   }
 }
-}
\ No newline at end of file
+}
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/FieldValuesTabOperator.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/FieldValuesTabOperator.java
index 0b317651c06..8060d8b2ac1 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/FieldValuesTabOperator.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/FieldValuesTabOperator.java
@@ -19,7 +19,6 @@ package org.apache.lucene.luke.app.desktop.components.fragments.search;
 import java.util.Collection;
 import java.util.Set;
-
 import org.apache.lucene.luke.app.desktop.components.ComponentOperatorRegistry;
 /** Operator of the FieldValues tab */
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/MLTPaneProvider.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/MLTPaneProvider.java
index ad791a40347..f124d6d907c 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/MLTPaneProvider.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/MLTPaneProvider.java
@@ -17,6 +17,16 @@
 package org.apache.lucene.luke.app.desktop.components.fragments.search;
+import java.awt.BorderLayout;
+import java.awt.Dimension;
+import java.awt.FlowLayout;
+import java.awt.GridLayout;
+import java.awt.event.ActionEvent;
+import java.awt.event.MouseAdapter;
+import java.awt.event.MouseEvent;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
 import javax.swing.BorderFactory;
 import javax.swing.BoxLayout;
 import javax.swing.JCheckBox;
@@ -28,17 +38,6 @@ import javax.swing.JSeparator;
 import javax.swing.JTable;
 import javax.swing.ListSelectionModel;
 import javax.swing.event.TableModelEvent;
-import java.awt.BorderLayout;
-import java.awt.Dimension;
-import java.awt.FlowLayout;
-import java.awt.GridLayout;
-import java.awt.event.ActionEvent;
-import java.awt.event.MouseAdapter;
-import java.awt.event.MouseEvent;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.luke.app.desktop.components.ComponentOperatorRegistry;
@@ -141,12 +140,13 @@ public final class MLTPaneProvider implements MLTTabOperator {
     panel.add(analyzerLbl);
     JLabel changeLbl = new JLabel(MessageUtils.getLocalizedMessage("search_mlt.hyperlink.change"));
-    changeLbl.addMouseListener(new MouseAdapter() {
-      @Override
-      public void mouseClicked(MouseEvent e) {
-        tabSwitcher.switchTab(TabbedPaneProvider.Tab.ANALYZER);
-      }
-    });
+    changeLbl.addMouseListener(
+        new MouseAdapter() {
+          @Override
+          public void mouseClicked(MouseEvent e) {
+            tabSwitcher.switchTab(TabbedPaneProvider.Tab.ANALYZER);
+          }
+        });
     panel.add(FontUtils.toLinkText(changeLbl));
     return panel;
@@ -169,7 +169,12 @@ public final class MLTPaneProvider implements MLTTabOperator {
     header.add(loadAllCB);
     panel.add(header, BorderLayout.PAGE_START);
-    TableUtils.setupTable(fieldsTable, ListSelectionModel.SINGLE_SELECTION, new MLTFieldsTableModel(), null, MLTFieldsTableModel.Column.SELECT.getColumnWidth());
+    TableUtils.setupTable(
+        fieldsTable,
+        ListSelectionModel.SINGLE_SELECTION,
+        new MLTFieldsTableModel(),
+        null,
+        MLTFieldsTableModel.Column.SELECT.getColumnWidth());
     fieldsTable.setPreferredScrollableViewportSize(fieldsTable.getPreferredSize());
     panel.add(new JScrollPane(fieldsTable), BorderLayout.CENTER);
@@ -184,8 +189,14 @@ public final class MLTPaneProvider implements MLTTabOperator {
   @Override
   public void setFields(Collection<String> fields) {
     fieldsTable.setModel(new MLTFieldsTableModel(fields));
-    fieldsTable.getColumnModel().getColumn(MLTFieldsTableModel.Column.SELECT.getIndex()).setMinWidth(MLTFieldsTableModel.Column.SELECT.getColumnWidth());
-    fieldsTable.getColumnModel().getColumn(MLTFieldsTableModel.Column.SELECT.getIndex()).setMaxWidth(MLTFieldsTableModel.Column.SELECT.getColumnWidth());
+    fieldsTable
+        .getColumnModel()
+        .getColumn(MLTFieldsTableModel.Column.SELECT.getIndex())
+        .setMinWidth(MLTFieldsTableModel.Column.SELECT.getColumnWidth());
+    fieldsTable
+        .getColumnModel()
+        .getColumn(MLTFieldsTableModel.Column.SELECT.getIndex())
+        .setMaxWidth(MLTFieldsTableModel.Column.SELECT.getColumnWidth());
     fieldsTable.getModel().addTableModelListener(listeners::tableDataChenged);
   }
@@ -193,9 +204,11 @@ public final class MLTPaneProvider implements MLTTabOperator {
   public MLTConfig getConfig() {
     List<String> fields = new ArrayList<>();
     for (int row = 0; row < fieldsTable.getRowCount(); row++) {
-      boolean selected = (boolean) fieldsTable.getValueAt(row, MLTFieldsTableModel.Column.SELECT.getIndex());
+      boolean selected =
+          (boolean) fieldsTable.getValueAt(row, MLTFieldsTableModel.Column.SELECT.getIndex());
       if (selected) {
-        fields.add((String) fieldsTable.getValueAt(row, MLTFieldsTableModel.Column.FIELD.getIndex()));
+        fields.add(
+            (String) fieldsTable.getValueAt(row, MLTFieldsTableModel.Column.FIELD.getIndex()));
       }
     }
@@ -300,4 +313,4 @@ public final class MLTPaneProvider implements MLTTabOperator {
     return Column.values();
   }
 }
-}
\ No newline at end of file
+}
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/MLTTabOperator.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/MLTTabOperator.java
index 1180bc772d0..7ad8ccda388 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/MLTTabOperator.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/MLTTabOperator.java
@@ -18,7 +18,6 @@ package org.apache.lucene.luke.app.desktop.components.fragments.search;
 import java.util.Collection;
-
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.luke.app.desktop.components.ComponentOperatorRegistry;
 import org.apache.lucene.luke.models.search.MLTConfig;
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/QueryParserPaneProvider.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/QueryParserPaneProvider.java
index f565339853d..4dc585ea461 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/QueryParserPaneProvider.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/QueryParserPaneProvider.java
@@ -17,6 +17,16 @@
 package org.apache.lucene.luke.app.desktop.components.fragments.search;
+import java.awt.Color;
+import java.awt.FlowLayout;
+import java.awt.GridLayout;
+import java.awt.event.ActionEvent;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Locale;
+import java.util.Map;
+import java.util.TimeZone;
 import javax.swing.BorderFactory;
 import javax.swing.BoxLayout;
 import javax.swing.ButtonGroup;
@@ -32,17 +42,6 @@ import javax.swing.JSeparator;
 import javax.swing.JTable;
 import javax.swing.JTextField;
 import javax.swing.ListSelectionModel;
-import java.awt.Color;
-import java.awt.FlowLayout;
-import java.awt.GridLayout;
-import java.awt.event.ActionEvent;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Locale;
-import java.util.Map;
-import java.util.TimeZone;
-
 import org.apache.lucene.document.DateTools;
 import org.apache.lucene.luke.app.desktop.components.ComponentOperatorRegistry;
 import org.apache.lucene.luke.app.desktop.components.TableColumnInfo;
@@ -60,7 +59,11 @@ public final class QueryParserPaneProvider implements QueryParserTabOperator {
   private final JComboBox<String> dfCB = new JComboBox<>();
-  private final JComboBox<String> defOpCombo = new JComboBox<>(new String[]{QueryParserConfig.Operator.OR.name(), QueryParserConfig.Operator.AND.name()});
+  private final JComboBox<String> defOpCombo =
+      new JComboBox<>(
+          new String[] {
+            QueryParserConfig.Operator.OR.name(), QueryParserConfig.Operator.AND.name()
+          });
   private final JCheckBox posIncCB = new JCheckBox();
@@ -201,8 +204,10 @@ public final class QueryParserPaneProvider implements QueryParserTabOperator {
     JPanel genMTPQ = new JPanel(new FlowLayout(FlowLayout.LEADING));
     genMTPQ.setOpaque(false);
     genMTPQ.setBorder(BorderFactory.createEmptyBorder(0, 20, 0, 0));
-    genMultiTermSynonymsPhraseQueryCB.setText(MessageUtils.getLocalizedMessage("search_parser.checkbox.gen_mts"));
-    genMultiTermSynonymsPhraseQueryCB.setEnabled(config.isAutoGenerateMultiTermSynonymsPhraseQuery());
+    genMultiTermSynonymsPhraseQueryCB.setText(
+        MessageUtils.getLocalizedMessage("search_parser.checkbox.gen_mts"));
+    genMultiTermSynonymsPhraseQueryCB.setEnabled(
+        config.isAutoGenerateMultiTermSynonymsPhraseQuery());
     genMultiTermSynonymsPhraseQueryCB.setOpaque(false);
     genMTPQ.add(genMultiTermSynonymsPhraseQueryCB);
     panel.add(genMTPQ);
@@ -210,7 +215,8 @@
     JPanel slop = new JPanel(new FlowLayout(FlowLayout.LEADING));
     slop.setOpaque(false);
     slop.setBorder(BorderFactory.createEmptyBorder(0, 20, 0, 0));
-    JLabel slopLabel = new JLabel(MessageUtils.getLocalizedMessage("search_parser.label.phrase_slop"));
+    JLabel slopLabel =
+        new JLabel(MessageUtils.getLocalizedMessage("search_parser.label.phrase_slop"));
     slop.add(slopLabel);
     slopFTF.setColumns(5);
     slopFTF.setValue(config.getPhraseSlop());
@@ -234,7 +240,8 @@ public final class QueryParserPaneProvider implements QueryParserTabOperator {
     JPanel minSim = new JPanel(new FlowLayout(FlowLayout.LEADING));
     minSim.setOpaque(false);
     minSim.setBorder(BorderFactory.createEmptyBorder(0, 20, 0, 0));
-    JLabel minSimLabel = new JLabel(MessageUtils.getLocalizedMessage("search_parser.label.fuzzy_minsim"));
+    JLabel minSimLabel =
+        new JLabel(MessageUtils.getLocalizedMessage("search_parser.label.fuzzy_minsim"));
     minSim.add(minSimLabel);
     minSimFTF.setColumns(5);
     minSimFTF.setValue(config.getFuzzyMinSim());
@@ -245,7 +252,8 @@ public final class QueryParserPaneProvider implements QueryParserTabOperator {
     JPanel prefLen = new JPanel(new FlowLayout(FlowLayout.LEADING));
     prefLen.setOpaque(false);
     prefLen.setBorder(BorderFactory.createEmptyBorder(0, 20, 0, 0));
-    JLabel prefLenLabel = new JLabel(MessageUtils.getLocalizedMessage("search_parser.label.fuzzy_preflen"));
+    JLabel prefLenLabel =
+        new JLabel(MessageUtils.getLocalizedMessage("search_parser.label.fuzzy_preflen"));
     prefLen.add(prefLenLabel);
     prefLenFTF.setColumns(5);
     prefLenFTF.setValue(config.getFuzzyPrefixLength());
@@ -271,7 +279,9 @@ public final class QueryParserPaneProvider implements QueryParserTabOperator {
     resolution.setBorder(BorderFactory.createEmptyBorder(0, 20, 0, 0));
     JLabel resLabel = new JLabel(MessageUtils.getLocalizedMessage("search_parser.label.date_res"));
     resolution.add(resLabel);
-    Arrays.stream(DateTools.Resolution.values()).map(DateTools.Resolution::name).forEach(dateResCB::addItem);
+    Arrays.stream(DateTools.Resolution.values())
+        .map(DateTools.Resolution::name)
+        .forEach(dateResCB::addItem);
     dateResCB.setSelectedItem(config.getDateResolution().name());
     dateResCB.setOpaque(false);
     resolution.add(dateResCB);
@@ -303,15 +313,22 @@ public final class QueryParserPaneProvider implements QueryParserTabOperator {
     JPanel header = new JPanel(new FlowLayout(FlowLayout.LEADING));
     header.setOpaque(false);
-    header.add(new JLabel(MessageUtils.getLocalizedMessage("search_parser.label.pointrange_query")));
+    header.add(
+        new JLabel(MessageUtils.getLocalizedMessage("search_parser.label.pointrange_query")));
     panel.add(header);
     JPanel headerNote = new JPanel(new FlowLayout(FlowLayout.LEADING));
     headerNote.setOpaque(false);
-    headerNote.add(new JLabel(MessageUtils.getLocalizedMessage("search_parser.label.pointrange_hint")));
+    headerNote.add(
+        new JLabel(MessageUtils.getLocalizedMessage("search_parser.label.pointrange_hint")));
     panel.add(headerNote);
-    TableUtils.setupTable(pointRangeQueryTable, ListSelectionModel.SINGLE_SELECTION, new PointTypesTableModel(), null, PointTypesTableModel.Column.FIELD.getColumnWidth());
+    TableUtils.setupTable(
+        pointRangeQueryTable,
+        ListSelectionModel.SINGLE_SELECTION,
+        new PointTypesTableModel(),
+        null,
+        PointTypesTableModel.Column.FIELD.getColumnWidth());
     pointRangeQueryTable.setShowGrid(true);
     JScrollPane scrollPane = new JScrollPane(pointRangeQueryTable);
     panel.add(scrollPane);
@@ -331,23 +348,33 @@ public final class QueryParserPaneProvider implements QueryParserTabOperator {
   public void setRangeSearchableFields(Collection<String> rangeSearchableFields) {
     pointRangeQueryTable.setModel(new PointTypesTableModel(rangeSearchableFields));
     pointRangeQueryTable.setShowGrid(true);
-    String[] numTypes = Arrays.stream(PointTypesTableModel.NumType.values())
-        .map(PointTypesTableModel.NumType::name)
-        .toArray(String[]::new);
+    String[] numTypes =
+        Arrays.stream(PointTypesTableModel.NumType.values())
+            .map(PointTypesTableModel.NumType::name)
+            .toArray(String[]::new);
     JComboBox<String> numTypesCombo = new JComboBox<>(numTypes);
     numTypesCombo.setRenderer((list, value, index, isSelected, cellHasFocus) -> new JLabel(value));
-    pointRangeQueryTable.getColumnModel().getColumn(PointTypesTableModel.Column.TYPE.getIndex()).setCellEditor(new DefaultCellEditor(numTypesCombo));
-    pointRangeQueryTable.getColumnModel().getColumn(PointTypesTableModel.Column.TYPE.getIndex()).setCellRenderer(
-        (table, value, isSelected, hasFocus, row, column) -> new JLabel((String) value)
-    );
-    pointRangeQueryTable.getColumnModel().getColumn(PointTypesTableModel.Column.FIELD.getIndex()).setPreferredWidth(PointTypesTableModel.Column.FIELD.getColumnWidth());
-    pointRangeQueryTable.setPreferredScrollableViewportSize(pointRangeQueryTable.getPreferredSize());
+    pointRangeQueryTable
+        .getColumnModel()
+        .getColumn(PointTypesTableModel.Column.TYPE.getIndex())
+        .setCellEditor(new DefaultCellEditor(numTypesCombo));
+    pointRangeQueryTable
+        .getColumnModel()
+        .getColumn(PointTypesTableModel.Column.TYPE.getIndex())
+        .setCellRenderer(
+            (table, value, isSelected, hasFocus, row, column) -> new JLabel((String) value));
+    pointRangeQueryTable
+        .getColumnModel()
+        .getColumn(PointTypesTableModel.Column.FIELD.getIndex())
+        .setPreferredWidth(PointTypesTableModel.Column.FIELD.getColumnWidth());
+    pointRangeQueryTable.setPreferredScrollableViewportSize(
+        pointRangeQueryTable.getPreferredSize());
     // set default type to Integer
     for (int i = 0; i < rangeSearchableFields.size(); i++) {
-      pointRangeQueryTable.setValueAt(PointTypesTableModel.NumType.INT.name(), i, PointTypesTableModel.Column.TYPE.getIndex());
+      pointRangeQueryTable.setValueAt(
+          PointTypesTableModel.NumType.INT.name(), i, PointTypesTableModel.Column.TYPE.getIndex());
     }
-
   }
   @Override
@@ -358,8 +385,12 @@ public final class QueryParserPaneProvider implements QueryParserTabOperator {
     Map<String, Class<? extends Number>> typeMap = new HashMap<>();
     for (int row = 0; row < pointRangeQueryTable.getModel().getRowCount(); row++) {
-      String field = (String) pointRangeQueryTable.getValueAt(row, PointTypesTableModel.Column.FIELD.getIndex());
-      String type = (String) pointRangeQueryTable.getValueAt(row, PointTypesTableModel.Column.TYPE.getIndex());
+      String field =
+          (String)
+              pointRangeQueryTable.getValueAt(row, PointTypesTableModel.Column.FIELD.getIndex());
+      String type =
+          (String)
+              pointRangeQueryTable.getValueAt(row, PointTypesTableModel.Column.TYPE.getIndex());
       switch (PointTypesTableModel.NumType.valueOf(type)) {
         case INT:
           typeMap.put(field, Integer.class);
@@ -432,13 +463,11 @@ public final class QueryParserPaneProvider implements QueryParserTabOperator {
       genPhraseQueryCB.setSelected(false);
     }
   }
-
 }
 static final class PointTypesTableModel extends TableModelBase<PointTypesTableModel.Column> {
   enum Column implements TableColumnInfo {
-
     FIELD("Field", 0, String.class, 300),
     TYPE("Numeric Type", 1, NumType.class, 150);
@@ -476,9 +505,10 @@ public final class QueryParserPaneProvider implements QueryParserTabOperator {
   }
   enum NumType {
-
-    INT, LONG, FLOAT, DOUBLE
-
+    INT,
+    LONG,
+    FLOAT,
+    DOUBLE
   }
   PointTypesTableModel() {
@@ -509,5 +539,4 @@ public final class QueryParserPaneProvider implements QueryParserTabOperator {
     return Column.values();
   }
 }
-
 }
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/QueryParserTabOperator.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/QueryParserTabOperator.java
index 1a398721703..14cc79c1f18 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/QueryParserTabOperator.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/QueryParserTabOperator.java
@@ -18,7 +18,6 @@ package org.apache.lucene.luke.app.desktop.components.fragments.search;
 import java.util.Collection;
-
 import org.apache.lucene.luke.app.desktop.components.ComponentOperatorRegistry;
 import org.apache.lucene.luke.models.search.QueryParserConfig;
@@ -32,4 +31,3 @@ public interface QueryParserTabOperator extends ComponentOperatorRegistry.Compon
   String getDefaultField();
 }
-
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/SimilarityPaneProvider.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/SimilarityPaneProvider.java
index 8c7cd114c69..31c910abb66 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/SimilarityPaneProvider.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/SimilarityPaneProvider.java
@@ -17,6 +17,11 @@
 package org.apache.lucene.luke.app.desktop.components.fragments.search;
+import java.awt.Color;
+import java.awt.Dimension;
+import java.awt.FlowLayout;
+import java.awt.GridLayout;
+import java.awt.event.ActionEvent;
 import javax.swing.BorderFactory;
 import javax.swing.BoxLayout;
 import javax.swing.JCheckBox;
@@ -24,12 +29,6 @@ import javax.swing.JFormattedTextField;
 import javax.swing.JLabel;
 import javax.swing.JPanel;
 import javax.swing.JScrollPane;
-import java.awt.Color;
-import java.awt.Dimension;
-import java.awt.FlowLayout;
-import java.awt.GridLayout;
-import java.awt.event.ActionEvent;
-
 import org.apache.lucene.luke.app.desktop.components.ComponentOperatorRegistry;
 import org.apache.lucene.luke.app.desktop.util.MessageUtils;
 import org.apache.lucene.luke.app.desktop.util.StyleConstants;
@@ -78,12 +77,14 @@ public final class SimilarityPaneProvider implements SimilarityTabOperator {
     tfidfCB.setOpaque(false);
     panel.add(tfidfCB);
-    discardOverlapsCB.setText(MessageUtils.getLocalizedMessage("search_similarity.checkbox.discount_overlaps"));
+    discardOverlapsCB.setText(
+        MessageUtils.getLocalizedMessage("search_similarity.checkbox.discount_overlaps"));
     discardOverlapsCB.setSelected(config.isUseClassicSimilarity());
     discardOverlapsCB.setOpaque(false);
     panel.add(discardOverlapsCB);
-    JLabel bm25Label = new JLabel(MessageUtils.getLocalizedMessage("search_similarity.label.bm25_params"));
+    JLabel bm25Label =
+        new JLabel(MessageUtils.getLocalizedMessage("search_similarity.label.bm25_params"));
     panel.add(bm25Label);
     JPanel bm25Params = new JPanel(new FlowLayout(FlowLayout.LEADING));
@@ -141,5 +142,4 @@ public final class SimilarityPaneProvider implements SimilarityTabOperator {
       }
     }
   }
-
 }
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/SortPaneProvider.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/SortPaneProvider.java
index d86215971a7..3497c8cf429 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/SortPaneProvider.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/SortPaneProvider.java
@@ -17,12 +17,6 @@
 package org.apache.lucene.luke.app.desktop.components.fragments.search;
-import javax.swing.BorderFactory;
-import javax.swing.JButton;
-import javax.swing.JComboBox;
-import javax.swing.JLabel;
-import javax.swing.JPanel;
-import javax.swing.JScrollPane;
 import java.awt.Dimension;
 import java.awt.FlowLayout;
 import java.awt.GridLayout;
@@ -31,7 +25,12 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
-
+import javax.swing.BorderFactory;
+import javax.swing.JButton;
+import javax.swing.JComboBox;
+import javax.swing.JLabel;
+import javax.swing.JPanel;
+import javax.swing.JScrollPane;
 import org.apache.lucene.luke.app.desktop.components.ComponentOperatorRegistry;
 import org.apache.lucene.luke.app.desktop.components.SearchTabOperator;
 import org.apache.lucene.luke.app.desktop.util.MessageUtils;
@@ -169,10 +168,20 @@ public final class SortPaneProvider implements SortTabOperator {
     List<SortField> li = new ArrayList<>();
     if (!StringUtils.isNullOrEmpty((String) fieldCombo1.getSelectedItem())) {
-      searchModel.getSortType((String) fieldCombo1.getSelectedItem(), (String) typeCombo1.getSelectedItem(), isReverse(orderCombo1)).ifPresent(li::add);
+      searchModel
+          .getSortType(
+              (String) fieldCombo1.getSelectedItem(),
+              (String) typeCombo1.getSelectedItem(),
+              isReverse(orderCombo1))
+          .ifPresent(li::add);
     }
     if (!StringUtils.isNullOrEmpty((String) fieldCombo2.getSelectedItem())) {
-      searchModel.getSortType((String) fieldCombo2.getSelectedItem(), (String) typeCombo2.getSelectedItem(), isReverse(orderCombo2)).ifPresent(li::add);
+      searchModel
+          .getSortType(
+              (String) fieldCombo2.getSelectedItem(),
+              (String) typeCombo2.getSelectedItem(),
+              isReverse(orderCombo2))
+          .ifPresent(li::add);
     }
     return new Sort(li.toArray(new SortField[0]));
   }
@@ -192,22 +201,26 @@ public final class SortPaneProvider implements SortTabOperator {
     resetExactHitsCnt();
   }
-  private void resetField(JComboBox<String> fieldCombo, JComboBox<String> typeCombo, JComboBox<String> orderCombo) {
+  private void resetField(
+      JComboBox<String> fieldCombo, JComboBox<String> typeCombo, JComboBox<String> orderCombo) {
     typeCombo.removeAllItems();
     if (StringUtils.isNullOrEmpty((String) fieldCombo.getSelectedItem())) {
       typeCombo.addItem("");
       typeCombo.setEnabled(false);
       orderCombo.setEnabled(false);
     } else {
-      List<SortField> sortFields = searchModel.guessSortTypes((String) fieldCombo.getSelectedItem());
+      List<SortField> sortFields =
+          searchModel.guessSortTypes((String) fieldCombo.getSelectedItem());
       sortFields.stream()
-          .map(sf -> {
-            if (sf instanceof SortedNumericSortField) {
-              return ((SortedNumericSortField) sf).getNumericType().name();
-            } else {
-              return sf.getType().name();
-            }
-          }).forEach(typeCombo::addItem);
+          .map(
+              sf -> {
+                if (sf instanceof SortedNumericSortField) {
+                  return ((SortedNumericSortField) sf).getNumericType().name();
+                } else {
+                  return sf.getType().name();
+                }
+              })
+          .forEach(typeCombo::addItem);
       typeCombo.setEnabled(true);
       orderCombo.setEnabled(true);
     }
@@ -232,21 +245,25 @@ public final class SortPaneProvider implements SortTabOperator {
   }
   private void resetExactHitsCnt() {
-    operatorRegistry.get(SearchTabOperator.class).ifPresent(operator -> {
-      if (StringUtils.isNullOrEmpty((String) fieldCombo1.getSelectedItem()) &&
-          StringUtils.isNullOrEmpty((String) fieldCombo2.getSelectedItem())) {
-        operator.enableExactHitsCB(true);
-        operator.setExactHits(false);
-      } else {
-        operator.enableExactHitsCB(false);
-        operator.setExactHits(true);
-      }
-    });
+    operatorRegistry
+        .get(SearchTabOperator.class)
+        .ifPresent(
+            operator -> {
+              if (StringUtils.isNullOrEmpty((String) fieldCombo1.getSelectedItem())
+                  && StringUtils.isNullOrEmpty((String) fieldCombo2.getSelectedItem())) {
+                operator.enableExactHitsCB(true);
+                operator.setExactHits(false);
+              } else {
+                operator.enableExactHitsCB(false);
+                operator.setExactHits(true);
+              }
+            });
   }
 }
 enum Order {
-  ASC, DESC;
+  ASC,
+  DESC;
   static String[] names() {
     return Arrays.stream(values()).map(Order::name).toArray(String[]::new);
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/SortTabOperator.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/SortTabOperator.java
index bdaa027cc60..b74b9d35b7a 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/SortTabOperator.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/SortTabOperator.java
@@ -18,7 +18,6 @@ package org.apache.lucene.luke.app.desktop.components.fragments.search;
 import java.util.Collection;
-
 import org.apache.lucene.luke.app.desktop.components.ComponentOperatorRegistry;
 import org.apache.lucene.luke.models.search.Search;
 import org.apache.lucene.search.Sort;
@@ -31,4 +30,3 @@ public interface SortTabOperator extends ComponentOperatorRegistry.ComponentOper
   Sort getSort();
 }
-
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/package-info.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/package-info.java
index dfa87f59cb4..8c194853f1a 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/package-info.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/search/package-info.java
@@ -16,4 +16,4 @@
  */
 /** UI parts embedded in tabs */
-package org.apache.lucene.luke.app.desktop.components.fragments.search;
\ No newline at end of file
+package org.apache.lucene.luke.app.desktop.components.fragments.search;
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/package-info.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/package-info.java
index fefd0c88925..a42cd7bc191 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/package-info.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/package-info.java
@@ -16,4 +16,4 @@
  */
 /** UI components of the desktop Luke */
-package org.apache.lucene.luke.app.desktop.components;
\ No newline at end of file
+package org.apache.lucene.luke.app.desktop.components;
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/dto/documents/NewField.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/dto/documents/NewField.java
index 44162a07d80..f4b28d60f21 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/dto/documents/NewField.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/dto/documents/NewField.java
@@ -18,7 +18,6 @@ package org.apache.lucene.luke.app.desktop.dto.documents;
 import java.util.Objects;
-
 import org.apache.lucene.document.DoublePoint;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
@@ -62,8 +61,7 @@ public final class NewField {
     return f;
   }
-  private NewField() {
-  }
+  private NewField() {}
   public boolean isDeleted() {
     return deleted;
@@ -144,5 +142,4 @@ public final class NewField {
   public void setValue(String value) {
     this.value = Objects.requireNonNull(value);
   }
-
 }
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/dto/documents/package-info.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/dto/documents/package-info.java
index 0f08238ddd1..addb3493781 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/dto/documents/package-info.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/dto/documents/package-info.java
@@ -16,4 +16,4 @@
  */
 /** DTO classes */
-package org.apache.lucene.luke.app.desktop.dto.documents;
\ No newline at end of file
+package org.apache.lucene.luke.app.desktop.dto.documents;
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/package-info.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/package-info.java
index c4c36bd22a9..a46af3f7573 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/package-info.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/package-info.java
@@ -16,4 +16,4 @@
  */
 /** Views (UIs) for Luke */
-package org.apache.lucene.luke.app.desktop;
\ No newline at end of file
+package org.apache.lucene.luke.app.desktop;
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/DialogOpener.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/DialogOpener.java
index 49e1b4f6c59..e61c82ab285 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/DialogOpener.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/DialogOpener.java
@@ -17,10 +17,9 @@
 package org.apache.lucene.luke.app.desktop.util;
-import javax.swing.JDialog;
 import java.awt.Window;
 import java.util.function.Consumer;
-
+import javax.swing.JDialog;
 import org.apache.lucene.luke.app.desktop.LukeMain;
 /** An utility class for opening a dialog */
@@ -32,13 +31,18 @@ public class DialogOpener {
     this.factory = factory;
   }
-  public void open(String title, int width, int height, Consumer<T> initializer,
-      String... styleSheets) {
+  public void open(
+      String title, int width, int height, Consumer<T> initializer, String... styleSheets) {
     open(LukeMain.getOwnerFrame(), title, width, height, initializer, styleSheets);
   }
-  public void open(Window owner, String title, int width, int height, Consumer<T> initializer,
-      String... styleSheets) {
+  public void open(
+      Window owner,
+      String title,
+      int width,
+      int height,
+      Consumer<T> initializer,
+      String... styleSheets) {
     initializer.accept(factory);
     JDialog dialog = factory.create(owner, title, width, height);
     dialog.setVisible(true);
@@ -48,5 +52,4 @@
   public interface DialogFactory {
     JDialog create(Window owner, String title, int width, int height);
   }
-
 }
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/ExceptionHandler.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/ExceptionHandler.java
index 60a4bd1372a..481047c631d 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/ExceptionHandler.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/ExceptionHandler.java
@@ -18,7 +18,6 @@ package org.apache.lucene.luke.app.desktop.util;
 import java.lang.invoke.MethodHandles;
-
 import org.apache.logging.log4j.Logger;
 import org.apache.lucene.luke.app.desktop.MessageBroker;
 import org.apache.lucene.luke.models.LukeException;
@@ -40,5 +39,4 @@ public final class ExceptionHandler {
       messageBroker.showUnknownErrorMessage();
     }
   }
-
 }
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/FontUtils.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/FontUtils.java
index c4f47588815..02c201ea448 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/FontUtils.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/FontUtils.java
@@ -17,18 +17,19 @@
 package org.apache.lucene.luke.app.desktop.util;
-import javax.swing.JLabel;
 import java.awt.Font;
 import java.awt.FontFormatException;
 import java.awt.font.TextAttribute;
 import java.io.IOException;
 import java.io.InputStream;
 import java.util.Map;
+import javax.swing.JLabel;
 /** Font utilities */
 public class FontUtils {
-  public static final String TTF_RESOURCE_NAME = "org/apache/lucene/luke/app/desktop/font/ElegantIcons.ttf";
+  public static final String TTF_RESOURCE_NAME =
+      "org/apache/lucene/luke/app/desktop/font/ElegantIcons.ttf";
   @SuppressWarnings("unchecked")
   public static JLabel toLinkText(JLabel label) {
@@ -46,8 +47,8 @@ public class FontUtils {
   }
   /**
-   * Generates HTML text with embedded Elegant Icon Font.
-   * See: https://www.elegantthemes.com/blog/resources/elegant-icon-font
+   * Generates HTML text with embedded Elegant Icon Font. See:
+   * https://www.elegantthemes.com/blog/resources/elegant-icon-font
    *
    * @param iconRef HTML numeric character reference of the icon
    */
@@ -65,7 +66,5 @@ public class FontUtils {
     return "" + iconRef + " " + text + "";
   }
-  private FontUtils() {
-  }
-
+  private FontUtils() {}
 }
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/HelpHeaderRenderer.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/HelpHeaderRenderer.java
index 41c7f079e50..a1bb464accd 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/HelpHeaderRenderer.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/HelpHeaderRenderer.java
@@ -17,6 +17,11 @@
 package org.apache.lucene.luke.app.desktop.util;
+import java.awt.Component;
+import java.awt.FlowLayout;
+import java.awt.event.MouseAdapter;
+import java.awt.event.MouseEvent;
+import java.util.Objects;
 import javax.swing.JComponent;
 import javax.swing.JDialog;
 import javax.swing.JLabel;
@@ -25,17 +30,9 @@ import javax.swing.JTable;
 import javax.swing.UIManager;
 import javax.swing.table.JTableHeader;
 import javax.swing.table.TableCellRenderer;
-import java.awt.Component;
-import java.awt.FlowLayout;
-import java.awt.event.MouseAdapter;
-import java.awt.event.MouseEvent;
-import java.util.Objects;
-
 import org.apache.lucene.luke.app.desktop.components.dialog.HelpDialogFactory;
-/**
- * Cell render class for table header with help dialog.
- */
+/** Cell render class for table header with help dialog. */
 public final class HelpHeaderRenderer implements TableCellRenderer {
   private JTable table;
@@ -52,12 +49,17 @@ public final class HelpHeaderRenderer implements TableCellRenderer {
   private final JDialog parent;
-  public HelpHeaderRenderer(String title, String desc, JComponent helpContent, HelpDialogFactory helpDialogFactory) {
+  public HelpHeaderRenderer(
+      String title, String desc, JComponent helpContent, HelpDialogFactory helpDialogFactory) {
     this(title, desc, helpContent, helpDialogFactory, null);
   }
-  public HelpHeaderRenderer(String title, String desc, JComponent helpContent, HelpDialogFactory helpDialogFactory,
-      JDialog parent) {
+  public HelpHeaderRenderer(
+      String title,
+      String desc,
+      JComponent helpContent,
+      HelpDialogFactory helpDialogFactory,
+      JDialog parent) {
     this.title = title;
     this.desc = desc;
     this.helpContent = helpContent;
@@ -67,7 +69,8 @@ public final class HelpHeaderRenderer implements TableCellRenderer {
   @Override
   @SuppressWarnings("unchecked")
-  public Component getTableCellRendererComponent(JTable table, Object value, boolean isSelected, boolean hasFocus, int row, int column) {
+  public Component getTableCellRendererComponent(
+      JTable table, Object value, boolean isSelected, boolean hasFocus, int row, int column) {
     if (table != null && this.table != table) {
       this.table = table;
       final JTableHeader header = table.getTableHeader();
@@ -78,13 +81,17 @@ public final class HelpHeaderRenderer implements TableCellRenderer {
         // add label with mouse click listener
         // when the label is clicked, help dialog will be displayed.
-        JLabel helpLabel = new JLabel(FontUtils.elegantIconHtml("t", MessageUtils.getLocalizedMessage("label.help")));
+        JLabel helpLabel =
+            new JLabel(
+                FontUtils.elegantIconHtml(
+                    "t", MessageUtils.getLocalizedMessage("label.help")));
         helpLabel.setHorizontalAlignment(JLabel.LEFT);
         helpLabel.setIconTextGap(5);
         panel.add(FontUtils.toLinkText(helpLabel));
         // add mouse listener to JTableHeader object.
-        // see: https://stackoverflow.com/questions/7137786/how-can-i-put-a-control-in-the-jtableheader-of-a-jtable
+        // see:
+        // https://stackoverflow.com/questions/7137786/how-can-i-put-a-control-in-the-jtableheader-of-a-jtable
         header.addMouseListener(new HelpClickListener(column));
       }
     }
@@ -110,20 +117,28 @@ public final class HelpHeaderRenderer implements TableCellRenderer {
       if (column == this.column && e.getClickCount() == 1 && column != -1) {
         // only when the targeted column header is clicked, pop up the dialog
         if (Objects.nonNull(parent)) {
-          new DialogOpener<>(helpDialogFactory).open(parent, title, 600, 350,
-              (factory) -> {
-                factory.setDesc(desc);
-                factory.setContent(helpContent);
-              });
+          new DialogOpener<>(helpDialogFactory)
+              .open(
+                  parent,
+                  title,
+                  600,
+                  350,
+                  (factory) -> {
+                    factory.setDesc(desc);
+                    factory.setContent(helpContent);
+                  });
         } else {
-          new DialogOpener<>(helpDialogFactory).open(title, 600, 350,
-              (factory) -> {
-                factory.setDesc(desc);
-                factory.setContent(helpContent);
-              });
+          new DialogOpener<>(helpDialogFactory)
+              .open(
+                  title,
+                  600,
+                  350,
+                  (factory) -> {
+                    factory.setDesc(desc);
+                    factory.setContent(helpContent);
+                  });
         }
       }
     }
-
   }
 }
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/ImageUtils.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/ImageUtils.java
index d7989f9353e..79bfaa6c7cd 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/ImageUtils.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/ImageUtils.java
@@ -17,8 +17,8 @@
 package org.apache.lucene.luke.app.desktop.util;
-import javax.swing.ImageIcon;
 import java.awt.Image;
+import javax.swing.ImageIcon;
 /** Image utilities */
 public class ImageUtils {
@@ -33,13 +33,14 @@ public class ImageUtils {
     java.net.URL imgURL = ImageUtils.class.getClassLoader().getResource(IMAGE_BASE_DIR + name);
     if (imgURL != null) {
       ImageIcon originalIcon = new ImageIcon(imgURL, description);
-      ImageIcon icon = new ImageIcon(originalIcon.getImage().getScaledInstance(width, height, Image.SCALE_DEFAULT));
+      ImageIcon icon =
+          new ImageIcon(
+              originalIcon.getImage().getScaledInstance(width, height, Image.SCALE_DEFAULT));
       return icon;
     } else {
       return null;
     }
   }
-  private ImageUtils() {
-  }
+  private ImageUtils() {}
 }
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/ListUtils.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/ListUtils.java
index cc756eaffa3..fc69261037b 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/ListUtils.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/ListUtils.java
@@ -17,12 +17,12 @@
 package org.apache.lucene.luke.app.desktop.util;
-import javax.swing.JList;
-import javax.swing.ListModel;
 import java.util.List;
 import java.util.function.IntFunction;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
+import javax.swing.JList;
+import javax.swing.ListModel;
 /** List model utilities */
 public class ListUtils {
@@ -37,7 +37,5 @@ public class ListUtils {
     return IntStream.range(0, model.getSize()).mapToObj(mapFunc).collect(Collectors.toList());
   }
-  private ListUtils() {
-  }
-
+  private ListUtils() {}
 }
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/MessageUtils.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/MessageUtils.java
index cc6989159c9..f8fd192eb77 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/MessageUtils.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/MessageUtils.java
@@ -26,12 +26,11 @@ import java.util.Locale;
 import java.util.PropertyResourceBundle;
 import java.util.ResourceBundle;
-/**
- * Utilities for accessing message resources.
- */
+/** Utilities for accessing message resources. */
 public class MessageUtils {
-  public static final String MESSAGE_BUNDLE_BASENAME = "org/apache/lucene/luke/app/desktop/messages/messages";
+  public static final String MESSAGE_BUNDLE_BASENAME =
+      "org/apache/lucene/luke/app/desktop/messages/messages";
   public static String getLocalizedMessage(String key) {
     return bundle.getString(key);
@@ -43,19 +42,23 @@ public class MessageUtils {
   }
   // https://stackoverflow.com/questions/4659929/how-to-use-utf-8-in-resource-properties-with-resourcebundle
-  private static ResourceBundle.Control UTF8_RESOURCEBUNDLE_CONTROL = new ResourceBundle.Control() {
-    @Override
-    public ResourceBundle newBundle(String baseName, Locale locale, String format, ClassLoader loader, boolean reload) throws IllegalAccessException, InstantiationException, IOException {
-      String bundleName = toBundleName(baseName, locale);
-      String resourceName = toResourceName(bundleName, "properties");
-      try (InputStream is = loader.getResourceAsStream(resourceName)) {
-        return new PropertyResourceBundle(new InputStreamReader(is, StandardCharsets.UTF_8));
-      }
-    }
-  };
+  private static ResourceBundle.Control UTF8_RESOURCEBUNDLE_CONTROL =
+      new ResourceBundle.Control() {
+        @Override
+        public ResourceBundle newBundle(
+            String baseName, Locale locale, String format, ClassLoader loader, boolean reload)
+            throws IllegalAccessException, InstantiationException, IOException {
+          String bundleName = toBundleName(baseName, locale);
+          String resourceName = toResourceName(bundleName, "properties");
+          try (InputStream is = loader.getResourceAsStream(resourceName)) {
+            return new PropertyResourceBundle(new InputStreamReader(is, StandardCharsets.UTF_8));
+          }
+        }
+      };
-  private static ResourceBundle bundle = ResourceBundle.getBundle(MESSAGE_BUNDLE_BASENAME, Locale.ENGLISH, UTF8_RESOURCEBUNDLE_CONTROL);
+  private static ResourceBundle bundle =
+      ResourceBundle.getBundle(
+          MESSAGE_BUNDLE_BASENAME, Locale.ENGLISH, UTF8_RESOURCEBUNDLE_CONTROL);
-  private MessageUtils() {
-  }
+  private MessageUtils() {}
 }
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/NumericUtils.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/NumericUtils.java
index ae2ef5ac341..fa4d3e7c96f 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/NumericUtils.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/NumericUtils.java
@@ -22,39 +22,42 @@ import java.util.Arrays;
 /** Utilities for handling numeric values */
 public class NumericUtils {
-  public static int[] convertToIntArray(String value, boolean ignoreException) throws NumberFormatException {
+  public static int[] convertToIntArray(String value, boolean ignoreException)
+      throws NumberFormatException {
     if (StringUtils.isNullOrEmpty(value)) {
-      return new int[]{0};
+      return new int[] {0};
     }
     try {
       return Arrays.stream(value.trim().split(",")).mapToInt(Integer::parseInt).toArray();
     } catch (NumberFormatException e) {
       if (ignoreException) {
-        return new int[]{0};
+        return new int[] {0};
       } else {
         throw e;
       }
     }
   }
-  public static long[] convertToLongArray(String value, boolean ignoreException) throws NumberFormatException {
+  public static long[] convertToLongArray(String value, boolean ignoreException)
+      throws NumberFormatException {
     if (StringUtils.isNullOrEmpty(value)) {
-      return new long[]{0};
+      return new long[] {0};
     }
     try {
       return Arrays.stream(value.trim().split(",")).mapToLong(Long::parseLong).toArray();
     } catch (NumberFormatException e) {
       if (ignoreException) {
-        return new long[]{0};
+        return new long[] {0};
      } else {
         throw e;
       }
     }
   }
-  public static float[] convertToFloatArray(String value, boolean ignoreException) throws NumberFormatException {
+  public static float[] convertToFloatArray(String value, boolean ignoreException)
+      throws NumberFormatException {
     if (StringUtils.isNullOrEmpty(value)) {
-      return new float[]{0};
+      return new float[] {0};
     }
     try {
       String[] strVals = value.trim().split(",");
@@ -65,22 +68,23 @@ public class NumericUtils {
       return values;
     } catch (NumberFormatException e) {
       if (ignoreException) {
-        return new float[]{0};
+        return new float[] {0};
       } else {
         throw e;
       }
     }
   }
-  public static double[] convertToDoubleArray(String value, boolean ignoreException) throws NumberFormatException {
+  public static double[] convertToDoubleArray(String value, boolean ignoreException)
+      throws NumberFormatException {
     if (StringUtils.isNullOrEmpty(value)) {
-      return new double[]{0};
+      return new double[] {0};
     }
     try {
       return Arrays.stream(value.trim().split(",")).mapToDouble(Double::parseDouble).toArray();
     } catch (NumberFormatException e) {
       if (ignoreException) {
-        return new double[]{0};
+        return new double[] {0};
       } else {
         throw e;
       }
@@ -98,6 +102,5 @@ public class NumericUtils {
     }
   }
-  private NumericUtils() {
-  }
+  private NumericUtils() {}
 }
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/StringUtils.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/StringUtils.java
index 23a4f79c2d4..792cb4c58cf 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/StringUtils.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/StringUtils.java
@@ -26,6 +26,5 @@ public class StringUtils {
     return Objects.isNull(s) || s.equals("");
   }
-  private StringUtils() {
-  }
+  private StringUtils() {}
 }
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/StyleConstants.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/StyleConstants.java
index 3b70265cf87..3b0f984963d 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/StyleConstants.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/StyleConstants.java
@@ -37,7 +37,5 @@ public class StyleConstants {
   public static int TABLE_ROW_MARGIN_DEFAULT = 3;
-  private StyleConstants() {
-  }
-
+  private StyleConstants() {}
 }
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/TabUtils.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/TabUtils.java
index c3dc7a1e479..a47de061496 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/TabUtils.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/TabUtils.java
@@ -17,9 +17,9 @@
 package org.apache.lucene.luke.app.desktop.util;
+import java.awt.Graphics;
 import javax.swing.JTabbedPane;
 import javax.swing.UIManager;
-import java.awt.Graphics;
 /** Tab utilities */
 public class TabUtils {
@@ -31,11 +31,11 @@ public class TabUtils {
       return;
     }
     // https://coderanch.com/t/600541/java/JtabbedPane-transparency
-    tabbedPane.setUI(new javax.swing.plaf.metal.MetalTabbedPaneUI() {
-      protected void paintContentBorder(Graphics g, int tabPlacement, int selectedIndex) {
-      }
-    });
tabbedPane.setUI( + new javax.swing.plaf.metal.MetalTabbedPaneUI() { + protected void paintContentBorder(Graphics g, int tabPlacement, int selectedIndex) {} + }); } - private TabUtils(){} + private TabUtils() {} } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/TableUtils.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/TableUtils.java index cea72aea1fd..a14cfb5ca7d 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/TableUtils.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/TableUtils.java @@ -17,23 +17,26 @@ package org.apache.lucene.luke.app.desktop.util; -import javax.swing.JTable; -import javax.swing.table.DefaultTableModel; -import javax.swing.table.TableModel; import java.awt.Color; import java.awt.event.MouseListener; import java.util.Arrays; import java.util.TreeMap; import java.util.function.UnaryOperator; import java.util.stream.Collectors; - +import javax.swing.JTable; +import javax.swing.table.DefaultTableModel; +import javax.swing.table.TableModel; import org.apache.lucene.luke.app.desktop.components.TableColumnInfo; /** Table utilities */ public class TableUtils { - public static void setupTable(JTable table, int selectionModel, TableModel model, MouseListener mouseListener, - int... colWidth) { + public static void setupTable( + JTable table, + int selectionModel, + TableModel model, + MouseListener mouseListener, + int... colWidth) { table.setFillsViewportHeight(true); table.setFont(StyleConstants.FONT_MONOSPACE_LARGE); table.setRowHeight(StyleConstants.TABLE_ROW_HEIGHT_DEFAULT); @@ -72,14 +75,16 @@ public class TableUtils { } public static String[] columnNames(T[] columns) { - return columnMap(columns).entrySet().stream().map(e -> e.getValue().getColName()).toArray(String[]::new); + return columnMap(columns).entrySet().stream() + .map(e -> e.getValue().getColName()) + .toArray(String[]::new); } public static TreeMap columnMap(T[] columns) { - return Arrays.stream(columns).collect(Collectors.toMap(T::getIndex, UnaryOperator.identity(), (e1, e2) -> e1, TreeMap::new)); - } - - private TableUtils() { + return Arrays.stream(columns) + .collect( + Collectors.toMap(T::getIndex, UnaryOperator.identity(), (e1, e2) -> e1, TreeMap::new)); } + private TableUtils() {} } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/TextAreaAppender.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/TextAreaAppender.java index b7b1d421383..3d6964ae00c 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/TextAreaAppender.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/TextAreaAppender.java @@ -17,13 +17,12 @@ package org.apache.lucene.luke.app.desktop.util; -import javax.swing.JTextArea; -import javax.swing.SwingUtilities; import java.io.Serializable; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; - +import javax.swing.JTextArea; +import javax.swing.SwingUtilities; import org.apache.logging.log4j.core.Appender; import org.apache.logging.log4j.core.Core; import org.apache.logging.log4j.core.Filter; @@ -36,7 +35,11 @@ import org.apache.logging.log4j.core.config.plugins.Plugin; import org.apache.logging.log4j.core.config.plugins.PluginBuilderFactory; /** Log appender for text areas */ -@Plugin(name = "TextArea", category = Core.CATEGORY_NAME, elementType = Appender.ELEMENT_TYPE, printObject = true) +@Plugin( + name = "TextArea", + 
category = Core.CATEGORY_NAME, + elementType = Appender.ELEMENT_TYPE, + printObject = true) public final class TextAreaAppender extends AbstractAppender { private static JTextArea textArea; @@ -45,8 +48,11 @@ public final class TextAreaAppender extends AbstractAppender { private static final Lock readLock = rwLock.readLock(); private static final Lock writeLock = rwLock.writeLock(); - protected TextAreaAppender(String name, Filter filter, - org.apache.logging.log4j.core.Layout layout, final boolean ignoreExceptions) { + protected TextAreaAppender( + String name, + Filter filter, + org.apache.logging.log4j.core.Layout layout, + final boolean ignoreExceptions) { super(name, filter, layout, ignoreExceptions, Property.EMPTY_ARRAY); } @@ -70,11 +76,12 @@ public final class TextAreaAppender extends AbstractAppender { // just ignore any events logged before the area is available return; } - + final String message = ((StringLayout) getLayout()).toSerializable(event); - SwingUtilities.invokeLater(() -> { - textArea.append(message); - }); + SwingUtilities.invokeLater( + () -> { + textArea.append(message); + }); } finally { readLock.unlock(); } @@ -98,5 +105,4 @@ public final class TextAreaAppender extends AbstractAppender { public static > B newBuilder() { return new Builder().asBuilder(); } - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/TextAreaPrintStream.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/TextAreaPrintStream.java index d826b656799..bb59fac9bf6 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/TextAreaPrintStream.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/TextAreaPrintStream.java @@ -17,10 +17,10 @@ package org.apache.lucene.luke.app.desktop.util; -import javax.swing.JTextArea; import java.io.ByteArrayOutputStream; import java.io.PrintStream; import java.nio.charset.StandardCharsets; +import javax.swing.JTextArea; /** PrintStream for text areas */ public final class TextAreaPrintStream extends PrintStream { diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/URLLabel.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/URLLabel.java index 4b6e71bf0fe..51e188e9af5 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/URLLabel.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/URLLabel.java @@ -17,7 +17,6 @@ package org.apache.lucene.luke.app.desktop.util; -import javax.swing.JLabel; import java.awt.Cursor; import java.awt.Desktop; import java.awt.event.MouseAdapter; @@ -26,7 +25,7 @@ import java.io.IOException; import java.net.MalformedURLException; import java.net.URISyntaxException; import java.net.URL; - +import javax.swing.JLabel; import org.apache.lucene.luke.models.LukeException; /** JLabel extension for representing urls */ @@ -45,12 +44,13 @@ public final class URLLabel extends JLabel { setCursor(Cursor.getPredefinedCursor(Cursor.HAND_CURSOR)); - addMouseListener(new MouseAdapter() { - @Override - public void mouseClicked(MouseEvent e) { - openUrl(link); - } - }); + addMouseListener( + new MouseAdapter() { + @Override + public void mouseClicked(MouseEvent e) { + openUrl(link); + } + }); } private void openUrl(URL link) { diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/inifile/IniFile.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/inifile/IniFile.java index fd723ba78b7..7969de90cde 100644 --- 
a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/inifile/IniFile.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/inifile/IniFile.java @@ -32,5 +32,4 @@ public interface IniFile { String getString(String section, String option); Boolean getBoolean(String section, String option); - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/inifile/IniFileReader.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/inifile/IniFileReader.java index 21bb85ada49..608642c71de 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/inifile/IniFileReader.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/inifile/IniFileReader.java @@ -25,5 +25,4 @@ import java.util.Map; public interface IniFileReader { Map readSections(Path path) throws IOException; - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/inifile/IniFileWriter.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/inifile/IniFileWriter.java index 9977046e3a7..da7c686c604 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/inifile/IniFileWriter.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/inifile/IniFileWriter.java @@ -25,5 +25,4 @@ import java.util.Map; public interface IniFileWriter { void writeSections(Path path, Map sections) throws IOException; - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/inifile/OptionMap.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/inifile/OptionMap.java index f7783d70609..a2357db15fa 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/inifile/OptionMap.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/inifile/OptionMap.java @@ -29,5 +29,4 @@ public class OptionMap extends LinkedHashMap { Boolean getAsBoolean(String key) { return Boolean.parseBoolean(get(key)); } - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/inifile/SimpleIniFile.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/inifile/SimpleIniFile.java index 3c539f81f7c..da32d236a19 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/inifile/SimpleIniFile.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/inifile/SimpleIniFile.java @@ -46,7 +46,9 @@ public class SimpleIniFile implements IniFile { public synchronized void put(String section, String option, Object value) { if (checkString(section) && checkString(option) && Objects.nonNull(value)) { sections.putIfAbsent(section, new OptionMap()); - sections.get(section).put(option, (value instanceof String) ? (String) value : String.valueOf(value)); + sections + .get(section) + .put(option, (value instanceof String) ? 
(String) value : String.valueOf(value)); } } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/inifile/SimpleIniFileReader.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/inifile/SimpleIniFileReader.java index 00a03636040..2113a4514f9 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/inifile/SimpleIniFileReader.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/inifile/SimpleIniFileReader.java @@ -35,29 +35,30 @@ public class SimpleIniFileReader implements IniFileReader { final Map sections = new LinkedHashMap<>(); try (BufferedReader r = Files.newBufferedReader(path, StandardCharsets.UTF_8)) { - r.lines().forEach(line -> { - line = line.trim(); + r.lines() + .forEach( + line -> { + line = line.trim(); - if (isSectionLine(line)) { - // set section if this is a valid section string - currentSection = line.substring(1, line.length()-1); - sections.putIfAbsent(currentSection, new OptionMap()); - } else if (!currentSection.equals("")) { - // put option if this is a valid option string - String[] ary = line.split("=", 2); - if (ary.length == 2 && !ary[0].trim().equals("") && !ary[1].trim().equals("")) { - sections.get(currentSection).put(ary[0].trim(), ary[1].trim()); - } - } - - }); + if (isSectionLine(line)) { + // set section if this is a valid section string + currentSection = line.substring(1, line.length() - 1); + sections.putIfAbsent(currentSection, new OptionMap()); + } else if (!currentSection.equals("")) { + // put option if this is a valid option string + String[] ary = line.split("=", 2); + if (ary.length == 2 && !ary[0].trim().equals("") && !ary[1].trim().equals("")) { + sections.get(currentSection).put(ary[0].trim(), ary[1].trim()); + } + } + }); } return sections; } private boolean isSectionLine(String line) { - return line.startsWith("[") && line.endsWith("]") - && line.substring(1, line.length()-1).matches("^[a-zA-Z0-9]+$"); + return line.startsWith("[") + && line.endsWith("]") + && line.substring(1, line.length() - 1).matches("^[a-zA-Z0-9]+$"); } - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/inifile/package-info.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/inifile/package-info.java index d03b86fa42f..cfbddd15889 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/inifile/package-info.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/inifile/package-info.java @@ -16,4 +16,4 @@ */ /** Ini file parser / writer */ -package org.apache.lucene.luke.app.desktop.util.inifile; \ No newline at end of file +package org.apache.lucene.luke.app.desktop.util.inifile; diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/lang/package-info.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/lang/package-info.java index 5cf30577ae9..cbef8ff4e29 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/lang/package-info.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/lang/package-info.java @@ -16,4 +16,4 @@ */ /** Syntax sugars / helpers */ -package org.apache.lucene.luke.app.desktop.util.lang; \ No newline at end of file +package org.apache.lucene.luke.app.desktop.util.lang; diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/package-info.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/package-info.java index bd43e1e5f96..61d38ec9b33 100644 --- 
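For orientation, a minimal usage sketch of the ini-file API reformatted above. It is not part of the patch; it uses only members visible in the hunks (SimpleIniFile.put, and the getString/getBoolean operations SimpleIniFile implements from IniFile), and leaves out the IniFileWriter/IniFileReader round trip.

import org.apache.lucene.luke.app.desktop.util.inifile.SimpleIniFile;

public class IniFileSketch {
  public static void main(String[] args) {
    // Sections are created lazily by put(); non-String values go through
    // String.valueOf, exactly as in SimpleIniFile.put above.
    SimpleIniFile ini = new SimpleIniFile();
    ini.put("settings", "theme", "gray");
    ini.put("settings", "readOnly", true);

    System.out.println(ini.getString("settings", "theme"));     // gray
    System.out.println(ini.getBoolean("settings", "readOnly")); // true
  }
}
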
a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/package-info.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/package-info.java @@ -16,4 +16,4 @@ */ /** Utilities for the UI components */ -package org.apache.lucene.luke.app.desktop.util; \ No newline at end of file +package org.apache.lucene.luke.app.desktop.util; diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/package-info.java b/lucene/luke/src/java/org/apache/lucene/luke/app/package-info.java index 8e7ea9e4f4d..e0f12f28ff2 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/package-info.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/package-info.java @@ -16,4 +16,4 @@ */ /** Views (UIs) for Luke */ -package org.apache.lucene.luke.app; \ No newline at end of file +package org.apache.lucene.luke.app; diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/LukeException.java b/lucene/luke/src/java/org/apache/lucene/luke/models/LukeException.java index d8bcbfa34ae..0520814baa4 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/LukeException.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/LukeException.java @@ -31,5 +31,4 @@ public class LukeException extends RuntimeException { public LukeException(String message) { super(message); } - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/LukeModel.java b/lucene/luke/src/java/org/apache/lucene/luke/models/LukeModel.java index 524426cfc5a..d0968039718 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/LukeModel.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/LukeModel.java @@ -20,7 +20,6 @@ package org.apache.lucene.luke.models; import java.io.IOException; import java.util.Collection; import java.util.Objects; - import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexReader; @@ -29,7 +28,8 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.Bits; /** - * Abstract model class. It holds index reader object and provides basic features for all concrete sub classes. + * Abstract model class. It holds index reader object and provides basic features for all concrete + * sub classes. */ public abstract class LukeModel { @@ -60,12 +60,11 @@ public abstract class LukeModel { this.liveDocs = IndexUtils.getLiveDocs(reader); } - protected LukeModel (Directory dir) { + protected LukeModel(Directory dir) { this.dir = Objects.requireNonNull(dir); } public Collection getFieldNames() { return IndexUtils.getFieldNames(reader); } - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/analysis/Analysis.java b/lucene/luke/src/java/org/apache/lucene/luke/models/analysis/Analysis.java index 2bcdaeefde1..676e2178c01 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/analysis/Analysis.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/analysis/Analysis.java @@ -21,21 +21,16 @@ import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Objects; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.CharFilterFactory; import org.apache.lucene.analysis.TokenFilterFactory; import org.apache.lucene.analysis.TokenizerFactory; import org.apache.lucene.luke.models.LukeException; -/** - * A dedicated interface for Luke's Analysis tab. - */ +/** A dedicated interface for Luke's Analysis tab. */ public interface Analysis { - /** - * Holder for a token. - */ + /** Holder for a token. 
*/ class Token { private final String term; private final List attributes; @@ -45,24 +40,18 @@ public interface Analysis { this.attributes = Objects.requireNonNull(attributes); } - /** - * Returns the string representation of this token. - */ + /** Returns the string representation of this token. */ public String getTerm() { return term; } - /** - * Returns attributes of this token. - */ + /** Returns attributes of this token. */ public List getAttributes() { return List.copyOf(attributes); } } - /** - * Holder for a token attribute. - */ + /** Holder for a token attribute. */ class TokenAttribute { private final String attClass; private final Map attValues; @@ -72,22 +61,17 @@ public interface Analysis { this.attValues = Objects.requireNonNull(attValues); } - /** - * Returns attribute class name. - */ + /** Returns attribute class name. */ public String getAttClass() { return attClass; } - /** - * Returns value of this attribute. - */ + /** Returns value of this attribute. */ public Map getAttValues() { return Map.copyOf(attValues); } } - /** Base class for named object */ abstract class NamedObject { private final String name; @@ -101,9 +85,7 @@ public interface Analysis { } } - /** - * Holder for a pair tokenizer/filter and token list - */ + /** Holder for a pair tokenizer/filter and token list */ class NamedTokens extends NamedObject { private final List tokens; @@ -117,9 +99,7 @@ public interface Analysis { } } - /** - * Holder for a charfilter name and text that output by the charfilter - */ + /** Holder for a charfilter name and text that output by the charfilter */ class CharfilteredText extends NamedObject { private final String text; @@ -133,14 +113,13 @@ public interface Analysis { } } - /** - * Step-by-step analysis result holder. - */ + /** Step-by-step analysis result holder. */ class StepByStepResult { private List charfilteredTexts; private List namedTokens; - public StepByStepResult(List charfilteredTexts, List namedTokens) { + public StepByStepResult( + List charfilteredTexts, List namedTokens) { this.charfilteredTexts = charfilteredTexts; this.namedTokens = namedTokens; } @@ -154,24 +133,16 @@ public interface Analysis { } } - /** - * Returns built-in {@link Analyzer}s. - */ + /** Returns built-in {@link Analyzer}s. */ Collection> getPresetAnalyzerTypes(); - /** - * Returns available char filter names. - */ + /** Returns available char filter names. */ Collection getAvailableCharFilters(); - /** - * Returns available tokenizer names. - */ + /** Returns available tokenizer names. */ Collection getAvailableTokenizers(); - /** - * Returns available token filter names. - */ + /** Returns available token filter names. */ Collection getAvailableTokenFilters(); /** @@ -203,25 +174,26 @@ public interface Analysis { /** * Returns current analyzer. + * * @throws LukeException - if current analyzer not set */ Analyzer currentAnalyzer(); /** - * Adds external jar files to classpath and loads custom {@link CharFilterFactory}s, {@link TokenizerFactory}s, or {@link TokenFilterFactory}s. + * Adds external jar files to classpath and loads custom {@link CharFilterFactory}s, {@link + * TokenizerFactory}s, or {@link TokenFilterFactory}s. * * @param jarFiles - list of paths to jar file * @throws LukeException - if an internal error occurs when loading jars */ void addExternalJars(List jarFiles); - /** * Analyzes given text with the current Analyzer. 
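A sketch of a caller walking the StepByStepResult holders defined above (analyzeStepByStep itself is documented just below). Only the constructor shapes and Token.getTerm are visible in these hunks; the other accessor names used here (getCharfilteredTexts, getNamedTokens, getTokens, getText, and getName from NamedObject) are assumptions read off the field names, and a custom analyzer must be configured on the Analysis instance first.

static void printStepByStep(Analysis analysis, String text) {
  Analysis.StepByStepResult result = analysis.analyzeStepByStep(text);
  // One entry per char filter: the text as it looked after that stage.
  for (Analysis.CharfilteredText ct : result.getCharfilteredTexts()) { // assumed accessor
    System.out.println(ct.getName() + " -> " + ct.getText());          // assumed accessors
  }
  // One entry per tokenizer/token filter: the tokens it produced.
  for (Analysis.NamedTokens nt : result.getNamedTokens()) {            // assumed accessor
    System.out.print(nt.getName() + ":");                              // assumed accessor
    for (Analysis.Token token : nt.getTokens()) {                      // assumed accessor
      System.out.print(" " + token.getTerm());                         // shown above
    }
    System.out.println();
  }
}
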
* * @param text - text string to analyze - * @return the list of text by charfilter and the list of pair of Tokenizer/TokenFilter name and tokens + * @return the list of text by charfilter and the list of pair of Tokenizer/TokenFilter name and + * tokens */ StepByStepResult analyzeStepByStep(String text); - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/analysis/AnalysisFactory.java b/lucene/luke/src/java/org/apache/lucene/luke/models/analysis/AnalysisFactory.java index 8fa49c6162c..4027a2f835e 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/analysis/AnalysisFactory.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/analysis/AnalysisFactory.java @@ -23,5 +23,4 @@ public class AnalysisFactory { public Analysis newInstance() { return new AnalysisImpl(); } - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/analysis/AnalysisImpl.java b/lucene/luke/src/java/org/apache/lucene/luke/models/analysis/AnalysisImpl.java index 8c58df976a2..aca2497d4be 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/analysis/AnalysisImpl.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/analysis/AnalysisImpl.java @@ -37,15 +37,14 @@ import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.stream.Collectors; - import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.analysis.Tokenizer; -import org.apache.lucene.analysis.custom.CustomAnalyzer; -import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.CharFilterFactory; import org.apache.lucene.analysis.TokenFilterFactory; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.TokenizerFactory; +import org.apache.lucene.analysis.custom.CustomAnalyzer; +import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.luke.models.LukeException; import org.apache.lucene.luke.util.reflection.ClassScanner; import org.apache.lucene.util.AttributeImpl; @@ -66,7 +65,8 @@ public final class AnalysisImpl implements Analysis { for (String jarFile : jarFiles) { Path path = FileSystems.getDefault().getPath(jarFile); if (!Files.exists(path) || !jarFile.endsWith(".jar")) { - throw new LukeException(String.format(Locale.ENGLISH, "Invalid jar file path: %s", jarFile)); + throw new LukeException( + String.format(Locale.ENGLISH, "Invalid jar file path: %s", jarFile)); } try { URL url = path.toUri().toURL(); @@ -77,8 +77,8 @@ public final class AnalysisImpl implements Analysis { } // reload available tokenizers, charfilters, and tokenfilters - URLClassLoader classLoader = new URLClassLoader( - urls.toArray(new URL[0]), this.getClass().getClassLoader()); + URLClassLoader classLoader = + new URLClassLoader(urls.toArray(new URL[0]), this.getClass().getClassLoader()); CharFilterFactory.reloadCharFilters(classLoader); TokenizerFactory.reloadTokenizers(classLoader); TokenFilterFactory.reloadTokenFilters(classLoader); @@ -113,11 +113,14 @@ public final class AnalysisImpl implements Analysis { @Override public Collection getAvailableTokenFilters() { - return TokenFilterFactory.availableTokenFilters().stream().sorted().collect(Collectors.toList()); + return TokenFilterFactory.availableTokenFilters().stream() + .sorted() + .collect(Collectors.toList()); } private List> getInstantiableSubTypesBuiltIn(Class superType) { - ClassScanner scanner = new 
ClassScanner("org.apache.lucene.analysis", getClass().getClassLoader()); + ClassScanner scanner = + new ClassScanner("org.apache.lucene.analysis", getClass().getClassLoader()); Set> types = scanner.scanSubTypes(superType); return types.stream() .filter(type -> !Modifier.isAbstract(type.getModifiers())) @@ -157,13 +160,13 @@ public final class AnalysisImpl implements Analysis { private List copyAttributes(TokenStream tokenStream, CharTermAttribute charAtt) { List attributes = new ArrayList<>(); Iterator itr = tokenStream.getAttributeImplsIterator(); - while(itr.hasNext()) { + while (itr.hasNext()) { AttributeImpl att = itr.next(); Map attValues = new LinkedHashMap<>(); - att.reflectWith((attClass, key, value) -> { - if (value != null) - attValues.put(key, value.toString()); - }); + att.reflectWith( + (attClass, key, value) -> { + if (value != null) attValues.put(key, value.toString()); + }); attributes.add(new TokenAttribute(att.getClass().getSimpleName(), attValues)); } return attributes; @@ -178,7 +181,8 @@ public final class AnalysisImpl implements Analysis { this.analyzer = clazz.getConstructor().newInstance(); return analyzer; } catch (ReflectiveOperationException e) { - throw new LukeException(String.format(Locale.ENGLISH, "Failed to instantiate class: %s", analyzerType), e); + throw new LukeException( + String.format(Locale.ENGLISH, "Failed to instantiate class: %s", analyzerType), e); } } @@ -187,12 +191,15 @@ public final class AnalysisImpl implements Analysis { Objects.requireNonNull(config); try { // create builder - CustomAnalyzer.Builder builder = config.getConfigDir() - .map(path -> CustomAnalyzer.builder(FileSystems.getDefault().getPath(path))) - .orElse(CustomAnalyzer.builder()); + CustomAnalyzer.Builder builder = + config + .getConfigDir() + .map(path -> CustomAnalyzer.builder(FileSystems.getDefault().getPath(path))) + .orElse(CustomAnalyzer.builder()); // set tokenizer - builder.withTokenizer(config.getTokenizerConfig().getName(), config.getTokenizerConfig().getParams()); + builder.withTokenizer( + config.getTokenizerConfig().getName(), config.getTokenizerConfig().getParams()); // add char filters for (CustomAnalyzerConfig.ComponentConfig cfConf : config.getCharFilterConfigs()) { @@ -221,7 +228,7 @@ public final class AnalysisImpl implements Analysis { } @Override - public StepByStepResult analyzeStepByStep(String text){ + public StepByStepResult analyzeStepByStep(String text) { Objects.requireNonNull(text); if (analyzer == null) { throw new LukeException("Analyzer is not set."); @@ -235,7 +242,7 @@ public final class AnalysisImpl implements Analysis { List charfilteredTexts = new ArrayList<>(); try { - CustomAnalyzer customAnalyzer = (CustomAnalyzer)analyzer; + CustomAnalyzer customAnalyzer = (CustomAnalyzer) analyzer; final List charFilterFactories = customAnalyzer.getCharFilterFactories(); Reader reader = new StringReader(text); String charFilteredSource = text; @@ -246,26 +253,32 @@ public final class AnalysisImpl implements Analysis { Reader readerForWriteOut = new StringReader(charFilteredSource); readerForWriteOut = charFilterFactory.create(readerForWriteOut); charFilteredSource = writeCharStream(readerForWriteOut); - charfilteredTexts.add(new CharfilteredText(CharFilterFactory.findSPIName(charFilterFactory.getClass()), charFilteredSource)); + charfilteredTexts.add( + new CharfilteredText( + CharFilterFactory.findSPIName(charFilterFactory.getClass()), charFilteredSource)); } reader = cs; } final TokenizerFactory tokenizerFactory = 
customAnalyzer.getTokenizerFactory(); - final List tokenFilterFactories = customAnalyzer.getTokenFilterFactories(); + final List tokenFilterFactories = + customAnalyzer.getTokenFilterFactories(); TokenStream tokenStream = tokenizerFactory.create(); - ((Tokenizer)tokenStream).setReader(reader); + ((Tokenizer) tokenStream).setReader(reader); List tokens = new ArrayList<>(); List attributeSources = analyzeTokenStream(tokenStream, tokens); - namedTokens.add(new NamedTokens(TokenizerFactory.findSPIName(tokenizerFactory.getClass()), tokens)); + namedTokens.add( + new NamedTokens(TokenizerFactory.findSPIName(tokenizerFactory.getClass()), tokens)); - ListBasedTokenStream listBasedTokenStream = new ListBasedTokenStream(tokenStream, attributeSources); + ListBasedTokenStream listBasedTokenStream = + new ListBasedTokenStream(tokenStream, attributeSources); for (TokenFilterFactory tokenFilterFactory : tokenFilterFactories) { tokenStream = tokenFilterFactory.create(listBasedTokenStream); tokens = new ArrayList<>(); attributeSources = analyzeTokenStream(tokenStream, tokens); - namedTokens.add(new NamedTokens(TokenFilterFactory.findSPIName(tokenFilterFactory.getClass()), tokens)); + namedTokens.add( + new NamedTokens(TokenFilterFactory.findSPIName(tokenFilterFactory.getClass()), tokens)); try { listBasedTokenStream.close(); } catch (IOException e) { @@ -290,7 +303,6 @@ public final class AnalysisImpl implements Analysis { * Analyzes the given TokenStream, collecting the Tokens it produces. * * @param tokenStream TokenStream to analyze - * * @return List of tokens produced from the TokenStream */ private List analyzeTokenStream(TokenStream tokenStream, List result) { @@ -313,10 +325,10 @@ public final class AnalysisImpl implements Analysis { } /** - * TokenStream that iterates over a list of pre-existing Tokens - * see org.apache.solr.handler.AnalysisRequestHandlerBase#ListBasedTokenStream + * TokenStream that iterates over a list of pre-existing Tokens see + * org.apache.solr.handler.AnalysisRequestHandlerBase#ListBasedTokenStream */ - protected final static class ListBasedTokenStream extends TokenStream { + protected static final class ListBasedTokenStream extends TokenStream { private final List tokens; private Iterator tokenIterator; @@ -360,21 +372,21 @@ public final class AnalysisImpl implements Analysis { } } - private static String writeCharStream(Reader input ){ + private static String writeCharStream(Reader input) { final int BUFFER_SIZE = 1024; char[] buf = new char[BUFFER_SIZE]; int len = 0; StringBuilder sb = new StringBuilder(); do { try { - len = input.read( buf, 0, BUFFER_SIZE ); + len = input.read(buf, 0, BUFFER_SIZE); } catch (IOException e) { throw new RuntimeException("Error occurred while iterating over charfiltering", e); } - if( len > 0 ) + if (len > 0) { sb.append(buf, 0, len); - } while( len == BUFFER_SIZE ); + } + } while (len == BUFFER_SIZE); return sb.toString(); } - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/analysis/CustomAnalyzerConfig.java b/lucene/luke/src/java/org/apache/lucene/luke/models/analysis/CustomAnalyzerConfig.java index 0aa134cd99a..aaa29c47937 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/analysis/CustomAnalyzerConfig.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/analysis/CustomAnalyzerConfig.java @@ -24,9 +24,7 @@ import java.util.Map; import java.util.Objects; import java.util.Optional; -/** - * Configurations for a custom analyzer. - */ +/** Configurations for a custom analyzer. 
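The builder chain used by AnalysisImpl#buildCustomAnalyzer above is stock Lucene API rather than anything Luke-specific. A self-contained sketch of the same pattern, resolving components through their analysis-common SPI names ("standard", "htmlStrip", "lowercase"):

import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.custom.CustomAnalyzer;

public class CustomAnalyzerSketch {
  public static void main(String[] args) throws IOException {
    // Tokenizer and filters are looked up by SPI name, just as buildCustomAnalyzer
    // resolves the names carried by a CustomAnalyzerConfig.
    Analyzer analyzer =
        CustomAnalyzer.builder()
            .withTokenizer("standard")
            .addCharFilter("htmlStrip")
            .addTokenFilter("lowercase")
            .build();
    System.out.println(analyzer);
  }
}
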
*/ public final class CustomAnalyzerConfig { private final String configDir; @@ -81,30 +79,22 @@ public final class CustomAnalyzerConfig { this.tokenFilterConfigs = builder.tokenFilterConfigs; } - /** - * Returns directory path for configuration files, or empty. - */ + /** Returns directory path for configuration files, or empty. */ Optional getConfigDir() { return Optional.ofNullable(configDir); } - /** - * Returns Tokenizer configurations. - */ + /** Returns Tokenizer configurations. */ ComponentConfig getTokenizerConfig() { return tokenizerConfig; } - /** - * Returns CharFilters configurations. - */ + /** Returns CharFilters configurations. */ List getCharFilterConfigs() { return List.copyOf(charFilterConfigs); } - /** - * Returns TokenFilters configurations. - */ + /** Returns TokenFilters configurations. */ List getTokenFilterConfigs() { return List.copyOf(tokenFilterConfigs); } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/analysis/package-info.java b/lucene/luke/src/java/org/apache/lucene/luke/models/analysis/package-info.java index 52a9c0c087d..95db49aa46a 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/analysis/package-info.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/analysis/package-info.java @@ -16,4 +16,4 @@ */ /** Models and APIs for the Analysis tab */ -package org.apache.lucene.luke.models.analysis; \ No newline at end of file +package org.apache.lucene.luke.models.analysis; diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/commits/Commit.java b/lucene/luke/src/java/org/apache/lucene/luke/models/commits/Commit.java index 73f1594a11c..af43bb9a338 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/commits/Commit.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/commits/Commit.java @@ -18,13 +18,10 @@ package org.apache.lucene.luke.models.commits; import java.io.IOException; - import org.apache.lucene.index.IndexCommit; import org.apache.lucene.luke.models.util.IndexUtils; -/** - * Holder for a commit. - */ +/** Holder for a commit. */ public final class Commit { private long generation; @@ -63,6 +60,5 @@ public final class Commit { return userData; } - private Commit() { - } + private Commit() {} } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/commits/Commits.java b/lucene/luke/src/java/org/apache/lucene/luke/models/commits/Commits.java index dbd8abe17cf..e558cc5c74b 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/commits/Commits.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/commits/Commits.java @@ -20,23 +20,22 @@ package org.apache.lucene.luke.models.commits; import java.util.List; import java.util.Map; import java.util.Optional; - import org.apache.lucene.codecs.Codec; import org.apache.lucene.luke.models.LukeException; -/** - * A dedicated interface for Luke's Commits tab. - */ +/** A dedicated interface for Luke's Commits tab. */ public interface Commits { /** * Returns commits that exists in this Directory. + * * @throws LukeException - if an internal error occurs when accessing index */ List listCommits(); /** * Returns a commit of the specified generation. + * * @param commitGen - generation * @throws LukeException - if an internal error occurs when accessing index */ @@ -44,6 +43,7 @@ public interface Commits { /** * Returns index files for the specified generation. 
+ * * @param commitGen - generation * @throws LukeException - if an internal error occurs when accessing index */ @@ -51,6 +51,7 @@ public interface Commits { /** * Returns segments for the specified generation. + * * @param commitGen - generation * @throws LukeException - if an internal error occurs when accessing index */ @@ -58,6 +59,7 @@ public interface Commits { /** * Returns internal codec attributes map for the specified segment. + * * @param commitGen - generation * @param name - segment name * @throws LukeException - if an internal error occurs when accessing index @@ -66,6 +68,7 @@ public interface Commits { /** * Returns diagnotics for the specified segment. + * * @param commitGen - generation * @param name - segment name * @throws LukeException - if an internal error occurs when accessing index @@ -74,6 +77,7 @@ public interface Commits { /** * Returns codec for the specified segment. + * * @param commitGen - generation * @param name - segment name * @throws LukeException - if an internal error occurs when accessing index diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/commits/CommitsFactory.java b/lucene/luke/src/java/org/apache/lucene/luke/models/commits/CommitsFactory.java index 22d959d8621..de29900bf63 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/commits/CommitsFactory.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/commits/CommitsFactory.java @@ -30,5 +30,4 @@ public class CommitsFactory { public Commits newInstance(DirectoryReader reader, String indexPath) { return new CommitsImpl(reader, indexPath); } - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/commits/CommitsImpl.java b/lucene/luke/src/java/org/apache/lucene/luke/models/commits/CommitsImpl.java index f1dcf43547a..59b50a2b81d 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/commits/CommitsImpl.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/commits/CommitsImpl.java @@ -27,7 +27,6 @@ import java.util.Map; import java.util.Optional; import java.util.TreeMap; import java.util.stream.Collectors; - import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.Codec; import org.apache.lucene.index.DirectoryReader; @@ -60,7 +59,8 @@ public final class CommitsImpl extends LukeModel implements Commits { } /** - * Constructs a CommitsImpl that holds the {@link Directory} wrapped in the given {@link DirectoryReader}. + * Constructs a CommitsImpl that holds the {@link Directory} wrapped in the given {@link + * DirectoryReader}. 
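A sketch of how the Commits accessors above chain together, for example to dump every commit point and its files. The list element types (Commit, File) and the holder accessors (Commit.getGeneration, File.getFileName, File.getDisplaySize) are inferred, since generic parameters were lost in this rendering of the hunks; note that File here is Luke's org.apache.lucene.luke.models.commits.File, not java.io.File.

static void dumpCommits(DirectoryReader reader, String indexPath) {
  Commits commits = new CommitsFactory().newInstance(reader, indexPath); // factory shown above
  for (Commit c : commits.listCommits()) { // newest generation first
    System.out.println("commit generation " + c.getGeneration()); // inferred accessor
    for (File f : commits.getFiles(c.getGeneration())) {
      System.out.println("  " + f.getFileName() + " " + f.getDisplaySize()); // inferred accessors
    }
  }
}
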
* * @param reader - the index reader * @param indexPath - the path to index directory @@ -86,9 +86,8 @@ public final class CommitsImpl extends LukeModel implements Commits { @Override public List listCommits() throws LukeException { - List commits = getCommitMap().values().stream() - .map(Commit::of) - .collect(Collectors.toList()); + List commits = + getCommitMap().values().stream().map(Commit::of).collect(Collectors.toList()); Collections.reverse(commits); return commits; } @@ -122,7 +121,9 @@ public final class CommitsImpl extends LukeModel implements Commits { .sorted(Comparator.comparing(File::getFileName)) .collect(Collectors.toList()); } catch (IOException e) { - throw new LukeException(String.format(Locale.ENGLISH, "Failed to load files for commit generation %d", commitGen), e); + throw new LukeException( + String.format(Locale.ENGLISH, "Failed to load files for commit generation %d", commitGen), + e); } } @@ -139,12 +140,16 @@ public final class CommitsImpl extends LukeModel implements Commits { .sorted(Comparator.comparing(Segment::getName)) .collect(Collectors.toList()); } catch (IOException e) { - throw new LukeException(String.format(Locale.ENGLISH, "Failed to load segment infos for commit generation %d", commitGen), e); + throw new LukeException( + String.format( + Locale.ENGLISH, "Failed to load segment infos for commit generation %d", commitGen), + e); } } @Override - public Map getSegmentAttributes(long commitGen, String name) throws LukeException { + public Map getSegmentAttributes(long commitGen, String name) + throws LukeException { try { SegmentInfos infos = findSegmentInfos(commitGen); if (infos == null) { @@ -157,12 +162,16 @@ public final class CommitsImpl extends LukeModel implements Commits { .map(seg -> seg.info.getAttributes()) .orElse(Collections.emptyMap()); } catch (IOException e) { - throw new LukeException(String.format(Locale.ENGLISH, "Failed to load segment infos for commit generation %d", commitGen), e); + throw new LukeException( + String.format( + Locale.ENGLISH, "Failed to load segment infos for commit generation %d", commitGen), + e); } } @Override - public Map getSegmentDiagnostics(long commitGen, String name) throws LukeException { + public Map getSegmentDiagnostics(long commitGen, String name) + throws LukeException { try { SegmentInfos infos = findSegmentInfos(commitGen); if (infos == null) { @@ -175,7 +184,10 @@ public final class CommitsImpl extends LukeModel implements Commits { .map(seg -> seg.info.getDiagnostics()) .orElse(Collections.emptyMap()); } catch (IOException e) { - throw new LukeException(String.format(Locale.ENGLISH, "Failed to load segment infos for commit generation %d", commitGen), e); + throw new LukeException( + String.format( + Locale.ENGLISH, "Failed to load segment infos for commit generation %d", commitGen), + e); } } @@ -192,7 +204,10 @@ public final class CommitsImpl extends LukeModel implements Commits { .findAny() .map(seg -> seg.info.getCodec()); } catch (IOException e) { - throw new LukeException(String.format(Locale.ENGLISH, "Failed to load segment infos for commit generation %d", commitGen), e); + throw new LukeException( + String.format( + Locale.ENGLISH, "Failed to load segment infos for commit generation %d", commitGen), + e); } } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/commits/File.java b/lucene/luke/src/java/org/apache/lucene/luke/models/commits/File.java index 8038b39be3b..099e38dc72e 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/commits/File.java +++ 
b/lucene/luke/src/java/org/apache/lucene/luke/models/commits/File.java @@ -21,9 +21,7 @@ import java.io.IOException; import java.nio.file.Files; import java.nio.file.Paths; -/** - * Holder for a index file. - */ +/** Holder for a index file. */ public final class File { private String fileName; private String displaySize; @@ -47,6 +45,5 @@ public final class File { return displaySize; } - private File() { - } + private File() {} } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/commits/Segment.java b/lucene/luke/src/java/org/apache/lucene/luke/models/commits/Segment.java index cea86e2ec9f..217002ab205 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/commits/Segment.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/commits/Segment.java @@ -18,12 +18,9 @@ package org.apache.lucene.luke.models.commits; import java.io.IOException; - import org.apache.lucene.index.SegmentCommitInfo; -/** - * Holder for a segment. - */ +/** Holder for a segment. */ public final class Segment { private String name; @@ -90,6 +87,5 @@ public final class Segment { return useCompoundFile; } - private Segment() { - } + private Segment() {} } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/commits/package-info.java b/lucene/luke/src/java/org/apache/lucene/luke/models/commits/package-info.java index 87ed8a0158d..5ade5f9b666 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/commits/package-info.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/commits/package-info.java @@ -16,4 +16,4 @@ */ /** Models and APIs for the Commits tab */ -package org.apache.lucene.luke.models.commits; \ No newline at end of file +package org.apache.lucene.luke.models.commits; diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/documents/DocValues.java b/lucene/luke/src/java/org/apache/lucene/luke/models/documents/DocValues.java index ac1eff7e5ef..30d28bb6613 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/documents/DocValues.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/documents/DocValues.java @@ -19,13 +19,10 @@ package org.apache.lucene.luke.models.documents; import java.util.List; import java.util.stream.Collectors; - import org.apache.lucene.index.DocValuesType; import org.apache.lucene.util.BytesRef; -/** - * Holder for doc values. - */ +/** Holder for doc values. */ public final class DocValues { private final DocValuesType dvType; @@ -36,6 +33,7 @@ public final class DocValues { /** * Returns a new doc values entry representing the specified doc values type and values. + * * @param dvType - doc values type * @param values - (string) values * @param numericValues numeric values @@ -51,34 +49,33 @@ public final class DocValues { this.numericValues = numericValues; } - /** - * Returns the type of this doc values. - */ + /** Returns the type of this doc values. */ public DocValuesType getDvType() { return dvType; } - /** - * Returns the list of (string) values. - */ + /** Returns the list of (string) values. */ public List getValues() { return values; } - /** - * Returns the list of numeric values. - */ + /** Returns the list of numeric values. 
*/ public List getNumericValues() { return numericValues; } @Override public String toString() { - String numValuesStr = numericValues.stream().map(String::valueOf).collect(Collectors.joining(",")); - return "DocValues{" + - "dvType=" + dvType + - ", values=" + values + - ", numericValues=[" + numValuesStr + "]" + - '}'; + String numValuesStr = + numericValues.stream().map(String::valueOf).collect(Collectors.joining(",")); + return "DocValues{" + + "dvType=" + + dvType + + ", values=" + + values + + ", numericValues=[" + + numValuesStr + + "]" + + '}'; } } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/documents/DocValuesAdapter.java b/lucene/luke/src/java/org/apache/lucene/luke/models/documents/DocValuesAdapter.java index 79a87e18099..83245fae8d9 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/documents/DocValuesAdapter.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/documents/DocValuesAdapter.java @@ -23,7 +23,6 @@ import java.util.Collections; import java.util.List; import java.util.Objects; import java.util.Optional; - import org.apache.lucene.index.BinaryDocValues; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexReader; @@ -34,9 +33,7 @@ import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.luke.models.util.IndexUtils; import org.apache.lucene.util.BytesRef; -/** - * An utility class to access to the doc values. - */ +/** An utility class to access to the doc values. */ final class DocValuesAdapter { private final IndexReader reader; @@ -46,8 +43,8 @@ final class DocValuesAdapter { } /** - * Returns the doc values for the specified field in the specified document. - * Empty Optional instance is returned if no doc values is available for the field. + * Returns the doc values for the specified field in the specified document. Empty Optional + * instance is returned if no doc values is available for the field. 
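The advanceExact/longValue idiom used by the adapter's per-type helpers below is the standard Lucene doc-values access pattern. A minimal standalone version, substituting plain MultiDocValues for Luke's IndexUtils wrapper and returning empty when the document carries no value, as the adapter does:

import java.io.IOException;
import java.util.OptionalLong;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiDocValues;
import org.apache.lucene.index.NumericDocValues;

static OptionalLong readNumericDocValue(IndexReader reader, String field, int docid)
    throws IOException {
  NumericDocValues dv = MultiDocValues.getNumericValues(reader, field);
  // advanceExact positions the iterator on docid and reports whether a value exists there.
  if (dv != null && dv.advanceExact(docid)) {
    return OptionalLong.of(dv.longValue());
  }
  return OptionalLong.empty();
}
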
* * @param docid - document id * @param field - field name @@ -78,10 +75,11 @@ final class DocValuesAdapter { BinaryDocValues bvalues = IndexUtils.getBinaryDocValues(reader, field); if (bvalues.advanceExact(docid)) { - DocValues dv = DocValues.of( - dvType, - Collections.singletonList(BytesRef.deepCopyOf(bvalues.binaryValue())), - Collections.emptyList()); + DocValues dv = + DocValues.of( + dvType, + Collections.singletonList(BytesRef.deepCopyOf(bvalues.binaryValue())), + Collections.emptyList()); return Optional.of(dv); } @@ -89,23 +87,21 @@ final class DocValuesAdapter { } private Optional createNumericDocValues(int docid, String field, DocValuesType dvType) - throws IOException{ + throws IOException { NumericDocValues nvalues = IndexUtils.getNumericDocValues(reader, field); if (nvalues.advanceExact(docid)) { - DocValues dv = DocValues.of( - dvType, - Collections.emptyList(), - Collections.singletonList(nvalues.longValue()) - ); + DocValues dv = + DocValues.of( + dvType, Collections.emptyList(), Collections.singletonList(nvalues.longValue())); return Optional.of(dv); } return Optional.empty(); } - private Optional createSortedNumericDocValues(int docid, String field, DocValuesType dvType) - throws IOException { + private Optional createSortedNumericDocValues( + int docid, String field, DocValuesType dvType) throws IOException { SortedNumericDocValues snvalues = IndexUtils.getSortedNumericDocValues(reader, field); if (snvalues.advanceExact(docid)) { @@ -116,11 +112,7 @@ final class DocValuesAdapter { numericValues.add(snvalues.nextValue()); } - DocValues dv = DocValues.of( - dvType, - Collections.emptyList(), - numericValues - ); + DocValues dv = DocValues.of(dvType, Collections.emptyList(), numericValues); return Optional.of(dv); } @@ -132,19 +124,19 @@ final class DocValuesAdapter { SortedDocValues svalues = IndexUtils.getSortedDocValues(reader, field); if (svalues.advanceExact(docid)) { - DocValues dv = DocValues.of( - dvType, - Collections.singletonList(BytesRef.deepCopyOf(svalues.binaryValue())), - Collections.emptyList() - ); + DocValues dv = + DocValues.of( + dvType, + Collections.singletonList(BytesRef.deepCopyOf(svalues.binaryValue())), + Collections.emptyList()); return Optional.of(dv); } return Optional.empty(); } - private Optional createSortedSetDocValues(int docid, String field, DocValuesType dvType) - throws IOException { + private Optional createSortedSetDocValues( + int docid, String field, DocValuesType dvType) throws IOException { SortedSetDocValues ssvalues = IndexUtils.getSortedSetDocvalues(reader, field); if (ssvalues.advanceExact(docid)) { @@ -155,11 +147,7 @@ final class DocValuesAdapter { values.add(BytesRef.deepCopyOf(ssvalues.lookupOrd(ord))); } - DocValues dv = DocValues.of( - dvType, - values, - Collections.emptyList() - ); + DocValues dv = DocValues.of(dvType, values, Collections.emptyList()); return Optional.of(dv); } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/documents/DocumentField.java b/lucene/luke/src/java/org/apache/lucene/luke/models/documents/DocumentField.java index 44737a35b22..a27c8dba58f 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/documents/DocumentField.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/documents/DocumentField.java @@ -19,7 +19,6 @@ package org.apache.lucene.luke.models.documents; import java.io.IOException; import java.util.Objects; - import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.IndexOptions; @@ -29,9 
+28,7 @@ import org.apache.lucene.index.MultiDocValues; import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.util.BytesRef; -/** - * Holder for a document field's information and data. - */ +/** Holder for a document field's information and data. */ public final class DocumentField { // field name @@ -57,8 +54,7 @@ public final class DocumentField { private int pointDimensionCount; private int pointNumBytes; - static DocumentField of(FieldInfo finfo, IndexReader reader, int docId) - throws IOException { + static DocumentField of(FieldInfo finfo, IndexReader reader, int docId) throws IOException { return of(finfo, null, reader, docId); } @@ -154,16 +150,22 @@ public final class DocumentField { @Override public String toString() { - return "DocumentField{" + - "name='" + name + '\'' + - ", idxOptions=" + idxOptions + - ", hasTermVectors=" + hasTermVectors + - ", isStored=" + isStored + - ", dvType=" + dvType + - ", pointDimensionCount=" + pointDimensionCount + - '}'; + return "DocumentField{" + + "name='" + + name + + '\'' + + ", idxOptions=" + + idxOptions + + ", hasTermVectors=" + + hasTermVectors + + ", isStored=" + + isStored + + ", dvType=" + + dvType + + ", pointDimensionCount=" + + pointDimensionCount + + '}'; } - private DocumentField() { - } + private DocumentField() {} } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/documents/Documents.java b/lucene/luke/src/java/org/apache/lucene/luke/models/documents/Documents.java index d3735412e21..625a2f91e26 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/documents/Documents.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/documents/Documents.java @@ -20,27 +20,22 @@ package org.apache.lucene.luke.models.documents; import java.util.Collection; import java.util.List; import java.util.Optional; - import org.apache.lucene.index.Term; import org.apache.lucene.luke.models.LukeException; -/** - * A dedicated interface for Luke's Documents tab. - */ +/** A dedicated interface for Luke's Documents tab. */ public interface Documents { - /** - * Returns one greater than the largest possible document number. - */ + /** Returns one greater than the largest possible document number. */ int getMaxDoc(); - /** - * Returns field names in this index. - */ + /** Returns field names in this index. */ Collection getFieldNames(); /** - * Returns true if the document with the specified docid is not deleted, otherwise false. + * Returns true if the document with the specified docid is not deleted, otherwise + * false. + * * @param docid - document id */ boolean isLive(int docid); @@ -53,14 +48,12 @@ public interface Documents { */ List getDocumentFields(int docid); - /** - * Returns the current target field name. - */ + /** Returns the current target field name. */ String getCurrentField(); /** - * Returns the first indexed term in the specified field. - * Empty Optional instance is returned if no terms are available for the field. + * Returns the first indexed term in the specified field. Empty Optional instance is returned if + * no terms are available for the field. * * @param field - field name * @throws LukeException - if an internal error occurs when accessing index @@ -68,8 +61,9 @@ public interface Documents { Optional firstTerm(String field); /** - * Increments the terms iterator and returns the next indexed term for the target field. - * Empty Optional instance is returned if the terms iterator has not been positioned yet, or has been exhausted. 
+ * Increments the terms iterator and returns the next indexed term for the target field. Empty + * Optional instance is returned if the terms iterator has not been positioned yet, or has been + * exhausted. * * @return next term, if exists, or empty * @throws LukeException - if an internal error occurs when accessing index @@ -77,8 +71,9 @@ public interface Documents { Optional nextTerm(); /** - * Seeks to the specified term, if it exists, or to the next (ceiling) term. Returns the term that was found. - * Empty Optional instance is returned if the terms iterator has not been positioned yet, or has been exhausted. + * Seeks to the specified term, if it exists, or to the next (ceiling) term. Returns the term that + * was found. Empty Optional instance is returned if the terms iterator has not been positioned + * yet, or has been exhausted. * * @param termText - term to seek * @return found term, if exists, or empty @@ -87,8 +82,9 @@ public interface Documents { Optional seekTerm(String termText); /** - * Returns the first document id (posting) associated with the current term. - * Empty Optional instance is returned if the terms iterator has not been positioned yet, or the postings iterator has been exhausted. + * Returns the first document id (posting) associated with the current term. Empty Optional + * instance is returned if the terms iterator has not been positioned yet, or the postings + * iterator has been exhausted. * * @return document id, if exists, or empty * @throws LukeException - if an internal error occurs when accessing index @@ -96,8 +92,9 @@ public interface Documents { Optional firstTermDoc(); /** - * Increments the postings iterator and returns the next document id (posting) for the current term. - * Empty Optional instance is returned if the terms iterator has not been positioned yet, or the postings iterator has been exhausted. + * Increments the postings iterator and returns the next document id (posting) for the current + * term. Empty Optional instance is returned if the terms iterator has not been positioned yet, or + * the postings iterator has been exhausted. * * @return document id, if exists, or empty * @throws LukeException - if an internal error occurs when accessing index @@ -112,16 +109,17 @@ public interface Documents { List getTermPositions(); /** - * Returns the document frequency for the current term (the number of documents containing the current term.) - * Empty Optional instance is returned if the terms iterator has not been positioned yet. + * Returns the document frequency for the current term (the number of documents containing the + * current term.) Empty Optional instance is returned if the terms iterator has not been + * positioned yet. * * @throws LukeException - if an internal error occurs when accessing index */ Optional getDocFreq(); /** - * Returns the term vectors for the specified field in the specified document. - * If no term vector is available for the field, empty list is returned. + * Returns the term vectors for the specified field in the specified document. If no term vector + * is available for the field, empty list is returned. * * @param docid - document id * @param field - field name @@ -131,8 +129,8 @@ public interface Documents { List getTermVectors(int docid, String field); /** - * Returns the doc values for the specified field in the specified document. - * Empty Optional instance is returned if no doc values is available for the field. + * Returns the doc values for the specified field in the specified document. 
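These cursor-style methods are meant to be called in sequence: position with firstTerm or seekTerm, then advance with nextTerm. A sketch of a caller listing a field's first terms with their document frequencies; DocumentsFactory#newInstance(IndexReader) appears just below, and the Optional element types (Term, Integer) are inferred because generic parameters were lost in this rendering.

static void listTerms(Documents documents, String field, int limit) {
  java.util.Optional<org.apache.lucene.index.Term> term = documents.firstTerm(field);
  for (int i = 0; i < limit && term.isPresent(); i++) {
    int docFreq = documents.getDocFreq().orElse(0); // frequency of the current term
    System.out.println(term.get().text() + " docFreq=" + docFreq);
    term = documents.nextTerm(); // empty once the field's terms are exhausted
  }
}
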
Empty Optional + * instance is returned if no doc values is available for the field. * * @param docid - document id * @param field - field name diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/documents/DocumentsFactory.java b/lucene/luke/src/java/org/apache/lucene/luke/models/documents/DocumentsFactory.java index 96b0a6fb6e9..4ccb55d63e0 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/documents/DocumentsFactory.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/documents/DocumentsFactory.java @@ -25,5 +25,4 @@ public class DocumentsFactory { public Documents newInstance(IndexReader reader) { return new DocumentsImpl(reader); } - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/documents/DocumentsImpl.java b/lucene/luke/src/java/org/apache/lucene/luke/models/documents/DocumentsImpl.java index d2f6c9b5f86..84daec9cc2d 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/documents/DocumentsImpl.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/documents/DocumentsImpl.java @@ -25,7 +25,6 @@ import java.util.List; import java.util.Locale; import java.util.Objects; import java.util.Optional; - import org.apache.logging.log4j.Logger; import org.apache.lucene.document.Document; import org.apache.lucene.index.FieldInfo; @@ -59,6 +58,7 @@ public final class DocumentsImpl extends LukeModel implements Documents { /** * Constructs an DocumentsImpl that holds given {@link IndexReader}. + * * @param reader - the index reader */ public DocumentsImpl(IndexReader reader) { @@ -103,7 +103,8 @@ public final class DocumentsImpl extends LukeModel implements Documents { } } catch (IOException e) { - throw new LukeException(String.format(Locale.ENGLISH, "Fields information not available for doc %d.", docid), e); + throw new LukeException( + String.format(Locale.ENGLISH, "Fields information not available for doc %d.", docid), e); } return res; @@ -143,7 +144,8 @@ public final class DocumentsImpl extends LukeModel implements Documents { } catch (IOException e) { resetTermsIterator(); - throw new LukeException(String.format(Locale.ENGLISH, "Terms not available for field: %s.", field), e); + throw new LukeException( + String.format(Locale.ENGLISH, "Terms not available for field: %s.", field), e); } finally { // discard current postings enum resetPostingsIterator(); @@ -170,7 +172,8 @@ public final class DocumentsImpl extends LukeModel implements Documents { } } catch (IOException e) { resetTermsIterator(); - throw new LukeException(String.format(Locale.ENGLISH, "Terms not available for field: %s.", curField), e); + throw new LukeException( + String.format(Locale.ENGLISH, "Terms not available for field: %s.", curField), e); } finally { // discard current postings enum resetPostingsIterator(); @@ -201,7 +204,8 @@ public final class DocumentsImpl extends LukeModel implements Documents { } } catch (IOException e) { resetTermsIterator(); - throw new LukeException(String.format(Locale.ENGLISH, "Terms not available for field: %s.", curField), e); + throw new LukeException( + String.format(Locale.ENGLISH, "Terms not available for field: %s.", curField), e); } finally { // discard current postings enum resetPostingsIterator(); @@ -222,14 +226,18 @@ public final class DocumentsImpl extends LukeModel implements Documents { if (penum.nextDoc() == PostingsEnum.NO_MORE_DOCS) { // no docs available for this term resetPostingsIterator(); - log.warn("No docs available for term: {} in field: {}.", BytesRefUtils.decode(tenum.term()), curField); + 
log.warn( + "No docs available for term: {} in field: {}.", + BytesRefUtils.decode(tenum.term()), + curField); return Optional.empty(); } else { return Optional.of(penum.docID()); } } catch (IOException e) { resetPostingsIterator(); - throw new LukeException(String.format(Locale.ENGLISH, "Term docs not available for field: %s.", curField), e); + throw new LukeException( + String.format(Locale.ENGLISH, "Term docs not available for field: %s.", curField), e); } } @@ -246,7 +254,10 @@ public final class DocumentsImpl extends LukeModel implements Documents { // end of the iterator resetPostingsIterator(); if (log.isInfoEnabled()) { - log.info("Reached the end of the postings iterator for term: {} in field: {}", BytesRefUtils.decode(tenum.term()), curField); + log.info( + "Reached the end of the postings iterator for term: {} in field: {}", + BytesRefUtils.decode(tenum.term()), + curField); } return Optional.empty(); } else { @@ -254,7 +265,8 @@ public final class DocumentsImpl extends LukeModel implements Documents { } } catch (IOException e) { resetPostingsIterator(); - throw new LukeException(String.format(Locale.ENGLISH, "Term docs not available for field: %s.", curField), e); + throw new LukeException( + String.format(Locale.ENGLISH, "Term docs not available for field: %s.", curField), e); } } @@ -282,13 +294,13 @@ public final class DocumentsImpl extends LukeModel implements Documents { } } catch (IOException e) { - throw new LukeException(String.format(Locale.ENGLISH, "Postings not available for field %s.", curField), e); + throw new LukeException( + String.format(Locale.ENGLISH, "Postings not available for field %s.", curField), e); } return res; } - @Override public Optional getDocFreq() { if (tenum == null) { @@ -300,7 +312,8 @@ public final class DocumentsImpl extends LukeModel implements Documents { try { return Optional.of(tenum.docFreq()); } catch (IOException e) { - throw new LukeException(String.format(Locale.ENGLISH,"Doc frequency not available for field: %s.", curField), e); + throw new LukeException( + String.format(Locale.ENGLISH, "Doc frequency not available for field: %s.", curField), e); } } @@ -309,7 +322,10 @@ public final class DocumentsImpl extends LukeModel implements Documents { try { return tvAdapter.getTermVector(docid, field); } catch (IOException e) { - throw new LukeException(String.format(Locale.ENGLISH, "Term vector not available for doc: #%d and field: %s", docid, field), e); + throw new LukeException( + String.format( + Locale.ENGLISH, "Term vector not available for doc: #%d and field: %s", docid, field), + e); } } @@ -318,7 +334,10 @@ public final class DocumentsImpl extends LukeModel implements Documents { try { return dvAdapter.getDocValues(docid, field); } catch (IOException e) { - throw new LukeException(String.format(Locale.ENGLISH, "Doc values not available for doc: #%d and field: %s", docid, field), e); + throw new LukeException( + String.format( + Locale.ENGLISH, "Doc values not available for doc: #%d and field: %s", docid, field), + e); } } @@ -345,5 +364,4 @@ public final class DocumentsImpl extends LukeModel implements Documents { private void setPostingsIterator(PostingsEnum penum) { this.penum = penum; } - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/documents/TermPosting.java b/lucene/luke/src/java/org/apache/lucene/luke/models/documents/TermPosting.java index 84d7af1b264..c5255c04e90 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/documents/TermPosting.java +++ 
b/lucene/luke/src/java/org/apache/lucene/luke/models/documents/TermPosting.java @@ -18,13 +18,10 @@ package org.apache.lucene.luke.models.documents; import java.io.IOException; - import org.apache.lucene.index.PostingsEnum; import org.apache.lucene.util.BytesRef; -/** - * Holder for a term's position information, and optionally, offsets and payloads. - */ +/** Holder for a term's position information, and optionally, offsets and payloads. */ public final class TermPosting { // position @@ -77,14 +74,17 @@ public final class TermPosting { @Override public String toString() { - return "TermPosting{" + - "position=" + position + - ", startOffset=" + startOffset + - ", endOffset=" + endOffset + - ", payload=" + payload + - '}'; + return "TermPosting{" + + "position=" + + position + + ", startOffset=" + + startOffset + + ", endOffset=" + + endOffset + + ", payload=" + + payload + + '}'; } - private TermPosting() { - } + private TermPosting() {} } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/documents/TermVectorEntry.java b/lucene/luke/src/java/org/apache/lucene/luke/models/documents/TermVectorEntry.java index 643d299f1be..b09a36de18a 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/documents/TermVectorEntry.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/documents/TermVectorEntry.java @@ -23,13 +23,13 @@ import java.util.List; import java.util.Objects; import java.util.OptionalInt; import java.util.stream.Collectors; - import org.apache.lucene.index.PostingsEnum; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.luke.util.BytesRefUtils; /** - * Holder for term vector entry representing the term and their number of occurrences, and optionally, positions in the document field. + * Holder for term vector entry representing the term and their number of occurrences, and + * optionally, positions in the document field. */ public final class TermVectorEntry { @@ -72,50 +72,47 @@ public final class TermVectorEntry { this.positions = positions; } - /** - * Returns the string representation for this term. - */ + /** Returns the string representation for this term. */ public String getTermText() { return termText; } - /** - * Returns the number of occurrences of this term in the document field. - */ + /** Returns the number of occurrences of this term in the document field. */ public long getFreq() { return freq; } - /** - * Returns the list of positions for this term in the document field. - */ + /** Returns the list of positions for this term in the document field. */ public List getPositions() { return positions; } @Override public String toString() { - String positionsStr = positions.stream() - .map(TermVectorPosition::toString) - .collect(Collectors.joining(",")); + String positionsStr = + positions.stream().map(TermVectorPosition::toString).collect(Collectors.joining(",")); - return "TermVectorEntry{" + - "termText='" + termText + '\'' + - ", freq=" + freq + - ", positions=" + positionsStr + - '}'; + return "TermVectorEntry{" + + "termText='" + + termText + + '\'' + + ", freq=" + + freq + + ", positions=" + + positionsStr + + '}'; } - /** - * Holder for position information for a term vector entry. - */ + /** Holder for position information for a term vector entry. */ public static final class TermVectorPosition { private final int position; private final int startOffset; private final int endOffset; /** - * Returns a new position entry representing the specified posting, and optionally, start and end offsets. 
+ * Returns a new position entry representing the specified posting, and optionally, start and + * end offsets. + * * @param pos - term position * @param pe - positioned postings iterator * @return position entry @@ -132,24 +129,22 @@ public final class TermVectorEntry { return new TermVectorPosition(pos); } - /** - * Returns the position for this term in the document field. - */ + /** Returns the position for this term in the document field. */ public int getPosition() { return position; } /** - * Returns the start offset for this term in the document field. - * Empty Optional instance is returned if no offset information available. + * Returns the start offset for this term in the document field. Empty Optional instance is + * returned if no offset information available. */ public OptionalInt getStartOffset() { return startOffset >= 0 ? OptionalInt.of(startOffset) : OptionalInt.empty(); } /** - * Returns the end offset for this term in the document field. - * Empty Optional instance is returned if no offset information available. + * Returns the end offset for this term in the document field. Empty Optional instance is + * returned if no offset information available. */ public OptionalInt getEndOffset() { return endOffset >= 0 ? OptionalInt.of(endOffset) : OptionalInt.empty(); @@ -157,11 +152,14 @@ public final class TermVectorEntry { @Override public String toString() { - return "TermVectorPosition{" + - "position=" + position + - ", startOffset=" + startOffset + - ", endOffset=" + endOffset + - '}'; + return "TermVectorPosition{" + + "position=" + + position + + ", startOffset=" + + startOffset + + ", endOffset=" + + endOffset + + '}'; } private TermVectorPosition(int position) { diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/documents/TermVectorsAdapter.java b/lucene/luke/src/java/org/apache/lucene/luke/models/documents/TermVectorsAdapter.java index accdf253d4b..64d8552a464 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/documents/TermVectorsAdapter.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/documents/TermVectorsAdapter.java @@ -23,16 +23,13 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Objects; - import org.apache.logging.log4j.Logger; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.luke.util.LoggerFactory; -/** - * An utility class to access to the term vectors. - */ +/** An utility class to access to the term vectors. */ final class TermVectorsAdapter { private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); @@ -44,8 +41,8 @@ final class TermVectorsAdapter { } /** - * Returns the term vectors for the specified field in the specified document. - * If no term vector is available for the field, empty list is returned. + * Returns the term vectors for the specified field in the specified document. If no term vector + * is available for the field, empty list is returned. 
* * @param docid - document id * @param field - field name @@ -67,5 +64,4 @@ final class TermVectorsAdapter { } return res; } - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/documents/package-info.java b/lucene/luke/src/java/org/apache/lucene/luke/models/documents/package-info.java index 6f4a38b753c..dfba9021d22 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/documents/package-info.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/documents/package-info.java @@ -16,4 +16,4 @@ */ /** Models and APIs for the Documents tab */ -package org.apache.lucene.luke.models.documents; \ No newline at end of file +package org.apache.lucene.luke.models.documents; diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/overview/Overview.java b/lucene/luke/src/java/org/apache/lucene/luke/models/overview/Overview.java index 9913be368d2..7c050e5e906 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/overview/Overview.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/overview/Overview.java @@ -21,53 +21,44 @@ import java.util.List; import java.util.Map; import java.util.Optional; -/** - * A dedicated interface for Luke's Overview tab. - */ +/** A dedicated interface for Luke's Overview tab. */ public interface Overview { /** - * Returns the currently opened index directory path, - * or the root directory path if multiple index directories are opened. + * Returns the currently opened index directory path, or the root directory path if multiple index + * directories are opened. */ String getIndexPath(); - /** - * Returns the number of fields in this index. - */ + /** Returns the number of fields in this index. */ int getNumFields(); - /** - * Returns the number of documents in this index. - */ + /** Returns the number of documents in this index. */ int getNumDocuments(); /** * Returns the total number of terms in this index. * - * @throws org.apache.lucene.luke.models.LukeException - if an internal error occurs when accessing index + * @throws org.apache.lucene.luke.models.LukeException - if an internal error occurs when + * accessing index */ long getNumTerms(); - /** - * Returns true if this index includes deleted documents. - */ + /** Returns true if this index includes deleted documents. */ boolean hasDeletions(); - /** - * Returns the number of deleted documents in this index. - */ + /** Returns the number of deleted documents in this index. */ int getNumDeletedDocs(); /** - * Returns true if the index is optimized. - * Empty Optional instance is returned if multiple indexes are opened. + * Returns true if the index is optimized. Empty Optional instance is returned if multiple indexes + * are opened. */ Optional isOptimized(); /** - * Returns the version number when this index was opened. - * Empty Optional instance is returned if multiple indexes are opened. + * Returns the version number when this index was opened. Empty Optional instance is returned if + * multiple indexes are opened. */ Optional getIndexVersion(); @@ -75,28 +66,30 @@ public interface Overview { * Returns the string representation for the Lucene segment version when the index was created. * Empty Optional instance is returned if multiple indexes are opened. 
* - * @throws org.apache.lucene.luke.models.LukeException - if an internal error occurs when accessing index + * @throws org.apache.lucene.luke.models.LukeException - if an internal error occurs when + * accessing index */ Optional getIndexFormat(); /** - * Returns the currently opened {@link org.apache.lucene.store.Directory} implementation class name. - * Empty Optional instance is returned if multiple indexes are opened. + * Returns the currently opened {@link org.apache.lucene.store.Directory} implementation class + * name. Empty Optional instance is returned if multiple indexes are opened. */ Optional getDirImpl(); /** * Returns the information of the commit point that reader has opened. * - * Empty Optional instance is returned if multiple indexes are opened. + *
<p>
Empty Optional instance is returned if multiple indexes are opened. */ Optional getCommitDescription(); /** - * Returns the user provided data for the commit point. - * Empty Optional instance is returned if multiple indexes are opened. + * Returns the user provided data for the commit point. Empty Optional instance is returned if + * multiple indexes are opened. * - * @throws org.apache.lucene.luke.models.LukeException - if an internal error occurs when accessing index + * @throws org.apache.lucene.luke.models.LukeException - if an internal error occurs when + * accessing index */ Optional getCommitUserData(); @@ -105,7 +98,8 @@ public interface Overview { * * @param order - the sort order * @return the ordered map of terms and their frequencies - * @throws org.apache.lucene.luke.models.LukeException - if an internal error occurs when accessing index + * @throws org.apache.lucene.luke.models.LukeException - if an internal error occurs when + * accessing index */ Map getSortedTermCounts(TermCountsOrder order); @@ -115,7 +109,8 @@ public interface Overview { * @param field - the field name * @param numTerms - the max number of terms to be returned * @return the list of top terms and their document frequencies - * @throws org.apache.lucene.luke.models.LukeException - if an internal error occurs when accessing index + * @throws org.apache.lucene.luke.models.LukeException - if an internal error occurs when + * accessing index */ List getTopTerms(String field, int numTerms); } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/overview/OverviewFactory.java b/lucene/luke/src/java/org/apache/lucene/luke/models/overview/OverviewFactory.java index 620e2e51501..16228e4ff27 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/overview/OverviewFactory.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/overview/OverviewFactory.java @@ -25,5 +25,4 @@ public class OverviewFactory { public Overview newInstance(IndexReader reader, String indexPath) { return new OverviewImpl(reader, indexPath); } - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/overview/OverviewImpl.java b/lucene/luke/src/java/org/apache/lucene/luke/models/overview/OverviewImpl.java index 4dfd06be1e6..c0a50eefe2a 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/overview/OverviewImpl.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/overview/OverviewImpl.java @@ -23,7 +23,6 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Optional; - import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.luke.models.LukeException; @@ -130,8 +129,11 @@ public final class OverviewImpl extends LukeModel implements Overview { } return Optional.of( commit.getSegmentsFileName() - + " (generation=" + commit.getGeneration() - + ", segs=" + commit.getSegmentCount() + ")"); + + " (generation=" + + commit.getGeneration() + + ", segs=" + + commit.getSegmentCount() + + ")"); } @Override @@ -159,13 +161,17 @@ public final class OverviewImpl extends LukeModel implements Overview { Objects.requireNonNull(field); if (numTerms < 0) { - throw new IllegalArgumentException(String.format(Locale.ENGLISH, "'numTerms' must be a positive integer: %d is not accepted.", numTerms)); + throw new IllegalArgumentException( + String.format( + Locale.ENGLISH, + "'numTerms' must be a positive integer: %d is not accepted.", + numTerms)); } try { return topTerms.getTopTerms(field, numTerms); } catch (Exception 
e) { - throw new LukeException(String.format(Locale.ENGLISH, "Top terms for field %s not available.", field), e); + throw new LukeException( + String.format(Locale.ENGLISH, "Top terms for field %s not available.", field), e); } } - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/overview/TermCounts.java b/lucene/luke/src/java/org/apache/lucene/luke/models/overview/TermCounts.java index d48edd79d1f..ceccb43338d 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/overview/TermCounts.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/overview/TermCounts.java @@ -23,13 +23,10 @@ import java.util.LinkedHashMap; import java.util.Map; import java.util.Objects; import java.util.stream.Collectors; - import org.apache.lucene.index.IndexReader; import org.apache.lucene.luke.models.util.IndexUtils; -/** - * An utility class that collects term counts terms for all fields in a index. - */ +/** An utility class that collects term counts terms for all fields in a index. */ final class TermCounts { private final Map termCountMap; @@ -39,18 +36,17 @@ final class TermCounts { termCountMap = IndexUtils.countTerms(reader, IndexUtils.getFieldNames(reader)); } - /** - * Returns the total number of terms in this index. - */ + /** Returns the total number of terms in this index. */ long numTerms() { return termCountMap.values().stream().mapToLong(Long::longValue).sum(); } /** * Returns all fields with the number of terms for each field sorted by {@link TermCountsOrder} + * * @param order - sort order */ - Map sortedTermCounts(TermCountsOrder order){ + Map sortedTermCounts(TermCountsOrder order) { Objects.requireNonNull(order); Comparator> comparator; @@ -76,7 +72,8 @@ final class TermCounts { private Map sortedTermCounts(Comparator> comparator) { return termCountMap.entrySet().stream() .sorted(comparator) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue, (v1, v2) -> v1, LinkedHashMap::new)); + .collect( + Collectors.toMap( + Map.Entry::getKey, Map.Entry::getValue, (v1, v2) -> v1, LinkedHashMap::new)); } - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/overview/TermCountsOrder.java b/lucene/luke/src/java/org/apache/lucene/luke/models/overview/TermCountsOrder.java index a5976ba8d52..3ac2ad109c2 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/overview/TermCountsOrder.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/overview/TermCountsOrder.java @@ -17,27 +17,17 @@ package org.apache.lucene.luke.models.overview; -/** - * Sort orders for fields with their term counts - */ +/** Sort orders for fields with their term counts */ public enum TermCountsOrder { - /** - * Ascending order by the field name - */ + /** Ascending order by the field name */ NAME_ASC, - /** - * Descending order by the field name - */ + /** Descending order by the field name */ NAME_DESC, - /** - * Ascending order by the count of terms - */ + /** Ascending order by the count of terms */ COUNT_ASC, - /** - * Descending order by the count of terms - */ + /** Descending order by the count of terms */ COUNT_DESC } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/overview/TermStats.java b/lucene/luke/src/java/org/apache/lucene/luke/models/overview/TermStats.java index b97afe7c0ae..87ef56ab9be 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/overview/TermStats.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/overview/TermStats.java @@ -19,9 +19,7 @@ package org.apache.lucene.luke.models.overview; import 
org.apache.lucene.luke.util.BytesRefUtils; -/** - * Holder for statistics for a term in a specific field. - */ +/** Holder for statistics for a term in a specific field. */ public final class TermStats { private final String decodedTermText; @@ -31,7 +29,8 @@ public final class TermStats { private final int docFreq; /** - * Returns a TermStats instance representing the specified {@link org.apache.lucene.misc.TermStats} value. + * Returns a TermStats instance representing the specified {@link + * org.apache.lucene.misc.TermStats} value. */ static TermStats of(org.apache.lucene.misc.TermStats stats) { String termText = BytesRefUtils.decode(stats.termtext); @@ -44,33 +43,32 @@ public final class TermStats { this.docFreq = docFreq; } - /** - * Returns the string representation for this term. - */ + /** Returns the string representation for this term. */ public String getDecodedTermText() { return decodedTermText; } - /** - * Returns the field name. - */ + /** Returns the field name. */ public String getField() { return field; } - /** - * Returns the document frequency of this term. - */ + /** Returns the document frequency of this term. */ public int getDocFreq() { return docFreq; } @Override public String toString() { - return "TermStats{" + - "decodedTermText='" + decodedTermText + '\'' + - ", field='" + field + '\'' + - ", docFreq=" + docFreq + - '}'; + return "TermStats{" + + "decodedTermText='" + + decodedTermText + + '\'' + + ", field='" + + field + + '\'' + + ", docFreq=" + + docFreq + + '}'; } } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/overview/TopTerms.java b/lucene/luke/src/java/org/apache/lucene/luke/models/overview/TopTerms.java index 4d371287531..5e5bb692e56 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/overview/TopTerms.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/overview/TopTerms.java @@ -23,13 +23,10 @@ import java.util.Map; import java.util.Objects; import java.util.WeakHashMap; import java.util.stream.Collectors; - import org.apache.lucene.index.IndexReader; import org.apache.lucene.misc.HighFreqTerms; -/** - * An utility class that collects terms and their statistics in a specific field. - */ +/** An utility class that collects terms and their statistics in a specific field. 
*/ final class TopTerms { private final IndexReader reader; @@ -52,11 +49,11 @@ final class TopTerms { if (!topTermsCache.containsKey(field) || topTermsCache.get(field).size() < numTerms) { org.apache.lucene.misc.TermStats[] stats = - HighFreqTerms.getHighFreqTerms(reader, numTerms, field, new HighFreqTerms.DocFreqComparator()); + HighFreqTerms.getHighFreqTerms( + reader, numTerms, field, new HighFreqTerms.DocFreqComparator()); - List topTerms = Arrays.stream(stats) - .map(TermStats::of) - .collect(Collectors.toList()); + List topTerms = + Arrays.stream(stats).map(TermStats::of).collect(Collectors.toList()); // cache computed statistics for later uses topTermsCache.put(field, topTerms); diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/overview/package-info.java b/lucene/luke/src/java/org/apache/lucene/luke/models/overview/package-info.java index 11b12e81266..674ada54d5b 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/overview/package-info.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/overview/package-info.java @@ -16,4 +16,4 @@ */ /** Models and APIs for the Overview tab */ -package org.apache.lucene.luke.models.overview; \ No newline at end of file +package org.apache.lucene.luke.models.overview; diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/package-info.java b/lucene/luke/src/java/org/apache/lucene/luke/models/package-info.java index 0065130864b..d2dc58c17c4 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/package-info.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/package-info.java @@ -16,4 +16,4 @@ */ /** Models and internal APIs for Luke */ -package org.apache.lucene.luke.models; \ No newline at end of file +package org.apache.lucene.luke.models; diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/search/MLTConfig.java b/lucene/luke/src/java/org/apache/lucene/luke/models/search/MLTConfig.java index fb88e6055d6..a5008e62698 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/search/MLTConfig.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/search/MLTConfig.java @@ -20,12 +20,9 @@ package org.apache.lucene.luke.models.search; import java.util.ArrayList; import java.util.Collection; import java.util.List; - import org.apache.lucene.queries.mlt.MoreLikeThis; -/** - * Configurations for MoreLikeThis query. - */ +/** Configurations for MoreLikeThis query. */ public final class MLTConfig { private final List fields; @@ -91,5 +88,4 @@ public final class MLTConfig { public int getMinTermFreq() { return minTermFreq; } - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/search/QueryParserConfig.java b/lucene/luke/src/java/org/apache/lucene/luke/models/search/QueryParserConfig.java index 104c36f9531..adf848d056b 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/search/QueryParserConfig.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/search/QueryParserConfig.java @@ -22,17 +22,15 @@ import java.util.Locale; import java.util.Map; import java.util.TimeZone; import java.util.stream.Collectors; - import org.apache.lucene.document.DateTools; -/** - * Configurations for query parser. - */ +/** Configurations for query parser. 
*/ public final class QueryParserConfig { /** query operators */ public enum Operator { - AND, OR + AND, + OR } private final boolean useClassicParser; @@ -169,7 +167,8 @@ public final class QueryParserConfig { this.locale = builder.locale; this.timeZone = builder.timeZone; this.phraseSlop = builder.phraseSlop; - this.autoGenerateMultiTermSynonymsPhraseQuery = builder.autoGenerateMultiTermSynonymsPhraseQuery; + this.autoGenerateMultiTermSynonymsPhraseQuery = + builder.autoGenerateMultiTermSynonymsPhraseQuery; this.autoGeneratePhraseQueries = builder.autoGeneratePhraseQueries; this.splitOnWhitespace = builder.splitOnWhitespace; this.typeMap = Map.copyOf(builder.typeMap); @@ -233,19 +232,43 @@ public final class QueryParserConfig { @Override public String toString() { - return "QueryParserConfig: [" + - " default operator=" + defaultOperator.name() + ";" + - " enable position increment=" + enablePositionIncrements + ";" + - " allow leading wildcard=" + allowLeadingWildcard + ";" + - " split whitespace=" + splitOnWhitespace + ";" + - " generate phrase query=" + autoGeneratePhraseQueries + ";" + - " generate multiterm sysnonymsphrase query=" + autoGenerateMultiTermSynonymsPhraseQuery + ";" + - " phrase slop=" + phraseSlop + ";" + - " date resolution=" + dateResolution.name() + - " locale=" + locale.toLanguageTag() + ";" + - " time zone=" + timeZone.getID() + ";" + - " numeric types=" + String.join(",", getTypeMap().entrySet().stream() - .map(e -> e.getKey() + "=" + e.getValue().toString()).collect(Collectors.toSet())) + ";" + - "]"; + return "QueryParserConfig: [" + + " default operator=" + + defaultOperator.name() + + ";" + + " enable position increment=" + + enablePositionIncrements + + ";" + + " allow leading wildcard=" + + allowLeadingWildcard + + ";" + + " split whitespace=" + + splitOnWhitespace + + ";" + + " generate phrase query=" + + autoGeneratePhraseQueries + + ";" + + " generate multiterm sysnonymsphrase query=" + + autoGenerateMultiTermSynonymsPhraseQuery + + ";" + + " phrase slop=" + + phraseSlop + + ";" + + " date resolution=" + + dateResolution.name() + + " locale=" + + locale.toLanguageTag() + + ";" + + " time zone=" + + timeZone.getID() + + ";" + + " numeric types=" + + String.join( + ",", + getTypeMap().entrySet().stream() + .map(e -> e.getKey() + "=" + e.getValue().toString()) + .collect(Collectors.toSet())) + + ";" + + "]"; } } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/search/Search.java b/lucene/luke/src/java/org/apache/lucene/luke/models/search/Search.java index e8c41008a39..18aa7687142 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/search/Search.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/search/Search.java @@ -21,7 +21,6 @@ import java.util.Collection; import java.util.List; import java.util.Optional; import java.util.Set; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.luke.models.LukeException; import org.apache.lucene.search.Explanation; @@ -29,34 +28,22 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; -/** - * A dedicated interface for Luke's Search tab. - */ +/** A dedicated interface for Luke's Search tab. */ public interface Search { - /** - * Returns all field names in this index. - */ + /** Returns all field names in this index. */ Collection getFieldNames(); - /** - * Returns field names those are sortable. - */ + /** Returns field names those are sortable. 
*/ Collection getSortableFieldNames(); - /** - * Returns field names those are searchable. - */ + /** Returns field names those are searchable. */ Collection getSearchableFieldNames(); - /** - * Returns field names those are searchable by range query. - */ + /** Returns field names those are searchable by range query. */ Collection getRangeSearchableFieldNames(); - /** - * Returns the current query. - */ + /** Returns the current query. */ Query getCurrentQuery(); /** @@ -70,7 +57,12 @@ public interface Search { * @return parsed query * @throws LukeException - if an internal error occurs when accessing index */ - Query parseQuery(String expression, String defField, Analyzer analyzer, QueryParserConfig config, boolean rewrite); + Query parseQuery( + String expression, + String defField, + Analyzer analyzer, + QueryParserConfig config, + boolean rewrite); /** * Creates the MoreLikeThis query for the specified document with given configurations. @@ -94,7 +86,12 @@ public interface Search { * @return search results * @throws LukeException - if an internal error occurs when accessing index */ - SearchResults search(Query query, SimilarityConfig simConfig, Set fieldsToLoad, int pageSize, boolean exactHitsCount); + SearchResults search( + Query query, + SimilarityConfig simConfig, + Set fieldsToLoad, + int pageSize, + boolean exactHitsCount); /** * Searches this index by the query with given sort criteria and configurations. @@ -108,7 +105,13 @@ public interface Search { * @return search results * @throws LukeException - if an internal error occurs when accessing index */ - SearchResults search(Query query, SimilarityConfig simConfig, Sort sort, Set fieldsToLoad, int pageSize, boolean exactHitsCount); + SearchResults search( + Query query, + SimilarityConfig simConfig, + Sort sort, + Set fieldsToLoad, + int pageSize, + boolean exactHitsCount); /** * Returns the next page for the current query. 
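For orientation before the next file, a minimal usage sketch of the Search API reformatted above; it is not part of this patch. The index path taken from args[0], the field name "title", and the default Builder configurations are illustrative assumptions, and nextPage() is the pager referenced by the "next page" javadoc above.

import java.nio.file.Paths;
import java.util.Optional;
import java.util.Set;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.luke.models.search.QueryParserConfig;
import org.apache.lucene.luke.models.search.Search;
import org.apache.lucene.luke.models.search.SearchFactory;
import org.apache.lucene.luke.models.search.SearchResults;
import org.apache.lucene.luke.models.search.SimilarityConfig;
import org.apache.lucene.search.Query;
import org.apache.lucene.store.FSDirectory;

public class SearchSketch {
  public static void main(String[] args) throws Exception {
    try (IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get(args[0])))) {
      Search search = new SearchFactory().newInstance(reader);
      // Parse the expression against a default field, then rewrite the query.
      Query query =
          search.parseQuery(
              "title:lucene",
              "title",
              new StandardAnalyzer(),
              new QueryParserConfig.Builder().build(),
              true);
      // First page of 20 hits, loading only the "title" stored field.
      SearchResults page =
          search.search(query, new SimilarityConfig.Builder().build(), Set.of("title"), 20, false);
      page.getHits().forEach(d -> System.out.println(d.getDocId() + " " + d.getScore()));
      // Paging follows the "next page" contract documented above.
      Optional<SearchResults> next = search.nextPage();
    }
  }
}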
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/search/SearchFactory.java b/lucene/luke/src/java/org/apache/lucene/luke/models/search/SearchFactory.java index b2f97b11e6a..803b07ca9b7 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/search/SearchFactory.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/search/SearchFactory.java @@ -25,5 +25,4 @@ public class SearchFactory { public Search newInstance(IndexReader reader) { return new SearchImpl(reader); } - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/search/SearchImpl.java b/lucene/luke/src/java/org/apache/lucene/luke/models/search/SearchImpl.java index 38c4b2146ab..86c67aa811c 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/search/SearchImpl.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/search/SearchImpl.java @@ -31,7 +31,6 @@ import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; - import org.apache.logging.log4j.Logger; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.index.DocValuesType; @@ -94,6 +93,7 @@ public final class SearchImpl extends LukeModel implements Search { /** * Constructs a SearchImpl that holds given {@link IndexReader} + * * @param reader - the index reader */ public SearchImpl(IndexReader reader) { @@ -134,30 +134,36 @@ public final class SearchImpl extends LukeModel implements Search { } @Override - public Query parseQuery(String expression, String defField, Analyzer analyzer, - QueryParserConfig config, boolean rewrite) { + public Query parseQuery( + String expression, + String defField, + Analyzer analyzer, + QueryParserConfig config, + boolean rewrite) { Objects.requireNonNull(expression); Objects.requireNonNull(defField); Objects.requireNonNull(analyzer); Objects.requireNonNull(config); - Query query = config.isUseClassicParser() ? - parseByClassicParser(expression, defField, analyzer, config) : - parseByStandardParser(expression, defField, analyzer, config); + Query query = + config.isUseClassicParser() + ? 
parseByClassicParser(expression, defField, analyzer, config) + : parseByStandardParser(expression, defField, analyzer, config); if (rewrite) { try { query = query.rewrite(reader); } catch (IOException e) { - throw new LukeException(String.format(Locale.ENGLISH, "Failed to rewrite query: %s", query.toString()), e); + throw new LukeException( + String.format(Locale.ENGLISH, "Failed to rewrite query: %s", query.toString()), e); } } return query; } - private Query parseByClassicParser(String expression, String defField, Analyzer analyzer, - QueryParserConfig config) { + private Query parseByClassicParser( + String expression, String defField, Analyzer analyzer, QueryParserConfig config) { QueryParser parser = new QueryParser(defField, analyzer); switch (config.getDefaultOperator()) { @@ -170,7 +176,8 @@ public final class SearchImpl extends LukeModel implements Search { } parser.setSplitOnWhitespace(config.isSplitOnWhitespace()); - parser.setAutoGenerateMultiTermSynonymsPhraseQuery(config.isAutoGenerateMultiTermSynonymsPhraseQuery()); + parser.setAutoGenerateMultiTermSynonymsPhraseQuery( + config.isAutoGenerateMultiTermSynonymsPhraseQuery()); parser.setAutoGeneratePhraseQueries(config.isAutoGeneratePhraseQueries()); parser.setEnablePositionIncrements(config.isEnablePositionIncrements()); parser.setAllowLeadingWildcard(config.isAllowLeadingWildcard()); @@ -184,13 +191,13 @@ public final class SearchImpl extends LukeModel implements Search { try { return parser.parse(expression); } catch (ParseException e) { - throw new LukeException(String.format(Locale.ENGLISH, "Failed to parse query expression: %s", expression), e); + throw new LukeException( + String.format(Locale.ENGLISH, "Failed to parse query expression: %s", expression), e); } - } - private Query parseByStandardParser(String expression, String defField, Analyzer analyzer, - QueryParserConfig config) { + private Query parseByStandardParser( + String expression, String defField, Analyzer analyzer, QueryParserConfig config) { StandardQueryParser parser = new StandardQueryParser(analyzer); switch (config.getDefaultOperator()) { @@ -223,7 +230,8 @@ public final class SearchImpl extends LukeModel implements Search { } else if (type == Float.class || type == Double.class) { pc = new PointsConfig(NumberFormat.getNumberInstance(Locale.ROOT), type); } else { - log.warn(String.format(Locale.ENGLISH, "Ignored invalid number type: %s.", type.getName())); + log.warn( + String.format(Locale.ENGLISH, "Ignored invalid number type: %s.", type.getName())); continue; } pointsConfigMap.put(field, pc); @@ -235,9 +243,9 @@ public final class SearchImpl extends LukeModel implements Search { try { return parser.parse(expression, defField); } catch (QueryNodeException e) { - throw new LukeException(String.format(Locale.ENGLISH, "Failed to parse query expression: %s", expression), e); + throw new LukeException( + String.format(Locale.ENGLISH, "Failed to parse query expression: %s", expression), e); } - } @Override @@ -259,15 +267,25 @@ public final class SearchImpl extends LukeModel implements Search { @Override public SearchResults search( - Query query, SimilarityConfig simConfig, Set fieldsToLoad, int pageSize, boolean exactHitsCount) { + Query query, + SimilarityConfig simConfig, + Set fieldsToLoad, + int pageSize, + boolean exactHitsCount) { return search(query, simConfig, null, fieldsToLoad, pageSize, exactHitsCount); } @Override public SearchResults search( - Query query, SimilarityConfig simConfig, Sort sort, Set fieldsToLoad, int pageSize, boolean 
exactHitsCount) { + Query query, + SimilarityConfig simConfig, + Sort sort, + Set fieldsToLoad, + int pageSize, + boolean exactHitsCount) { if (pageSize < 0) { - throw new LukeException(new IllegalArgumentException("Negative integer is not acceptable for page size.")); + throw new LukeException( + new IllegalArgumentException("Negative integer is not acceptable for page size.")); } // reset internal status to prepare for a new search session @@ -310,7 +328,8 @@ public final class SearchImpl extends LukeModel implements Search { System.arraycopy(topDocs.scoreDocs, 0, newDocs, docs.length, topDocs.scoreDocs.length); this.docs = newDocs; - return SearchResults.of(topDocs.totalHits, topDocs.scoreDocs, currentPage * pageSize, searcher, fieldsToLoad); + return SearchResults.of( + topDocs.totalHits, topDocs.scoreDocs, currentPage * pageSize, searcher, fieldsToLoad); } @Override @@ -322,8 +341,9 @@ public final class SearchImpl extends LukeModel implements Search { // proceed to next page currentPage += 1; - if (totalHits.value == 0 || - (totalHits.relation == TotalHits.Relation.EQUAL_TO && currentPage * pageSize >= totalHits.value)) { + if (totalHits.value == 0 + || (totalHits.relation == TotalHits.Relation.EQUAL_TO + && currentPage * pageSize >= totalHits.value)) { log.warn("No more next search results are available."); return Optional.empty(); } @@ -345,7 +365,6 @@ public final class SearchImpl extends LukeModel implements Search { } } - @Override public Optional prevPage() { if (currentPage < 0 || query == null) { @@ -401,26 +420,32 @@ public final class SearchImpl extends LukeModel implements Search { return Collections.emptyList(); case NUMERIC: - return Arrays.stream(new SortField[]{ - new SortField(name, SortField.Type.INT), - new SortField(name, SortField.Type.LONG), - new SortField(name, SortField.Type.FLOAT), - new SortField(name, SortField.Type.DOUBLE) - }).collect(Collectors.toList()); + return Arrays.stream( + new SortField[] { + new SortField(name, SortField.Type.INT), + new SortField(name, SortField.Type.LONG), + new SortField(name, SortField.Type.FLOAT), + new SortField(name, SortField.Type.DOUBLE) + }) + .collect(Collectors.toList()); case SORTED_NUMERIC: - return Arrays.stream(new SortField[]{ - new SortedNumericSortField(name, SortField.Type.INT), - new SortedNumericSortField(name, SortField.Type.LONG), - new SortedNumericSortField(name, SortField.Type.FLOAT), - new SortedNumericSortField(name, SortField.Type.DOUBLE) - }).collect(Collectors.toList()); + return Arrays.stream( + new SortField[] { + new SortedNumericSortField(name, SortField.Type.INT), + new SortedNumericSortField(name, SortField.Type.LONG), + new SortedNumericSortField(name, SortField.Type.FLOAT), + new SortedNumericSortField(name, SortField.Type.DOUBLE) + }) + .collect(Collectors.toList()); case SORTED: - return Arrays.stream(new SortField[] { - new SortField(name, SortField.Type.STRING), - new SortField(name, SortField.Type.STRING_VAL) - }).collect(Collectors.toList()); + return Arrays.stream( + new SortField[] { + new SortField(name, SortField.Type.STRING), + new SortField(name, SortField.Type.STRING_VAL) + }) + .collect(Collectors.toList()); case SORTED_SET: return Collections.singletonList(new SortedSetSortField(name, false)); @@ -428,7 +453,6 @@ public final class SearchImpl extends LukeModel implements Search { default: return Collections.singletonList(new SortField(name, SortField.Type.DOC)); } - } @Override @@ -465,7 +489,13 @@ public final class SearchImpl extends LukeModel implements Search { try { 
return searcher.explain(query, docid); } catch (IOException e) { - throw new LukeException(String.format(Locale.ENGLISH, "Failed to create explanation for doc: %d for query: \"%s\"", docid, query.toString()), e); + throw new LukeException( + String.format( + Locale.ENGLISH, + "Failed to create explanation for doc: %d for query: \"%s\"", + docid, + query.toString()), + e); } } } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/search/SearchResults.java b/lucene/luke/src/java/org/apache/lucene/luke/models/search/SearchResults.java index ebb7fa83c1b..a8061c6aafb 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/search/SearchResults.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/search/SearchResults.java @@ -25,16 +25,13 @@ import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.stream.Collectors; - import org.apache.lucene.document.Document; import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TotalHits; -/** - * Holder for a search result page. - */ +/** Holder for a search result page. */ public final class SearchResults { private TotalHits totalHits; @@ -54,8 +51,12 @@ public final class SearchResults { * @return the search result page * @throws IOException - if there is a low level IO error. */ - static SearchResults of(TotalHits totalHits, ScoreDoc[] docs, int offset, - IndexSearcher searcher, Set fieldsToLoad) + static SearchResults of( + TotalHits totalHits, + ScoreDoc[] docs, + int offset, + IndexSearcher searcher, + Set fieldsToLoad) throws IOException { SearchResults res = new SearchResults(); @@ -64,8 +65,8 @@ public final class SearchResults { Objects.requireNonNull(searcher); for (ScoreDoc sd : docs) { - Document luceneDoc = (fieldsToLoad == null) ? - searcher.doc(sd.doc) : searcher.doc(sd.doc, fieldsToLoad); + Document luceneDoc = + (fieldsToLoad == null) ? searcher.doc(sd.doc) : searcher.doc(sd.doc, fieldsToLoad); res.hits.add(Doc.of(sd.doc, sd.score, luceneDoc)); res.offset = offset; } @@ -73,40 +74,29 @@ public final class SearchResults { return res; } - /** - * Returns the total number of hits for this query. - */ + /** Returns the total number of hits for this query. */ public TotalHits getTotalHits() { return totalHits; } - /** - * Returns the offset of the current page. - */ + /** Returns the offset of the current page. */ public int getOffset() { return offset; } - /** - * Returns the documents of the current page. - */ + /** Returns the documents of the current page. */ public List getHits() { return List.copyOf(hits); } - /** - * Returns the size of the current page. - */ + /** Returns the size of the current page. */ public int size() { return hits.size(); } - private SearchResults() { - } + private SearchResults() {} - /** - * Holder for a hit. - */ + /** Holder for a hit. */ public static class Doc { private int docId; private float score; @@ -126,35 +116,29 @@ public final class SearchResults { Doc doc = new Doc(); doc.docId = docId; doc.score = score; - Set fields = luceneDoc.getFields().stream().map(IndexableField::name).collect(Collectors.toSet()); + Set fields = + luceneDoc.getFields().stream().map(IndexableField::name).collect(Collectors.toSet()); for (String f : fields) { doc.fieldValues.put(f, luceneDoc.getValues(f)); } return doc; } - /** - * Returns the document id. - */ + /** Returns the document id. 
*/ public int getDocId() { return docId; } - /** - * Returns the score of this document for the current query. - */ + /** Returns the score of this document for the current query. */ public float getScore() { return score; } - /** - * Returns the field data of this document. - */ + /** Returns the field data of this document. */ public Map getFieldValues() { return Map.copyOf(fieldValues); } - private Doc() { - } + private Doc() {} } } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/search/SimilarityConfig.java b/lucene/luke/src/java/org/apache/lucene/luke/models/search/SimilarityConfig.java index 072d1c54351..73171492198 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/search/SimilarityConfig.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/search/SimilarityConfig.java @@ -17,9 +17,7 @@ package org.apache.lucene.luke.models.search; -/** - * Configurations for Similarity. - */ +/** Configurations for Similarity. */ public final class SimilarityConfig { private final boolean useClassicSimilarity; @@ -56,7 +54,7 @@ public final class SimilarityConfig { return this; } - public Builder discountOverlaps (boolean val) { + public Builder discountOverlaps(boolean val) { discountOverlaps = val; return this; } @@ -90,11 +88,19 @@ public final class SimilarityConfig { } public String toString() { - return "SimilarityConfig: [" + - " use classic similarity=" + useClassicSimilarity + ";" + - " discount overlaps=" + discountOverlaps + ";" + - " k1=" + k1 + ";" + - " b=" + b + ";" + - "]"; + return "SimilarityConfig: [" + + " use classic similarity=" + + useClassicSimilarity + + ";" + + " discount overlaps=" + + discountOverlaps + + ";" + + " k1=" + + k1 + + ";" + + " b=" + + b + + ";" + + "]"; } } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/search/package-info.java b/lucene/luke/src/java/org/apache/lucene/luke/models/search/package-info.java index 63433a1bf2c..2bed5de5744 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/search/package-info.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/search/package-info.java @@ -16,4 +16,4 @@ */ /** Models and APIs for the Search tab */ -package org.apache.lucene.luke.models.search; \ No newline at end of file +package org.apache.lucene.luke.models.search; diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/tools/IndexTools.java b/lucene/luke/src/java/org/apache/lucene/luke/models/tools/IndexTools.java index a4f4d12052e..81d648809b6 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/tools/IndexTools.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/tools/IndexTools.java @@ -18,25 +18,20 @@ package org.apache.lucene.luke.models.tools; import java.io.PrintStream; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.document.Document; import org.apache.lucene.index.CheckIndex; import org.apache.lucene.luke.models.LukeException; import org.apache.lucene.search.Query; -/** - * A dedicated interface for Luke's various index manipulations. - */ +/** A dedicated interface for Luke's various index manipulations. */ public interface IndexTools { /** * Execute force merges. * - *
<p> - * Merges are executed until there are maxNumSegments segments. + * <p>Merges are executed until there are maxNumSegments segments. * When expunge is true, maxNumSegments parameter is ignored. - * </p>
* * @param expunge - if true, only segments having deleted documents are merged * @param maxNumSegments - max number of segments @@ -57,7 +52,8 @@ public interface IndexTools { /** * Try to repair the corrupted index using previously returned index status. * - *
* <p>This method must be called with the return value from {@link IndexTools#checkIndex(PrintStream)}.</p> + * <p>
This method must be called with the return value from {@link + * IndexTools#checkIndex(PrintStream)}. * * @param st - index status * @param ps - information stream @@ -91,13 +87,14 @@ public interface IndexTools { /** * Create a new index with sample documents. + * * @param dataDir - the directory path which contains sample documents (20 Newsgroups). */ void createNewIndex(String dataDir); - /** * Export terms from given field into a new file on the destination directory + * * @param destDir - destination directory * @param field - field name * @param delimiter - delimiter to separate terms and their frequency diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/tools/IndexToolsFactory.java b/lucene/luke/src/java/org/apache/lucene/luke/models/tools/IndexToolsFactory.java index c3bd86376a1..69776dcfac5 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/tools/IndexToolsFactory.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/tools/IndexToolsFactory.java @@ -30,5 +30,4 @@ public class IndexToolsFactory { public IndexTools newInstance(IndexReader reader, boolean useCompound, boolean keepAllCommits) { return new IndexToolsImpl(reader, useCompound, keepAllCommits); } - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/tools/IndexToolsImpl.java b/lucene/luke/src/java/org/apache/lucene/luke/models/tools/IndexToolsImpl.java index 4fdd6e3f96a..9583935c567 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/tools/IndexToolsImpl.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/tools/IndexToolsImpl.java @@ -27,7 +27,6 @@ import java.nio.file.Paths; import java.util.List; import java.util.Locale; import java.util.Objects; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.document.Document; import org.apache.lucene.index.CheckIndex; @@ -83,7 +82,8 @@ public final class IndexToolsImpl extends LukeModel implements IndexTools { public void optimize(boolean expunge, int maxNumSegments, PrintStream ps) { if (reader instanceof DirectoryReader) { Directory dir = ((DirectoryReader) reader).directory(); - try (IndexWriter writer = IndexUtils.createWriter(dir, null, useCompound, keepAllCommits, ps)) { + try (IndexWriter writer = + IndexUtils.createWriter(dir, null, useCompound, keepAllCommits, ps)) { IndexUtils.optimizeIndex(writer, expunge, maxNumSegments); } catch (IOException e) { throw new LukeException("Failed to optimize index", e); @@ -128,7 +128,8 @@ public final class IndexToolsImpl extends LukeModel implements IndexTools { if (reader instanceof DirectoryReader) { Directory dir = ((DirectoryReader) reader).directory(); - try (IndexWriter writer = IndexUtils.createWriter(dir, analyzer, useCompound, keepAllCommits)) { + try (IndexWriter writer = + IndexUtils.createWriter(dir, analyzer, useCompound, keepAllCommits)) { writer.addDocument(doc); writer.commit(); } catch (IOException e) { @@ -170,7 +171,8 @@ public final class IndexToolsImpl extends LukeModel implements IndexTools { throw new IllegalStateException(); } - writer = IndexUtils.createWriter(dir, Message.createLuceneAnalyzer(), useCompound, keepAllCommits); + writer = + IndexUtils.createWriter(dir, Message.createLuceneAnalyzer(), useCompound, keepAllCommits); if (Objects.nonNull(dataDir)) { Path path = Paths.get(dataDir); @@ -188,7 +190,8 @@ public final class IndexToolsImpl extends LukeModel implements IndexTools { if (writer != null) { try { writer.close(); - } catch (IOException e) {} + } catch (IOException e) { + } } } } @@ -199,18 +202,22 @@ public 
final class IndexToolsImpl extends LukeModel implements IndexTools { try { Terms terms = MultiTerms.getTerms(reader, field); if (terms == null) { - throw new LukeException(String.format(Locale.US, "Field %s does not contain any terms to be exported", field)); + throw new LukeException( + String.format(Locale.US, "Field %s does not contain any terms to be exported", field)); } try (BufferedWriter writer = Files.newBufferedWriter(path, Charset.forName("UTF-8"))) { TermsEnum termsEnum = terms.iterator(); BytesRef term; while (!Thread.currentThread().isInterrupted() && (term = termsEnum.next()) != null) { - writer.write(String.format(Locale.US, "%s%s%d\n", term.utf8ToString(), delimiter, +termsEnum.docFreq())); + writer.write( + String.format( + Locale.US, "%s%s%d\n", term.utf8ToString(), delimiter, +termsEnum.docFreq())); } return path.toString(); } } catch (IOException e) { - throw new LukeException("Terms file export for field [" + field + "] to file [" + filename + "] has failed.", e); + throw new LukeException( + "Terms file export for field [" + field + "] to file [" + filename + "] has failed.", e); } } } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/tools/package-info.java b/lucene/luke/src/java/org/apache/lucene/luke/models/tools/package-info.java index cb76b17725e..cb0dab8fe35 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/tools/package-info.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/tools/package-info.java @@ -16,4 +16,4 @@ */ /** Models and APIs for various index manipulation */ -package org.apache.lucene.luke.models.tools; \ No newline at end of file +package org.apache.lucene.luke.models.tools; diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/util/IndexUtils.java b/lucene/luke/src/java/org/apache/lucene/luke/models/util/IndexUtils.java index 13046350ebc..11244ec78c4 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/util/IndexUtils.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/util/IndexUtils.java @@ -36,7 +36,6 @@ import java.util.Map; import java.util.Objects; import java.util.stream.Collectors; import java.util.stream.StreamSupport; - import org.apache.logging.log4j.Logger; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.core.WhitespaceAnalyzer; @@ -53,9 +52,7 @@ import org.apache.lucene.util.Bits; /** * Utilities for various raw index operations. * - *
<p> - * This is for internal uses, DO NOT call from UI components or applications. - * </p> + * <p>
This is for internal uses, DO NOT call from UI components or applications. */ public final class IndexUtils { @@ -69,32 +66,39 @@ public final class IndexUtils { * @return index reader * @throws Exception - if there is a low level IO error. */ - public static IndexReader openIndex(String indexPath, String dirImpl) - throws Exception { + public static IndexReader openIndex(String indexPath, String dirImpl) throws Exception { final Path root = FileSystems.getDefault().getPath(Objects.requireNonNull(indexPath)); final List readers = new ArrayList<>(); // find all valid index directories in this directory - Files.walkFileTree(root, new SimpleFileVisitor() { - @Override - public FileVisitResult preVisitDirectory(Path path, BasicFileAttributes attrs) throws IOException { - Directory dir = openDirectory(path, dirImpl); - try { - DirectoryReader dr = DirectoryReader.open(dir); - readers.add(dr); - } catch (IOException e) { - log.warn("Error opening directory", e); - } - return FileVisitResult.CONTINUE; - } - }); + Files.walkFileTree( + root, + new SimpleFileVisitor() { + @Override + public FileVisitResult preVisitDirectory(Path path, BasicFileAttributes attrs) + throws IOException { + Directory dir = openDirectory(path, dirImpl); + try { + DirectoryReader dr = DirectoryReader.open(dir); + readers.add(dr); + } catch (IOException e) { + log.warn("Error opening directory", e); + } + return FileVisitResult.CONTINUE; + } + }); if (readers.isEmpty()) { throw new RuntimeException("No valid directory at the location: " + indexPath); } if (log.isInfoEnabled()) { - log.info(String.format(Locale.ENGLISH, "IndexReaders (%d leaf readers) successfully opened. Index path=%s", readers.size(), indexPath)); + log.info( + String.format( + Locale.ENGLISH, + "IndexReaders (%d leaf readers) successfully opened. Index path=%s", + readers.size(), + indexPath)); } if (readers.size() == 1) { @@ -107,7 +111,7 @@ public final class IndexUtils { /** * Opens an index directory for given index path. * - *
* <p>This can be used to open/repair corrupted indexes.</p> + * <p>
This can be used to open/repair corrupted indexes. * * @param dirPath - index directory path * @param dirImpl - class name for the specific directory implementation @@ -118,7 +122,9 @@ public final class IndexUtils { final Path path = FileSystems.getDefault().getPath(Objects.requireNonNull(dirPath)); Directory dir = openDirectory(path, dirImpl); if (log.isInfoEnabled()) { - log.info(String.format(Locale.ENGLISH, "DirectoryReader successfully opened. Directory path=%s", dirPath)); + log.info( + String.format( + Locale.ENGLISH, "DirectoryReader successfully opened. Directory path=%s", dirPath)); } return dir; } @@ -196,7 +202,9 @@ public final class IndexUtils { * @return new index writer * @throws IOException - if there is a low level IO error. */ - public static IndexWriter createWriter(Directory dir, Analyzer analyzer, boolean useCompound, boolean keepAllCommits) throws IOException { + public static IndexWriter createWriter( + Directory dir, Analyzer analyzer, boolean useCompound, boolean keepAllCommits) + throws IOException { return createWriter(Objects.requireNonNull(dir), analyzer, useCompound, keepAllCommits, null); } @@ -211,11 +219,13 @@ public final class IndexUtils { * @return new index writer * @throws IOException - if there is a low level IO error. */ - public static IndexWriter createWriter(Directory dir, Analyzer analyzer, boolean useCompound, boolean keepAllCommits, - PrintStream ps) throws IOException { + public static IndexWriter createWriter( + Directory dir, Analyzer analyzer, boolean useCompound, boolean keepAllCommits, PrintStream ps) + throws IOException { Objects.requireNonNull(dir); - IndexWriterConfig config = new IndexWriterConfig(analyzer == null ? new WhitespaceAnalyzer() : analyzer); + IndexWriterConfig config = + new IndexWriterConfig(analyzer == null ? new WhitespaceAnalyzer() : analyzer); config.setUseCompoundFile(useCompound); if (ps != null) { config.setInfoStream(ps); @@ -237,7 +247,8 @@ public final class IndexUtils { * @param maxNumSegments - max number of segments * @throws IOException - if there is a low level IO error. */ - public static void optimizeIndex(IndexWriter writer, boolean expunge, int maxNumSegments) throws IOException { + public static void optimizeIndex(IndexWriter writer, boolean expunge, int maxNumSegments) + throws IOException { Objects.requireNonNull(writer); if (expunge) { writer.forceMergeDeletes(true); @@ -273,7 +284,8 @@ public final class IndexUtils { * @param ps - information stream * @throws IOException - if there is a low level IO error. 
*/ - public static void tryRepairIndex(Directory dir, CheckIndex.Status st, PrintStream ps) throws IOException { + public static void tryRepairIndex(Directory dir, CheckIndex.Status st, PrintStream ps) + throws IOException { Objects.requireNonNull(dir); Objects.requireNonNull(st); @@ -300,7 +312,9 @@ public final class IndexUtils { String format = "unknown"; try (IndexInput in = dir.openInput(segmentFileName, IOContext.READ)) { if (CodecUtil.CODEC_MAGIC == in.readInt()) { - int actualVersion = CodecUtil.checkHeaderNoMagic(in, "segments", SegmentInfos.VERSION_70, Integer.MAX_VALUE); + int actualVersion = + CodecUtil.checkHeaderNoMagic( + in, "segments", SegmentInfos.VERSION_70, Integer.MAX_VALUE); if (actualVersion == SegmentInfos.VERSION_70) { format = "Lucene 7.0 or later"; } else if (actualVersion == SegmentInfos.VERSION_72) { @@ -344,7 +358,8 @@ public final class IndexUtils { * @return a map contains terms and their occurrence frequencies * @throws IOException - if there is a low level IO error. */ - public static Map countTerms(IndexReader reader, Collection fields) throws IOException { + public static Map countTerms(IndexReader reader, Collection fields) + throws IOException { Map res = new HashMap<>(); for (String field : fields) { if (!res.containsKey(field)) { @@ -430,7 +445,8 @@ public final class IndexUtils { * @param field - field name * @throws IOException - if there is a low level IO error. */ - public static BinaryDocValues getBinaryDocValues(IndexReader reader, String field) throws IOException { + public static BinaryDocValues getBinaryDocValues(IndexReader reader, String field) + throws IOException { if (reader instanceof LeafReader) { return ((LeafReader) reader).getBinaryDocValues(field); } else { @@ -445,7 +461,8 @@ public final class IndexUtils { * @param field - field name * @throws IOException - if there is a low level IO error. */ - public static NumericDocValues getNumericDocValues(IndexReader reader, String field) throws IOException { + public static NumericDocValues getNumericDocValues(IndexReader reader, String field) + throws IOException { if (reader instanceof LeafReader) { return ((LeafReader) reader).getNumericDocValues(field); } else { @@ -460,7 +477,8 @@ public final class IndexUtils { * @param field - field name * @throws IOException - if there is a low level IO error. */ - public static SortedNumericDocValues getSortedNumericDocValues(IndexReader reader, String field) throws IOException { + public static SortedNumericDocValues getSortedNumericDocValues(IndexReader reader, String field) + throws IOException { if (reader instanceof LeafReader) { return ((LeafReader) reader).getSortedNumericDocValues(field); } else { @@ -475,7 +493,8 @@ public final class IndexUtils { * @param field - field name * @throws IOException - if there is a low level IO error. */ - public static SortedDocValues getSortedDocValues(IndexReader reader, String field) throws IOException { + public static SortedDocValues getSortedDocValues(IndexReader reader, String field) + throws IOException { if (reader instanceof LeafReader) { return ((LeafReader) reader).getSortedDocValues(field); } else { @@ -490,7 +509,8 @@ public final class IndexUtils { * @param field - field name * @throws IOException - if there is a low level IO error. 
*/ - public static SortedSetDocValues getSortedSetDocvalues(IndexReader reader, String field) throws IOException { + public static SortedSetDocValues getSortedSetDocvalues(IndexReader reader, String field) + throws IOException { if (reader instanceof LeafReader) { return ((LeafReader) reader).getSortedSetDocValues(field); } else { @@ -498,6 +518,5 @@ public final class IndexUtils { } } - private IndexUtils() { - } + private IndexUtils() {} } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/util/package-info.java b/lucene/luke/src/java/org/apache/lucene/luke/models/util/package-info.java index 29354bd9273..60674ebad30 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/util/package-info.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/util/package-info.java @@ -16,4 +16,4 @@ */ /** Utilities for models and APIs */ -package org.apache.lucene.luke.models.util; \ No newline at end of file +package org.apache.lucene.luke.models.util; diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/util/twentynewsgroups/Message.java b/lucene/luke/src/java/org/apache/lucene/luke/models/util/twentynewsgroups/Message.java index e28b73ebefa..f68655e98e8 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/util/twentynewsgroups/Message.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/util/twentynewsgroups/Message.java @@ -20,11 +20,10 @@ package org.apache.lucene.luke.models.util.twentynewsgroups; import java.util.HashMap; import java.util.Map; import java.util.Objects; - import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.email.UAX29URLEmailAnalyzer; import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper; import org.apache.lucene.analysis.standard.StandardAnalyzer; -import org.apache.lucene.analysis.email.UAX29URLEmailAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; @@ -139,7 +138,6 @@ public class Message { doc.add(new StoredField("date_raw", getDate())); } - if (getOrganization() != null) { doc.add(new TextField("organization", getOrganization(), Field.Store.YES)); } @@ -161,9 +159,9 @@ public class Message { return new PerFieldAnalyzerWrapper(new StandardAnalyzer(), map); } - private final static FieldType SUBJECT_FIELD_TYPE; + private static final FieldType SUBJECT_FIELD_TYPE; - private final static FieldType BODY_FIELD_TYPE; + private static final FieldType BODY_FIELD_TYPE; static { SUBJECT_FIELD_TYPE = new FieldType(); diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/util/twentynewsgroups/MessageFilesParser.java b/lucene/luke/src/java/org/apache/lucene/luke/models/util/twentynewsgroups/MessageFilesParser.java index a11997450d1..38773a1dce5 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/util/twentynewsgroups/MessageFilesParser.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/util/twentynewsgroups/MessageFilesParser.java @@ -28,12 +28,14 @@ import java.nio.file.SimpleFileVisitor; import java.nio.file.attribute.BasicFileAttributes; import java.util.ArrayList; import java.util.List; - import org.apache.logging.log4j.Logger; import org.apache.lucene.luke.util.LoggerFactory; -/** 20 Newsgroups (http://kdd.ics.uci.edu/databases/20newsgroups/20newsgroups.html) message files parser */ -public class MessageFilesParser extends SimpleFileVisitor { +/** + * 20 Newsgroups (http://kdd.ics.uci.edu/databases/20newsgroups/20newsgroups.html) message files + * parser + */ 
+public class MessageFilesParser extends SimpleFileVisitor { private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); @@ -94,7 +96,8 @@ public class MessageFilesParser extends SimpleFileVisitor { case "Lines": try { message.setLines(Integer.parseInt(ary[1].trim())); - } catch (NumberFormatException e) {} + } catch (NumberFormatException e) { + } break; default: break; @@ -119,5 +122,4 @@ public class MessageFilesParser extends SimpleFileVisitor { Files.walkFileTree(root, this); return messages; } - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/util/twentynewsgroups/package-info.java b/lucene/luke/src/java/org/apache/lucene/luke/models/util/twentynewsgroups/package-info.java index 58218fb3ea3..3d54afa0217 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/util/twentynewsgroups/package-info.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/util/twentynewsgroups/package-info.java @@ -16,4 +16,4 @@ */ /** Utilities for indexing 20 Newsgroups data */ -package org.apache.lucene.luke.models.util.twentynewsgroups; \ No newline at end of file +package org.apache.lucene.luke.models.util.twentynewsgroups; diff --git a/lucene/luke/src/java/org/apache/lucene/luke/package-info.java b/lucene/luke/src/java/org/apache/lucene/luke/package-info.java index 9c6a51e1c8b..3963e15377f 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/package-info.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/package-info.java @@ -16,4 +16,4 @@ */ /** Luke : Lucene toolbox project */ -package org.apache.lucene.luke; \ No newline at end of file +package org.apache.lucene.luke; diff --git a/lucene/luke/src/java/org/apache/lucene/luke/util/BytesRefUtils.java b/lucene/luke/src/java/org/apache/lucene/luke/util/BytesRefUtils.java index 4c7cf18657f..ed61e670979 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/util/BytesRefUtils.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/util/BytesRefUtils.java @@ -19,9 +19,7 @@ package org.apache.lucene.luke.util; import org.apache.lucene.util.BytesRef; -/** - * An utility class for handling {@link BytesRef} objects. - */ +/** An utility class for handling {@link BytesRef} objects. */ public final class BytesRefUtils { public static String decode(BytesRef ref) { @@ -32,6 +30,5 @@ public final class BytesRefUtils { } } - private BytesRefUtils() { - } + private BytesRefUtils() {} } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/util/LoggerFactory.java b/lucene/luke/src/java/org/apache/lucene/luke/util/LoggerFactory.java index 4735d64ad56..a1a91848059 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/util/LoggerFactory.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/util/LoggerFactory.java @@ -18,7 +18,6 @@ package org.apache.lucene.luke.util; import java.nio.charset.StandardCharsets; - import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -32,33 +31,32 @@ import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration; import org.apache.logging.log4j.core.layout.PatternLayout; import org.apache.lucene.luke.app.desktop.util.TextAreaAppender; -/** - * Logger factory. This programmatically configurates logger context (Appenders etc.) - */ +/** Logger factory. This programmatically configurates logger context (Appenders etc.) 
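// A small sketch, not from this patch, of why a decode() helper like the one
// in the BytesRefUtils hunk above is handy: BytesRef.toString() renders raw
// hex bytes, while utf8ToString() recovers the text, assuming the stored
// bytes are valid UTF-8.
import org.apache.lucene.util.BytesRef;

public final class BytesRefDemo {
  public static void main(String[] args) {
    BytesRef ref = new BytesRef("lucene");
    System.out.println(ref);                // [6c 75 63 65 6e 65]
    System.out.println(ref.utf8ToString()); // lucene
  }
}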
*/ public class LoggerFactory { public static void initGuiLogging(String logFile) { - ConfigurationBuilder builder = ConfigurationBuilderFactory.newConfigurationBuilder(); + ConfigurationBuilder builder = + ConfigurationBuilderFactory.newConfigurationBuilder(); builder.add(builder.newRootLogger(Level.INFO)); LoggerContext context = Configurator.initialize(builder.build()); - PatternLayout layout = PatternLayout.newBuilder() - .withPattern("[%d{ISO8601}] %5p (%F:%L) - %m%n") - .withCharset(StandardCharsets.UTF_8) - .build(); + PatternLayout layout = + PatternLayout.newBuilder() + .withPattern("[%d{ISO8601}] %5p (%F:%L) - %m%n") + .withCharset(StandardCharsets.UTF_8) + .build(); - Appender fileAppender = FileAppender.newBuilder() - .setName("File") - .setLayout(layout) - .withFileName(logFile) - .withAppend(false) - .build(); + Appender fileAppender = + FileAppender.newBuilder() + .setName("File") + .setLayout(layout) + .withFileName(logFile) + .withAppend(false) + .build(); fileAppender.start(); - Appender textAreaAppender = TextAreaAppender.newBuilder() - .setName("TextArea") - .setLayout(layout) - .build(); + Appender textAreaAppender = + TextAreaAppender.newBuilder().setName("TextArea").setLayout(layout).build(); textAreaAppender.start(); context.getRootLogger().addAppender(fileAppender); @@ -69,5 +67,4 @@ public class LoggerFactory { public static Logger getLogger(Class clazz) { return LogManager.getLogger(clazz); } - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/util/package-info.java b/lucene/luke/src/java/org/apache/lucene/luke/util/package-info.java index e9830cf28e6..57d8f06d707 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/util/package-info.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/util/package-info.java @@ -16,4 +16,4 @@ */ /** General utilities */ -package org.apache.lucene.luke.util; \ No newline at end of file +package org.apache.lucene.luke.util; diff --git a/lucene/luke/src/java/org/apache/lucene/luke/util/reflection/ClassScanner.java b/lucene/luke/src/java/org/apache/lucene/luke/util/reflection/ClassScanner.java index 2937298aee2..e2e5776a5eb 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/util/reflection/ClassScanner.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/util/reflection/ClassScanner.java @@ -29,14 +29,11 @@ import java.util.Set; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; - import org.apache.logging.log4j.Logger; import org.apache.lucene.luke.util.LoggerFactory; import org.apache.lucene.util.NamedThreadFactory; -/** - * Utility class for scanning class files in jars. - */ +/** Utility class for scanning class files in jars. 
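// The LoggerFactory hunk above configures Log4j2 entirely in code. A
// self-contained sketch of the same approach, using only calls that appear in
// the hunk itself; Luke's custom TextAreaAppender is omitted here.
import java.nio.charset.StandardCharsets;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.Appender;
import org.apache.logging.log4j.core.LoggerContext;
import org.apache.logging.log4j.core.appender.FileAppender;
import org.apache.logging.log4j.core.config.Configurator;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilder;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilderFactory;
import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;
import org.apache.logging.log4j.core.layout.PatternLayout;

public final class GuiLoggingSketch {
  public static void init(String logFile) {
    ConfigurationBuilder<BuiltConfiguration> builder =
        ConfigurationBuilderFactory.newConfigurationBuilder();
    builder.add(builder.newRootLogger(Level.INFO));
    LoggerContext context = Configurator.initialize(builder.build());

    PatternLayout layout =
        PatternLayout.newBuilder()
            .withPattern("[%d{ISO8601}] %5p (%F:%L) - %m%n")
            .withCharset(StandardCharsets.UTF_8)
            .build();
    Appender fileAppender =
        FileAppender.newBuilder()
            .setName("File")
            .setLayout(layout)
            .withFileName(logFile)
            .withAppend(false)
            .build();
    fileAppender.start(); // appenders must be started before use

    context.getRootLogger().addAppender(fileAppender);
  }
}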
*/ public class ClassScanner { private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); @@ -63,7 +60,8 @@ public class ClassScanner { collectors.get(i % numThreads).addUrl(urls.get(i)); } - ExecutorService executorService = Executors.newFixedThreadPool(numThreads, new NamedThreadFactory("scanner-scan-subtypes")); + ExecutorService executorService = + Executors.newFixedThreadPool(numThreads, new NamedThreadFactory("scanner-scan-subtypes")); for (SubtypeCollector collector : collectors) { executorService.submit(collector); } @@ -101,7 +99,7 @@ public class ClassScanner { } } } - return urls; + return urls; } private static String resourceName(String packageName) { diff --git a/lucene/luke/src/java/org/apache/lucene/luke/util/reflection/SubtypeCollector.java b/lucene/luke/src/java/org/apache/lucene/luke/util/reflection/SubtypeCollector.java index de40a187eef..fcd6d226745 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/util/reflection/SubtypeCollector.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/util/reflection/SubtypeCollector.java @@ -25,7 +25,6 @@ import java.util.Objects; import java.util.Set; import java.util.jar.JarInputStream; import java.util.zip.ZipEntry; - import org.apache.logging.log4j.Logger; import org.apache.lucene.luke.util.LoggerFactory; @@ -65,8 +64,10 @@ final class SubtypeCollector implements Runnable { ZipEntry entry; while ((entry = jis.getNextEntry()) != null) { String name = entry.getName(); - if (name.endsWith(".class") && name.indexOf('$') < 0 - && !name.contains("package-info") && !name.startsWith("META-INF")) { + if (name.endsWith(".class") + && name.indexOf('$') < 0 + && !name.contains("package-info") + && !name.startsWith("META-INF")) { String fqcn = convertToFQCN(name); if (!fqcn.startsWith(packageName)) { continue; @@ -96,5 +97,4 @@ final class SubtypeCollector implements Runnable { int index = name.lastIndexOf(".class"); return name.replace('/', '.').substring(0, index); } - } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/util/reflection/package-info.java b/lucene/luke/src/java/org/apache/lucene/luke/util/reflection/package-info.java index 268245e2ad7..fb718bc36a8 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/util/reflection/package-info.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/util/reflection/package-info.java @@ -16,4 +16,4 @@ */ /** Utilities for reflections */ -package org.apache.lucene.luke.util.reflection; \ No newline at end of file +package org.apache.lucene.luke.util.reflection; diff --git a/lucene/luke/src/test/org/apache/lucene/luke/app/desktop/util/inifile/SimpleIniFileTest.java b/lucene/luke/src/test/org/apache/lucene/luke/app/desktop/util/inifile/SimpleIniFileTest.java index c345800e53e..a6c6b581282 100644 --- a/lucene/luke/src/test/org/apache/lucene/luke/app/desktop/util/inifile/SimpleIniFileTest.java +++ b/lucene/luke/src/test/org/apache/lucene/luke/app/desktop/util/inifile/SimpleIniFileTest.java @@ -25,7 +25,6 @@ import java.nio.file.Path; import java.util.List; import java.util.Map; import java.util.stream.Collectors; - import org.apache.lucene.util.LuceneTestCase; import org.junit.Test; diff --git a/lucene/luke/src/test/org/apache/lucene/luke/models/analysis/AnalysisImplTest.java b/lucene/luke/src/test/org/apache/lucene/luke/models/analysis/AnalysisImplTest.java index cfa6df27845..258c577b518 100644 --- a/lucene/luke/src/test/org/apache/lucene/luke/models/analysis/AnalysisImplTest.java +++ 
b/lucene/luke/src/test/org/apache/lucene/luke/models/analysis/AnalysisImplTest.java @@ -26,7 +26,6 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.custom.CustomAnalyzer; import org.apache.lucene.luke.models.LukeException; @@ -73,7 +72,8 @@ public class AnalysisImplTest extends LuceneTestCase { Analyzer analyzer = analysis.createAnalyzerFromClassName(analyzerType); assertEquals(analyzerType, analyzer.getClass().getName()); - String text = "It is a truth universally acknowledged, that a single man in possession of a good fortune, must be in want of a wife."; + String text = + "It is a truth universally acknowledged, that a single man in possession of a good fortune, must be in want of a wife."; List tokens = analysis.analyze(text); assertNotNull(tokens); } @@ -83,13 +83,17 @@ public class AnalysisImplTest extends LuceneTestCase { AnalysisImpl analysis = new AnalysisImpl(); Map tkParams = new HashMap<>(); tkParams.put("maxTokenLen", "128"); - CustomAnalyzerConfig.Builder builder = new CustomAnalyzerConfig.Builder( - "keyword", tkParams) - .addTokenFilterConfig("lowercase", Collections.emptyMap()); + CustomAnalyzerConfig.Builder builder = + new CustomAnalyzerConfig.Builder("keyword", tkParams) + .addTokenFilterConfig("lowercase", Collections.emptyMap()); CustomAnalyzer analyzer = (CustomAnalyzer) analysis.buildCustomAnalyzer(builder.build()); assertEquals("org.apache.lucene.analysis.custom.CustomAnalyzer", analyzer.getClass().getName()); - assertEquals("org.apache.lucene.analysis.core.KeywordTokenizerFactory", analyzer.getTokenizerFactory().getClass().getName()); - assertEquals("org.apache.lucene.analysis.core.LowerCaseFilterFactory", analyzer.getTokenFilterFactories().get(0).getClass().getName()); + assertEquals( + "org.apache.lucene.analysis.core.KeywordTokenizerFactory", + analyzer.getTokenizerFactory().getClass().getName()); + assertEquals( + "org.apache.lucene.analysis.core.LowerCaseFilterFactory", + analyzer.getTokenFilterFactories().get(0).getClass().getName()); String text = "Apache Lucene"; List tokens = analysis.analyze(text); @@ -109,16 +113,22 @@ public class AnalysisImplTest extends LuceneTestCase { tfParams.put("ignoreCase", "true"); tfParams.put("words", "stop.txt"); tfParams.put("format", "wordset"); - CustomAnalyzerConfig.Builder builder = new CustomAnalyzerConfig.Builder( - "whitespace", tkParams) - .configDir(confDir.toString()) - .addTokenFilterConfig("lowercase", Collections.emptyMap()) - .addTokenFilterConfig("stop", tfParams); + CustomAnalyzerConfig.Builder builder = + new CustomAnalyzerConfig.Builder("whitespace", tkParams) + .configDir(confDir.toString()) + .addTokenFilterConfig("lowercase", Collections.emptyMap()) + .addTokenFilterConfig("stop", tfParams); CustomAnalyzer analyzer = (CustomAnalyzer) analysis.buildCustomAnalyzer(builder.build()); assertEquals("org.apache.lucene.analysis.custom.CustomAnalyzer", analyzer.getClass().getName()); - assertEquals("org.apache.lucene.analysis.core.WhitespaceTokenizerFactory", analyzer.getTokenizerFactory().getClass().getName()); - assertEquals("org.apache.lucene.analysis.core.LowerCaseFilterFactory", analyzer.getTokenFilterFactories().get(0).getClass().getName()); - assertEquals("org.apache.lucene.analysis.core.StopFilterFactory", analyzer.getTokenFilterFactories().get(1).getClass().getName()); + assertEquals( + "org.apache.lucene.analysis.core.WhitespaceTokenizerFactory", + 
analyzer.getTokenizerFactory().getClass().getName()); + assertEquals( + "org.apache.lucene.analysis.core.LowerCaseFilterFactory", + analyzer.getTokenFilterFactories().get(0).getClass().getName()); + assertEquals( + "org.apache.lucene.analysis.core.StopFilterFactory", + analyzer.getTokenFilterFactories().get(1).getClass().getName()); String text = "Government of the People, by the People, for the People"; List tokens = analysis.analyze(text); @@ -148,28 +158,32 @@ public class AnalysisImplTest extends LuceneTestCase { AnalysisImpl analysis = new AnalysisImpl(); Map tkParams = new HashMap<>(); tkParams.put("maxTokenLen", "128"); - CustomAnalyzerConfig.Builder builder = new CustomAnalyzerConfig.Builder("keyword", tkParams) - .addTokenFilterConfig("lowercase", Collections.emptyMap()) - .addCharFilterConfig("htmlstrip", Collections.emptyMap()); + CustomAnalyzerConfig.Builder builder = + new CustomAnalyzerConfig.Builder("keyword", tkParams) + .addTokenFilterConfig("lowercase", Collections.emptyMap()) + .addCharFilterConfig("htmlstrip", Collections.emptyMap()); CustomAnalyzer analyzer = (CustomAnalyzer) analysis.buildCustomAnalyzer(builder.build()); assertEquals("org.apache.lucene.analysis.custom.CustomAnalyzer", analyzer.getClass().getName()); - assertEquals("org.apache.lucene.analysis.charfilter.HTMLStripCharFilterFactory", + assertEquals( + "org.apache.lucene.analysis.charfilter.HTMLStripCharFilterFactory", analyzer.getCharFilterFactories().get(0).getClass().getName()); - assertEquals("org.apache.lucene.analysis.core.KeywordTokenizerFactory", + assertEquals( + "org.apache.lucene.analysis.core.KeywordTokenizerFactory", analyzer.getTokenizerFactory().getClass().getName()); - assertEquals("org.apache.lucene.analysis.core.LowerCaseFilterFactory", + assertEquals( + "org.apache.lucene.analysis.core.LowerCaseFilterFactory", analyzer.getTokenFilterFactories().get(0).getClass().getName()); String text = "Apache Lucene"; Analysis.StepByStepResult result = analysis.analyzeStepByStep(text); assertNotNull(result); assertNotNull(result.getCharfilteredTexts()); - assertEquals(1,result.getCharfilteredTexts().size()); + assertEquals(1, result.getCharfilteredTexts().size()); assertEquals("htmlStrip", result.getCharfilteredTexts().get(0).getName()); assertNotNull(result.getNamedTokens()); assertEquals(2, result.getNamedTokens().size()); - //FIXME check each namedTokensList + // FIXME check each namedTokensList assertEquals("keyword", result.getNamedTokens().get(0).getName()); assertEquals("lowercase", result.getNamedTokens().get(1).getName()); } diff --git a/lucene/luke/src/test/org/apache/lucene/luke/models/commits/CommitsImplTest.java b/lucene/luke/src/test/org/apache/lucene/luke/models/commits/CommitsImplTest.java index 84a9b433309..93a60a7b890 100644 --- a/lucene/luke/src/test/org/apache/lucene/luke/models/commits/CommitsImplTest.java +++ b/lucene/luke/src/test/org/apache/lucene/luke/models/commits/CommitsImplTest.java @@ -22,7 +22,6 @@ import java.nio.file.Path; import java.util.List; import java.util.Map; import java.util.Optional; - import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.codecs.Codec; import org.apache.lucene.document.Document; @@ -60,7 +59,8 @@ public class CommitsImplTest extends LuceneTestCase { Directory dir = newFSDirectory(indexDir); - IndexWriterConfig config = new IndexWriterConfig(new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec()); + IndexWriterConfig config = + new IndexWriterConfig(new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec()); 
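// The CommitsImplTest setup here retains every commit point by installing
// NoDeletionPolicy (set on the next line). A standalone sketch, assuming a
// filesystem index path; DirectoryReader.listCommits should then report one
// entry per commit() call.
import java.nio.file.Paths;
import java.util.List;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.NoDeletionPolicy;
import org.apache.lucene.store.FSDirectory;

public final class CommitsSketch {
  public static void main(String[] args) throws Exception {
    try (FSDirectory dir = FSDirectory.open(Paths.get(args[0]))) {
      IndexWriterConfig config =
          new IndexWriterConfig(new StandardAnalyzer())
              .setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE);
      try (IndexWriter writer = new IndexWriter(dir, config)) {
        writer.addDocument(new Document());
        writer.commit(); // survives: nothing is ever deleted
        writer.addDocument(new Document());
        writer.commit();
      }
      List<IndexCommit> commits = DirectoryReader.listCommits(dir);
      System.out.println("commit points: " + commits.size()); // expect >= 2
    }
  }
}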
config.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE); RandomIndexWriter writer = new RandomIndexWriter(random(), dir, config); @@ -101,7 +101,7 @@ public class CommitsImplTest extends LuceneTestCase { assertTrue(commitList.size() > 0); // should be sorted by descending order in generation assertEquals(commitList.size(), commitList.get(0).getGeneration()); - assertEquals(1, commitList.get(commitList.size()-1).getGeneration()); + assertEquals(1, commitList.get(commitList.size() - 1).getGeneration()); } @Test @@ -179,11 +179,10 @@ public class CommitsImplTest extends LuceneTestCase { assertTrue(commits.getSegmentDiagnostics(10, "_0").isEmpty()); } - @Test public void testGetSegmentDiagnostics_invalid_name() { CommitsImpl commits = new CommitsImpl(reader, indexDir.toString()); - Map diagnostics = commits.getSegmentDiagnostics(1,"xxx"); + Map diagnostics = commits.getSegmentDiagnostics(1, "xxx"); assertTrue(diagnostics.isEmpty()); } @@ -206,6 +205,5 @@ public class CommitsImplTest extends LuceneTestCase { CommitsImpl commits = new CommitsImpl(reader, indexDir.toString()); Optional codec = commits.getSegmentCodec(1, "xxx"); assertFalse(codec.isPresent()); - } } diff --git a/lucene/luke/src/test/org/apache/lucene/luke/models/documents/DocValuesAdapterTest.java b/lucene/luke/src/test/org/apache/lucene/luke/models/documents/DocValuesAdapterTest.java index e6349bf1e9d..572806bf9b0 100644 --- a/lucene/luke/src/test/org/apache/lucene/luke/models/documents/DocValuesAdapterTest.java +++ b/lucene/luke/src/test/org/apache/lucene/luke/models/documents/DocValuesAdapterTest.java @@ -19,7 +19,6 @@ package org.apache.lucene.luke.models.documents; import java.io.IOException; import java.util.Collections; - import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.BinaryDocValuesField; import org.apache.lucene.document.Document; @@ -62,7 +61,8 @@ public class DocValuesAdapterTest extends DocumentsTestBase { @Test public void testGetDocValues_binary() throws Exception { DocValuesAdapter adapterImpl = new DocValuesAdapter(reader); - DocValues values = adapterImpl.getDocValues(0, "dv_binary").orElseThrow(IllegalStateException::new); + DocValues values = + adapterImpl.getDocValues(0, "dv_binary").orElseThrow(IllegalStateException::new); assertEquals(DocValuesType.BINARY, values.getDvType()); assertEquals(new BytesRef("lucene"), values.getValues().get(0)); assertEquals(Collections.emptyList(), values.getNumericValues()); @@ -71,7 +71,8 @@ public class DocValuesAdapterTest extends DocumentsTestBase { @Test public void testGetDocValues_sorted() throws Exception { DocValuesAdapter adapterImpl = new DocValuesAdapter(reader); - DocValues values = adapterImpl.getDocValues(0, "dv_sorted").orElseThrow(IllegalStateException::new); + DocValues values = + adapterImpl.getDocValues(0, "dv_sorted").orElseThrow(IllegalStateException::new); assertEquals(DocValuesType.SORTED, values.getDvType()); assertEquals(new BytesRef("abc"), values.getValues().get(0)); assertEquals(Collections.emptyList(), values.getNumericValues()); @@ -80,7 +81,8 @@ public class DocValuesAdapterTest extends DocumentsTestBase { @Test public void testGetDocValues_sorted_set() throws Exception { DocValuesAdapter adapterImpl = new DocValuesAdapter(reader); - DocValues values = adapterImpl.getDocValues(0, "dv_sortedset").orElseThrow(IllegalStateException::new); + DocValues values = + adapterImpl.getDocValues(0, "dv_sortedset").orElseThrow(IllegalStateException::new); assertEquals(DocValuesType.SORTED_SET, values.getDvType()); 
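// A sketch, not from this patch, of the SortedSetDocValues iteration these
// DocValuesAdapter tests exercise; nextOrd()/NO_MORE_ORDS is the API of this
// era of Lucene.
import java.io.IOException;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.search.DocIdSetIterator;

public final class SortedSetSketch {
  static void dump(LeafReader leaf, String field) throws IOException {
    SortedSetDocValues values = leaf.getSortedSetDocValues(field);
    if (values == null) {
      return; // no doc values for this field in this segment
    }
    for (int doc = values.nextDoc();
        doc != DocIdSetIterator.NO_MORE_DOCS;
        doc = values.nextDoc()) {
      long ord;
      while ((ord = values.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) {
        // lookupOrd resolves the ordinal back to the stored term bytes
        System.out.println(doc + " -> " + values.lookupOrd(ord).utf8ToString());
      }
    }
  }
}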
assertEquals(new BytesRef("java"), values.getValues().get(0)); assertEquals(new BytesRef("python"), values.getValues().get(1)); @@ -90,7 +92,8 @@ public class DocValuesAdapterTest extends DocumentsTestBase { @Test public void testGetDocValues_numeric() throws Exception { DocValuesAdapter adapterImpl = new DocValuesAdapter(reader); - DocValues values = adapterImpl.getDocValues(0, "dv_numeric").orElseThrow(IllegalStateException::new); + DocValues values = + adapterImpl.getDocValues(0, "dv_numeric").orElseThrow(IllegalStateException::new); assertEquals(DocValuesType.NUMERIC, values.getDvType()); assertEquals(Collections.emptyList(), values.getValues()); assertEquals(42L, values.getNumericValues().get(0).longValue()); @@ -99,7 +102,8 @@ public class DocValuesAdapterTest extends DocumentsTestBase { @Test public void testGetDocValues_sorted_numeric() throws Exception { DocValuesAdapter adapterImpl = new DocValuesAdapter(reader); - DocValues values = adapterImpl.getDocValues(0, "dv_sortednumeric").orElseThrow(IllegalStateException::new); + DocValues values = + adapterImpl.getDocValues(0, "dv_sortednumeric").orElseThrow(IllegalStateException::new); assertEquals(DocValuesType.SORTED_NUMERIC, values.getDvType()); assertEquals(Collections.emptyList(), values.getValues()); assertEquals(11L, values.getNumericValues().get(0).longValue()); diff --git a/lucene/luke/src/test/org/apache/lucene/luke/models/documents/DocumentsImplTest.java b/lucene/luke/src/test/org/apache/lucene/luke/models/documents/DocumentsImplTest.java index 01a1f67d4c8..7306026e85e 100644 --- a/lucene/luke/src/test/org/apache/lucene/luke/models/documents/DocumentsImplTest.java +++ b/lucene/luke/src/test/org/apache/lucene/luke/models/documents/DocumentsImplTest.java @@ -18,7 +18,6 @@ package org.apache.lucene.luke.models.documents; import java.util.List; - import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Term; @@ -28,10 +27,12 @@ import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.NumericUtils; import org.junit.Test; - // See: https://github.com/DmitryKey/luke/issues/133 @LuceneTestCase.SuppressCodecs({ - "DummyCompressingStoredFieldsData", "HighCompressionCompressingStoredFieldsData", "FastCompressingStoredFieldsData", "FastDecompressionCompressingStoredFieldsData" + "DummyCompressingStoredFieldsData", + "HighCompressionCompressingStoredFieldsData", + "FastCompressingStoredFieldsData", + "FastDecompressionCompressingStoredFieldsData" }) public class DocumentsImplTest extends DocumentsTestBase { diff --git a/lucene/luke/src/test/org/apache/lucene/luke/models/documents/DocumentsTestBase.java b/lucene/luke/src/test/org/apache/lucene/luke/models/documents/DocumentsTestBase.java index 5c1cd0b0062..fded2b76bcb 100644 --- a/lucene/luke/src/test/org/apache/lucene/luke/models/documents/DocumentsTestBase.java +++ b/lucene/luke/src/test/org/apache/lucene/luke/models/documents/DocumentsTestBase.java @@ -19,7 +19,6 @@ package org.apache.lucene.luke.models.documents; import java.io.IOException; import java.nio.file.Path; - import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; @@ -82,9 +81,11 @@ public abstract class DocumentsTestBase extends LuceneTestCase { Document doc1 = new Document(); doc1.add(new Field("title", "Pride and Prejudice", titleType)); doc1.add(new Field("author", "Jane Austen", authorType)); - doc1.add(new Field("text", - "It is a truth 
universally acknowledged, that a single man in possession of a good fortune, must be in want of a wife.", - textType)); + doc1.add( + new Field( + "text", + "It is a truth universally acknowledged, that a single man in possession of a good fortune, must be in want of a wife.", + textType)); doc1.add(new SortedSetDocValuesField("subject", new BytesRef("Fiction"))); doc1.add(new SortedSetDocValuesField("subject", new BytesRef("Love stories"))); doc1.add(new Field("downloads", packInt(28533), downloadsType)); @@ -93,8 +94,11 @@ public abstract class DocumentsTestBase extends LuceneTestCase { Document doc2 = new Document(); doc2.add(new Field("title", "Alice's Adventures in Wonderland", titleType)); doc2.add(new Field("author", "Lewis Carroll", authorType)); - doc2.add(new Field("text", "Alice was beginning to get very tired of sitting by her sister on the bank, and of having nothing to do: once or twice she had peeped into the book her sister was reading, but it had no pictures or conversations in it, ‘and what is the use of a book,’ thought Alice ‘without pictures or conversations?’", - textType)); + doc2.add( + new Field( + "text", + "Alice was beginning to get very tired of sitting by her sister on the bank, and of having nothing to do: once or twice she had peeped into the book her sister was reading, but it had no pictures or conversations in it, ‘and what is the use of a book,’ thought Alice ‘without pictures or conversations?’", + textType)); doc2.add(new SortedSetDocValuesField("subject", new BytesRef("Fantasy literature"))); doc2.add(new Field("downloads", packInt(18712), downloadsType)); writer.addDocument(doc2); @@ -102,8 +106,11 @@ public abstract class DocumentsTestBase extends LuceneTestCase { Document doc3 = new Document(); doc3.add(new Field("title", "Frankenstein; Or, The Modern Prometheus", titleType)); doc3.add(new Field("author", "Mary Wollstonecraft Shelley", authorType)); - doc3.add(new Field("text", "You will rejoice to hear that no disaster has accompanied the commencement of an enterprise which you have regarded with such evil forebodings. I arrived here yesterday, and my first task is to assure my dear sister of my welfare and increasing confidence in the success of my undertaking.", - textType)); + doc3.add( + new Field( + "text", + "You will rejoice to hear that no disaster has accompanied the commencement of an enterprise which you have regarded with such evil forebodings. 
I arrived here yesterday, and my first task is to assure my dear sister of my welfare and increasing confidence in the success of my undertaking.", + textType)); doc3.add(new SortedSetDocValuesField("subject", new BytesRef("Science fiction"))); doc3.add(new SortedSetDocValuesField("subject", new BytesRef("Horror tales"))); doc3.add(new SortedSetDocValuesField("subject", new BytesRef("Monsters"))); @@ -113,8 +120,7 @@ public abstract class DocumentsTestBase extends LuceneTestCase { Document doc4 = new Document(); doc4.add(new Field("title", "A Doll's House : a play", titleType)); doc4.add(new Field("author", "Henrik Ibsen", authorType)); - doc4.add(new Field("text", "", - textType)); + doc4.add(new Field("text", "", textType)); doc4.add(new SortedSetDocValuesField("subject", new BytesRef("Drama"))); doc4.add(new Field("downloads", packInt(14629), downloadsType)); writer.addDocument(doc4); @@ -122,8 +128,11 @@ public abstract class DocumentsTestBase extends LuceneTestCase { Document doc5 = new Document(); doc5.add(new Field("title", "The Adventures of Sherlock Holmes", titleType)); doc5.add(new Field("author", "Arthur Conan Doyle", authorType)); - doc5.add(new Field("text", "To Sherlock Holmes she is always the woman. I have seldom heard him mention her under any other name. In his eyes she eclipses and predominates the whole of her sex.", - textType)); + doc5.add( + new Field( + "text", + "To Sherlock Holmes she is always the woman. I have seldom heard him mention her under any other name. In his eyes she eclipses and predominates the whole of her sex.", + textType)); doc5.add(new SortedSetDocValuesField("subject", new BytesRef("Fiction"))); doc5.add(new SortedSetDocValuesField("subject", new BytesRef("Detective and mystery stories"))); doc5.add(new Field("downloads", packInt(12828), downloadsType)); @@ -148,5 +157,4 @@ public abstract class DocumentsTestBase extends LuceneTestCase { reader.close(); dir.close(); } - } diff --git a/lucene/luke/src/test/org/apache/lucene/luke/models/documents/TermVectorsAdapterTest.java b/lucene/luke/src/test/org/apache/lucene/luke/models/documents/TermVectorsAdapterTest.java index 5d85e854bb0..ec7884d1472 100644 --- a/lucene/luke/src/test/org/apache/lucene/luke/models/documents/TermVectorsAdapterTest.java +++ b/lucene/luke/src/test/org/apache/lucene/luke/models/documents/TermVectorsAdapterTest.java @@ -19,7 +19,6 @@ package org.apache.lucene.luke.models.documents; import java.io.IOException; import java.util.List; - import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.FieldType; @@ -55,7 +54,8 @@ public class TermVectorsAdapterTest extends DocumentsTestBase { textType_pos_offset.setStoreTermVectorPositions(true); textType_pos_offset.setStoreTermVectorOffsets(true); - String text = "It is a truth universally acknowledged, that a single man in possession of a good fortune, must be in want of a wife."; + String text = + "It is a truth universally acknowledged, that a single man in possession of a good fortune, must be in want of a wife."; Document doc = new Document(); doc.add(newField("text1", text, textType)); doc.add(newField("text2", text, textType_pos)); diff --git a/lucene/luke/src/test/org/apache/lucene/luke/models/overview/OverviewImplTest.java b/lucene/luke/src/test/org/apache/lucene/luke/models/overview/OverviewImplTest.java index 5eb15ef946c..25f543c117b 100644 --- a/lucene/luke/src/test/org/apache/lucene/luke/models/overview/OverviewImplTest.java +++ 
b/lucene/luke/src/test/org/apache/lucene/luke/models/overview/OverviewImplTest.java @@ -22,7 +22,6 @@ import java.util.Arrays; import java.util.HashSet; import java.util.List; import java.util.Map; - import org.apache.lucene.store.AlreadyClosedException; import org.junit.Test; @@ -43,9 +42,7 @@ public class OverviewImplTest extends OverviewTestBase { @Test public void testGetFieldNames() { OverviewImpl overview = new OverviewImpl(reader, indexDir.toString()); - assertEquals( - new HashSet<>(Arrays.asList("f1", "f2")), - new HashSet<>(overview.getFieldNames())); + assertEquals(new HashSet<>(Arrays.asList("f1", "f2")), new HashSet<>(overview.getFieldNames())); } @Test @@ -111,7 +108,7 @@ public class OverviewImplTest extends OverviewTestBase { @Test public void testGetSortedTermCounts() { OverviewImpl overview = new OverviewImpl(reader, indexDir.toString()); - Map countsMap = overview.getSortedTermCounts(TermCountsOrder.COUNT_DESC); + Map countsMap = overview.getSortedTermCounts(TermCountsOrder.COUNT_DESC); assertEquals(Arrays.asList("f2", "f1"), new ArrayList<>(countsMap.keySet())); } @@ -136,5 +133,4 @@ public class OverviewImplTest extends OverviewTestBase { reader.close(); overview.getNumFields(); } - } diff --git a/lucene/luke/src/test/org/apache/lucene/luke/models/overview/OverviewTestBase.java b/lucene/luke/src/test/org/apache/lucene/luke/models/overview/OverviewTestBase.java index f1516174250..79fd0b64610 100644 --- a/lucene/luke/src/test/org/apache/lucene/luke/models/overview/OverviewTestBase.java +++ b/lucene/luke/src/test/org/apache/lucene/luke/models/overview/OverviewTestBase.java @@ -21,7 +21,6 @@ import java.io.IOException; import java.nio.file.Path; import java.util.HashMap; import java.util.Map; - import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; @@ -58,7 +57,7 @@ public abstract class OverviewTestBase extends LuceneTestCase { Directory dir = newFSDirectory(indexDir); IndexWriterConfig config = new IndexWriterConfig(new MockAnalyzer(random())); - config.setMergePolicy(NoMergePolicy.INSTANCE); // see LUCENE-8998 + config.setMergePolicy(NoMergePolicy.INSTANCE); // see LUCENE-8998 RandomIndexWriter writer = new RandomIndexWriter(random(), dir, config); Document doc1 = new Document(); @@ -95,5 +94,4 @@ public abstract class OverviewTestBase extends LuceneTestCase { reader.close(); dir.close(); } - } diff --git a/lucene/luke/src/test/org/apache/lucene/luke/models/overview/TermCountsTest.java b/lucene/luke/src/test/org/apache/lucene/luke/models/overview/TermCountsTest.java index 0ccfd5e67ce..16345f067ae 100644 --- a/lucene/luke/src/test/org/apache/lucene/luke/models/overview/TermCountsTest.java +++ b/lucene/luke/src/test/org/apache/lucene/luke/models/overview/TermCountsTest.java @@ -20,7 +20,6 @@ package org.apache.lucene.luke.models.overview; import java.util.ArrayList; import java.util.Arrays; import java.util.Map; - import org.junit.Test; public class TermCountsTest extends OverviewTestBase { @@ -78,5 +77,4 @@ public class TermCountsTest extends OverviewTestBase { assertEquals(3, (long) countsMap.get("f1")); assertEquals(6, (long) countsMap.get("f2")); } - -} \ No newline at end of file +} diff --git a/lucene/luke/src/test/org/apache/lucene/luke/models/overview/TopTermsTest.java b/lucene/luke/src/test/org/apache/lucene/luke/models/overview/TopTermsTest.java index a726ad87a33..80e63586da9 100644 --- a/lucene/luke/src/test/org/apache/lucene/luke/models/overview/TopTermsTest.java +++ 
b/lucene/luke/src/test/org/apache/lucene/luke/models/overview/TopTermsTest.java @@ -18,7 +18,6 @@ package org.apache.lucene.luke.models.overview; import java.util.List; - import org.junit.Test; public class TopTermsTest extends OverviewTestBase { @@ -36,5 +35,4 @@ public class TopTermsTest extends OverviewTestBase { assertEquals(2, result.get(1).getDocFreq()); assertEquals("f2", result.get(1).getField()); } - } diff --git a/lucene/luke/src/test/org/apache/lucene/luke/models/search/SearchImplTest.java b/lucene/luke/src/test/org/apache/lucene/luke/models/search/SearchImplTest.java index e9603cf4b3e..41679d779fe 100644 --- a/lucene/luke/src/test/org/apache/lucene/luke/models/search/SearchImplTest.java +++ b/lucene/luke/src/test/org/apache/lucene/luke/models/search/SearchImplTest.java @@ -22,7 +22,6 @@ import java.nio.file.Path; import java.util.HashMap; import java.util.Map; import java.util.Optional; - import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.DoubleDocValuesField; @@ -158,33 +157,34 @@ public class SearchImplTest extends LuceneTestCase { @Test public void testGetSortableFieldNames() { SearchImpl search = new SearchImpl(reader); - assertArrayEquals(new String[]{"f2", "f3", "f4", "f5", "f6", "f7"}, + assertArrayEquals( + new String[] {"f2", "f3", "f4", "f5", "f6", "f7"}, search.getSortableFieldNames().toArray()); } @Test public void testGetSearchableFieldNames() { SearchImpl search = new SearchImpl(reader); - assertArrayEquals(new String[]{"f1"}, - search.getSearchableFieldNames().toArray()); + assertArrayEquals(new String[] {"f1"}, search.getSearchableFieldNames().toArray()); } @Test public void testGetRangeSearchableFieldNames() { SearchImpl search = new SearchImpl(reader); - assertArrayEquals(new String[]{"f8", "f9", "f10", "f11"}, search.getRangeSearchableFieldNames().toArray()); + assertArrayEquals( + new String[] {"f8", "f9", "f10", "f11"}, search.getRangeSearchableFieldNames().toArray()); } @Test public void testParseClassic() { SearchImpl search = new SearchImpl(reader); - QueryParserConfig config = new QueryParserConfig.Builder() - .allowLeadingWildcard(true) - .defaultOperator(QueryParserConfig.Operator.AND) - .fuzzyMinSim(1.0f) - .build(); - Query q = search.parseQuery("app~ f2:*ie", "f1", new StandardAnalyzer(), - config, false); + QueryParserConfig config = + new QueryParserConfig.Builder() + .allowLeadingWildcard(true) + .defaultOperator(QueryParserConfig.Operator.AND) + .fuzzyMinSim(1.0f) + .build(); + Query q = search.parseQuery("app~ f2:*ie", "f1", new StandardAnalyzer(), config, false); assertEquals("+f1:app~1 +f2:*ie", q.toString()); } @@ -194,12 +194,9 @@ public class SearchImplTest extends LuceneTestCase { Map> types = new HashMap<>(); types.put("f8", Integer.class); - QueryParserConfig config = new QueryParserConfig.Builder() - .useClassicParser(false) - .typeMap(types) - .build(); - Query q = search.parseQuery("f8:[10 TO 20]", "f1", new StandardAnalyzer(), - config, false); + QueryParserConfig config = + new QueryParserConfig.Builder().useClassicParser(false).typeMap(types).build(); + Query q = search.parseQuery("f8:[10 TO 20]", "f1", new StandardAnalyzer(), config, false); assertEquals("f8:[10 TO 20]", q.toString()); assertTrue(q instanceof PointRangeQuery); } @@ -211,45 +208,49 @@ public class SearchImplTest extends LuceneTestCase { assertTrue(search.guessSortTypes("f1").isEmpty()); assertArrayEquals( - new SortField[]{ - new SortField("f2", SortField.Type.STRING), - new 
SortField("f2", SortField.Type.STRING_VAL)}, + new SortField[] { + new SortField("f2", SortField.Type.STRING), new SortField("f2", SortField.Type.STRING_VAL) + }, search.guessSortTypes("f2").toArray()); assertArrayEquals( - new SortField[]{new SortedSetSortField("f3", false)}, + new SortField[] {new SortedSetSortField("f3", false)}, search.guessSortTypes("f3").toArray()); assertArrayEquals( - new SortField[]{ - new SortField("f4", SortField.Type.INT), - new SortField("f4", SortField.Type.LONG), - new SortField("f4", SortField.Type.FLOAT), - new SortField("f4", SortField.Type.DOUBLE)}, + new SortField[] { + new SortField("f4", SortField.Type.INT), + new SortField("f4", SortField.Type.LONG), + new SortField("f4", SortField.Type.FLOAT), + new SortField("f4", SortField.Type.DOUBLE) + }, search.guessSortTypes("f4").toArray()); assertArrayEquals( - new SortField[]{ - new SortField("f5", SortField.Type.INT), - new SortField("f5", SortField.Type.LONG), - new SortField("f5", SortField.Type.FLOAT), - new SortField("f5", SortField.Type.DOUBLE)}, + new SortField[] { + new SortField("f5", SortField.Type.INT), + new SortField("f5", SortField.Type.LONG), + new SortField("f5", SortField.Type.FLOAT), + new SortField("f5", SortField.Type.DOUBLE) + }, search.guessSortTypes("f5").toArray()); assertArrayEquals( - new SortField[]{ - new SortField("f6", SortField.Type.INT), - new SortField("f6", SortField.Type.LONG), - new SortField("f6", SortField.Type.FLOAT), - new SortField("f6", SortField.Type.DOUBLE)}, + new SortField[] { + new SortField("f6", SortField.Type.INT), + new SortField("f6", SortField.Type.LONG), + new SortField("f6", SortField.Type.FLOAT), + new SortField("f6", SortField.Type.DOUBLE) + }, search.guessSortTypes("f6").toArray()); assertArrayEquals( - new SortField[]{ - new SortedNumericSortField("f7", SortField.Type.INT), - new SortedNumericSortField("f7", SortField.Type.LONG), - new SortedNumericSortField("f7", SortField.Type.FLOAT), - new SortedNumericSortField("f7", SortField.Type.DOUBLE)}, + new SortField[] { + new SortedNumericSortField("f7", SortField.Type.INT), + new SortedNumericSortField("f7", SortField.Type.LONG), + new SortedNumericSortField("f7", SortField.Type.FLOAT), + new SortedNumericSortField("f7", SortField.Type.DOUBLE) + }, search.guessSortTypes("f7").toArray()); } @@ -265,26 +266,31 @@ public class SearchImplTest extends LuceneTestCase { assertFalse(search.getSortType("f1", "STRING", false).isPresent()); - assertEquals(new SortField("f2", SortField.Type.STRING, false), + assertEquals( + new SortField("f2", SortField.Type.STRING, false), search.getSortType("f2", "STRING", false).get()); assertFalse(search.getSortType("f2", "INT", false).isPresent()); - assertEquals(new SortedSetSortField("f3", false), - search.getSortType("f3", "CUSTOM", false).get()); + assertEquals( + new SortedSetSortField("f3", false), search.getSortType("f3", "CUSTOM", false).get()); - assertEquals(new SortField("f4", SortField.Type.LONG, false), + assertEquals( + new SortField("f4", SortField.Type.LONG, false), search.getSortType("f4", "LONG", false).get()); assertFalse(search.getSortType("f4", "STRING", false).isPresent()); - assertEquals(new SortField("f5", SortField.Type.FLOAT, false), + assertEquals( + new SortField("f5", SortField.Type.FLOAT, false), search.getSortType("f5", "FLOAT", false).get()); assertFalse(search.getSortType("f5", "STRING", false).isPresent()); - assertEquals(new SortField("f6", SortField.Type.DOUBLE, false), + assertEquals( + new SortField("f6", SortField.Type.DOUBLE, false), 
search.getSortType("f6", "DOUBLE", false).get()); assertFalse(search.getSortType("f6", "STRING", false).isPresent()); - assertEquals(new SortedNumericSortField("f7", SortField.Type.LONG, false), + assertEquals( + new SortedNumericSortField("f7", SortField.Type.LONG, false), search.getSortType("f7", "LONG", false).get()); assertFalse(search.getSortType("f7", "STRING", false).isPresent()); } @@ -300,7 +306,8 @@ public class SearchImplTest extends LuceneTestCase { public void testSearch() throws Exception { SearchImpl search = new SearchImpl(reader); Query query = new QueryParser("f1", new StandardAnalyzer()).parse("apple"); - SearchResults res = search.search(query, new SimilarityConfig.Builder().build(), null, 10, true); + SearchResults res = + search.search(query, new SimilarityConfig.Builder().build(), null, 10, true); assertEquals(10, res.getTotalHits().value); assertEquals(10, res.size()); @@ -312,7 +319,8 @@ public class SearchImplTest extends LuceneTestCase { SearchImpl search = new SearchImpl(reader); Query query = new QueryParser("f1", new StandardAnalyzer()).parse("apple"); Sort sort = new Sort(new SortField("f2", SortField.Type.STRING, true)); - SearchResults res = search.search(query, new SimilarityConfig.Builder().build(), sort, null, 10, true); + SearchResults res = + search.search(query, new SimilarityConfig.Builder().build(), sort, null, 10, true); assertEquals(10, res.getTotalHits().value); assertEquals(10, res.size()); @@ -376,5 +384,4 @@ public class SearchImplTest extends LuceneTestCase { search.search(query, new SimilarityConfig.Builder().build(), null, 10, true); assertFalse(search.prevPage().isPresent()); } - } diff --git a/lucene/misc/src/java/org/apache/lucene/misc/CollectorMemoryTracker.java b/lucene/misc/src/java/org/apache/lucene/misc/CollectorMemoryTracker.java index 10d6b145b87..305c61c8cd0 100644 --- a/lucene/misc/src/java/org/apache/lucene/misc/CollectorMemoryTracker.java +++ b/lucene/misc/src/java/org/apache/lucene/misc/CollectorMemoryTracker.java @@ -18,12 +18,11 @@ package org.apache.lucene.misc; import java.util.concurrent.atomic.AtomicLong; - import org.apache.lucene.misc.util.MemoryTracker; /** - * Default implementation of {@code MemoryTracker} that tracks - * allocations and allows setting a memory limit per collector + * Default implementation of {@code MemoryTracker} that tracks allocations and allows setting a + * memory limit per collector */ public class CollectorMemoryTracker implements MemoryTracker { private String name; diff --git a/lucene/misc/src/java/org/apache/lucene/misc/GetTermInfo.java b/lucene/misc/src/java/org/apache/lucene/misc/GetTermInfo.java index 829f3ed4e72..874a8f363cc 100644 --- a/lucene/misc/src/java/org/apache/lucene/misc/GetTermInfo.java +++ b/lucene/misc/src/java/org/apache/lucene/misc/GetTermInfo.java @@ -18,26 +18,26 @@ package org.apache.lucene.misc; import java.nio.file.Paths; import java.util.Locale; - -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.FSDirectory; -import org.apache.lucene.util.SuppressForbidden; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.FSDirectory; +import org.apache.lucene.util.SuppressForbidden; /** - * Utility to get document frequency and total number of occurrences (sum of the tf for each doc) of a term. 
+ * Utility to get document frequency and total number of occurrences (sum of the tf for each doc) of + * a term. */ @SuppressForbidden(reason = "System.out required: command line tool") public class GetTermInfo { - + public static void main(String[] args) throws Exception { - + FSDirectory dir = null; String inputStr = null; String field = null; - + if (args.length == 3) { dir = FSDirectory.open(Paths.get(args[0])); field = args[1]; @@ -46,19 +46,23 @@ public class GetTermInfo { usage(); System.exit(1); } - - getTermInfo(dir,new Term(field, inputStr)); + + getTermInfo(dir, new Term(field, inputStr)); } - + public static void getTermInfo(Directory dir, Term term) throws Exception { IndexReader reader = DirectoryReader.open(dir); - System.out.printf(Locale.ROOT, "%s:%s \t totalTF = %,d \t doc freq = %,d \n", - term.field(), term.text(), reader.totalTermFreq(term), reader.docFreq(term)); + System.out.printf( + Locale.ROOT, + "%s:%s \t totalTF = %,d \t doc freq = %,d \n", + term.field(), + term.text(), + reader.totalTermFreq(term), + reader.docFreq(term)); } - + private static void usage() { - System.out - .println("\n\nusage:\n\t" - + "java " + GetTermInfo.class.getName() + " field term \n\n"); + System.out.println( + "\n\nusage:\n\t" + "java " + GetTermInfo.class.getName() + " field term \n\n"); } } diff --git a/lucene/misc/src/java/org/apache/lucene/misc/HighFreqTerms.java b/lucene/misc/src/java/org/apache/lucene/misc/HighFreqTerms.java index 7b191bced33..96362db6361 100644 --- a/lucene/misc/src/java/org/apache/lucene/misc/HighFreqTerms.java +++ b/lucene/misc/src/java/org/apache/lucene/misc/HighFreqTerms.java @@ -21,7 +21,6 @@ import java.nio.file.Paths; import java.util.Collection; import java.util.Comparator; import java.util.Locale; - import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.FieldInfos; import org.apache.lucene.index.IndexReader; @@ -35,69 +34,71 @@ import org.apache.lucene.util.PriorityQueue; import org.apache.lucene.util.SuppressForbidden; /** - * HighFreqTerms class extracts the top n most frequent terms - * (by document frequency) from an existing Lucene index and reports their - * document frequency. - *
<p>
- * If the -t flag is given, both document frequency and total tf (total - * number of occurrences) are reported, ordered by descending total tf. + * HighFreqTerms class extracts the top n most frequent terms (by document frequency) + * from an existing Lucene index and reports their document frequency. * + *
<p>
If the -t flag is given, both document frequency and total tf (total number of occurrences) + * are reported, ordered by descending total tf. */ public class HighFreqTerms { - + // The top numTerms will be displayed public static final int DEFAULT_NUMTERMS = 100; - + @SuppressForbidden(reason = "System.out required: command line tool") public static void main(String[] args) throws Exception { String field = null; int numTerms = DEFAULT_NUMTERMS; - + if (args.length == 0 || args.length > 4) { usage(); System.exit(1); - } + } Directory dir = FSDirectory.open(Paths.get(args[0])); - + Comparator comparator = new DocFreqComparator(); - + for (int i = 1; i < args.length; i++) { if (args[i].equals("-t")) { comparator = new TotalTermFreqComparator(); - } - else{ + } else { try { numTerms = Integer.parseInt(args[i]); } catch (NumberFormatException e) { - field=args[i]; + field = args[i]; } } } - + IndexReader reader = DirectoryReader.open(dir); TermStats[] terms = getHighFreqTerms(reader, numTerms, field, comparator); for (int i = 0; i < terms.length; i++) { - System.out.printf(Locale.ROOT, "%s:%s \t totalTF = %,d \t docFreq = %,d \n", - terms[i].field, terms[i].termtext.utf8ToString(), terms[i].totalTermFreq, terms[i].docFreq); + System.out.printf( + Locale.ROOT, + "%s:%s \t totalTF = %,d \t docFreq = %,d \n", + terms[i].field, + terms[i].termtext.utf8ToString(), + terms[i].totalTermFreq, + terms[i].docFreq); } reader.close(); } - + @SuppressForbidden(reason = "System.out required: command line tool") private static void usage() { - System.out - .println("\n\n" + System.out.println( + "\n\n" + "java org.apache.lucene.misc.HighFreqTerms [-t] [number_terms] [field]\n\t -t: order by totalTermFreq\n\n"); } - - /** - * Returns TermStats[] ordered by the specified comparator - */ - public static TermStats[] getHighFreqTerms(IndexReader reader, int numTerms, String field, Comparator comparator) throws Exception { + + /** Returns TermStats[] ordered by the specified comparator */ + public static TermStats[] getHighFreqTerms( + IndexReader reader, int numTerms, String field, Comparator comparator) + throws Exception { TermStatsQueue tiq = null; - + if (field != null) { Terms terms = MultiTerms.getTerms(reader, field); if (terms == null) { @@ -120,7 +121,7 @@ public class HighFreqTerms { } } } - + TermStats[] result = new TermStats[tiq.size()]; // we want highest first so we read the queue and populate the array // starting at the end and work backwards @@ -131,12 +132,10 @@ public class HighFreqTerms { } return result; } - - /** - * Compares terms by docTermFreq - */ + + /** Compares terms by docTermFreq */ public static final class DocFreqComparator implements Comparator { - + @Override public int compare(TermStats a, TermStats b) { int res = Long.compare(a.docFreq, b.docFreq); @@ -150,11 +149,9 @@ public class HighFreqTerms { } } - /** - * Compares terms by totalTermFreq - */ + /** Compares terms by totalTermFreq */ public static final class TotalTermFreqComparator implements Comparator { - + @Override public int compare(TermStats a, TermStats b) { int res = Long.compare(a.totalTermFreq, b.totalTermFreq); @@ -167,27 +164,26 @@ public class HighFreqTerms { return res; } } - - /** - * Priority queue for TermStats objects - **/ + + /** Priority queue for TermStats objects */ static final class TermStatsQueue extends PriorityQueue { final Comparator comparator; - + TermStatsQueue(int size, Comparator comparator) { super(size); this.comparator = comparator; } - + @Override protected boolean lessThan(TermStats 
termInfoA, TermStats termInfoB) { return comparator.compare(termInfoA, termInfoB) < 0; } - + protected void fill(String field, TermsEnum termsEnum) throws IOException { BytesRef term = null; while ((term = termsEnum.next()) != null) { - insertWithOverflow(new TermStats(field, term, termsEnum.docFreq(), termsEnum.totalTermFreq())); + insertWithOverflow( + new TermStats(field, term, termsEnum.docFreq(), termsEnum.totalTermFreq())); } } } diff --git a/lucene/misc/src/java/org/apache/lucene/misc/IndexMergeTool.java b/lucene/misc/src/java/org/apache/lucene/misc/IndexMergeTool.java index 207283bff48..50b93175d6b 100644 --- a/lucene/misc/src/java/org/apache/lucene/misc/IndexMergeTool.java +++ b/lucene/misc/src/java/org/apache/lucene/misc/IndexMergeTool.java @@ -16,34 +16,33 @@ */ package org.apache.lucene.misc; +import java.nio.file.Paths; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.index.MergePolicy; +import org.apache.lucene.misc.store.HardlinkCopyDirectoryWrapper; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FSDirectory; -import org.apache.lucene.misc.store.HardlinkCopyDirectoryWrapper; import org.apache.lucene.util.SuppressForbidden; -import java.nio.file.Paths; - /** - * Merges indices specified on the command line into the index - * specified as the first command line argument. + * Merges indices specified on the command line into the index specified as the first command line + * argument. */ @SuppressForbidden(reason = "System.out required: command line tool") public class IndexMergeTool { - + static final String USAGE = - "Usage: IndexMergeTool [OPTION...] [index3] ...\n" + - "Merges source indexes 'index1' .. 'indexN' into 'mergedIndex'\n" + - "\n" + - "OPTIONS:\n" + - " -merge-policy ClassName specifies MergePolicy class (must be in CLASSPATH).The default is\n" + - " 'org.apache.lucene.index.TieredMergePolicy.TieredMergePolicy'\n" + - " -max-segments N force-merge's the index to a maximum of N segments. Default is\n" + - " to execute only the merges according to the merge policy.\n" + - " -verbose print additional details.\n"; + "Usage: IndexMergeTool [OPTION...] [index3] ...\n" + + "Merges source indexes 'index1' .. 'indexN' into 'mergedIndex'\n" + + "\n" + + "OPTIONS:\n" + + " -merge-policy ClassName specifies MergePolicy class (must be in CLASSPATH).The default is\n" + + " 'org.apache.lucene.index.TieredMergePolicy.TieredMergePolicy'\n" + + " -max-segments N force-merge's the index to a maximum of N segments. 
Default is\n" + + " to execute only the merges according to the merge policy.\n" + + " -verbose print additional details.\n"; @SuppressForbidden(reason = "System.err required (verbose mode): command line tool") static class Options { @@ -62,10 +61,11 @@ public class IndexMergeTool { if (args[index] == "--") { break; } - switch(args[index]) { + switch (args[index]) { case "-merge-policy": String clazzName = args[++index]; - Class clazz = Class.forName(clazzName).asSubclass(MergePolicy.class); + Class clazz = + Class.forName(clazzName).asSubclass(MergePolicy.class); options.config.setMergePolicy(clazz.getConstructor().newInstance()); break; case "-max-segments": @@ -74,7 +74,9 @@ public class IndexMergeTool { case "-verbose": options.config.setInfoStream(System.err); break; - default: throw new IllegalArgumentException("unrecognized option: '" + args[index] + "'\n" + USAGE); + default: + throw new IllegalArgumentException( + "unrecognized option: '" + args[index] + "'\n" + USAGE); } index++; } @@ -102,7 +104,8 @@ public class IndexMergeTool { } // Try to use hardlinks to source segments, if possible. - Directory mergedIndex = new HardlinkCopyDirectoryWrapper(FSDirectory.open(Paths.get(options.mergedIndexPath))); + Directory mergedIndex = + new HardlinkCopyDirectoryWrapper(FSDirectory.open(Paths.get(options.mergedIndexPath))); Directory[] indexes = new Directory[options.indexPaths.length]; for (int i = 0; i < indexes.length; i++) { @@ -121,5 +124,4 @@ public class IndexMergeTool { writer.close(); System.out.println("Done."); } - } diff --git a/lucene/misc/src/java/org/apache/lucene/misc/SweetSpotSimilarity.java b/lucene/misc/src/java/org/apache/lucene/misc/SweetSpotSimilarity.java index 9287a750ef4..6976fca03c6 100644 --- a/lucene/misc/src/java/org/apache/lucene/misc/SweetSpotSimilarity.java +++ b/lucene/misc/src/java/org/apache/lucene/misc/SweetSpotSimilarity.java @@ -19,22 +19,17 @@ package org.apache.lucene.misc; import org.apache.lucene.search.similarities.ClassicSimilarity; /** - *
<p>
- * A similarity with a lengthNorm that provides for a "plateau" of - * equally good lengths, and tf helper functions. - *
</p>
- *
<p>
- * For lengthNorm, A min/max can be specified to define the - * plateau of lengths that should all have a norm of 1.0. - * Below the min, and above the max the lengthNorm drops off in a - * sqrt function. - *
</p>
- *
<p>
- * For tf, baselineTf and hyperbolicTf functions are provided, which - * subclasses can choose between. - *
</p>
+ * A similarity with a lengthNorm that provides for a "plateau" of equally good lengths, and tf + * helper functions. * - * @see A Gnuplot file used to generate some of the visualizations referenced from each function. + *
<p>
For lengthNorm, A min/max can be specified to define the plateau of lengths that should all + * have a norm of 1.0. Below the min, and above the max the lengthNorm drops off in a sqrt function. + * + *
<p>
For tf, baselineTf and hyperbolicTf functions are provided, which subclasses can choose + * between. + * + * @see A Gnuplot file used to generate some of the visualizations + * referenced from each function. */ public class SweetSpotSimilarity extends ClassicSimilarity { @@ -49,7 +44,7 @@ public class SweetSpotSimilarity extends ClassicSimilarity { private float tf_hyper_max = 2.0f; private double tf_hyper_base = 1.3d; private float tf_hyper_xoffset = 10.0f; - + public SweetSpotSimilarity() { super(); } @@ -63,27 +58,27 @@ public class SweetSpotSimilarity extends ClassicSimilarity { tf_min = min; tf_base = base; } - + /** * Sets the function variables for the hyperbolicTf functions * * @param min the minimum tf value to ever be returned (default: 0.0) * @param max the maximum tf value to ever be returned (default: 2.0) - * @param base the base value to be used in the exponential for the hyperbolic function (default: 1.3) + * @param base the base value to be used in the exponential for the hyperbolic function (default: + * 1.3) * @param xoffset the midpoint of the hyperbolic function (default: 10.0) * @see #hyperbolicTf */ - public void setHyperbolicTfFactors(float min, float max, - double base, float xoffset) { + public void setHyperbolicTfFactors(float min, float max, double base, float xoffset) { tf_hyper_min = min; tf_hyper_max = max; tf_hyper_base = base; tf_hyper_xoffset = xoffset; } - + /** - * Sets the default function variables used by lengthNorm when no field - * specific variables have been set. + * Sets the default function variables used by lengthNorm when no field specific variables have + * been set. * * @see #lengthNorm */ @@ -95,41 +90,28 @@ public class SweetSpotSimilarity extends ClassicSimilarity { } /** - * Implemented as: - * + * Implemented as: * 1/sqrt( steepness * (abs(x-min) + abs(x-max) - (max-min)) + 1 ) * . * - *

- * This degrades to 1/sqrt(x) when min and max are both 1 and
- * steepness is 0.5
- * </p>
+ * <p>This degrades to 1/sqrt(x) when min and max are both 1 and steepness is 0.5
 *
- * <p>
- * :TODO: potential optimization is to just flat out return 1.0f if numTerms
- * is between min and max.
- * </p>
+ * <p>
:TODO: potential optimization is to just flat out return 1.0f if numTerms is between min and + * max. * * @see #setLengthNormFactors - * @see An SVG visualization of this function + * @see An SVG visualization of this function */ @Override public float lengthNorm(int numTerms) { final int l = ln_min; final int h = ln_max; final float s = ln_steep; - + return (float) - (1.0f / - Math.sqrt - ( - ( - s * - (float)(Math.abs(numTerms - l) + Math.abs(numTerms - h) - (h-l)) - ) - + 1.0f - ) - ); + (1.0f + / Math.sqrt( + (s * (float) (Math.abs(numTerms - l) + Math.abs(numTerms - h) - (h - l))) + 1.0f)); } /** @@ -141,43 +123,34 @@ public class SweetSpotSimilarity extends ClassicSimilarity { public float tf(float freq) { return baselineTf(freq); } - + /** - * Implemented as: - * + * Implemented as: * (x <= min) ? base : sqrt(x+(base**2)-min) - * - * ...but with a special case check for 0. - *

- * This degrades to sqrt(x) when min and base are both 0
- * </p>
+ * ...but with a special case check for 0.
+ *
+ * <p>
This degrades to sqrt(x) when min and base are both 0
 *
 * @see #setBaselineTfFactors
- * @see <a href="doc-files/ss.baselineTf.svg">An SVG visualization of this function</a>
+ * @see <a href="doc-files/ss.baselineTf.svg">An SVG visualization of this function</a>
 */
 public float baselineTf(float freq) {
   if (0.0f == freq) return 0.0f;
-
-   return (freq <= tf_min)
-     ? tf_base
-     : (float)Math.sqrt(freq + (tf_base * tf_base) - tf_min);
+
+   return (freq <= tf_min) ? tf_base : (float) Math.sqrt(freq + (tf_base * tf_base) - tf_min);
 }

 /**
- * Uses a hyperbolic tangent function that allows for a hard max...
- * <code>
- *
+ * Uses a hyperbolic tangent function that allows for a hard max... <code>
 * tf(x)=min+(max-min)/2*(((base**(x-xoffset)-base**-(x-xoffset))/(base**(x-xoffset)+base**-(x-xoffset)))+1)
 * </code>
 *
- * <p>

- * This code is provided as a convenience for subclasses that want
- * to use a hyperbolic tf function.
- * </p>
+ *
+ * <p>
This code is provided as a convenience for subclasses that want to use a hyperbolic tf + * function. * * @see #setHyperbolicTfFactors - * @see An SVG visualization of this function + * @see An SVG visualization of this function */ public float hyperbolicTf(float freq) { if (0.0f == freq) return 0.0f; @@ -186,36 +159,50 @@ public class SweetSpotSimilarity extends ClassicSimilarity { final float max = tf_hyper_max; final double base = tf_hyper_base; final float xoffset = tf_hyper_xoffset; - final double x = (double)(freq - xoffset); - - final float result = min + - (float)( - (max-min) / 2.0f - * - ( - ( ( Math.pow(base,x) - Math.pow(base,-x) ) - / ( Math.pow(base,x) + Math.pow(base,-x) ) - ) - + 1.0d - ) - ); + final double x = (double) (freq - xoffset); + + final float result = + min + + (float) + ((max - min) + / 2.0f + * (((Math.pow(base, x) - Math.pow(base, -x)) + / (Math.pow(base, x) + Math.pow(base, -x))) + + 1.0d)); return Float.isNaN(result) ? max : result; - } public String toString() { StringBuilder sb = new StringBuilder(); sb.append("SweetSpotSimilarity") - .append('(').append("ln_min=").append(ln_min).append(", ") - .append("ln_max=").append(ln_max).append(", ") - .append("ln_steep=").append(ln_steep).append(", ") - .append("tf_base=").append(tf_base).append(", ") - .append("tf_min=").append(tf_min).append(", ") - .append("tf_hyper_min=").append(tf_hyper_min).append(", ") - .append("tf_hyper_max=").append(tf_hyper_max).append(", ") - .append("tf_hyper_base=").append(tf_hyper_base).append(", ") - .append("tf_hyper_xoffset=").append(tf_hyper_xoffset) + .append('(') + .append("ln_min=") + .append(ln_min) + .append(", ") + .append("ln_max=") + .append(ln_max) + .append(", ") + .append("ln_steep=") + .append(ln_steep) + .append(", ") + .append("tf_base=") + .append(tf_base) + .append(", ") + .append("tf_min=") + .append(tf_min) + .append(", ") + .append("tf_hyper_min=") + .append(tf_hyper_min) + .append(", ") + .append("tf_hyper_max=") + .append(tf_hyper_max) + .append(", ") + .append("tf_hyper_base=") + .append(tf_hyper_base) + .append(", ") + .append("tf_hyper_xoffset=") + .append(tf_hyper_xoffset) .append(")"); return sb.toString(); } diff --git a/lucene/misc/src/java/org/apache/lucene/misc/TermStats.java b/lucene/misc/src/java/org/apache/lucene/misc/TermStats.java index 5c4f5698ba1..44af2608469 100644 --- a/lucene/misc/src/java/org/apache/lucene/misc/TermStats.java +++ b/lucene/misc/src/java/org/apache/lucene/misc/TermStats.java @@ -18,29 +18,31 @@ package org.apache.lucene.misc; import org.apache.lucene.util.BytesRef; -/** - * Holder for a term along with its statistics - * ({@link #docFreq} and {@link #totalTermFreq}). - */ +/** Holder for a term along with its statistics ({@link #docFreq} and {@link #totalTermFreq}). 
*/ public final class TermStats { public BytesRef termtext; public String field; public int docFreq; public long totalTermFreq; - + TermStats(String field, BytesRef termtext, int df, long tf) { this.termtext = BytesRef.deepCopyOf(termtext); this.field = field; this.docFreq = df; this.totalTermFreq = tf; } - + String getTermText() { return termtext.utf8ToString(); } @Override public String toString() { - return("TermStats: term=" + termtext.utf8ToString() + " docFreq=" + docFreq + " totalTermFreq=" + totalTermFreq); + return ("TermStats: term=" + + termtext.utf8ToString() + + " docFreq=" + + docFreq + + " totalTermFreq=" + + totalTermFreq); } -} \ No newline at end of file +} diff --git a/lucene/misc/src/java/org/apache/lucene/misc/document/LazyDocument.java b/lucene/misc/src/java/org/apache/lucene/misc/document/LazyDocument.java index ef1d6fe4630..ee3c654e1df 100644 --- a/lucene/misc/src/java/org/apache/lucene/misc/document/LazyDocument.java +++ b/lucene/misc/src/java/org/apache/lucene/misc/document/LazyDocument.java @@ -24,7 +24,6 @@ import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.document.Document; @@ -34,9 +33,10 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.IndexableFieldType; import org.apache.lucene.util.BytesRef; -/** Defers actually loading a field's value until you ask - * for it. You must not use the returned Field instances - * after the provided reader has been closed. +/** + * Defers actually loading a field's value until you ask for it. You must not use the returned Field + * instances after the provided reader has been closed. + * * @see #getField */ public class LazyDocument { @@ -46,7 +46,7 @@ public class LazyDocument { // null until first field is loaded private Document doc; - private Map> fields = new HashMap<>(); + private Map> fields = new HashMap<>(); private Set fieldNames = new HashSet<>(); public LazyDocument(IndexReader reader, int docID) { @@ -55,28 +55,23 @@ public class LazyDocument { } /** - * Creates a StorableField whose value will be lazy loaded if and - * when it is used. - *

- * NOTE: This method must be called once for each value of the field
- * name specified in sequence that the values exist. This method may not be
- * used to generate multiple, lazy, StorableField instances referring to
- * the same underlying StorableField instance.
- * </p>
- * <p>
- * The lazy loading of field values from all instances of StorableField
- * objects returned by this method are all backed by a single StoredDocument
- * per LazyDocument instance.
- * </p>
+ * Creates a StorableField whose value will be lazy loaded if and when it is used.
+ *
+ * <p>NOTE: This method must be called once for each value of the field name specified in
+ * sequence that the values exist. This method may not be used to generate multiple, lazy,
+ * StorableField instances referring to the same underlying StorableField instance.
+ *
+ * <p>
The lazy loading of field values from all instances of StorableField objects returned by + * this method are all backed by a single StoredDocument per LazyDocument instance. */ - public IndexableField getField(FieldInfo fieldInfo) { + public IndexableField getField(FieldInfo fieldInfo) { fieldNames.add(fieldInfo.name); List values = fields.get(fieldInfo.number); if (null == values) { values = new ArrayList<>(); fields.put(fieldInfo.number, values); - } + } LazyField value = new LazyField(fieldInfo.name, fieldInfo.number); values.add(value); @@ -91,9 +86,10 @@ public class LazyDocument { return value; } - /** + /** * non-private for test only access - * @lucene.internal + * + * @lucene.internal */ synchronized Document getDocument() { if (doc == null) { @@ -112,10 +108,10 @@ public class LazyDocument { List lazyValues = fields.get(fieldNum); IndexableField[] realValues = d.getFields(name); - - assert realValues.length <= lazyValues.size() - : "More lazy values then real values for field: " + name; - + + assert realValues.length <= lazyValues.size() + : "More lazy values then real values for field: " + name; + for (int i = 0; i < lazyValues.size(); i++) { LazyField f = lazyValues.get(i); if (null != f) { @@ -124,10 +120,10 @@ public class LazyDocument { } } - - /** + /** * Lazy-loaded field - * @lucene.internal + * + * @lucene.internal */ public class LazyField implements IndexableField { private String name; @@ -139,9 +135,10 @@ public class LazyDocument { this.fieldNum = fieldNum; } - /** + /** * non-private for test only access - * @lucene.internal + * + * @lucene.internal */ public boolean hasBeenLoaded() { return null != realValue; @@ -152,8 +149,8 @@ public class LazyDocument { fetchRealValues(name, fieldNum); } assert hasBeenLoaded() : "field value was not lazy loaded"; - assert realValue.name().equals(name()) : - "realvalue name != name: " + realValue.name() + " != " + name(); + assert realValue.name().equals(name()) + : "realvalue name != name: " + realValue.name() + " != " + name(); return realValue; } diff --git a/lucene/misc/src/java/org/apache/lucene/misc/document/package-info.java b/lucene/misc/src/java/org/apache/lucene/misc/document/package-info.java index 0e37415bd6c..ca97b99d39a 100644 --- a/lucene/misc/src/java/org/apache/lucene/misc/document/package-info.java +++ b/lucene/misc/src/java/org/apache/lucene/misc/document/package-info.java @@ -16,4 +16,4 @@ */ /** Misc extensions of the Document/Field API. */ -package org.apache.lucene.misc.document; \ No newline at end of file +package org.apache.lucene.misc.document; diff --git a/lucene/misc/src/java/org/apache/lucene/misc/index/IndexSplitter.java b/lucene/misc/src/java/org/apache/lucene/misc/index/IndexSplitter.java index 93888407e14..20e8a8a0c46 100644 --- a/lucene/misc/src/java/org/apache/lucene/misc/index/IndexSplitter.java +++ b/lucene/misc/src/java/org/apache/lucene/misc/index/IndexSplitter.java @@ -27,7 +27,6 @@ import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Locale; - import org.apache.lucene.index.SegmentCommitInfo; import org.apache.lucene.index.SegmentInfo; import org.apache.lucene.index.SegmentInfos; @@ -35,21 +34,15 @@ import org.apache.lucene.store.FSDirectory; import org.apache.lucene.util.SuppressForbidden; /** - * Command-line tool that enables listing segments in an - * index, copying specific segments to another index, and - * deleting segments from an index. 
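For readers unfamiliar with the tool being reformatted below, a minimal programmatic sketch of its API (paths and segment names here are invented; the same operations are reachable from the command line in main()):

    import java.nio.file.Path;
    import java.nio.file.Paths;
    import org.apache.lucene.misc.index.IndexSplitter;

    public class IndexSplitterDemo {
      public static void main(String[] args) throws Exception {
        Path src = Paths.get("/tmp/src-index");   // invented path
        Path dest = Paths.get("/tmp/dest-index"); // invented path

        IndexSplitter is = new IndexSplitter(src);
        is.listSegments();                         // print each segment's name and size
        is.split(dest, new String[] {"_0", "_3"}); // file-level copy of whole segments
      }
    }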
+ * Command-line tool that enables listing segments in an index, copying specific segments to another
+ * index, and deleting segments from an index.
 *
- * <p>This tool does file-level copying of segments files.
- * This means it's unable to split apart a single segment
- * into multiple segments. For example if your index is a
- * single segment, this tool won't help. Also, it does basic
- * file-level copying (using simple
- * File{In,Out}putStream) so it will not work with non
- * FSDirectory Directory impls.
+ *
+ * <p>
This tool does file-level copying of segments files. This means it's unable to split apart a + * single segment into multiple segments. For example if your index is a single segment, this tool + * won't help. Also, it does basic file-level copying (using simple File{In,Out}putStream) so it + * will not work with non FSDirectory Directory impls. * - * @lucene.experimental You can easily - * accidentally remove segments from your index so be - * careful! + * @lucene.experimental You can easily accidentally remove segments from your index so be careful! */ public class IndexSplitter { public final SegmentInfos infos; @@ -61,18 +54,15 @@ public class IndexSplitter { @SuppressForbidden(reason = "System.out required: command line tool") public static void main(String[] args) throws Exception { if (args.length < 2) { - System.err - .println("Usage: IndexSplitter -l (list the segments and their sizes)"); + System.err.println("Usage: IndexSplitter -l (list the segments and their sizes)"); System.err.println("IndexSplitter +"); - System.err - .println("IndexSplitter -d (delete the following segments)"); + System.err.println("IndexSplitter -d (delete the following segments)"); return; } Path srcDir = Paths.get(args[0]); IndexSplitter is = new IndexSplitter(srcDir); if (!Files.exists(srcDir)) { - throw new Exception("srcdir:" + srcDir.toAbsolutePath() - + " doesn't exist"); + throw new Exception("srcdir:" + srcDir.toAbsolutePath() + " doesn't exist"); } if (args[1].equals("-l")) { is.listSegments(); @@ -91,7 +81,7 @@ public class IndexSplitter { is.split(targetDir, segs.toArray(new String[0])); } } - + public IndexSplitter(Path dir) throws IOException { this.dir = dir; fsDir = FSDirectory.open(dir); @@ -100,7 +90,8 @@ public class IndexSplitter { @SuppressForbidden(reason = "System.out required: command line tool") public void listSegments() throws IOException { - DecimalFormat formatter = new DecimalFormat("###,###.###", DecimalFormatSymbols.getInstance(Locale.ROOT)); + DecimalFormat formatter = + new DecimalFormat("###,###.###", DecimalFormatSymbols.getInstance(Locale.ROOT)); for (int x = 0; x < infos.size(); x++) { SegmentCommitInfo info = infos.info(x); String sizeStr = formatter.format(info.sizeInBytes()); @@ -110,8 +101,7 @@ public class IndexSplitter { private SegmentCommitInfo getInfo(String name) { for (int x = 0; x < infos.size(); x++) { - if (name.equals(infos.info(x).info.name)) - return infos.info(x); + if (name.equals(infos.info(x).info.name)) return infos.info(x); } return null; } @@ -134,11 +124,28 @@ public class IndexSplitter { SegmentCommitInfo infoPerCommit = getInfo(n); SegmentInfo info = infoPerCommit.info; // Same info just changing the dir: - SegmentInfo newInfo = new SegmentInfo(destFSDir, info.getVersion(), info.getMinVersion(), info.name, info.maxDoc(), - info.getUseCompoundFile(), info.getCodec(), info.getDiagnostics(), info.getId(), Collections.emptyMap(), null); - destInfos.add(new SegmentCommitInfo(newInfo, infoPerCommit.getDelCount(), infoPerCommit.getSoftDelCount(), - infoPerCommit.getDelGen(), infoPerCommit.getFieldInfosGen(), - infoPerCommit.getDocValuesGen(), infoPerCommit.getId())); + SegmentInfo newInfo = + new SegmentInfo( + destFSDir, + info.getVersion(), + info.getMinVersion(), + info.name, + info.maxDoc(), + info.getUseCompoundFile(), + info.getCodec(), + info.getDiagnostics(), + info.getId(), + Collections.emptyMap(), + null); + destInfos.add( + new SegmentCommitInfo( + newInfo, + infoPerCommit.getDelCount(), + infoPerCommit.getSoftDelCount(), + 
infoPerCommit.getDelGen(), + infoPerCommit.getFieldInfosGen(), + infoPerCommit.getDocValuesGen(), + infoPerCommit.getId())); // now copy files over Collection files = infoPerCommit.files(); for (final String srcName : files) { diff --git a/lucene/misc/src/java/org/apache/lucene/misc/index/MultiPassIndexSplitter.java b/lucene/misc/src/java/org/apache/lucene/misc/index/MultiPassIndexSplitter.java index 2f4c001ae15..5c40c42fa7c 100644 --- a/lucene/misc/src/java/org/apache/lucene/misc/index/MultiPassIndexSplitter.java +++ b/lucene/misc/src/java/org/apache/lucene/misc/index/MultiPassIndexSplitter.java @@ -22,7 +22,6 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; import java.util.List; - import org.apache.lucene.index.BaseCompositeReader; import org.apache.lucene.index.CodecReader; import org.apache.lucene.index.DirectoryReader; @@ -36,37 +35,36 @@ import org.apache.lucene.index.MultiReader; import org.apache.lucene.index.SlowCodecReaderWrapper; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FSDirectory; -import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.Bits; +import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.SuppressForbidden; /** - * This tool splits input index into multiple equal parts. The method employed - * here uses {@link IndexWriter#addIndexes(CodecReader[])} where the input data - * comes from the input index with artificially applied deletes to the document - * id-s that fall outside the selected partition. - *

- * <p>Note 1: Deletes are only applied to a buffered list of deleted docs and
- * don't affect the source index - this tool works also with read-only indexes.
- * <p>Note 2: the disadvantage of this tool is that source index needs to be
- * read as many times as there are parts to be created, hence the name of this
- * tool.
+ * This tool splits input index into multiple equal parts. The method employed here uses {@link
+ * IndexWriter#addIndexes(CodecReader[])} where the input data comes from the input index with
+ * artificially applied deletes to the document id-s that fall outside the selected partition.
 *
- * <p>NOTE: this tool is unaware of documents added
- * atomically via {@link IndexWriter#addDocuments} or {@link
- * IndexWriter#updateDocuments}, which means it can easily
- * break up such document groups.
+ *
+ * <p>Note 1: Deletes are only applied to a buffered list of deleted docs and don't affect the
+ * source index - this tool works also with read-only indexes.
+ *
+ * <p>Note 2: the disadvantage of this tool is that source index needs to be read as many times as
+ * there are parts to be created, hence the name of this tool.
+ *
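A sketch of the split(...) entry point described above (directories and part count invented; the -seq flag of the command-line form maps to the boolean argument):

    import java.nio.file.Paths;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.misc.index.MultiPassIndexSplitter;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;

    public class MultiPassSplitDemo {
      public static void main(String[] args) throws Exception {
        try (IndexReader in = DirectoryReader.open(FSDirectory.open(Paths.get("/tmp/src")))) {
          Directory[] outputs = {
            FSDirectory.open(Paths.get("/tmp/part1")),
            FSDirectory.open(Paths.get("/tmp/part2")),
            FSDirectory.open(Paths.get("/tmp/part3")),
          };
          // seq=true: equal increasing doc-id ranges; seq=false: deterministic round-robin
          new MultiPassIndexSplitter().split(in, outputs, true);
        }
      }
    }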
NOTE: this tool is unaware of documents added atomically via {@link + * IndexWriter#addDocuments} or {@link IndexWriter#updateDocuments}, which means it can easily break + * up such document groups. */ @SuppressForbidden(reason = "System.out required: command line tool") public class MultiPassIndexSplitter { - + /** * Split source index into multiple parts. - * @param in source index, can have deletions, can have - * multiple segments (or multiple readers). + * + * @param in source index, can have deletions, can have multiple segments (or multiple readers). * @param outputs list of directories where the output parts will be stored. - * @param seq if true, then the source index will be split into equal - * increasing ranges of document id-s. If false, source document id-s will be - * assigned in a deterministic round-robin fashion to one of the output splits. + * @param seq if true, then the source index will be split into equal increasing ranges of + * document id-s. If false, source document id-s will be assigned in a deterministic + * round-robin fashion to one of the output splits. * @throws IOException If there is a low-level I/O error */ public void split(IndexReader in, Directory[] outputs, boolean seq) throws IOException { @@ -107,8 +105,8 @@ public class MultiPassIndexSplitter { } } } - IndexWriter w = new IndexWriter(outputs[i], new IndexWriterConfig(null) - .setOpenMode(OpenMode.CREATE)); + IndexWriter w = + new IndexWriter(outputs[i], new IndexWriterConfig(null).setOpenMode(OpenMode.CREATE)); System.err.println("Writing part " + (i + 1) + " ..."); // pass the subreaders directly, as our wrapper's numDocs/hasDeletetions are not up-to-date final List sr = input.getSequentialSubReadersWrapper(); @@ -117,11 +115,12 @@ public class MultiPassIndexSplitter { } System.err.println("Done."); } - + @SuppressWarnings("deprecation") public static void main(String[] args) throws Exception { if (args.length < 5) { - System.err.println("Usage: MultiPassIndexSplitter -out -num [-seq] [ -num [-seq] [ { + + /** This class emulates deletions on the underlying index. */ + private static final class FakeDeleteIndexReader + extends BaseCompositeReader { public FakeDeleteIndexReader(IndexReader reader) throws IOException { super(initSubReaders(reader)); } - - private static FakeDeleteLeafIndexReader[] initSubReaders(IndexReader reader) throws IOException { + + private static FakeDeleteLeafIndexReader[] initSubReaders(IndexReader reader) + throws IOException { final List leaves = reader.leaves(); final FakeDeleteLeafIndexReader[] subs = new FakeDeleteLeafIndexReader[leaves.size()]; int i = 0; @@ -201,13 +200,13 @@ public class MultiPassIndexSplitter { } return subs; } - + public void deleteDocument(int docID) { final int i = readerIndex(docID); getSequentialSubReaders().get(i).deleteDocument(docID - readerBase(i)); } - public void undeleteAll() { + public void undeleteAll() { for (FakeDeleteLeafIndexReader r : getSequentialSubReaders()) { r.undeleteAll(); } @@ -228,7 +227,7 @@ public class MultiPassIndexSplitter { // no need to override numDocs/hasDeletions, // as we pass the subreaders directly to IW.addIndexes(). 
} - + private static final class FakeDeleteLeafIndexReader extends FilterCodecReader { FixedBitSet liveDocs; @@ -242,7 +241,7 @@ public class MultiPassIndexSplitter { return liveDocs.cardinality(); } - public void undeleteAll() { + public void undeleteAll() { final int maxDoc = in.maxDoc(); liveDocs = new FixedBitSet(in.maxDoc()); if (in.hasDeletions()) { diff --git a/lucene/misc/src/java/org/apache/lucene/misc/index/PKIndexSplitter.java b/lucene/misc/src/java/org/apache/lucene/misc/index/PKIndexSplitter.java index aef5c13e2e0..789db6646d6 100644 --- a/lucene/misc/src/java/org/apache/lucene/misc/index/PKIndexSplitter.java +++ b/lucene/misc/src/java/org/apache/lucene/misc/index/PKIndexSplitter.java @@ -18,7 +18,6 @@ package org.apache.lucene.misc.index; import java.io.IOException; import java.util.List; - import org.apache.lucene.index.CodecReader; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.FilterCodecReader; @@ -40,10 +39,7 @@ import org.apache.lucene.util.Bits; import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.IOUtils; -/** - * Split an index based on a {@link Query}. - */ - +/** Split an index based on a {@link Query}. */ public class PKIndexSplitter { private final Query docsInFirstIndex; private final Directory input; @@ -51,21 +47,26 @@ public class PKIndexSplitter { private final Directory dir2; private final IndexWriterConfig config1; private final IndexWriterConfig config2; - + /** - * Split an index based on a {@link Query}. All documents that match the query - * are sent to dir1, remaining ones to dir2. + * Split an index based on a {@link Query}. All documents that match the query are sent to dir1, + * remaining ones to dir2. */ public PKIndexSplitter(Directory input, Directory dir1, Directory dir2, Query docsInFirstIndex) { this(input, dir1, dir2, docsInFirstIndex, newDefaultConfig(), newDefaultConfig()); } - + private static IndexWriterConfig newDefaultConfig() { return new IndexWriterConfig(null).setOpenMode(OpenMode.CREATE); } - - public PKIndexSplitter(Directory input, Directory dir1, - Directory dir2, Query docsInFirstIndex, IndexWriterConfig config1, IndexWriterConfig config2) { + + public PKIndexSplitter( + Directory input, + Directory dir1, + Directory dir2, + Query docsInFirstIndex, + IndexWriterConfig config1, + IndexWriterConfig config2) { this.input = input; this.dir1 = dir1; this.dir2 = dir2; @@ -73,23 +74,32 @@ public class PKIndexSplitter { this.config1 = config1; this.config2 = config2; } - + /** - * Split an index based on a given primary key term - * and a 'middle' term. If the middle term is present, it's - * sent to dir2. + * Split an index based on a given primary key term and a 'middle' term. If the middle term is + * present, it's sent to dir2. 
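A sketch of the 'middle term' constructor just documented (field and term values invented):

    import java.nio.file.Paths;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.misc.index.PKIndexSplitter;
    import org.apache.lucene.store.FSDirectory;

    public class PKSplitDemo {
      public static void main(String[] args) throws Exception {
        // Documents whose "id" term sorts below "m" are written to dir1;
        // "m" itself and everything above it goes to dir2.
        PKIndexSplitter splitter =
            new PKIndexSplitter(
                FSDirectory.open(Paths.get("/tmp/input")),
                FSDirectory.open(Paths.get("/tmp/dir1")),
                FSDirectory.open(Paths.get("/tmp/dir2")),
                new Term("id", "m"));
        splitter.split();
      }
    }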
*/ public PKIndexSplitter(Directory input, Directory dir1, Directory dir2, Term midTerm) { - this(input, dir1, dir2, - new TermRangeQuery(midTerm.field(), null, midTerm.bytes(), true, false)); + this( + input, dir1, dir2, new TermRangeQuery(midTerm.field(), null, midTerm.bytes(), true, false)); } - - public PKIndexSplitter(Directory input, Directory dir1, - Directory dir2, Term midTerm, IndexWriterConfig config1, IndexWriterConfig config2) { - this(input, dir1, dir2, - new TermRangeQuery(midTerm.field(), null, midTerm.bytes(), true, false), config1, config2); + + public PKIndexSplitter( + Directory input, + Directory dir1, + Directory dir2, + Term midTerm, + IndexWriterConfig config1, + IndexWriterConfig config2) { + this( + input, + dir1, + dir2, + new TermRangeQuery(midTerm.field(), null, midTerm.bytes(), true, false), + config1, + config2); } - + public void split() throws IOException { boolean success = false; DirectoryReader reader = DirectoryReader.open(input); @@ -106,15 +116,22 @@ public class PKIndexSplitter { } } } - - private void createIndex(IndexWriterConfig config, Directory target, DirectoryReader reader, Query preserveFilter, boolean negateFilter) throws IOException { + + private void createIndex( + IndexWriterConfig config, + Directory target, + DirectoryReader reader, + Query preserveFilter, + boolean negateFilter) + throws IOException { boolean success = false; final IndexWriter w = new IndexWriter(target, config); try { final IndexSearcher searcher = new IndexSearcher(reader); searcher.setQueryCache(null); preserveFilter = searcher.rewrite(preserveFilter); - final Weight preserveWeight = searcher.createWeight(preserveFilter, ScoreMode.COMPLETE_NO_SCORES, 1); + final Weight preserveWeight = + searcher.createWeight(preserveFilter, ScoreMode.COMPLETE_NO_SCORES, 1); final List leaves = reader.leaves(); final CodecReader[] subReaders = new CodecReader[leaves.size()]; int i = 0; @@ -131,12 +148,13 @@ public class PKIndexSplitter { } } } - + private static class DocumentFilteredLeafIndexReader extends FilterCodecReader { final Bits liveDocs; final int numDocs; - - public DocumentFilteredLeafIndexReader(LeafReaderContext context, Weight preserveWeight, boolean negateFilter) throws IOException { + + public DocumentFilteredLeafIndexReader( + LeafReaderContext context, Weight preserveWeight, boolean negateFilter) throws IOException { // our cast is ok, since we open the Directory. super((CodecReader) context.reader()); final int maxDoc = in.maxDoc(); @@ -165,12 +183,12 @@ public class PKIndexSplitter { this.liveDocs = bits; this.numDocs = bits.cardinality(); } - + @Override public int numDocs() { return numDocs; } - + @Override public Bits getLiveDocs() { return liveDocs; diff --git a/lucene/misc/src/java/org/apache/lucene/misc/index/package-info.java b/lucene/misc/src/java/org/apache/lucene/misc/index/package-info.java index 3db8b5016e9..de44673095e 100644 --- a/lucene/misc/src/java/org/apache/lucene/misc/index/package-info.java +++ b/lucene/misc/src/java/org/apache/lucene/misc/index/package-info.java @@ -16,4 +16,4 @@ */ /** Misc index tools and index support. 
*/ -package org.apache.lucene.misc.index; \ No newline at end of file +package org.apache.lucene.misc.index; diff --git a/lucene/misc/src/java/org/apache/lucene/misc/package-info.java b/lucene/misc/src/java/org/apache/lucene/misc/package-info.java index 6067a6e54aa..3fd5f32c477 100644 --- a/lucene/misc/src/java/org/apache/lucene/misc/package-info.java +++ b/lucene/misc/src/java/org/apache/lucene/misc/package-info.java @@ -15,7 +15,5 @@ * limitations under the License. */ -/** - * Miscellaneous index tools. - */ +/** Miscellaneous index tools. */ package org.apache.lucene.misc; diff --git a/lucene/misc/src/java/org/apache/lucene/misc/search/DiversifiedTopDocsCollector.java b/lucene/misc/src/java/org/apache/lucene/misc/search/DiversifiedTopDocsCollector.java index 5e88cd89914..d38022718b1 100644 --- a/lucene/misc/src/java/org/apache/lucene/misc/search/DiversifiedTopDocsCollector.java +++ b/lucene/misc/src/java/org/apache/lucene/misc/search/DiversifiedTopDocsCollector.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.HashMap; import java.util.Map; import java.util.Stack; - import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.misc.search.DiversifiedTopDocsCollector.ScoreDocKey; @@ -34,39 +33,38 @@ import org.apache.lucene.search.TotalHits; import org.apache.lucene.util.PriorityQueue; /** - * A {@link TopDocsCollector} that controls diversity in results by ensuring no - * more than maxHitsPerKey results from a common source are collected in the - * final results. - * - * An example application might be a product search in a marketplace where no - * more than 3 results per retailer are permitted in search results. - * - *

- * To compare behaviour with other forms of collector, a useful analogy might be
- * the problem of making a compilation album of 1967's top hit records:
+ * A {@link TopDocsCollector} that controls diversity in results by ensuring no more than
+ * maxHitsPerKey results from a common source are collected in the final results.
+ *
+ * <p>An example application might be a product search in a marketplace where no more than 3 results
+ * per retailer are permitted in search results.
+ *
+ * <p>To compare behaviour with other forms of collector, a useful analogy might be the problem of
+ * making a compilation album of 1967's top hit records:
+ *
- * <ol>
- * <li>A vanilla query's results might look like a "Best of the Beatles" album -
- * high quality but not much diversity</li>
- * <li>A GroupingSearch would produce the equivalent of "The 10 top-selling
- * artists of 1967 - some killer and quite a lot of filler"</li>
- * <li>A "diversified" query would be the top 20 hit records of that year - with
- * a max of 3 Beatles hits in order to maintain diversity</li>
- * </ol>
+ * <ol>
+ *   <li>A vanilla query's results might look like a "Best of the Beatles" album - high quality but
+ *       not much diversity
+ *   <li>A GroupingSearch would produce the equivalent of "The 10 top-selling artists of 1967 - some
+ *       killer and quite a lot of filler"
+ *   <li>A "diversified" query would be the top 20 hit records of that year - with a max of 3
+ *       Beatles hits in order to maintain diversity
+ * </ol>
 *
 * This collector improves on the "GroupingSearch" type queries by
 *
- * <ul>
- * <li>Working in one pass over the data</li>
- * <li>Not requiring the client to guess how many groups are required</li>
- * <li>Removing low-scoring "filler" which sits at the end of each group's hits</li>
- * </ul>
+ * <ul>
+ *   <li>Working in one pass over the data
+ *   <li>Not requiring the client to guess how many groups are required
+ *   <li>Removing low-scoring "filler" which sits at the end of each group's hits
+ * </ul>
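A sketch of the getKeys(...) hook that concrete subclasses supply (the field name is invented, and the (numHits, maxHitsPerKey) constructor order is assumed from the assignments visible in this hunk):

    import java.io.IOException;
    import java.io.UncheckedIOException;
    import org.apache.lucene.index.DocValues;
    import org.apache.lucene.index.LeafReaderContext;
    import org.apache.lucene.index.NumericDocValues;
    import org.apache.lucene.misc.search.DiversifiedTopDocsCollector;

    // At most 3 hits per retailer, keyed by a numeric "retailerId" doc-values field.
    public class RetailerDiversifiedCollector extends DiversifiedTopDocsCollector {
      public RetailerDiversifiedCollector(int numHits) {
        super(numHits, 3);
      }

      @Override
      protected NumericDocValues getKeys(LeafReaderContext context) {
        try {
          return DocValues.getNumeric(context.reader(), "retailerId");
        } catch (IOException e) {
          throw new UncheckedIOException(e); // getKeys itself cannot throw IOException
        }
      }
    }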
- * - * This is an abstract class and subclasses have to provide a source of keys for - * documents which is then used to help identify duplicate sources. - * + * + * This is an abstract class and subclasses have to provide a source of keys for documents which is + * then used to help identify duplicate sources. + * * @lucene.experimental - * */ -public abstract class DiversifiedTopDocsCollector extends - TopDocsCollector { +public abstract class DiversifiedTopDocsCollector extends TopDocsCollector { ScoreDocKey spare; private ScoreDocKeyQueue globalQueue; private int numHits; @@ -83,9 +81,7 @@ public abstract class DiversifiedTopDocsCollector extends this.maxNumPerKey = maxHitsPerKey; } - /** - * Get a source of values used for grouping keys - */ + /** Get a source of values used for grouping keys */ protected abstract NumericDocValues getKeys(LeafReaderContext context); @Override @@ -102,10 +98,9 @@ public abstract class DiversifiedTopDocsCollector extends return new TopDocs(new TotalHits(totalHits, TotalHits.Relation.EQUAL_TO), results); } - protected ScoreDocKey insert(ScoreDocKey addition, int docBase, - NumericDocValues keys) throws IOException { - if ((globalQueue.size() >= numHits) - && (globalQueue.lessThan(addition, globalQueue.top()))) { + protected ScoreDocKey insert(ScoreDocKey addition, int docBase, NumericDocValues keys) + throws IOException { + if ((globalQueue.size() >= numHits) && (globalQueue.lessThan(addition, globalQueue.top()))) { // Queue is full and proposed addition is not a globally // competitive score return addition; @@ -176,8 +171,7 @@ public abstract class DiversifiedTopDocsCollector extends } @Override - public LeafCollector getLeafCollector(LeafReaderContext context) - throws IOException { + public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException { final int base = context.docBase; final NumericDocValues keySource = getKeys(context); @@ -219,18 +213,17 @@ public abstract class DiversifiedTopDocsCollector extends @Override protected final boolean lessThan(ScoreDocKey hitA, ScoreDocKey hitB) { - if (hitA.score == hitB.score) + if (hitA.score == hitB.score) { return hitA.doc > hitB.doc; - else + } else { return hitA.score < hitB.score; + } } } - // - /** - * An extension to ScoreDoc that includes a key used for grouping purposes - */ - static public class ScoreDocKey extends ScoreDoc { + // + /** An extension to ScoreDoc that includes a key used for grouping purposes */ + public static class ScoreDocKey extends ScoreDoc { Long key; protected ScoreDocKey(int doc, float score) { @@ -245,7 +238,5 @@ public abstract class DiversifiedTopDocsCollector extends public String toString() { return "key:" + key + " doc=" + doc + " s=" + score; } - } - } diff --git a/lucene/misc/src/java/org/apache/lucene/misc/search/DocValuesStats.java b/lucene/misc/src/java/org/apache/lucene/misc/search/DocValuesStats.java index 323c1e45740..c7c5d192e74 100644 --- a/lucene/misc/src/java/org/apache/lucene/misc/search/DocValuesStats.java +++ b/lucene/misc/src/java/org/apache/lucene/misc/search/DocValuesStats.java @@ -17,7 +17,6 @@ package org.apache.lucene.misc.search; import java.io.IOException; - import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.SortedDocValues; @@ -44,17 +43,17 @@ public abstract class DocValuesStats { } /** - * Called after {@link #accumulate(int)} was processed and verified that the document has a value for - * the field. 
Implementations should update the statistics based on the value of the current document. + * Called after {@link #accumulate(int)} was processed and verified that the document has a value + * for the field. Implementations should update the statistics based on the value of the current + * document. * - * @param count - * the updated number of documents with value for this field. + * @param count the updated number of documents with value for this field. */ protected abstract void doAccumulate(int count) throws IOException; /** - * Initializes this object with the given reader context. Returns whether stats can be computed for this segment (i.e. - * it does have the requested DocValues field). + * Initializes this object with the given reader context. Returns whether stats can be computed + * for this segment (i.e. it does have the requested DocValues field). */ protected abstract boolean init(LeafReaderContext context) throws IOException; @@ -100,7 +99,7 @@ public abstract class DocValuesStats { } /** Holds statistics for a numeric DocValues field. */ - public static abstract class NumericDocValuesStats extends DocValuesStats { + public abstract static class NumericDocValuesStats extends DocValuesStats { protected double mean = 0.0; protected double variance = 0.0; @@ -138,7 +137,10 @@ public abstract class DocValuesStats { return Math.sqrt(variance()); } - /** Returns the sum of values of the field. Note that if the values are large, the {@code sum} might overflow. */ + /** + * Returns the sum of values of the field. Note that if the values are large, the {@code sum} + * might overflow. + */ public abstract T sum(); } @@ -176,7 +178,8 @@ public abstract class DocValuesStats { /** Holds DocValues statistics for a numeric field storing {@code double} values. */ public static final class DoubleDocValuesStats extends NumericDocValuesStats { - // To avoid boxing 'double' to 'Double' while the sum is computed, declare it as private variable. + // To avoid boxing 'double' to 'Double' while the sum is computed, declare it as private + // variable. private double sum = 0; public DoubleDocValuesStats(String field) { @@ -205,7 +208,8 @@ public abstract class DocValuesStats { } /** Holds statistics for a sorted-numeric DocValues field. */ - public static abstract class SortedNumericDocValuesStats extends DocValuesStats { + public abstract static class SortedNumericDocValuesStats + extends DocValuesStats { protected long valuesCount = 0; protected double mean = 0.0; @@ -249,7 +253,10 @@ public abstract class DocValuesStats { return valuesCount; } - /** Returns the sum of values of the field. Note that if the values are large, the {@code sum} might overflow. */ + /** + * Returns the sum of values of the field. Note that if the values are large, the {@code sum} + * might overflow. + */ public abstract T sum(); } @@ -276,7 +283,8 @@ public abstract class DocValuesStats { } sum += val; double oldMean = mean; - // for correct "running average computation", increase valuesCount with each value, rather than once before the + // for correct "running average computation", increase valuesCount with each value, rather + // than once before the // loop stats. ++valuesCount; mean += (val - mean) / valuesCount; @@ -293,7 +301,8 @@ public abstract class DocValuesStats { /** Holds DocValues statistics for a sorted-numeric field storing {@code double} values. 
*/ public static final class SortedDoubleDocValuesStats extends SortedNumericDocValuesStats { - // To avoid boxing 'double' to 'Double' while the sum is computed, declare it as private variable. + // To avoid boxing 'double' to 'Double' while the sum is computed, declare it as private + // variable. private double sum = 0; public SortedDoubleDocValuesStats(String field) { @@ -313,7 +322,8 @@ public abstract class DocValuesStats { } sum += val; double oldMean = mean; - // for correct "running average computation", increase valuesCount with each value, rather than once before the + // for correct "running average computation", increase valuesCount with each value, rather + // than once before the // loop stats. ++valuesCount; mean += (val - mean) / valuesCount; @@ -405,5 +415,4 @@ public abstract class DocValuesStats { } } } - -} \ No newline at end of file +} diff --git a/lucene/misc/src/java/org/apache/lucene/misc/search/DocValuesStatsCollector.java b/lucene/misc/src/java/org/apache/lucene/misc/search/DocValuesStatsCollector.java index e429ba43515..29f31651e5d 100644 --- a/lucene/misc/src/java/org/apache/lucene/misc/search/DocValuesStatsCollector.java +++ b/lucene/misc/src/java/org/apache/lucene/misc/search/DocValuesStatsCollector.java @@ -17,7 +17,6 @@ package org.apache.lucene.misc.search; import java.io.IOException; - import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Collector; import org.apache.lucene.search.LeafCollector; @@ -29,7 +28,9 @@ public class DocValuesStatsCollector implements Collector { private final DocValuesStats stats; - /** Creates a collector to compute statistics for a DocValues field using the given {@code stats}. */ + /** + * Creates a collector to compute statistics for a DocValues field using the given {@code stats}. + */ public DocValuesStatsCollector(DocValuesStats stats) { this.stats = stats; } @@ -38,9 +39,11 @@ public class DocValuesStatsCollector implements Collector { public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException { boolean shouldProcess = stats.init(context); if (!shouldProcess) { - // Stats cannot be computed for this segment, therefore consider all matching documents as a 'miss'. + // Stats cannot be computed for this segment, therefore consider all matching documents as a + // 'miss'. 
return new LeafCollector() { - @Override public void setScorer(Scorable scorer) throws IOException {} + @Override + public void setScorer(Scorable scorer) throws IOException {} @Override public void collect(int doc) throws IOException { @@ -51,7 +54,8 @@ public class DocValuesStatsCollector implements Collector { } return new LeafCollector() { - @Override public void setScorer(Scorable scorer) throws IOException {} + @Override + public void setScorer(Scorable scorer) throws IOException {} @Override public void collect(int doc) throws IOException { @@ -64,5 +68,4 @@ public class DocValuesStatsCollector implements Collector { public ScoreMode scoreMode() { return ScoreMode.COMPLETE_NO_SCORES; } - } diff --git a/lucene/misc/src/java/org/apache/lucene/misc/search/MemoryAccountingBitsetCollector.java b/lucene/misc/src/java/org/apache/lucene/misc/search/MemoryAccountingBitsetCollector.java index 22b17d9d4a3..21e3c58537a 100644 --- a/lucene/misc/src/java/org/apache/lucene/misc/search/MemoryAccountingBitsetCollector.java +++ b/lucene/misc/src/java/org/apache/lucene/misc/search/MemoryAccountingBitsetCollector.java @@ -18,7 +18,6 @@ package org.apache.lucene.misc.search; import java.io.IOException; - import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.misc.CollectorMemoryTracker; import org.apache.lucene.search.ScoreMode; diff --git a/lucene/misc/src/java/org/apache/lucene/misc/search/package-info.java b/lucene/misc/src/java/org/apache/lucene/misc/search/package-info.java index 912b4780c9a..0581271db6b 100644 --- a/lucene/misc/src/java/org/apache/lucene/misc/search/package-info.java +++ b/lucene/misc/src/java/org/apache/lucene/misc/search/package-info.java @@ -16,4 +16,4 @@ */ /** Misc search implementations. */ -package org.apache.lucene.misc.search; \ No newline at end of file +package org.apache.lucene.misc.search; diff --git a/lucene/misc/src/java/org/apache/lucene/misc/search/similarity/LegacyBM25Similarity.java b/lucene/misc/src/java/org/apache/lucene/misc/search/similarity/LegacyBM25Similarity.java index a005dcc4d1a..13387ead238 100644 --- a/lucene/misc/src/java/org/apache/lucene/misc/search/similarity/LegacyBM25Similarity.java +++ b/lucene/misc/src/java/org/apache/lucene/misc/search/similarity/LegacyBM25Similarity.java @@ -23,11 +23,10 @@ import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.search.similarities.Similarity; /** - * Similarity that behaves like {@link BM25Similarity} while also applying - * the k1+1 factor to the numerator of the scoring formula + * Similarity that behaves like {@link BM25Similarity} while also applying the k1+1 factor to the + * numerator of the scoring formula * * @see BM25Similarity - * * @deprecated {@link BM25Similarity} should be used instead */ @Deprecated @@ -35,10 +34,12 @@ public final class LegacyBM25Similarity extends Similarity { private final BM25Similarity bm25Similarity; - /** BM25 with these default values: + /** + * BM25 with these default values: + * *
- * <ul>
- *   <li>{@code k1 = 1.2}</li>
- *   <li>{@code b = 0.75}</li>
- * </ul>
+ * <ul>
+ *   <li>{@code k1 = 1.2}
+ *   <li>{@code b = 0.75}
+ * </ul>
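For context, a hedged usage sketch of this similarity (reader acquisition is elided into a parameter; the values are the documented defaults):

    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.misc.search.similarity.LegacyBM25Similarity;
    import org.apache.lucene.search.IndexSearcher;

    public final class LegacySimilarityDemo {
      // The k1+1 factor scales every score by the same constant, so ranking is
      // unchanged relative to BM25Similarity; only absolute score values differ.
      static IndexSearcher legacySearcher(IndexReader reader) {
        IndexSearcher searcher = new IndexSearcher(reader);
        searcher.setSimilarity(new LegacyBM25Similarity(1.2f, 0.75f));
        return searcher;
      }
    }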
*/ public LegacyBM25Similarity() { @@ -47,10 +48,11 @@ public final class LegacyBM25Similarity extends Similarity { /** * BM25 with the supplied parameter values. + * * @param k1 Controls non-linear term frequency normalization (saturation). * @param b Controls to what degree document length normalizes tf values. - * @throws IllegalArgumentException if {@code k1} is infinite or negative, or if {@code b} is - * not within the range {@code [0..1]} + * @throws IllegalArgumentException if {@code k1} is infinite or negative, or if {@code b} is not + * within the range {@code [0..1]} */ public LegacyBM25Similarity(float k1, float b) { this.bm25Similarity = new BM25Similarity(k1, b); @@ -62,12 +64,14 @@ public final class LegacyBM25Similarity extends Similarity { } @Override - public SimScorer scorer(float boost, CollectionStatistics collectionStats, TermStatistics... termStats) { + public SimScorer scorer( + float boost, CollectionStatistics collectionStats, TermStatistics... termStats) { return bm25Similarity.scorer(boost * (1 + bm25Similarity.getK1()), collectionStats, termStats); } /** * Returns the k1 parameter + * * @see #LegacyBM25Similarity(float, float) */ public final float getK1() { @@ -76,21 +80,24 @@ public final class LegacyBM25Similarity extends Similarity { /** * Returns the b parameter + * * @see #LegacyBM25Similarity(float, float) */ public final float getB() { return bm25Similarity.getB(); } - /** Sets whether overlap tokens (Tokens with 0 position increment) are - * ignored when computing norm. By default this is true, meaning overlap - * tokens do not count when computing norms. */ + /** + * Sets whether overlap tokens (Tokens with 0 position increment) are ignored when computing norm. + * By default this is true, meaning overlap tokens do not count when computing norms. + */ public void setDiscountOverlaps(boolean v) { bm25Similarity.setDiscountOverlaps(v); } /** * Returns true if overlap tokens are discounted from the document's length. + * * @see #setDiscountOverlaps */ public boolean getDiscountOverlaps() { diff --git a/lucene/misc/src/java/org/apache/lucene/misc/search/similarity/package-info.java b/lucene/misc/src/java/org/apache/lucene/misc/search/similarity/package-info.java index 7f722632cd5..ead622353c9 100644 --- a/lucene/misc/src/java/org/apache/lucene/misc/search/similarity/package-info.java +++ b/lucene/misc/src/java/org/apache/lucene/misc/search/similarity/package-info.java @@ -16,4 +16,4 @@ */ /** Misc similarity implementations. 
*/ -package org.apache.lucene.misc.search.similarity; \ No newline at end of file +package org.apache.lucene.misc.search.similarity; diff --git a/lucene/misc/src/java/org/apache/lucene/misc/store/HardlinkCopyDirectoryWrapper.java b/lucene/misc/src/java/org/apache/lucene/misc/store/HardlinkCopyDirectoryWrapper.java index 816595b6f4e..3361c2615d4 100644 --- a/lucene/misc/src/java/org/apache/lucene/misc/store/HardlinkCopyDirectoryWrapper.java +++ b/lucene/misc/src/java/org/apache/lucene/misc/store/HardlinkCopyDirectoryWrapper.java @@ -17,11 +17,6 @@ package org.apache.lucene.misc.store; -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.FSDirectory; -import org.apache.lucene.store.FilterDirectory; -import org.apache.lucene.store.IOContext; - import java.io.FileNotFoundException; import java.io.IOException; import java.nio.file.FileAlreadyExistsException; @@ -30,59 +25,68 @@ import java.nio.file.NoSuchFileException; import java.nio.file.Path; import java.security.AccessController; import java.security.PrivilegedAction; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.FSDirectory; +import org.apache.lucene.store.FilterDirectory; +import org.apache.lucene.store.IOContext; /** - * This directory wrapper overrides {@link Directory#copyFrom(Directory, String, String, IOContext)} in order - * to optionally use a hard-link instead of a full byte by byte file copy if applicable. Hard-links are only used if the - * underlying filesystem supports it and if the {@link java.nio.file.LinkPermission} "hard" is granted. + * This directory wrapper overrides {@link Directory#copyFrom(Directory, String, String, IOContext)} + * in order to optionally use a hard-link instead of a full byte by byte file copy if applicable. + * Hard-links are only used if the underlying filesystem supports it and if the {@link + * java.nio.file.LinkPermission} "hard" is granted. * - *

- * NOTE: Using hard-links changes the copy semantics of
- * {@link Directory#copyFrom(Directory, String, String, IOContext)}. When hard-links are used changes to the source file
- * will be reflected in the target file and vice-versa. Within Lucene, files are write once and should not be modified
- * after they have been written. This directory should not be used in situations where files change after they have
- * been written.
- * </p>
+ *
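A usage sketch mirroring the IndexMergeTool change earlier in this patch (the path is invented):

    import java.io.IOException;
    import java.nio.file.Paths;
    import org.apache.lucene.misc.store.HardlinkCopyDirectoryWrapper;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;

    public final class HardlinkDemo {
      // Wrap the merge target so copyFrom() can hard-link source segment files when the
      // filesystem and permissions allow it, falling back to a byte-by-byte copy otherwise.
      static Directory mergeTarget() throws IOException {
        return new HardlinkCopyDirectoryWrapper(FSDirectory.open(Paths.get("/tmp/merged")));
      }
    }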
NOTE: Using hard-links changes the copy semantics of {@link + * Directory#copyFrom(Directory, String, String, IOContext)}. When hard-links are used changes to + * the source file will be reflected in the target file and vice-versa. Within Lucene, files are + * write once and should not be modified after they have been written. This directory should not be + * used in situations where files change after they have been written. */ public final class HardlinkCopyDirectoryWrapper extends FilterDirectory { - /** - * Creates a new HardlinkCopyDirectoryWrapper delegating to the given directory - */ + /** Creates a new HardlinkCopyDirectoryWrapper delegating to the given directory */ public HardlinkCopyDirectoryWrapper(Directory in) { super(in); } @Override - public void copyFrom(Directory from, String srcFile, String destFile, IOContext context) throws IOException { + public void copyFrom(Directory from, String srcFile, String destFile, IOContext context) + throws IOException { final Directory fromUnwrapped = FilterDirectory.unwrap(from); final Directory toUnwrapped = FilterDirectory.unwrap(this); - // try to unwrap to FSDirectory - we might be able to just create hard-links of these files and save copying + // try to unwrap to FSDirectory - we might be able to just create hard-links of these files and + // save copying // the entire file. Exception suppressedException = null; boolean tryCopy = true; - if (fromUnwrapped instanceof FSDirectory - && toUnwrapped instanceof FSDirectory) { + if (fromUnwrapped instanceof FSDirectory && toUnwrapped instanceof FSDirectory) { final Path fromPath = ((FSDirectory) fromUnwrapped).getDirectory(); final Path toPath = ((FSDirectory) toUnwrapped).getDirectory(); if (Files.isReadable(fromPath.resolve(srcFile)) && Files.isWritable(toPath)) { // only try hardlinks if we have permission to access the files // if not super.copyFrom() will give us the right exceptions - suppressedException = AccessController.doPrivileged((PrivilegedAction) () -> { - try { - Files.createLink(toPath.resolve(destFile), fromPath.resolve(srcFile)); - } catch (FileNotFoundException | NoSuchFileException | FileAlreadyExistsException ex) { - return ex; // in these cases we bubble up since it's a true error condition. - } catch (IOException - | UnsupportedOperationException // if the FS doesn't support hard-links - | SecurityException ex // we don't have permission to use hard-links just fall back to byte copy - ) { - // hard-links are not supported or the files are on different filesystems - // we could go deeper and check if their filesstores are the same and opt - // out earlier but for now we just fall back to normal file-copy - return ex; - } - return null; - }); + suppressedException = + AccessController.doPrivileged( + (PrivilegedAction) + () -> { + try { + Files.createLink(toPath.resolve(destFile), fromPath.resolve(srcFile)); + } catch (FileNotFoundException + | NoSuchFileException + | FileAlreadyExistsException ex) { + return ex; // in these cases we bubble up since it's a true error condition. 
+ } catch (IOException + // if the FS doesn't support hard-links + | UnsupportedOperationException + // we don't have permission to use hard-links just fall back to byte copy + | SecurityException ex) { + // hard-links are not supported or the files are on different filesystems + // we could go deeper and check if their filesstores are the same and opt + // out earlier but for now we just fall back to normal file-copy + return ex; + } + return null; + }); tryCopy = suppressedException != null; } } diff --git a/lucene/misc/src/java/org/apache/lucene/misc/store/NativePosixUtil.java b/lucene/misc/src/java/org/apache/lucene/misc/store/NativePosixUtil.java index c119e521f53..04fe9bffb9b 100644 --- a/lucene/misc/src/java/org/apache/lucene/misc/store/NativePosixUtil.java +++ b/lucene/misc/src/java/org/apache/lucene/misc/store/NativePosixUtil.java @@ -16,37 +16,40 @@ */ package org.apache.lucene.misc.store; -import java.io.IOException; import java.io.FileDescriptor; +import java.io.IOException; import java.nio.ByteBuffer; -/** - * Provides JNI access to native methods such as madvise() for - * {@link NativeUnixDirectory} - */ +/** Provides JNI access to native methods such as madvise() for {@link NativeUnixDirectory} */ public final class NativePosixUtil { - public final static int NORMAL = 0; - public final static int SEQUENTIAL = 1; - public final static int RANDOM = 2; - public final static int WILLNEED = 3; - public final static int DONTNEED = 4; - public final static int NOREUSE = 5; + public static final int NORMAL = 0; + public static final int SEQUENTIAL = 1; + public static final int RANDOM = 2; + public static final int WILLNEED = 3; + public static final int DONTNEED = 4; + public static final int NOREUSE = 5; static { System.loadLibrary("LuceneNativeIO"); } - private static native int posix_fadvise(FileDescriptor fd, long offset, long len, int advise) throws IOException; - public static native int posix_madvise(ByteBuffer buf, int advise) throws IOException; - public static native int madvise(ByteBuffer buf, int advise) throws IOException; - public static native FileDescriptor open_direct(String filename, boolean read) throws IOException; - public static native long pread(FileDescriptor fd, long pos, ByteBuffer byteBuf) throws IOException; + private static native int posix_fadvise(FileDescriptor fd, long offset, long len, int advise) + throws IOException; - public static void advise(FileDescriptor fd, long offset, long len, int advise) throws IOException { + public static native int posix_madvise(ByteBuffer buf, int advise) throws IOException; + + public static native int madvise(ByteBuffer buf, int advise) throws IOException; + + public static native FileDescriptor open_direct(String filename, boolean read) throws IOException; + + public static native long pread(FileDescriptor fd, long pos, ByteBuffer byteBuf) + throws IOException; + + public static void advise(FileDescriptor fd, long offset, long len, int advise) + throws IOException { final int code = posix_fadvise(fd, offset, len, advise); if (code != 0) { throw new RuntimeException("posix_fadvise failed code=" + code); } } } - diff --git a/lucene/misc/src/java/org/apache/lucene/misc/store/NativeUnixDirectory.java b/lucene/misc/src/java/org/apache/lucene/misc/store/NativeUnixDirectory.java index 5057a0a3f22..fd7fae1d0d9 100644 --- a/lucene/misc/src/java/org/apache/lucene/misc/store/NativeUnixDirectory.java +++ b/lucene/misc/src/java/org/apache/lucene/misc/store/NativeUnixDirectory.java @@ -24,8 +24,6 @@ import java.io.IOException; 
import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.nio.file.Path; - -import org.apache.lucene.misc.store.NativePosixUtil; import org.apache.lucene.store.BufferedIndexInput; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FSDirectory; @@ -45,103 +43,111 @@ import org.apache.lucene.util.SuppressForbidden; // IO when context is merge /** - * A {@link Directory} implementation for all Unixes that uses - * DIRECT I/O to bypass OS level IO caching during - * merging. For all other cases (searching, writing) we delegate - * to the provided Directory instance. + * A {@link Directory} implementation for all Unixes that uses DIRECT I/O to bypass OS level IO + * caching during merging. For all other cases (searching, writing) we delegate to the provided + * Directory instance. * - *

- * See Overview
- * for more details.
+ * <p>See Overview for more
+ * details.
 *
- * <p>To use this you must compile
- * NativePosixUtil.cpp (exposes Linux-specific APIs through
- * JNI) for your platform, by running ./gradlew build, and then putting the resulting
- * libLuceneNativeIO.so or libLuceneNativeIO.dylib
- * (from lucene/misc/native/build/lib/release/platform/) onto your dynamic
- * linker search path.
+ *
+ * <p>To use this you must compile NativePosixUtil.cpp (exposes Linux-specific APIs through JNI) for
+ * your platform, by running ./gradlew build, and then putting the resulting
+ * libLuceneNativeIO.so or libLuceneNativeIO.dylib (from
+ * lucene/misc/native/build/lib/release/platform/) onto your dynamic linker search path.
+ *
- * <p>WARNING: this code is very new and quite easily
- * could contain horrible bugs. For example, here's one
- * known issue: if you use seek in IndexOutput, and then
- * write more than one buffer's worth of bytes, then the
- * file will be wrong. Lucene does not do this today (only writes
- * small number of bytes after seek), but that may change.
+ *
+ * <p>WARNING: this code is very new and quite easily could contain horrible bugs. For
+ * example, here's one known issue: if you use seek in IndexOutput, and then write more
+ * than one buffer's worth of bytes, then the file will be wrong. Lucene does not do this today
+ * (only writes small number of bytes after seek), but that may change.
+ *
- * <p>This directory passes Solr and Lucene tests on Linux
- * and OS X; other Unixes should work but have not been
- * tested! Use at your own risk.
+ *
This directory passes Solr and Lucene tests on Linux and OS X; other Unixes should work but + * have not been tested! Use at your own risk. * * @lucene.experimental */ public class NativeUnixDirectory extends FSDirectory { // TODO: this is OS dependent, but likely 512 is the LCD - private final static long ALIGN = 512; - private final static long ALIGN_NOT_MASK = ~(ALIGN-1); - - /** Default buffer size before writing to disk (256 KB); - * larger means less IO load but more RAM and direct - * buffer storage space consumed during merging. */ + private static final long ALIGN = 512; + private static final long ALIGN_NOT_MASK = ~(ALIGN - 1); - public final static int DEFAULT_MERGE_BUFFER_SIZE = 262144; + /** + * Default buffer size before writing to disk (256 KB); larger means less IO load but more RAM and + * direct buffer storage space consumed during merging. + */ + public static final int DEFAULT_MERGE_BUFFER_SIZE = 262144; - /** Default min expected merge size before direct IO is - * used (10 MB): */ - public final static long DEFAULT_MIN_BYTES_DIRECT = 10*1024*1024; + /** Default min expected merge size before direct IO is used (10 MB): */ + public static final long DEFAULT_MIN_BYTES_DIRECT = 10 * 1024 * 1024; private final int mergeBufferSize; private final long minBytesDirect; private final Directory delegate; - /** Create a new NIOFSDirectory for the named location. - * + /** + * Create a new NIOFSDirectory for the named location. + * * @param path the path of the directory * @param lockFactory to use - * @param mergeBufferSize Size of buffer to use for - * merging. See {@link #DEFAULT_MERGE_BUFFER_SIZE}. - * @param minBytesDirect Merges, or files to be opened for - * reading, smaller than this will - * not use direct IO. See {@link - * #DEFAULT_MIN_BYTES_DIRECT} + * @param mergeBufferSize Size of buffer to use for merging. See {@link + * #DEFAULT_MERGE_BUFFER_SIZE}. + * @param minBytesDirect Merges, or files to be opened for reading, smaller than this will not use + * direct IO. See {@link #DEFAULT_MIN_BYTES_DIRECT} * @param delegate fallback Directory for non-merges * @throws IOException If there is a low-level I/O error */ - public NativeUnixDirectory(Path path, int mergeBufferSize, long minBytesDirect, LockFactory lockFactory, Directory delegate) throws IOException { + public NativeUnixDirectory( + Path path, + int mergeBufferSize, + long minBytesDirect, + LockFactory lockFactory, + Directory delegate) + throws IOException { super(path, lockFactory); if ((mergeBufferSize & ALIGN) != 0) { - throw new IllegalArgumentException("mergeBufferSize must be 0 mod " + ALIGN + " (got: " + mergeBufferSize + ")"); + throw new IllegalArgumentException( + "mergeBufferSize must be 0 mod " + ALIGN + " (got: " + mergeBufferSize + ")"); } this.mergeBufferSize = mergeBufferSize; this.minBytesDirect = minBytesDirect; this.delegate = delegate; } - - /** Create a new NIOFSDirectory for the named location. - * + + /** + * Create a new NIOFSDirectory for the named location. 
+ * * @param path the path of the directory * @param lockFactory the lock factory to use * @param delegate fallback Directory for non-merges * @throws IOException If there is a low-level I/O error */ - public NativeUnixDirectory(Path path, LockFactory lockFactory, Directory delegate) throws IOException { + public NativeUnixDirectory(Path path, LockFactory lockFactory, Directory delegate) + throws IOException { this(path, DEFAULT_MERGE_BUFFER_SIZE, DEFAULT_MIN_BYTES_DIRECT, lockFactory, delegate); - } + } - /** Create a new NIOFSDirectory for the named location with {@link FSLockFactory#getDefault()}. - * + /** + * Create a new NIOFSDirectory for the named location with {@link FSLockFactory#getDefault()}. + * * @param path the path of the directory * @param delegate fallback Directory for non-merges * @throws IOException If there is a low-level I/O error */ public NativeUnixDirectory(Path path, Directory delegate) throws IOException { - this(path, DEFAULT_MERGE_BUFFER_SIZE, DEFAULT_MIN_BYTES_DIRECT, FSLockFactory.getDefault(), delegate); - } + this( + path, + DEFAULT_MERGE_BUFFER_SIZE, + DEFAULT_MIN_BYTES_DIRECT, + FSLockFactory.getDefault(), + delegate); + } @Override public IndexInput openInput(String name, IOContext context) throws IOException { ensureOpen(); - if (context.context != Context.MERGE || context.mergeInfo.estimatedMergeBytes < minBytesDirect || fileLength(name) < minBytesDirect) { + if (context.context != Context.MERGE + || context.mergeInfo.estimatedMergeBytes < minBytesDirect + || fileLength(name) < minBytesDirect) { return delegate.openInput(name, context); } else { return new NativeUnixIndexInput(getDirectory().resolve(name), mergeBufferSize); @@ -151,7 +157,8 @@ public class NativeUnixDirectory extends FSDirectory { @Override public IndexOutput createOutput(String name, IOContext context) throws IOException { ensureOpen(); - if (context.context != Context.MERGE || context.mergeInfo.estimatedMergeBytes < minBytesDirect) { + if (context.context != Context.MERGE + || context.mergeInfo.estimatedMergeBytes < minBytesDirect) { return delegate.createOutput(name, context); } else { return new NativeUnixIndexOutput(getDirectory().resolve(name), name, mergeBufferSize); @@ -159,13 +166,13 @@ public class NativeUnixDirectory extends FSDirectory { } @SuppressForbidden(reason = "java.io.File: native API requires old-style FileDescriptor") - private final static class NativeUnixIndexOutput extends IndexOutput { + private static final class NativeUnixIndexOutput extends IndexOutput { private final ByteBuffer buffer; private final FileOutputStream fos; private final FileChannel channel; private final int bufferSize; - //private final File path; + // private final File path; private int bufferPos; private long filePos; @@ -174,10 +181,10 @@ public class NativeUnixDirectory extends FSDirectory { public NativeUnixIndexOutput(Path path, String name, int bufferSize) throws IOException { super("NativeUnixIndexOutput(path=\"" + path.toString() + "\")", name); - //this.path = path; + // this.path = path; final FileDescriptor fd = NativePosixUtil.open_direct(path.toString(), false); fos = new FileOutputStream(fd); - //fos = new FileOutputStream(path); + // fos = new FileOutputStream(path); channel = fos.getChannel(); buffer = ByteBuffer.allocateDirect(bufferSize); this.bufferSize = bufferSize; @@ -186,7 +193,8 @@ public class NativeUnixDirectory extends FSDirectory { @Override public void writeByte(byte b) throws IOException { - assert bufferPos == buffer.position(): "bufferPos=" + bufferPos + " 
vs buffer.position()=" + buffer.position(); + assert bufferPos == buffer.position() + : "bufferPos=" + bufferPos + " vs buffer.position()=" + buffer.position(); buffer.put(b); if (++bufferPos == bufferSize) { dump(); @@ -196,7 +204,7 @@ public class NativeUnixDirectory extends FSDirectory { @Override public void writeBytes(byte[] src, int offset, int len) throws IOException { int toWrite = len; - while(true) { + while (true) { final int left = bufferSize - bufferPos; if (left <= toWrite) { buffer.put(src, offset, left); @@ -212,11 +220,11 @@ public class NativeUnixDirectory extends FSDirectory { } } - //@Override - //public void setLength() throws IOException { + // @Override + // public void setLength() throws IOException { // TODO -- how to impl this? neither FOS nor // FileChannel provides an API? - //} + // } private void dump() throws IOException { buffer.flip(); @@ -231,14 +239,16 @@ public class NativeUnixDirectory extends FSDirectory { // must always round to next block buffer.limit((int) ((buffer.limit() + ALIGN - 1) & ALIGN_NOT_MASK)); - assert (buffer.limit() & ALIGN_NOT_MASK) == buffer.limit() : "limit=" + buffer.limit() + " vs " + (buffer.limit() & ALIGN_NOT_MASK); + assert (buffer.limit() & ALIGN_NOT_MASK) == buffer.limit() + : "limit=" + buffer.limit() + " vs " + (buffer.limit() & ALIGN_NOT_MASK); assert (filePos & ALIGN_NOT_MASK) == filePos; - //System.out.println(Thread.currentThread().getName() + ": dump to " + filePos + " limit=" + buffer.limit() + " fos=" + fos); + // System.out.println(Thread.currentThread().getName() + ": dump to " + filePos + " limit=" + + // buffer.limit() + " fos=" + fos); channel.write(buffer, filePos); filePos += bufferPos; bufferPos = 0; buffer.clear(); - //System.out.println("dump: done"); + // System.out.println("dump: done"); // TODO: the case where we'd seek'd back, wrote an // entire buffer, we must here read the next buffer; @@ -264,15 +274,16 @@ public class NativeUnixDirectory extends FSDirectory { dump(); } finally { try { - //System.out.println("direct close set len=" + fileLength + " vs " + channel.size() + " path=" + path); + // System.out.println("direct close set len=" + fileLength + " vs " + channel.size() + " + // path=" + path); channel.truncate(fileLength); - //System.out.println(" now: " + channel.size()); + // System.out.println(" now: " + channel.size()); } finally { try { channel.close(); } finally { fos.close(); - //System.out.println(" final len=" + path.length()); + // System.out.println(" final len=" + path.length()); } } } @@ -281,7 +292,7 @@ public class NativeUnixDirectory extends FSDirectory { } @SuppressForbidden(reason = "java.io.File: native API requires old-style FileDescriptor") - private final static class NativeUnixIndexInput extends IndexInput { + private static final class NativeUnixIndexInput extends IndexInput { private final ByteBuffer buffer; private final FileInputStream fis; private final FileChannel channel; @@ -303,7 +314,7 @@ public class NativeUnixDirectory extends FSDirectory { isClone = false; filePos = -bufferSize; bufferPos = bufferSize; - //System.out.println("D open " + path + " this=" + this); + // System.out.println("D open " + path + " this=" + this); } // for clone @@ -317,7 +328,7 @@ public class NativeUnixDirectory extends FSDirectory { bufferPos = bufferSize; isOpen = true; isClone = true; - //System.out.println("D clone this=" + this); + // System.out.println("D clone this=" + this); seek(other.getFilePointer()); } @@ -343,8 +354,8 @@ public class NativeUnixDirectory extends 
FSDirectory { public void seek(long pos) throws IOException { if (pos != getFilePointer()) { final long alignedPos = pos & ALIGN_NOT_MASK; - filePos = alignedPos-bufferSize; - + filePos = alignedPos - bufferSize; + final int delta = (int) (pos - alignedPos); if (delta != 0) { refill(); @@ -374,7 +385,8 @@ public class NativeUnixDirectory extends FSDirectory { if (bufferPos == bufferSize) { refill(); } - assert bufferPos == buffer.position() : "bufferPos=" + bufferPos + " vs buffer.position()=" + buffer.position(); + assert bufferPos == buffer.position() + : "bufferPos=" + bufferPos + " vs buffer.position()=" + buffer.position(); bufferPos++; return buffer.get(); } @@ -383,8 +395,9 @@ public class NativeUnixDirectory extends FSDirectory { buffer.clear(); filePos += bufferSize; bufferPos = 0; - assert (filePos & ALIGN_NOT_MASK) == filePos : "filePos=" + filePos + " anded=" + (filePos & ALIGN_NOT_MASK); - //System.out.println("X refill filePos=" + filePos); + assert (filePos & ALIGN_NOT_MASK) == filePos + : "filePos=" + filePos + " anded=" + (filePos & ALIGN_NOT_MASK); + // System.out.println("X refill filePos=" + filePos); int n; try { n = channel.read(buffer, filePos); @@ -400,20 +413,21 @@ public class NativeUnixDirectory extends FSDirectory { @Override public void readBytes(byte[] dst, int offset, int len) throws IOException { int toRead = len; - //System.out.println("\nX readBytes len=" + len + " fp=" + getFilePointer() + " size=" + length() + " this=" + this); - while(true) { + // System.out.println("\nX readBytes len=" + len + " fp=" + getFilePointer() + " size=" + + // length() + " this=" + this); + while (true) { final int left = bufferSize - bufferPos; if (left < toRead) { - //System.out.println(" copy " + left); + // System.out.println(" copy " + left); buffer.get(dst, offset, left); toRead -= left; offset += left; refill(); } else { - //System.out.println(" copy " + toRead); + // System.out.println(" copy " + toRead); buffer.get(dst, offset, toRead); bufferPos += toRead; - //System.out.println(" readBytes done"); + // System.out.println(" readBytes done"); break; } } diff --git a/lucene/misc/src/java/org/apache/lucene/misc/store/RAFDirectory.java b/lucene/misc/src/java/org/apache/lucene/misc/store/RAFDirectory.java index 2f79a93574e..731a0de47e4 100644 --- a/lucene/misc/src/java/org/apache/lucene/misc/store/RAFDirectory.java +++ b/lucene/misc/src/java/org/apache/lucene/misc/store/RAFDirectory.java @@ -22,7 +22,6 @@ import java.io.IOException; import java.io.RandomAccessFile; import java.nio.ByteBuffer; import java.nio.file.Path; - import org.apache.lucene.store.BufferedIndexInput; import org.apache.lucene.store.FSDirectory; import org.apache.lucene.store.FSLockFactory; @@ -33,23 +32,22 @@ import org.apache.lucene.store.MMapDirectory; import org.apache.lucene.store.NIOFSDirectory; import org.apache.lucene.util.SuppressForbidden; -/** A straightforward implementation of {@link FSDirectory} - * using java.io.RandomAccessFile. However, this class has - * poor concurrent performance (multiple threads will - * bottleneck) as it synchronizes when multiple threads - * read from the same file. It's usually better to use - * {@link NIOFSDirectory} or {@link MMapDirectory} instead. - *

- * NOTE: Because this uses RandomAccessFile, it will generally - * not work with non-default filesystem providers. It is only - * provided for applications that relied on the fact that - * RandomAccessFile's IO was not interruptible. +/** + * A straightforward implementation of {@link FSDirectory} using java.io.RandomAccessFile. However, + * this class has poor concurrent performance (multiple threads will bottleneck) as it synchronizes + * when multiple threads read from the same file. It's usually better to use {@link NIOFSDirectory} + * or {@link MMapDirectory} instead. + * + *

NOTE: Because this uses RandomAccessFile, it will generally not work with non-default + * filesystem providers. It is only provided for applications that relied on the fact that + * RandomAccessFile's IO was not interruptible. */ @SuppressForbidden(reason = "java.io.File: RAFDirectory is legacy API") public class RAFDirectory extends FSDirectory { - - /** Create a new RAFDirectory for the named location. - * The directory is created at the named location if it does not yet exist. + + /** + * Create a new RAFDirectory for the named location. The directory is created at the named + * location if it does not yet exist. * * @param path the path of the directory * @param lockFactory the lock factory to use @@ -59,9 +57,10 @@ public class RAFDirectory extends FSDirectory { super(path, lockFactory); path.toFile(); // throw exception if we can't get a File } - - /** Create a new RAFDirectory for the named location and {@link FSLockFactory#getDefault()}. - * The directory is created at the named location if it does not yet exist. + + /** + * Create a new RAFDirectory for the named location and {@link FSLockFactory#getDefault()}. The + * directory is created at the named location if it does not yet exist. * * @param path the path of the directory * @throws IOException if there is a low-level I/O error @@ -81,17 +80,17 @@ public class RAFDirectory extends FSDirectory { } /** - * Reads bytes with {@link RandomAccessFile#seek(long)} followed by - * {@link RandomAccessFile#read(byte[], int, int)}. + * Reads bytes with {@link RandomAccessFile#seek(long)} followed by {@link + * RandomAccessFile#read(byte[], int, int)}. */ @SuppressForbidden(reason = "java.io.File: RAFDirectory is legacy API") static final class RAFIndexInput extends BufferedIndexInput { /** - * The maximum chunk size is 8192 bytes, because {@link RandomAccessFile} mallocs - * a native buffer outside of stack if the read buffer size is larger. + * The maximum chunk size is 8192 bytes, because {@link RandomAccessFile} mallocs a native + * buffer outside of stack if the read buffer size is larger. 
*/ private static final int CHUNK_SIZE = 8192; - + /** the file channel we will read from */ protected final RandomAccessFile file; /** is this instance a clone and hence does not own the file to close it */ @@ -100,40 +99,43 @@ public class RAFDirectory extends FSDirectory { protected final long off; /** end offset (start+length) */ protected final long end; - - public RAFIndexInput(String resourceDesc, RandomAccessFile file, IOContext context) throws IOException { + + public RAFIndexInput(String resourceDesc, RandomAccessFile file, IOContext context) + throws IOException { super(resourceDesc, context); - this.file = file; + this.file = file; this.off = 0L; this.end = file.length(); } - - public RAFIndexInput(String resourceDesc, RandomAccessFile file, long off, long length, int bufferSize) { + + public RAFIndexInput( + String resourceDesc, RandomAccessFile file, long off, long length, int bufferSize) { super(resourceDesc, bufferSize); this.file = file; this.off = off; this.end = off + length; this.isClone = true; } - + @Override public void close() throws IOException { if (!isClone) { file.close(); } } - + @Override public RAFIndexInput clone() { - RAFIndexInput clone = (RAFIndexInput)super.clone(); + RAFIndexInput clone = (RAFIndexInput) super.clone(); clone.isClone = true; return clone; } - + @Override public IndexInput slice(String sliceDescription, long offset, long length) throws IOException { if (offset < 0 || length < 0 || offset + length > this.length()) { - throw new IllegalArgumentException("slice() " + sliceDescription + " out of bounds: " + this); + throw new IllegalArgumentException( + "slice() " + sliceDescription + " out of bounds: " + this); } return new RAFIndexInput(sliceDescription, file, off + offset, length, getBufferSize()); } @@ -142,7 +144,7 @@ public class RAFDirectory extends FSDirectory { public final long length() { return end - off; } - + /** IndexInput methods */ @Override protected void readInternal(ByteBuffer b) throws IOException { @@ -158,10 +160,22 @@ public class RAFDirectory extends FSDirectory { while (b.hasRemaining()) { final int toRead = Math.min(CHUNK_SIZE, b.remaining()); final int i = file.read(b.array(), b.position(), toRead); - if (i < 0) { // be defensive here, even though we checked before hand, something could have changed - throw new EOFException("read past EOF: " + this + " off: " + b.position() + " len: " + b.remaining() + " chunkLen: " + toRead + " end: " + end); + if (i < 0) { + // be defensive here, even though we checked before hand, something could have changed + throw new EOFException( + "read past EOF: " + + this + + " off: " + + b.position() + + " len: " + + b.remaining() + + " chunkLen: " + + toRead + + " end: " + + end); } - assert i > 0 : "RandomAccessFile.read with non zero-length toRead must always read at least one byte"; + assert i > 0 + : "RandomAccessFile.read with non zero-length toRead must always read at least one byte"; b.position(b.position() + i); } } catch (IOException ioe) { @@ -169,14 +183,15 @@ public class RAFDirectory extends FSDirectory { } } } - + @Override protected void seekInternal(long pos) throws IOException { if (pos > length()) { - throw new EOFException("read past EOF: pos=" + pos + " vs length=" + length() + ": " + this); + throw new EOFException( + "read past EOF: pos=" + pos + " vs length=" + length() + ": " + this); } } - + boolean isFDValid() throws IOException { return file.getFD().valid(); } diff --git a/lucene/misc/src/java/org/apache/lucene/misc/store/WindowsDirectory.java 
b/lucene/misc/src/java/org/apache/lucene/misc/store/WindowsDirectory.java
index d791ecada75..f1b14a746db 100644
--- a/lucene/misc/src/java/org/apache/lucene/misc/store/WindowsDirectory.java
+++ b/lucene/misc/src/java/org/apache/lucene/misc/store/WindowsDirectory.java
@@ -16,6 +16,10 @@
  */
 package org.apache.lucene.misc.store;
 
+import java.io.EOFException;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.file.Path;
 import org.apache.lucene.store.BufferedIndexInput;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FSDirectory;
@@ -24,33 +28,31 @@ import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.LockFactory;
 
-import java.io.IOException;
-import java.io.EOFException;
-import java.nio.ByteBuffer;
-import java.nio.file.Path;
-
 /**
  * Native {@link Directory} implementation for Microsoft Windows.
- * <p>
- * Steps:
- * <ol>
+ *
+ * <p>Steps:
+ *
+ * <ol>
 *   <li>Compile the source code to create libLuceneNativeIO.dll: ./gradlew build
- *   <li>Put the resulting libLuceneNativeIO.dll
- *       (from lucene/misc/native/build/lib/release/platform/)
- *       into some directory in your windows PATH
+ *   <li>Put the resulting libLuceneNativeIO.dll (from
+ *       lucene/misc/native/build/lib/release/platform/) into some directory in your windows
+ *       PATH
 *   <li>Open indexes with WindowsDirectory and use it.
 * </ol>
    + * * @lucene.experimental */ public class WindowsDirectory extends FSDirectory { private static final int DEFAULT_BUFFERSIZE = 4096; /* default pgsize on ia32/amd64 */ - + static { System.loadLibrary("LuceneNativeIO"); } - - /** Create a new WindowsDirectory for the named location. - * + + /** + * Create a new WindowsDirectory for the named location. + * * @param path the path of the directory * @param lockFactory the lock factory to use * @throws IOException If there is a low-level I/O error @@ -59,7 +61,8 @@ public class WindowsDirectory extends FSDirectory { super(path, lockFactory); } - /** Create a new WindowsDirectory for the named location and {@link FSLockFactory#getDefault()}. + /** + * Create a new WindowsDirectory for the named location and {@link FSLockFactory#getDefault()}. * * @param path the path of the directory * @throws IOException If there is a low-level I/O error @@ -71,27 +74,30 @@ public class WindowsDirectory extends FSDirectory { @Override public IndexInput openInput(String name, IOContext context) throws IOException { ensureOpen(); - return new WindowsIndexInput(getDirectory().resolve(name), Math.max(BufferedIndexInput.bufferSize(context), DEFAULT_BUFFERSIZE)); + return new WindowsIndexInput( + getDirectory().resolve(name), + Math.max(BufferedIndexInput.bufferSize(context), DEFAULT_BUFFERSIZE)); } - + static class WindowsIndexInput extends BufferedIndexInput { private final long fd; private final long length; boolean isClone; boolean isOpen; - + public WindowsIndexInput(Path file, int bufferSize) throws IOException { super("WindowsIndexInput(path=\"" + file + "\")", bufferSize); fd = WindowsDirectory.open(file.toString()); length = WindowsDirectory.length(fd); isOpen = true; } - + @Override protected void readInternal(ByteBuffer b) throws IOException { int bytesRead; try { - bytesRead = WindowsDirectory.read(fd, b.array(), b.position(), b.remaining(), getFilePointer()); + bytesRead = + WindowsDirectory.read(fd, b.array(), b.position(), b.remaining(), getFilePointer()); } catch (IOException ioe) { throw new IOException(ioe.getMessage() + ": " + this, ioe); } @@ -102,8 +108,7 @@ public class WindowsDirectory extends FSDirectory { } @Override - protected void seekInternal(long pos) throws IOException { - } + protected void seekInternal(long pos) throws IOException {} @Override public synchronized void close() throws IOException { @@ -118,24 +123,25 @@ public class WindowsDirectory extends FSDirectory { public long length() { return length; } - + @Override public WindowsIndexInput clone() { - WindowsIndexInput clone = (WindowsIndexInput)super.clone(); + WindowsIndexInput clone = (WindowsIndexInput) super.clone(); clone.isClone = true; return clone; } } - + /** Opens a handle to a file. 
*/ private static native long open(String filename) throws IOException; - + /** Reads data from a file at pos into bytes */ - private static native int read(long fd, byte bytes[], int offset, int length, long pos) throws IOException; - + private static native int read(long fd, byte bytes[], int offset, int length, long pos) + throws IOException; + /** Closes a handle to a file */ private static native void close(long fd) throws IOException; - + /** Returns the length of a file */ private static native long length(long fd) throws IOException; } diff --git a/lucene/misc/src/java/org/apache/lucene/misc/store/package-info.java b/lucene/misc/src/java/org/apache/lucene/misc/store/package-info.java index a006862e94d..827ae5ef0ad 100644 --- a/lucene/misc/src/java/org/apache/lucene/misc/store/package-info.java +++ b/lucene/misc/src/java/org/apache/lucene/misc/store/package-info.java @@ -16,4 +16,4 @@ */ /** Misc Directory implementations. */ -package org.apache.lucene.misc.store; \ No newline at end of file +package org.apache.lucene.misc.store; diff --git a/lucene/misc/src/java/org/apache/lucene/misc/util/MemoryTracker.java b/lucene/misc/src/java/org/apache/lucene/misc/util/MemoryTracker.java index 20a45e2d278..be0ba61e27f 100644 --- a/lucene/misc/src/java/org/apache/lucene/misc/util/MemoryTracker.java +++ b/lucene/misc/src/java/org/apache/lucene/misc/util/MemoryTracker.java @@ -17,10 +17,9 @@ package org.apache.lucene.misc.util; -/** - * Tracks dynamic allocations/deallocations of memory for transient objects - */ +/** Tracks dynamic allocations/deallocations of memory for transient objects */ public interface MemoryTracker { void updateBytes(long bytes); + long getBytes(); } diff --git a/lucene/misc/src/java/org/apache/lucene/misc/util/fst/ListOfOutputs.java b/lucene/misc/src/java/org/apache/lucene/misc/util/fst/ListOfOutputs.java index 9184736990f..dbbb606acd6 100644 --- a/lucene/misc/src/java/org/apache/lucene/misc/util/fst/ListOfOutputs.java +++ b/lucene/misc/src/java/org/apache/lucene/misc/util/fst/ListOfOutputs.java @@ -19,7 +19,6 @@ package org.apache.lucene.misc.util.fst; import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.lucene.store.DataInput; import org.apache.lucene.store.DataOutput; import org.apache.lucene.util.IntsRef; // javadocs @@ -28,34 +27,27 @@ import org.apache.lucene.util.fst.FSTCompiler; import org.apache.lucene.util.fst.Outputs; /** - * Wraps another Outputs implementation and encodes one or - * more of its output values. You can use this when a single - * input may need to map to more than one output, - * maintaining order: pass the same input with a different - * output by calling {@link FSTCompiler#add(IntsRef,Object)} multiple - * times. The builder will then combine the outputs using - * the {@link Outputs#merge(Object,Object)} method. + * Wraps another Outputs implementation and encodes one or more of its output values. You can use + * this when a single input may need to map to more than one output, maintaining order: pass the + * same input with a different output by calling {@link FSTCompiler#add(IntsRef,Object)} multiple + * times. The builder will then combine the outputs using the {@link Outputs#merge(Object,Object)} + * method. * - *

    The resulting FST may not be minimal when an input has - * more than one output, as this requires pushing all - * multi-output values to a final state. + *

    The resulting FST may not be minimal when an input has more than one output, as this requires + * pushing all multi-output values to a final state. * - *

    NOTE: the only way to create multiple outputs is to - * add the same input to the FST multiple times in a row. This is - * how the FST maps a single input to multiple outputs (e.g. you - * cannot pass a List<Object> to {@link FSTCompiler#add}). If - * your outputs are longs, and you need at most 2, then use - * {@link UpToTwoPositiveIntOutputs} instead since it stores - * the outputs more compactly (by stealing a bit from each - * long value). + *

    NOTE: the only way to create multiple outputs is to add the same input to the FST multiple + * times in a row. This is how the FST maps a single input to multiple outputs (e.g. you cannot pass + * a List<Object> to {@link FSTCompiler#add}). If your outputs are longs, and you need at most + * 2, then use {@link UpToTwoPositiveIntOutputs} instead since it stores the outputs more compactly + * (by stealing a bit from each long value). * - *

    NOTE: this cannot wrap itself (ie you cannot make an - * FST with List<List<Object>> outputs using this). + *

    NOTE: this cannot wrap itself (ie you cannot make an FST with List<List<Object>> + * outputs using this). * * @lucene.experimental */ - // NOTE: i think we could get a more compact FST if, instead // of adding the same input multiple times with a different // output each time, we added it only once with a @@ -66,7 +58,7 @@ import org.apache.lucene.util.fst.Outputs; @SuppressWarnings("unchecked") public final class ListOfOutputs extends Outputs { - + private final Outputs outputs; public ListOfOutputs(Outputs outputs) { @@ -93,7 +85,7 @@ public final class ListOfOutputs extends Outputs { } else { List outputList = (List) output; List addedList = new ArrayList<>(outputList.size()); - for(T _output : outputList) { + for (T _output : outputList) { addedList.add(outputs.add((T) prefix, _output)); } return addedList; @@ -114,7 +106,7 @@ public final class ListOfOutputs extends Outputs { } else { List outputList = (List) output; out.writeVInt(outputList.size()); - for(T eachOutput : outputList) { + for (T eachOutput : outputList) { outputs.write(eachOutput, out); } } @@ -124,7 +116,7 @@ public final class ListOfOutputs extends Outputs { public Object read(DataInput in) throws IOException { return outputs.read(in); } - + @Override public void skipOutput(DataInput in) throws IOException { outputs.skipOutput(in); @@ -137,17 +129,17 @@ public final class ListOfOutputs extends Outputs { return outputs.read(in); } else { List outputList = new ArrayList<>(count); - for(int i=0;i extends Outputs { StringBuilder b = new StringBuilder(); b.append('['); - - for(int i=0;i 0) { b.append(", "); } @@ -191,8 +183,9 @@ public final class ListOfOutputs extends Outputs { } else { outputList.addAll((List) second); } - //System.out.println("MERGE: now " + outputList.size() + " first=" + outputToString(first) + " second=" + outputToString(second)); - //System.out.println(" return " + outputToString(outputList)); + // System.out.println("MERGE: now " + outputList.size() + " first=" + outputToString(first) + " + // second=" + outputToString(second)); + // System.out.println(" return " + outputToString(outputList)); return outputList; } @@ -201,7 +194,7 @@ public final class ListOfOutputs extends Outputs { return "OneOrMoreOutputs(" + outputs + ")"; } - public List asList(Object output) { + public List asList(Object output) { if (!(output instanceof List)) { List result = new ArrayList<>(1); result.add((T) output); @@ -211,7 +204,8 @@ public final class ListOfOutputs extends Outputs { } } - private static final long BASE_LIST_NUM_BYTES = RamUsageEstimator.shallowSizeOf(new ArrayList()); + private static final long BASE_LIST_NUM_BYTES = + RamUsageEstimator.shallowSizeOf(new ArrayList()); @Override public long ramBytesUsed(Object output) { @@ -219,7 +213,7 @@ public final class ListOfOutputs extends Outputs { if (output instanceof List) { bytes += BASE_LIST_NUM_BYTES; List outputList = (List) output; - for(T _output : outputList) { + for (T _output : outputList) { bytes += outputs.ramBytesUsed(_output); } // 2 * to allow for ArrayList's oversizing: diff --git a/lucene/misc/src/java/org/apache/lucene/misc/util/fst/UpToTwoPositiveIntOutputs.java b/lucene/misc/src/java/org/apache/lucene/misc/util/fst/UpToTwoPositiveIntOutputs.java index afa8ac57850..8c49366d54b 100644 --- a/lucene/misc/src/java/org/apache/lucene/misc/util/fst/UpToTwoPositiveIntOutputs.java +++ b/lucene/misc/src/java/org/apache/lucene/misc/util/fst/UpToTwoPositiveIntOutputs.java @@ -17,7 +17,6 @@ package org.apache.lucene.misc.util.fst; import 
java.io.IOException;
-
 import org.apache.lucene.store.DataInput;
 import org.apache.lucene.store.DataOutput;
 import org.apache.lucene.util.RamUsageEstimator;
@@ -26,34 +25,26 @@ import org.apache.lucene.util.fst.FSTCompiler;
 import org.apache.lucene.util.fst.Outputs;
 
 /**
- * An FST {@link Outputs} implementation where each output
- * is one or two non-negative long values. If it's a
- * single output, Long is returned; else, TwoLongs. Order
- * is preserved in the TwoLongs case, ie .first is the first
- * input/output added to Builder, and .second is the
- * second. You cannot store 0 output with this (that's
- * reserved to mean "no output")!
+ * An FST {@link Outputs} implementation where each output is one or two non-negative long values.
+ * If it's a single output, Long is returned; else, TwoLongs. Order is preserved in the TwoLongs
+ * case, i.e. .first is the first input/output added to Builder, and .second is the second. You
+ * cannot store 0 output with this (that's reserved to mean "no output")!
 *
- *

    NOTE: the only way to create a TwoLongs output is to - * add the same input to the FST twice in a row. This is - * how the FST maps a single input to two outputs (e.g. you - * cannot pass a TwoLongs to {@link FSTCompiler#add}. If you - * need more than two then use {@link ListOfOutputs}, but if - * you only have at most 2 then this implementation will - * require fewer bytes as it steals one bit from each long - * value. + *

NOTE: the only way to create a TwoLongs output is to add the same input to the FST twice in a
+ * row. This is how the FST maps a single input to two outputs (e.g. you cannot pass a TwoLongs to
+ * {@link FSTCompiler#add}). If you need more than two then use {@link ListOfOutputs}, but if you
+ * only have at most 2 then this implementation will require fewer bytes as it steals one bit from
+ * each long value.
 *
- *

    NOTE: the resulting FST is not guaranteed to be minimal! - * See {@link FSTCompiler}. + *

    NOTE: the resulting FST is not guaranteed to be minimal! See {@link FSTCompiler}. * * @lucene.experimental */ - @SuppressForbidden(reason = "Uses a Long instance as a marker") public final class UpToTwoPositiveIntOutputs extends Outputs { /** Holds two long outputs. */ - public final static class TwoLongs { + public static final class TwoLongs { public final long first; public final long second; @@ -81,17 +72,19 @@ public final class UpToTwoPositiveIntOutputs extends Outputs { @Override public int hashCode() { - return (int) ((first^(first>>>32)) ^ (second^(second>>32))); + return (int) ((first ^ (first >>> 32)) ^ (second ^ (second >> 32))); } } @SuppressWarnings("deprecation") - private final static Long NO_OUTPUT = new Long(0); + private static final Long NO_OUTPUT = new Long(0); private final boolean doShare; - private final static UpToTwoPositiveIntOutputs singletonShare = new UpToTwoPositiveIntOutputs(true); - private final static UpToTwoPositiveIntOutputs singletonNoShare = new UpToTwoPositiveIntOutputs(false); + private static final UpToTwoPositiveIntOutputs singletonShare = + new UpToTwoPositiveIntOutputs(true); + private static final UpToTwoPositiveIntOutputs singletonNoShare = + new UpToTwoPositiveIntOutputs(false); private UpToTwoPositiveIntOutputs(boolean doShare) { this.doShare = doShare; @@ -175,10 +168,10 @@ public final class UpToTwoPositiveIntOutputs extends Outputs { assert valid(_output, true); if (_output instanceof Long) { final Long output = (Long) _output; - out.writeVLong(output<<1); + out.writeVLong(output << 1); } else { final TwoLongs output = (TwoLongs) _output; - out.writeVLong((output.first<<1) | 1); + out.writeVLong((output.first << 1) | 1); out.writeVLong(output.second); } } @@ -238,7 +231,8 @@ public final class UpToTwoPositiveIntOutputs extends Outputs { return new TwoLongs((Long) first, (Long) second); } - private static final long TWO_LONGS_NUM_BYTES = RamUsageEstimator.shallowSizeOf(new TwoLongs(0, 0)); + private static final long TWO_LONGS_NUM_BYTES = + RamUsageEstimator.shallowSizeOf(new TwoLongs(0, 0)); @Override public long ramBytesUsed(Object o) { diff --git a/lucene/misc/src/java/org/apache/lucene/misc/util/fst/package-info.java b/lucene/misc/src/java/org/apache/lucene/misc/util/fst/package-info.java index b7e79a9ea5f..c6783fb71ee 100644 --- a/lucene/misc/src/java/org/apache/lucene/misc/util/fst/package-info.java +++ b/lucene/misc/src/java/org/apache/lucene/misc/util/fst/package-info.java @@ -16,4 +16,4 @@ */ /** Misc FST classes. */ -package org.apache.lucene.misc.util.fst; \ No newline at end of file +package org.apache.lucene.misc.util.fst; diff --git a/lucene/misc/src/java/org/apache/lucene/misc/util/package-info.java b/lucene/misc/src/java/org/apache/lucene/misc/util/package-info.java index 691e8d9ac53..2bd4678e582 100644 --- a/lucene/misc/src/java/org/apache/lucene/misc/util/package-info.java +++ b/lucene/misc/src/java/org/apache/lucene/misc/util/package-info.java @@ -15,6 +15,5 @@ * limitations under the License. 
*/ -/** Memory Tracker interface which allows defining custom collector - level memory trackers */ -package org.apache.lucene.misc.util; \ No newline at end of file +/** Memory Tracker interface which allows defining custom collector level memory trackers */ +package org.apache.lucene.misc.util; diff --git a/lucene/misc/src/test/org/apache/lucene/misc/SweetSpotSimilarityTest.java b/lucene/misc/src/test/org/apache/lucene/misc/SweetSpotSimilarityTest.java index a9f51ae01a5..46f7da61266 100644 --- a/lucene/misc/src/test/org/apache/lucene/misc/SweetSpotSimilarityTest.java +++ b/lucene/misc/src/test/org/apache/lucene/misc/SweetSpotSimilarityTest.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.Collections; import java.util.stream.Collectors; import java.util.stream.IntStream; - import org.apache.lucene.document.Field.Store; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; @@ -36,11 +35,9 @@ import org.apache.lucene.store.ByteBuffersDirectory; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; -/** - * Test of the SweetSpotSimilarity - */ +/** Test of the SweetSpotSimilarity */ public class SweetSpotSimilarityTest extends LuceneTestCase { - + private static float computeNorm(Similarity sim, String field, int length) throws IOException { String value = IntStream.range(0, length).mapToObj(i -> "a").collect(Collectors.joining(" ")); Directory dir = new ByteBuffersDirectory(); @@ -75,171 +72,130 @@ public class SweetSpotSimilarityTest extends LuceneTestCase { // TODO: rewrite this test to not make thosuands of indexes. @Nightly public void testSweetSpotComputeNorm() throws IOException { - + final SweetSpotSimilarity ss = new SweetSpotSimilarity(); - ss.setLengthNormFactors(1,1,0.5f,true); + ss.setLengthNormFactors(1, 1, 0.5f, true); Similarity d = new ClassicSimilarity(); Similarity s = ss; - // base case, should degrade for (int i = 1; i < 1000; i++) { - assertEquals("base case: i="+i, - computeNorm(d, "bogus", i), - computeNorm(s, "bogus", i), - 0.0f); + assertEquals( + "base case: i=" + i, computeNorm(d, "bogus", i), computeNorm(s, "bogus", i), 0.0f); } // make a sweet spot - - ss.setLengthNormFactors(3,10,0.5f,true); - - for (int i = 3; i <=10; i++) { - assertEquals("3,10: spot i="+i, - 1.0f, - computeNorm(ss, "bogus", i), - 0.0f); + + ss.setLengthNormFactors(3, 10, 0.5f, true); + + for (int i = 3; i <= 10; i++) { + assertEquals("3,10: spot i=" + i, 1.0f, computeNorm(ss, "bogus", i), 0.0f); } - + for (int i = 10; i < 1000; i++) { final float normD = computeNorm(d, "bogus", i - 9); final float normS = computeNorm(s, "bogus", i); - assertEquals("3,10: 10 0) { - assertTrue ("out of order " + terms[i-1].docFreq + "should be >= " + terms[i].docFreq,terms[i-1].docFreq >= terms[i].docFreq); - } - } - } - - public void testNumTerms () throws Exception{ + public void testFirstTermHighestDocFreqAllFields() throws Exception { int numTerms = 12; String field = null; - TermStats[] terms = HighFreqTerms.getHighFreqTerms(reader, numTerms, field, new HighFreqTerms.DocFreqComparator()); + TermStats[] terms = + HighFreqTerms.getHighFreqTerms( + reader, numTerms, field, new HighFreqTerms.DocFreqComparator()); + assertEquals("Term with highest docfreq is first", 20, terms[0].docFreq); + } + + public void testFirstTermHighestDocFreq() throws Exception { + int numTerms = 12; + String field = "FIELD_1"; + TermStats[] terms = + HighFreqTerms.getHighFreqTerms( + reader, numTerms, field, new 
HighFreqTerms.DocFreqComparator()); + assertEquals("Term with highest docfreq is first", 10, terms[0].docFreq); + } + + public void testOrderedByDocFreqDescending() throws Exception { + int numTerms = 12; + String field = "FIELD_1"; + TermStats[] terms = + HighFreqTerms.getHighFreqTerms( + reader, numTerms, field, new HighFreqTerms.DocFreqComparator()); + for (int i = 0; i < terms.length; i++) { + if (i > 0) { + assertTrue( + "out of order " + terms[i - 1].docFreq + "should be >= " + terms[i].docFreq, + terms[i - 1].docFreq >= terms[i].docFreq); + } + } + } + + public void testNumTerms() throws Exception { + int numTerms = 12; + String field = null; + TermStats[] terms = + HighFreqTerms.getHighFreqTerms( + reader, numTerms, field, new HighFreqTerms.DocFreqComparator()); assertEquals("length of terms array equals numTerms :" + numTerms, numTerms, terms.length); } - - public void testGetHighFreqTerms () throws Exception{ - int numTerms=12; - String field="FIELD_1"; - TermStats[] terms = HighFreqTerms.getHighFreqTerms(reader, numTerms, field, new HighFreqTerms.DocFreqComparator()); - + + public void testGetHighFreqTerms() throws Exception { + int numTerms = 12; + String field = "FIELD_1"; + TermStats[] terms = + HighFreqTerms.getHighFreqTerms( + reader, numTerms, field, new HighFreqTerms.DocFreqComparator()); + for (int i = 0; i < terms.length; i++) { String termtext = terms[i].termtext.utf8ToString(); // hardcoded highTF or highTFmedDF @@ -108,88 +122,98 @@ public class TestHighFreqTerms extends LuceneTestCase { } } else { int n = Integer.parseInt(termtext); - assertEquals("doc freq is not as expected", getExpecteddocFreq(n), - terms[i].docFreq); + assertEquals("doc freq is not as expected", getExpecteddocFreq(n), terms[i].docFreq); } } } - + /********************Test sortByTotalTermFreq**********************************/ - - public void testFirstTermHighestTotalTermFreq () throws Exception{ + + public void testFirstTermHighestTotalTermFreq() throws Exception { int numTerms = 20; String field = null; - TermStats[] terms = HighFreqTerms.getHighFreqTerms(reader, numTerms, field, new HighFreqTerms.TotalTermFreqComparator()); - assertEquals("Term with highest totalTermFreq is first",200, terms[0].totalTermFreq); + TermStats[] terms = + HighFreqTerms.getHighFreqTerms( + reader, numTerms, field, new HighFreqTerms.TotalTermFreqComparator()); + assertEquals("Term with highest totalTermFreq is first", 200, terms[0].totalTermFreq); } - public void testFirstTermHighestTotalTermFreqDifferentField () throws Exception{ + public void testFirstTermHighestTotalTermFreqDifferentField() throws Exception { int numTerms = 20; String field = "different_field"; - TermStats[] terms = HighFreqTerms.getHighFreqTerms(reader, numTerms, field, new HighFreqTerms.TotalTermFreqComparator()); - assertEquals("Term with highest totalTermFreq is first"+ terms[0].getTermText(),150, terms[0].totalTermFreq); + TermStats[] terms = + HighFreqTerms.getHighFreqTerms( + reader, numTerms, field, new HighFreqTerms.TotalTermFreqComparator()); + assertEquals( + "Term with highest totalTermFreq is first" + terms[0].getTermText(), + 150, + terms[0].totalTermFreq); } - - public void testOrderedByTermFreqDescending () throws Exception{ + + public void testOrderedByTermFreqDescending() throws Exception { int numTerms = 12; String field = "FIELD_1"; - TermStats[] terms = HighFreqTerms.getHighFreqTerms(reader, numTerms, field, new HighFreqTerms.TotalTermFreqComparator()); - + TermStats[] terms = + HighFreqTerms.getHighFreqTerms( + reader, 
numTerms, field, new HighFreqTerms.TotalTermFreqComparator()); + for (int i = 0; i < terms.length; i++) { // check that they are sorted by descending termfreq // order if (i > 0) { - assertTrue ("out of order" +terms[i-1]+ " > " +terms[i],terms[i-1].totalTermFreq >= terms[i].totalTermFreq); + assertTrue( + "out of order" + terms[i - 1] + " > " + terms[i], + terms[i - 1].totalTermFreq >= terms[i].totalTermFreq); } - } + } } - - public void testGetTermFreqOrdered () throws Exception{ + + public void testGetTermFreqOrdered() throws Exception { int numTerms = 12; String field = "FIELD_1"; - TermStats[] terms = HighFreqTerms.getHighFreqTerms(reader, numTerms, field, new HighFreqTerms.TotalTermFreqComparator()); - + TermStats[] terms = + HighFreqTerms.getHighFreqTerms( + reader, numTerms, field, new HighFreqTerms.TotalTermFreqComparator()); + for (int i = 0; i < terms.length; i++) { String text = terms[i].termtext.utf8ToString(); if (text.contains("highTF")) { if (text.contains("medDF")) { - assertEquals("total term freq is expected", 125, - terms[i].totalTermFreq); + assertEquals("total term freq is expected", 125, terms[i].totalTermFreq); } else { - assertEquals("total term freq is expected", 200, - terms[i].totalTermFreq); + assertEquals("total term freq is expected", 200, terms[i].totalTermFreq); } - + } else { int n = Integer.parseInt(text); - assertEquals("doc freq is expected", getExpecteddocFreq(n), - terms[i].docFreq); - assertEquals("total term freq is expected", getExpectedtotalTermFreq(n), - terms[i].totalTermFreq); + assertEquals("doc freq is expected", getExpecteddocFreq(n), terms[i].docFreq); + assertEquals( + "total term freq is expected", getExpectedtotalTermFreq(n), terms[i].totalTermFreq); } } } /********************Testing Utils**********************************/ - + private static void indexDocs(IndexWriter writer) throws Exception { Random rnd = random(); - + /** - * Generate 10 documents where term n has a docFreq of n and a totalTermFreq of n*2 (squared). + * Generate 10 documents where term n has a docFreq of n and a totalTermFreq of n*2 (squared). */ for (int i = 1; i <= 10; i++) { Document doc = new Document(); String content = getContent(i); - + doc.add(newTextField(rnd, "FIELD_1", content, Field.Store.YES)); - //add a different field + // add a different field doc.add(newTextField(rnd, "different_field", "diff", Field.Store.YES)); writer.addDocument(doc); } - - //add 10 more docs with the term "diff" this will make it have the highest docFreq if we don't ask for the - //highest freq terms for a specific field. + + // add 10 more docs with the term "diff" this will make it have the highest docFreq if we don't + // ask for the + // highest freq terms for a specific field. for (int i = 1; i <= 10; i++) { Document doc = new Document(); doc.add(newTextField(rnd, "different_field", "diff", Field.Store.YES)); @@ -218,7 +242,7 @@ public class TestHighFreqTerms extends LuceneTestCase { writer.addDocument(newdoc); } // add a doc with high tf in field different_field - int targetTF =150; + int targetTF = 150; doc = new Document(); content = ""; for (int i = 0; i < targetTF; i++) { @@ -227,15 +251,12 @@ public class TestHighFreqTerms extends LuceneTestCase { doc.add(newTextField(rnd, "different_field", content, Field.Store.YES)); writer.addDocument(doc); writer.close(); - } - + /** - * getContent - * return string containing numbers 1 to i with each number n occurring n times. - * i.e. 
for input of 3 return string "3 3 3 2 2 1" + * getContent return string containing numbers 1 to i with each number n occurring n times. i.e. + * for input of 3 return string "3 3 3 2 2 1" */ - private static String getContent(int i) { String s = ""; for (int j = 10; j >= i; j--) { @@ -246,11 +267,11 @@ public class TestHighFreqTerms extends LuceneTestCase { } return s; } - + private static int getExpectedtotalTermFreq(int i) { return getExpecteddocFreq(i) * i; } - + private static int getExpecteddocFreq(int i) { return i; } diff --git a/lucene/misc/src/test/org/apache/lucene/misc/TestIndexMergeTool.java b/lucene/misc/src/test/org/apache/lucene/misc/TestIndexMergeTool.java index b9183ea75ab..ddb179a99ed 100644 --- a/lucene/misc/src/test/org/apache/lucene/misc/TestIndexMergeTool.java +++ b/lucene/misc/src/test/org/apache/lucene/misc/TestIndexMergeTool.java @@ -24,42 +24,52 @@ import org.apache.lucene.util.PrintStreamInfoStream; public class TestIndexMergeTool extends LuceneTestCase { public void testNoParameters() throws Exception { - expectThrows(IllegalArgumentException.class, () -> { - Options.parse(new String[] {}); - }); + expectThrows( + IllegalArgumentException.class, + () -> { + Options.parse(new String[] {}); + }); } public void testOneParameter() throws Exception { - expectThrows(IllegalArgumentException.class, () -> { - Options.parse(new String[] { "target" }); - }); + expectThrows( + IllegalArgumentException.class, + () -> { + Options.parse(new String[] {"target"}); + }); } public void testTwoParameters() throws Exception { - expectThrows(IllegalArgumentException.class, () -> { - Options.parse(new String[] { "target", "source1" }); - }); + expectThrows( + IllegalArgumentException.class, + () -> { + Options.parse(new String[] {"target", "source1"}); + }); } public void testThreeParameters() throws Exception { - Options options = Options.parse(new String[] { "target", "source1", "source2" }); + Options options = Options.parse(new String[] {"target", "source1", "source2"}); assertEquals("target", options.mergedIndexPath); - assertArrayEquals(new String[] { "source1", "source2" }, options.indexPaths); + assertArrayEquals(new String[] {"source1", "source2"}, options.indexPaths); } public void testVerboseOption() throws Exception { - Options options = Options.parse(new String[] { "-verbose", "target", "source1", "source2" }); + Options options = Options.parse(new String[] {"-verbose", "target", "source1", "source2"}); assertEquals(PrintStreamInfoStream.class, options.config.getInfoStream().getClass()); } public void testMergePolicyOption() throws Exception { - Options options = Options.parse(new String[] { "-merge-policy", LogDocMergePolicy.class.getName(), "target", "source1", "source2" }); + Options options = + Options.parse( + new String[] { + "-merge-policy", LogDocMergePolicy.class.getName(), "target", "source1", "source2" + }); assertEquals(LogDocMergePolicy.class, options.config.getMergePolicy().getClass()); } public void testMaxSegmentsOption() throws Exception { - Options options = Options.parse(new String[] { "-max-segments", "42", "target", "source1", "source2" }); + Options options = + Options.parse(new String[] {"-max-segments", "42", "target", "source1", "source2"}); assertEquals(42, options.maxSegments); } - } diff --git a/lucene/misc/src/test/org/apache/lucene/misc/document/TestLazyDocument.java b/lucene/misc/src/test/org/apache/lucene/misc/document/TestLazyDocument.java index c59e113f3b4..d525de3db69 100644 --- 
a/lucene/misc/src/test/org/apache/lucene/misc/document/TestLazyDocument.java +++ b/lucene/misc/src/test/org/apache/lucene/misc/document/TestLazyDocument.java @@ -23,7 +23,6 @@ import java.util.HashSet; import java.util.Map; import java.util.Objects; import java.util.Set; - import org.apache.lucene.analysis.*; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; @@ -39,19 +38,21 @@ import org.junit.Before; public class TestLazyDocument extends LuceneTestCase { public final int NUM_DOCS = atLeast(10); - public final String[] FIELDS = new String[] - { "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k" }; + public final String[] FIELDS = + new String[] {"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k"}; public final int NUM_VALUES = atLeast(100); public Directory dir; - + @After public void removeIndex() { if (null != dir) { - try { - dir.close(); + try { + dir.close(); dir = null; - } catch (Exception e) { /* NOOP */ } + } catch (Exception e) { + /* NOOP */ + } } } @@ -60,16 +61,15 @@ public class TestLazyDocument extends LuceneTestCase { dir = newDirectory(); Analyzer analyzer = new MockAnalyzer(random()); - IndexWriter writer = new IndexWriter - (dir, newIndexWriterConfig(analyzer)); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(analyzer)); try { for (int docid = 0; docid < NUM_DOCS; docid++) { Document d = new Document(); - d.add(newStringField("docid", ""+docid, Field.Store.YES)); + d.add(newStringField("docid", "" + docid, Field.Store.YES)); d.add(newStringField("never_load", "fail", Field.Store.YES)); for (String f : FIELDS) { for (int val = 0; val < NUM_VALUES; val++) { - d.add(newStringField(f, docid+"_"+f+"_"+val, Field.Store.YES)); + d.add(newStringField(f, docid + "_" + f + "_" + val, Field.Store.YES)); } } d.add(newStringField("load_later", "yes", Field.Store.YES)); @@ -84,22 +84,21 @@ public class TestLazyDocument extends LuceneTestCase { final int id = random().nextInt(NUM_DOCS); IndexReader reader = DirectoryReader.open(dir); try { - Query q = new TermQuery(new Term("docid", ""+id)); + Query q = new TermQuery(new Term("docid", "" + id)); IndexSearcher searcher = newSearcher(reader); ScoreDoc[] hits = searcher.search(q, 100).scoreDocs; assertEquals("Too many docs", 1, hits.length); - LazyTestingStoredFieldVisitor visitor - = new LazyTestingStoredFieldVisitor(new LazyDocument(reader, hits[0].doc), - FIELDS); + LazyTestingStoredFieldVisitor visitor = + new LazyTestingStoredFieldVisitor(new LazyDocument(reader, hits[0].doc), FIELDS); reader.document(hits[0].doc, visitor); Document d = visitor.doc; int numFieldValues = 0; - Map fieldValueCounts = new HashMap<>(); + Map fieldValueCounts = new HashMap<>(); // at this point, all FIELDS should be Lazy and unrealized for (IndexableField f : d) { - numFieldValues++; + numFieldValues++; if (f.name().equals("never_load")) { fail("never_load was loaded"); } @@ -109,34 +108,29 @@ public class TestLazyDocument extends LuceneTestCase { if (f.name().equals("docid")) { assertFalse(f.name(), f instanceof LazyDocument.LazyField); } else { - int count = fieldValueCounts.containsKey(f.name()) ? - fieldValueCounts.get(f.name()) : 0; + int count = fieldValueCounts.containsKey(f.name()) ? 
fieldValueCounts.get(f.name()) : 0; count++; fieldValueCounts.put(f.name(), count); - assertTrue(f.name() + " is " + f.getClass(), - f instanceof LazyDocument.LazyField); + assertTrue(f.name() + " is " + f.getClass(), f instanceof LazyDocument.LazyField); LazyDocument.LazyField lf = (LazyDocument.LazyField) f; assertFalse(f.name() + " is loaded", lf.hasBeenLoaded()); } } if (VERBOSE) System.out.println("numFieldValues == " + numFieldValues); - assertEquals("numFieldValues", 1 + (NUM_VALUES * FIELDS.length), - numFieldValues); - + assertEquals("numFieldValues", 1 + (NUM_VALUES * FIELDS.length), numFieldValues); + for (String fieldName : fieldValueCounts.keySet()) { - assertEquals("fieldName count: " + fieldName, - NUM_VALUES, (int)fieldValueCounts.get(fieldName)); + assertEquals( + "fieldName count: " + fieldName, NUM_VALUES, (int) fieldValueCounts.get(fieldName)); } // pick a single field name to load a single value final String fieldName = FIELDS[random().nextInt(FIELDS.length)]; final IndexableField[] fieldValues = d.getFields(fieldName); - assertEquals("#vals in field: " + fieldName, - NUM_VALUES, fieldValues.length); + assertEquals("#vals in field: " + fieldName, NUM_VALUES, fieldValues.length); final int valNum = random().nextInt(fieldValues.length); - assertEquals(id + "_" + fieldName + "_" + valNum, - fieldValues[valNum].stringValue()); - + assertEquals(id + "_" + fieldName + "_" + valNum, fieldValues[valNum].stringValue()); + // now every value of fieldName should be loaded for (IndexableField f : d) { if (f.name().equals("never_load")) { @@ -148,20 +142,18 @@ public class TestLazyDocument extends LuceneTestCase { if (f.name().equals("docid")) { assertFalse(f.name(), f instanceof LazyDocument.LazyField); } else { - assertTrue(f.name() + " is " + f.getClass(), - f instanceof LazyDocument.LazyField); + assertTrue(f.name() + " is " + f.getClass(), f instanceof LazyDocument.LazyField); LazyDocument.LazyField lf = (LazyDocument.LazyField) f; - assertEquals(f.name() + " is loaded?", - lf.name().equals(fieldName), lf.hasBeenLoaded()); + assertEquals(f.name() + " is loaded?", lf.name().equals(fieldName), lf.hasBeenLoaded()); } } // use the same LazyDoc to ask for one more lazy field - visitor = new LazyTestingStoredFieldVisitor(new LazyDocument(reader, hits[0].doc), - "load_later"); + visitor = + new LazyTestingStoredFieldVisitor(new LazyDocument(reader, hits[0].doc), "load_later"); reader.document(hits[0].doc, visitor); d = visitor.doc; - + // ensure we have all the values we expect now, and that // adding one more lazy field didn't "unload" the existing LazyField's // we already loaded. 
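The lazy-loading behavior these assertions exercise rests on Lucene's StoredFieldVisitor protocol: when a document's stored fields are read, the reader asks needsField for each field and only visits the values the visitor accepts. A minimal sketch of that contract (the class name is illustrative; needsField is the real extension point):

    import org.apache.lucene.index.FieldInfo;
    import org.apache.lucene.index.StoredFieldVisitor;

    // Sketch: accept every stored field except "never_load", which is skipped
    // without its value ever being read, the same contract the test relies on.
    final class SkipNeverLoadVisitor extends StoredFieldVisitor {
      @Override
      public Status needsField(FieldInfo fieldInfo) {
        return "never_load".equals(fieldInfo.name) ? Status.NO : Status.YES;
      }
    }

Returning Status.NO lets the reader skip past the field's bytes entirely, which is what keeps "never_load" out of the visited document above.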
@@ -172,17 +164,16 @@ public class TestLazyDocument extends LuceneTestCase { if (f.name().equals("docid")) { assertFalse(f.name(), f instanceof LazyDocument.LazyField); } else { - assertTrue(f.name() + " is " + f.getClass(), - f instanceof LazyDocument.LazyField); + assertTrue(f.name() + " is " + f.getClass(), f instanceof LazyDocument.LazyField); LazyDocument.LazyField lf = (LazyDocument.LazyField) f; - assertEquals(f.name() + " is loaded?", - lf.name().equals(fieldName), lf.hasBeenLoaded()); + assertEquals(f.name() + " is loaded?", lf.name().equals(fieldName), lf.hasBeenLoaded()); } } // even the underlying doc shouldn't have never_load - assertNull("never_load was loaded in wrapped doc", - visitor.lazyDoc.getDocument().getField("never_load")); + assertNull( + "never_load was loaded in wrapped doc", + visitor.lazyDoc.getDocument().getField("never_load")); } finally { reader.close(); @@ -222,6 +213,5 @@ public class TestLazyDocument extends LuceneTestCase { Objects.requireNonNull(value, "String value should not be null"); doc.add(new Field(fieldInfo.name, value, ft)); } - } } diff --git a/lucene/misc/src/test/org/apache/lucene/misc/index/TestIndexSplitter.java b/lucene/misc/src/test/org/apache/lucene/misc/index/TestIndexSplitter.java index 448012ca356..e473d8f3062 100644 --- a/lucene/misc/src/test/org/apache/lucene/misc/index/TestIndexSplitter.java +++ b/lucene/misc/src/test/org/apache/lucene/misc/index/TestIndexSplitter.java @@ -17,7 +17,6 @@ package org.apache.lucene.misc.index; import java.nio.file.Path; - import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; @@ -40,29 +39,29 @@ public class TestIndexSplitter extends LuceneTestCase { // IndexSplitter.split makes its own commit directly with SIPC/SegmentInfos, // so the unreferenced files are expected. if (fsDir instanceof MockDirectoryWrapper) { - ((MockDirectoryWrapper)fsDir).setAssertNoUnrefencedFilesOnClose(false); + ((MockDirectoryWrapper) fsDir).setAssertNoUnrefencedFilesOnClose(false); } MergePolicy mergePolicy = new LogByteSizeMergePolicy(); mergePolicy.setNoCFSRatio(1.0); mergePolicy.setMaxCFSSegmentSizeMB(Double.POSITIVE_INFINITY); - IndexWriter iw = new IndexWriter( - fsDir, - new IndexWriterConfig(new MockAnalyzer(random())). - setOpenMode(OpenMode.CREATE). 
- setMergePolicy(mergePolicy) - ); - for (int x=0; x < 100; x++) { + IndexWriter iw = + new IndexWriter( + fsDir, + new IndexWriterConfig(new MockAnalyzer(random())) + .setOpenMode(OpenMode.CREATE) + .setMergePolicy(mergePolicy)); + for (int x = 0; x < 100; x++) { Document doc = DocHelper.createDocument(x, "index", 5); iw.addDocument(doc); } iw.commit(); - for (int x=100; x < 150; x++) { + for (int x = 100; x < 150; x++) { Document doc = DocHelper.createDocument(x, "index2", 5); iw.addDocument(doc); } iw.commit(); - for (int x=150; x < 200; x++) { + for (int x = 150; x < 200; x++) { Document doc = DocHelper.createDocument(x, "index3", 5); iw.addDocument(doc); } @@ -80,10 +79,13 @@ public class TestIndexSplitter extends LuceneTestCase { assertEquals(50, r.maxDoc()); r.close(); fsDirDest.close(); - + // now test cmdline Path destDir2 = createTempDir(LuceneTestCase.getTestClass().getSimpleName()); - IndexSplitter.main(new String[] {dir.toAbsolutePath().toString(), destDir2.toAbsolutePath().toString(), splitSegName}); + IndexSplitter.main( + new String[] { + dir.toAbsolutePath().toString(), destDir2.toAbsolutePath().toString(), splitSegName + }); Directory fsDirDest2 = newFSDirectory(destDir2); SegmentInfos sis = SegmentInfos.readLatestCommit(fsDirDest2); assertEquals(1, sis.size()); @@ -91,7 +93,7 @@ public class TestIndexSplitter extends LuceneTestCase { assertEquals(50, r.maxDoc()); r.close(); fsDirDest2.close(); - + // now remove the copied segment from src IndexSplitter.main(new String[] {dir.toAbsolutePath().toString(), "-d", splitSegName}); r = DirectoryReader.open(fsDir); @@ -99,5 +101,4 @@ public class TestIndexSplitter extends LuceneTestCase { r.close(); fsDir.close(); } - } diff --git a/lucene/misc/src/test/org/apache/lucene/misc/index/TestMultiPassIndexSplitter.java b/lucene/misc/src/test/org/apache/lucene/misc/index/TestMultiPassIndexSplitter.java index 0fa318a959e..7924347b21d 100644 --- a/lucene/misc/src/test/org/apache/lucene/misc/index/TestMultiPassIndexSplitter.java +++ b/lucene/misc/src/test/org/apache/lucene/misc/index/TestMultiPassIndexSplitter.java @@ -34,43 +34,41 @@ public class TestMultiPassIndexSplitter extends LuceneTestCase { IndexReader input; int NUM_DOCS = 11; Directory dir; - + @Override public void setUp() throws Exception { super.setUp(); dir = newDirectory(); - IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(NoMergePolicy.INSTANCE)); + IndexWriter w = + new IndexWriter( + dir, + newIndexWriterConfig(new MockAnalyzer(random())) + .setMergePolicy(NoMergePolicy.INSTANCE)); Document doc; for (int i = 0; i < NUM_DOCS; i++) { doc = new Document(); doc.add(newStringField("id", i + "", Field.Store.YES)); doc.add(newTextField("f", i + " " + i, Field.Store.YES)); w.addDocument(doc); - if (i%3==0) w.commit(); + if (i % 3 == 0) w.commit(); } w.commit(); - w.deleteDocuments(new Term("id", "" + (NUM_DOCS-1))); + w.deleteDocuments(new Term("id", "" + (NUM_DOCS - 1))); w.close(); input = DirectoryReader.open(dir); } - + @Override public void tearDown() throws Exception { input.close(); dir.close(); super.tearDown(); } - - /** - * Test round-robin splitting. - */ + + /** Test round-robin splitting. 
*/ public void testSplitRR() throws Exception { MultiPassIndexSplitter splitter = new MultiPassIndexSplitter(); - Directory[] dirs = new Directory[]{ - newDirectory(), - newDirectory(), - newDirectory() - }; + Directory[] dirs = new Directory[] {newDirectory(), newDirectory(), newDirectory()}; splitter.split(input, dirs, false); IndexReader ir; ir = DirectoryReader.open(dirs[0]); @@ -102,20 +100,13 @@ public class TestMultiPassIndexSplitter extends LuceneTestCase { assertEquals(TermsEnum.SeekStatus.NOT_FOUND, te.seekCeil(new BytesRef("0"))); assertNotSame("0", te.term().utf8ToString()); ir.close(); - for (Directory d : dirs) - d.close(); + for (Directory d : dirs) d.close(); } - - /** - * Test sequential splitting. - */ + + /** Test sequential splitting. */ public void testSplitSeq() throws Exception { MultiPassIndexSplitter splitter = new MultiPassIndexSplitter(); - Directory[] dirs = new Directory[]{ - newDirectory(), - newDirectory(), - newDirectory() - }; + Directory[] dirs = new Directory[] {newDirectory(), newDirectory(), newDirectory()}; splitter.split(input, dirs, true); IndexReader ir; ir = DirectoryReader.open(dirs[0]); @@ -140,7 +131,6 @@ public class TestMultiPassIndexSplitter extends LuceneTestCase { assertEquals(TermsEnum.SeekStatus.NOT_FOUND, te.seekCeil(new BytesRef(t.text()))); assertNotSame(t.text(), te.term().utf8ToString()); ir.close(); - for (Directory d : dirs) - d.close(); + for (Directory d : dirs) d.close(); } } diff --git a/lucene/misc/src/test/org/apache/lucene/misc/index/TestPKIndexSplitter.java b/lucene/misc/src/test/org/apache/lucene/misc/index/TestPKIndexSplitter.java index 0fc8f444daa..2472dc2f2d0 100644 --- a/lucene/misc/src/test/org/apache/lucene/misc/index/TestPKIndexSplitter.java +++ b/lucene/misc/src/test/org/apache/lucene/misc/index/TestPKIndexSplitter.java @@ -20,7 +20,6 @@ import java.text.DecimalFormat; import java.text.DecimalFormatSymbols; import java.text.NumberFormat; import java.util.Locale; - import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.document.Document; @@ -38,62 +37,77 @@ import org.apache.lucene.util.LuceneTestCase; public class TestPKIndexSplitter extends LuceneTestCase { - public void testSplit() throws Exception { - NumberFormat format = new DecimalFormat("000000000", DecimalFormatSymbols.getInstance(Locale.ROOT)); + public void testSplit() throws Exception { + NumberFormat format = + new DecimalFormat("000000000", DecimalFormatSymbols.getInstance(Locale.ROOT)); Directory dir = newDirectory(); - IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)) - .setOpenMode(OpenMode.CREATE).setMergePolicy(NoMergePolicy.INSTANCE)); + IndexWriter w = + new IndexWriter( + dir, + newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)) + .setOpenMode(OpenMode.CREATE) + .setMergePolicy(NoMergePolicy.INSTANCE)); for (int x = 0; x < 11; x++) { Document doc = createDocument(x, "1", 3, format); w.addDocument(doc); - if (x%3==0) w.commit(); + if (x % 3 == 0) w.commit(); } for (int x = 11; x < 20; x++) { Document doc = createDocument(x, "2", 3, format); w.addDocument(doc); - if (x%3==0) w.commit(); + if (x % 3 == 0) w.commit(); } w.close(); - + final Term midTerm = new Term("id", format.format(11)); - + checkSplitting(dir, midTerm, 11, 9); - + // delete some documents - w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)) - 
.setOpenMode(OpenMode.APPEND).setMergePolicy(NoMergePolicy.INSTANCE)); + w = + new IndexWriter( + dir, + newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)) + .setOpenMode(OpenMode.APPEND) + .setMergePolicy(NoMergePolicy.INSTANCE)); w.deleteDocuments(midTerm); w.deleteDocuments(new Term("id", format.format(2))); w.close(); - + checkSplitting(dir, midTerm, 10, 8); - + dir.close(); } - - private void checkSplitting(Directory dir, Term splitTerm, int leftCount, int rightCount) throws Exception { + + private void checkSplitting(Directory dir, Term splitTerm, int leftCount, int rightCount) + throws Exception { Directory dir1 = newDirectory(); Directory dir2 = newDirectory(); - PKIndexSplitter splitter = new PKIndexSplitter(dir, dir1, dir2, splitTerm, - newIndexWriterConfig(new MockAnalyzer(random())), - newIndexWriterConfig(new MockAnalyzer(random()))); + PKIndexSplitter splitter = + new PKIndexSplitter( + dir, + dir1, + dir2, + splitTerm, + newIndexWriterConfig(new MockAnalyzer(random())), + newIndexWriterConfig(new MockAnalyzer(random()))); splitter.split(); - + IndexReader ir1 = DirectoryReader.open(dir1); IndexReader ir2 = DirectoryReader.open(dir2); assertEquals(leftCount, ir1.numDocs()); assertEquals(rightCount, ir2.numDocs()); - + checkContents(ir1, "1"); checkContents(ir2, "2"); - + ir1.close(); ir2.close(); - + dir1.close(); dir2.close(); } - + private void checkContents(IndexReader ir, String indexname) throws Exception { final Bits liveDocs = MultiBits.getLiveDocs(ir); for (int i = 0; i < ir.maxDoc(); i++) { @@ -102,9 +116,8 @@ public class TestPKIndexSplitter extends LuceneTestCase { } } } - - private Document createDocument(int n, String indexName, - int numFields, NumberFormat format) { + + private Document createDocument(int n, String indexName, int numFields, NumberFormat format) { StringBuilder sb = new StringBuilder(); Document doc = new Document(); String id = format.format(n); diff --git a/lucene/misc/src/test/org/apache/lucene/misc/search/TestDiversifiedTopDocsCollector.java b/lucene/misc/src/test/org/apache/lucene/misc/search/TestDiversifiedTopDocsCollector.java index 07860716efe..93953ae68d5 100644 --- a/lucene/misc/src/test/org/apache/lucene/misc/search/TestDiversifiedTopDocsCollector.java +++ b/lucene/misc/src/test/org/apache/lucene/misc/search/TestDiversifiedTopDocsCollector.java @@ -19,7 +19,6 @@ package org.apache.lucene.misc.search; import java.io.IOException; import java.util.HashMap; import java.util.Map; - import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.FloatDocValuesField; @@ -54,12 +53,10 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; /** - * Demonstrates an application of the {@link DiversifiedTopDocsCollector} in - * assembling a collection of top results but without over-representation of any - * one source (in this case top-selling singles from the 60s without having them - * all be Beatles records...). Results are ranked by the number of weeks a - * single is top of the charts and de-duped by the artist name. - * + * Demonstrates an application of the {@link DiversifiedTopDocsCollector} in assembling a collection + * of top results but without over-representation of any one source (in this case top-selling + * singles from the 60s without having them all be Beatles records...). Results are ranked by the + * number of weeks a single is top of the charts and de-duped by the artist name. 
*/ public class TestDiversifiedTopDocsCollector extends LuceneTestCase { @@ -68,7 +65,7 @@ public class TestDiversifiedTopDocsCollector extends LuceneTestCase { int expectedMinNumOfBeatlesHits = 5; TopDocs res = searcher.search(getTestQuery(), numberOfTracksOnCompilation); assertEquals(numberOfTracksOnCompilation, res.scoreDocs.length); - // due to randomization of segment merging in tests the exact number of Beatles hits + // due to randomization of segment merging in tests the exact number of Beatles hits // selected varies between 5 and 6 but we prove the point they are over-represented // in our result set using a standard search. assertTrue(getMaxNumRecordsPerArtist(res.scoreDocs) >= expectedMinNumOfBeatlesHits); @@ -79,8 +76,8 @@ public class TestDiversifiedTopDocsCollector extends LuceneTestCase { // any one artist. int requiredMaxHitsPerArtist = 2; int numberOfTracksOnCompilation = 10; - DiversifiedTopDocsCollector tdc = doDiversifiedSearch( - numberOfTracksOnCompilation, requiredMaxHitsPerArtist); + DiversifiedTopDocsCollector tdc = + doDiversifiedSearch(numberOfTracksOnCompilation, requiredMaxHitsPerArtist); ScoreDoc[] sd = tdc.topDocs(0).scoreDocs; assertEquals(numberOfTracksOnCompilation, sd.length); assertTrue(getMaxNumRecordsPerArtist(sd) <= requiredMaxHitsPerArtist); @@ -92,14 +89,13 @@ public class TestDiversifiedTopDocsCollector extends LuceneTestCase { int requiredMaxHitsPerArtist = 1; // Volume 2 of our hits compilation - start at position 10 - DiversifiedTopDocsCollector tdc = doDiversifiedSearch( - numberOfTracksPerCompilation * numberOfCompilations, - requiredMaxHitsPerArtist); - ScoreDoc[] volume2 = tdc.topDocs(numberOfTracksPerCompilation, - numberOfTracksPerCompilation).scoreDocs; + DiversifiedTopDocsCollector tdc = + doDiversifiedSearch( + numberOfTracksPerCompilation * numberOfCompilations, requiredMaxHitsPerArtist); + ScoreDoc[] volume2 = + tdc.topDocs(numberOfTracksPerCompilation, numberOfTracksPerCompilation).scoreDocs; assertEquals(numberOfTracksPerCompilation, volume2.length); assertTrue(getMaxNumRecordsPerArtist(volume2) <= requiredMaxHitsPerArtist); - } public void testInvalidArguments() throws Exception { @@ -107,11 +103,15 @@ public class TestDiversifiedTopDocsCollector extends LuceneTestCase { DiversifiedTopDocsCollector tdc = doDiversifiedSearch(numResults, 15); // start < 0 - IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> { - tdc.topDocs(-1); - }); + IllegalArgumentException expected = + expectThrows( + IllegalArgumentException.class, + () -> { + tdc.topDocs(-1); + }); - assertEquals("Expected value of starting position is between 0 and 5, got -1", expected.getMessage()); + assertEquals( + "Expected value of starting position is between 0 and 5, got -1", expected.getMessage()); // start > pq.size() assertEquals(0, tdc.topDocs(numResults + 1).scoreDocs.length); @@ -120,25 +120,26 @@ public class TestDiversifiedTopDocsCollector extends LuceneTestCase { assertEquals(0, tdc.topDocs(numResults).scoreDocs.length); // howMany < 0 - expected = expectThrows(IllegalArgumentException.class, () -> { - tdc.topDocs(0, -1); - }); + expected = + expectThrows( + IllegalArgumentException.class, + () -> { + tdc.topDocs(0, -1); + }); - assertEquals("Number of hits requested must be greater than 0 but value was -1", expected.getMessage()); + assertEquals( + "Number of hits requested must be greater than 0 but value was -1", expected.getMessage()); // howMany == 0 assertEquals(0, tdc.topDocs(0, 0).scoreDocs.length); - } // 
Diversifying collector that looks up de-dup keys using SortedDocValues // from a top-level Reader - private static final class DocValuesDiversifiedCollector extends - DiversifiedTopDocsCollector { + private static final class DocValuesDiversifiedCollector extends DiversifiedTopDocsCollector { private final SortedDocValues sdv; - public DocValuesDiversifiedCollector(int size, int maxHitsPerKey, - SortedDocValues sdv) { + public DocValuesDiversifiedCollector(int size, int maxHitsPerKey, SortedDocValues sdv) { super(size, maxHitsPerKey); this.sdv = sdv; } @@ -172,7 +173,7 @@ public class TestDiversifiedTopDocsCollector extends LuceneTestCase { public long cost() { return 0; } - + @Override public long longValue() throws IOException { // Keys are always expressed as a long so we obtain the @@ -185,13 +186,13 @@ public class TestDiversifiedTopDocsCollector extends LuceneTestCase { // Alternative, faster implementation for converting String keys to longs // but with the potential for hash collisions - private static final class HashedDocValuesDiversifiedCollector extends DiversifiedTopDocsCollector { + private static final class HashedDocValuesDiversifiedCollector + extends DiversifiedTopDocsCollector { private final String field; private BinaryDocValues vals; - public HashedDocValuesDiversifiedCollector(int size, int maxHitsPerKey, - String field) { + public HashedDocValuesDiversifiedCollector(int size, int maxHitsPerKey, String field) { super(size, maxHitsPerKey); this.field = field; } @@ -203,22 +204,27 @@ public class TestDiversifiedTopDocsCollector extends LuceneTestCase { public int docID() { return vals.docID(); } + @Override public int nextDoc() throws IOException { return vals.nextDoc(); } + @Override public int advance(int target) throws IOException { return vals.advance(target); } + @Override public boolean advanceExact(int target) throws IOException { return vals.advanceExact(target); } + @Override public long cost() { return vals.cost(); } + @Override public long longValue() throws IOException { return vals == null ? 
-1 : vals.binaryValue().hashCode(); @@ -227,8 +233,7 @@ public class TestDiversifiedTopDocsCollector extends LuceneTestCase { } @Override - public LeafCollector getLeafCollector(LeafReaderContext context) - throws IOException { + public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException { this.vals = DocValues.getBinary(context.reader(), field); return super.getLeafCollector(context); } @@ -236,78 +241,82 @@ public class TestDiversifiedTopDocsCollector extends LuceneTestCase { // Test data - format is artist, song, weeks at top of charts private static String[] hitsOfThe60s = { - "1966\tSPENCER DAVIS GROUP\tKEEP ON RUNNING\t1", - "1966\tOVERLANDERS\tMICHELLE\t3", - "1966\tNANCY SINATRA\tTHESE BOOTS ARE MADE FOR WALKIN'\t4", - "1966\tWALKER BROTHERS\tTHE SUN AIN'T GONNA SHINE ANYMORE\t4", - "1966\tSPENCER DAVIS GROUP\tSOMEBODY HELP ME\t2", - "1966\tDUSTY SPRINGFIELD\tYOU DON'T HAVE TO SAY YOU LOVE ME\t1", - "1966\tMANFRED MANN\tPRETTY FLAMINGO\t3", - "1966\tROLLING STONES\tPAINT IT, BLACK\t1", - "1966\tFRANK SINATRA\tSTRANGERS IN THE NIGHT\t3", - "1966\tBEATLES\tPAPERBACK WRITER\t5", - "1966\tKINKS\tSUNNY AFTERNOON\t2", - "1966\tGEORGIE FAME AND THE BLUE FLAMES\tGETAWAY\t1", - "1966\tCHRIS FARLOWE\tOUT OF TIME\t1", - "1966\tTROGGS\tWITH A GIRL LIKE YOU\t2", - "1966\tBEATLES\tYELLOW SUBMARINE/ELEANOR RIGBY\t4", - "1966\tSMALL FACES\tALL OR NOTHING\t1", - "1966\tJIM REEVES\tDISTANT DRUMS\t5", - "1966\tFOUR TOPS\tREACH OUT I'LL BE THERE\t3", - "1966\tBEACH BOYS\tGOOD VIBRATIONS\t2", - "1966\tTOM JONES\tGREEN GREEN GRASS OF HOME\t4", - "1967\tMONKEES\tI'M A BELIEVER\t4", - "1967\tPETULA CLARK\tTHIS IS MY SONG\t2", - "1967\tENGELBERT HUMPERDINCK\tRELEASE ME\t4", - "1967\tNANCY SINATRA AND FRANK SINATRA\tSOMETHIN' STUPID\t2", - "1967\tSANDIE SHAW\tPUPPET ON A STRING\t3", - "1967\tTREMELOES\tSILENCE IS GOLDEN\t3", - "1967\tPROCOL HARUM\tA WHITER SHADE OF PALE\t4", - "1967\tBEATLES\tALL YOU NEED IS LOVE\t7", - "1967\tSCOTT MCKENZIE\tSAN FRANCISCO (BE SURE TO WEAR SOME FLOWERS INYOUR HAIR)\t4", - "1967\tENGELBERT HUMPERDINCK\tTHE LAST WALTZ\t5", - "1967\tBEE GEES\tMASSACHUSETTS (THE LIGHTS WENT OUT IN)\t4", - "1967\tFOUNDATIONS\tBABY NOW THAT I'VE FOUND YOU\t2", - "1967\tLONG JOHN BALDRY\tLET THE HEARTACHES BEGIN\t2", - "1967\tBEATLES\tHELLO GOODBYE\t5", - "1968\tGEORGIE FAME\tTHE BALLAD OF BONNIE AND CLYDE\t1", - "1968\tLOVE AFFAIR\tEVERLASTING LOVE\t2", - "1968\tMANFRED MANN\tMIGHTY QUINN\t2", - "1968\tESTHER AND ABI OFARIM\tCINDERELLA ROCKEFELLA\t3", - "1968\tDAVE DEE, DOZY, BEAKY, MICK AND TICH\tTHE LEGEND OF XANADU\t1", - "1968\tBEATLES\tLADY MADONNA\t2", - "1968\tCLIFF RICHARD\tCONGRATULATIONS\t2", - "1968\tLOUIS ARMSTRONG\tWHAT A WONDERFUL WORLD/CABARET\t4", - "1968\tGARRY PUCKETT AND THE UNION GAP\tYOUNG GIRL\t4", - "1968\tROLLING STONES\tJUMPING JACK FLASH\t2", - "1968\tEQUALS\tBABY COME BACK\t3", "1968\tDES O'CONNOR\tI PRETEND\t1", - "1968\tTOMMY JAMES AND THE SHONDELLS\tMONY MONY\t2", - "1968\tCRAZY WORLD OF ARTHUR BROWN\tFIRE!\t1", - "1968\tTOMMY JAMES AND THE SHONDELLS\tMONY MONY\t1", - "1968\tBEACH BOYS\tDO IT AGAIN\t1", - "1968\tBEE GEES\tI'VE GOTTA GET A MESSAGE TO YOU\t1", - "1968\tBEATLES\tHEY JUDE\t8", - "1968\tMARY HOPKIN\tTHOSE WERE THE DAYS\t6", - "1968\tJOE COCKER\tWITH A LITTLE HELP FROM MY FRIENDS\t1", - "1968\tHUGO MONTENEGRO\tTHE GOOD THE BAD AND THE UGLY\t4", - "1968\tSCAFFOLD\tLILY THE PINK\t3", - "1969\tMARMALADE\tOB-LA-DI, OB-LA-DA\t1", - "1969\tSCAFFOLD\tLILY THE PINK\t1", - "1969\tMARMALADE\tOB-LA-DI, OB-LA-DA\t2", - "1969\tFLEETWOOD 
MAC\tALBATROSS\t1", "1969\tMOVE\tBLACKBERRY WAY\t1", - "1969\tAMEN CORNER\t(IF PARADISE IS) HALF AS NICE\t2", - "1969\tPETER SARSTEDT\tWHERE DO YOU GO TO (MY LOVELY)\t4", - "1969\tMARVIN GAYE\tI HEARD IT THROUGH THE GRAPEVINE\t3", - "1969\tDESMOND DEKKER AND THE ACES\tTHE ISRAELITES\t1", - "1969\tBEATLES\tGET BACK\t6", "1969\tTOMMY ROE\tDIZZY\t1", - "1969\tBEATLES\tTHE BALLAD OF JOHN AND YOKO\t3", - "1969\tTHUNDERCLAP NEWMAN\tSOMETHING IN THE AIR\t3", - "1969\tROLLING STONES\tHONKY TONK WOMEN\t5", - "1969\tZAGER AND EVANS\tIN THE YEAR 2525 (EXORDIUM AND TERMINUS)\t3", - "1969\tCREEDENCE CLEARWATER REVIVAL\tBAD MOON RISING\t3", - "1969\tJANE BIRKIN AND SERGE GAINSBOURG\tJE T'AIME... MOI NON PLUS\t1", - "1969\tBOBBIE GENTRY\tI'LL NEVER FALL IN LOVE AGAIN\t1", - "1969\tARCHIES\tSUGAR, SUGAR\t4" }; + "1966\tSPENCER DAVIS GROUP\tKEEP ON RUNNING\t1", + "1966\tOVERLANDERS\tMICHELLE\t3", + "1966\tNANCY SINATRA\tTHESE BOOTS ARE MADE FOR WALKIN'\t4", + "1966\tWALKER BROTHERS\tTHE SUN AIN'T GONNA SHINE ANYMORE\t4", + "1966\tSPENCER DAVIS GROUP\tSOMEBODY HELP ME\t2", + "1966\tDUSTY SPRINGFIELD\tYOU DON'T HAVE TO SAY YOU LOVE ME\t1", + "1966\tMANFRED MANN\tPRETTY FLAMINGO\t3", + "1966\tROLLING STONES\tPAINT IT, BLACK\t1", + "1966\tFRANK SINATRA\tSTRANGERS IN THE NIGHT\t3", + "1966\tBEATLES\tPAPERBACK WRITER\t5", + "1966\tKINKS\tSUNNY AFTERNOON\t2", + "1966\tGEORGIE FAME AND THE BLUE FLAMES\tGETAWAY\t1", + "1966\tCHRIS FARLOWE\tOUT OF TIME\t1", + "1966\tTROGGS\tWITH A GIRL LIKE YOU\t2", + "1966\tBEATLES\tYELLOW SUBMARINE/ELEANOR RIGBY\t4", + "1966\tSMALL FACES\tALL OR NOTHING\t1", + "1966\tJIM REEVES\tDISTANT DRUMS\t5", + "1966\tFOUR TOPS\tREACH OUT I'LL BE THERE\t3", + "1966\tBEACH BOYS\tGOOD VIBRATIONS\t2", + "1966\tTOM JONES\tGREEN GREEN GRASS OF HOME\t4", + "1967\tMONKEES\tI'M A BELIEVER\t4", + "1967\tPETULA CLARK\tTHIS IS MY SONG\t2", + "1967\tENGELBERT HUMPERDINCK\tRELEASE ME\t4", + "1967\tNANCY SINATRA AND FRANK SINATRA\tSOMETHIN' STUPID\t2", + "1967\tSANDIE SHAW\tPUPPET ON A STRING\t3", + "1967\tTREMELOES\tSILENCE IS GOLDEN\t3", + "1967\tPROCOL HARUM\tA WHITER SHADE OF PALE\t4", + "1967\tBEATLES\tALL YOU NEED IS LOVE\t7", + "1967\tSCOTT MCKENZIE\tSAN FRANCISCO (BE SURE TO WEAR SOME FLOWERS INYOUR HAIR)\t4", + "1967\tENGELBERT HUMPERDINCK\tTHE LAST WALTZ\t5", + "1967\tBEE GEES\tMASSACHUSETTS (THE LIGHTS WENT OUT IN)\t4", + "1967\tFOUNDATIONS\tBABY NOW THAT I'VE FOUND YOU\t2", + "1967\tLONG JOHN BALDRY\tLET THE HEARTACHES BEGIN\t2", + "1967\tBEATLES\tHELLO GOODBYE\t5", + "1968\tGEORGIE FAME\tTHE BALLAD OF BONNIE AND CLYDE\t1", + "1968\tLOVE AFFAIR\tEVERLASTING LOVE\t2", + "1968\tMANFRED MANN\tMIGHTY QUINN\t2", + "1968\tESTHER AND ABI OFARIM\tCINDERELLA ROCKEFELLA\t3", + "1968\tDAVE DEE, DOZY, BEAKY, MICK AND TICH\tTHE LEGEND OF XANADU\t1", + "1968\tBEATLES\tLADY MADONNA\t2", + "1968\tCLIFF RICHARD\tCONGRATULATIONS\t2", + "1968\tLOUIS ARMSTRONG\tWHAT A WONDERFUL WORLD/CABARET\t4", + "1968\tGARRY PUCKETT AND THE UNION GAP\tYOUNG GIRL\t4", + "1968\tROLLING STONES\tJUMPING JACK FLASH\t2", + "1968\tEQUALS\tBABY COME BACK\t3", + "1968\tDES O'CONNOR\tI PRETEND\t1", + "1968\tTOMMY JAMES AND THE SHONDELLS\tMONY MONY\t2", + "1968\tCRAZY WORLD OF ARTHUR BROWN\tFIRE!\t1", + "1968\tTOMMY JAMES AND THE SHONDELLS\tMONY MONY\t1", + "1968\tBEACH BOYS\tDO IT AGAIN\t1", + "1968\tBEE GEES\tI'VE GOTTA GET A MESSAGE TO YOU\t1", + "1968\tBEATLES\tHEY JUDE\t8", + "1968\tMARY HOPKIN\tTHOSE WERE THE DAYS\t6", + "1968\tJOE COCKER\tWITH A LITTLE HELP FROM MY FRIENDS\t1", + "1968\tHUGO MONTENEGRO\tTHE GOOD THE BAD AND THE 
UGLY\t4", + "1968\tSCAFFOLD\tLILY THE PINK\t3", + "1969\tMARMALADE\tOB-LA-DI, OB-LA-DA\t1", + "1969\tSCAFFOLD\tLILY THE PINK\t1", + "1969\tMARMALADE\tOB-LA-DI, OB-LA-DA\t2", + "1969\tFLEETWOOD MAC\tALBATROSS\t1", + "1969\tMOVE\tBLACKBERRY WAY\t1", + "1969\tAMEN CORNER\t(IF PARADISE IS) HALF AS NICE\t2", + "1969\tPETER SARSTEDT\tWHERE DO YOU GO TO (MY LOVELY)\t4", + "1969\tMARVIN GAYE\tI HEARD IT THROUGH THE GRAPEVINE\t3", + "1969\tDESMOND DEKKER AND THE ACES\tTHE ISRAELITES\t1", + "1969\tBEATLES\tGET BACK\t6", + "1969\tTOMMY ROE\tDIZZY\t1", + "1969\tBEATLES\tTHE BALLAD OF JOHN AND YOKO\t3", + "1969\tTHUNDERCLAP NEWMAN\tSOMETHING IN THE AIR\t3", + "1969\tROLLING STONES\tHONKY TONK WOMEN\t5", + "1969\tZAGER AND EVANS\tIN THE YEAR 2525 (EXORDIUM AND TERMINUS)\t3", + "1969\tCREEDENCE CLEARWATER REVIVAL\tBAD MOON RISING\t3", + "1969\tJANE BIRKIN AND SERGE GAINSBOURG\tJE T'AIME... MOI NON PLUS\t1", + "1969\tBOBBIE GENTRY\tI'LL NEVER FALL IN LOVE AGAIN\t1", + "1969\tARCHIES\tSUGAR, SUGAR\t4" + }; private static final Map parsedRecords = new HashMap(); private Directory dir; @@ -322,8 +331,7 @@ public class TestDiversifiedTopDocsCollector extends LuceneTestCase { float weeks; String id; - public Record(String id, String year, String artist, String song, - float weeks) { + public Record(String id, String year, String artist, String song, float weeks) { super(); this.id = id; this.year = year; @@ -334,14 +342,22 @@ public class TestDiversifiedTopDocsCollector extends LuceneTestCase { @Override public String toString() { - return "Record [id=" + id + ", artist=" + artist + ", weeks=" + weeks - + ", year=" + year + ", song=" + song + "]"; + return "Record [id=" + + id + + ", artist=" + + artist + + ", weeks=" + + weeks + + ", year=" + + year + + ", song=" + + song + + "]"; } - } - private DiversifiedTopDocsCollector doDiversifiedSearch(int numResults, - int maxResultsPerArtist) throws IOException { + private DiversifiedTopDocsCollector doDiversifiedSearch(int numResults, int maxResultsPerArtist) + throws IOException { // Alternate between implementations used for key lookups if (random().nextBoolean()) { // Faster key lookup but with potential for collisions on larger datasets @@ -352,32 +368,28 @@ public class TestDiversifiedTopDocsCollector extends LuceneTestCase { } } - private DiversifiedTopDocsCollector doFuzzyDiversifiedSearch(int numResults, - int maxResultsPerArtist) throws IOException { - DiversifiedTopDocsCollector tdc = new HashedDocValuesDiversifiedCollector( - numResults, maxResultsPerArtist, "artist"); + private DiversifiedTopDocsCollector doFuzzyDiversifiedSearch( + int numResults, int maxResultsPerArtist) throws IOException { + DiversifiedTopDocsCollector tdc = + new HashedDocValuesDiversifiedCollector(numResults, maxResultsPerArtist, "artist"); searcher.search(getTestQuery(), tdc); return tdc; } private DiversifiedTopDocsCollector doAccurateDiversifiedSearch( int numResults, int maxResultsPerArtist) throws IOException { - DiversifiedTopDocsCollector tdc = new DocValuesDiversifiedCollector( - numResults, maxResultsPerArtist, artistDocValues); + DiversifiedTopDocsCollector tdc = + new DocValuesDiversifiedCollector(numResults, maxResultsPerArtist, artistDocValues); searcher.search(getTestQuery(), tdc); return tdc; } private Query getTestQuery() { BooleanQuery.Builder testQuery = new BooleanQuery.Builder(); - testQuery.add(new BooleanClause(new TermQuery(new Term("year", "1966")), - Occur.SHOULD)); - testQuery.add(new BooleanClause(new TermQuery(new Term("year", "1967")), - 
Occur.SHOULD)); - testQuery.add(new BooleanClause(new TermQuery(new Term("year", "1968")), - Occur.SHOULD)); - testQuery.add(new BooleanClause(new TermQuery(new Term("year", "1969")), - Occur.SHOULD)); + testQuery.add(new BooleanClause(new TermQuery(new Term("year", "1966")), Occur.SHOULD)); + testQuery.add(new BooleanClause(new TermQuery(new Term("year", "1967")), Occur.SHOULD)); + testQuery.add(new BooleanClause(new TermQuery(new Term("year", "1968")), Occur.SHOULD)); + testQuery.add(new BooleanClause(new TermQuery(new Term("year", "1969")), Occur.SHOULD)); return new DocValueScoreQuery(testQuery.build(), "weeksAtNumberOne"); } @@ -391,10 +403,8 @@ public class TestDiversifiedTopDocsCollector extends LuceneTestCase { Document doc = new Document(); Field yearField = newTextField("year", "", Field.Store.NO); - SortedDocValuesField artistField = new SortedDocValuesField("artist", - new BytesRef("")); - Field weeksAtNumberOneField = new FloatDocValuesField("weeksAtNumberOne", - 0.0F); + SortedDocValuesField artistField = new SortedDocValuesField("artist", new BytesRef("")); + Field weeksAtNumberOneField = new FloatDocValuesField("weeksAtNumberOne", 0.0F); Field weeksStoredField = new StoredField("weeks", 0.0F); Field idField = newStringField("id", "", Field.Store.YES); Field songField = newTextField("song", "", Field.Store.NO); @@ -411,8 +421,8 @@ public class TestDiversifiedTopDocsCollector extends LuceneTestCase { parsedRecords.clear(); for (int i = 0; i < hitsOfThe60s.length; i++) { String cols[] = hitsOfThe60s[i].split("\t"); - Record record = new Record(String.valueOf(i), cols[0], cols[1], cols[2], - Float.parseFloat(cols[3])); + Record record = + new Record(String.valueOf(i), cols[0], cols[1], cols[2], Float.parseFloat(cols[3])); parsedRecords.put(record.id, record); idField.setStringValue(record.id); yearField.setStringValue(record.year); @@ -462,12 +472,12 @@ public class TestDiversifiedTopDocsCollector extends LuceneTestCase { private final Query query; private final String scoreField; - + DocValueScoreQuery(Query query, String scoreField) { this.query = query; this.scoreField = scoreField; } - + @Override public String toString(String field) { return "DocValueScore(" + query.toString(field) + ")"; @@ -504,24 +514,25 @@ public class TestDiversifiedTopDocsCollector extends LuceneTestCase { } @Override - public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) + throws IOException { if (scoreMode.needsScores() == false) { return query.createWeight(searcher, scoreMode, boost); } Weight inner = query.createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, boost); return new Weight(this) { - + @Override public boolean isCacheable(LeafReaderContext ctx) { return true; } - + @Override public Scorer scorer(LeafReaderContext context) throws IOException { Scorer innerScorer = inner.scorer(context); NumericDocValues scoreFactors = DocValues.getNumeric(context.reader(), scoreField); return new Scorer(this) { - + @Override public float score() throws IOException { if (scoreFactors.advanceExact(docID())) { @@ -529,17 +540,17 @@ public class TestDiversifiedTopDocsCollector extends LuceneTestCase { } return 0; } - + @Override public float getMaxScore(int upTo) throws IOException { return Float.POSITIVE_INFINITY; } - + @Override public DocIdSetIterator iterator() { return innerScorer.iterator(); } - + @Override public int docID() { return innerScorer.docID(); @@ -561,5 
+572,4 @@ public class TestDiversifiedTopDocsCollector extends LuceneTestCase { }; } } - } diff --git a/lucene/misc/src/test/org/apache/lucene/misc/search/TestDocValuesStatsCollector.java b/lucene/misc/src/test/org/apache/lucene/misc/search/TestDocValuesStatsCollector.java index e88823a1443..ab021c251e1 100644 --- a/lucene/misc/src/test/org/apache/lucene/misc/search/TestDocValuesStatsCollector.java +++ b/lucene/misc/src/test/org/apache/lucene/misc/search/TestDocValuesStatsCollector.java @@ -25,7 +25,6 @@ import java.util.function.Predicate; import java.util.stream.DoubleStream; import java.util.stream.LongStream; import java.util.stream.Stream; - import org.apache.lucene.document.Document; import org.apache.lucene.document.DoubleDocValuesField; import org.apache.lucene.document.Field.Store; @@ -244,13 +243,16 @@ public class TestDocValuesStatsCollector extends LuceneTestCase { int numDocsWithoutField = (int) isNull(docValues).count(); assertEquals(computeExpMissing(numDocsWithoutField, numDocs, reader), stats.missing()); if (stats.count() > 0) { - LongSummaryStatistics sumStats = filterAndFlatValues(docValues, (v) -> v != null).summaryStatistics(); + LongSummaryStatistics sumStats = + filterAndFlatValues(docValues, (v) -> v != null).summaryStatistics(); assertEquals(sumStats.getMax(), stats.max().longValue()); assertEquals(sumStats.getMin(), stats.min().longValue()); assertEquals(sumStats.getAverage(), stats.mean(), 0.00001); assertEquals(sumStats.getSum(), stats.sum().longValue()); assertEquals(sumStats.getCount(), stats.valuesCount()); - double variance = computeVariance(filterAndFlatValues(docValues, (v) -> v != null), stats.mean, stats.count()); + double variance = + computeVariance( + filterAndFlatValues(docValues, (v) -> v != null), stats.mean, stats.count()); assertEquals(variance, stats.variance(), 0.00001); assertEquals(Math.sqrt(variance), stats.stdev(), 0.00001); } @@ -299,13 +301,16 @@ public class TestDocValuesStatsCollector extends LuceneTestCase { int numDocsWithoutField = (int) isNull(docValues).count(); assertEquals(computeExpMissing(numDocsWithoutField, numDocs, reader), stats.missing()); if (stats.count() > 0) { - DoubleSummaryStatistics sumStats = filterAndFlatValues(docValues, (v) -> v != null).summaryStatistics(); + DoubleSummaryStatistics sumStats = + filterAndFlatValues(docValues, (v) -> v != null).summaryStatistics(); assertEquals(sumStats.getMax(), stats.max().longValue(), 0.00001); assertEquals(sumStats.getMin(), stats.min().longValue(), 0.00001); assertEquals(sumStats.getAverage(), stats.mean(), 0.00001); assertEquals(sumStats.getSum(), stats.sum().doubleValue(), 0.00001); assertEquals(sumStats.getCount(), stats.valuesCount()); - double variance = computeVariance(filterAndFlatValues(docValues, (v) -> v != null), stats.mean, stats.count()); + double variance = + computeVariance( + filterAndFlatValues(docValues, (v) -> v != null), stats.mean, stats.count()); assertEquals(variance, stats.variance(), 0.00001); assertEquals(Math.sqrt(variance), stats.stdev(), 0.00001); } @@ -392,15 +397,21 @@ public class TestDocValuesStatsCollector extends LuceneTestCase { IndexSearcher searcher = new IndexSearcher(reader); SortedSetDocValuesStats stats = new SortedSetDocValuesStats(field); TotalHitCountCollector totalHitCount = new TotalHitCountCollector(); - searcher.search(new MatchAllDocsQuery(), MultiCollector.wrap(totalHitCount, new DocValuesStatsCollector(stats))); + searcher.search( + new MatchAllDocsQuery(), + MultiCollector.wrap(totalHitCount, new 
DocValuesStatsCollector(stats)));
       int expCount = (int) nonNull(docValues).count();
       assertEquals(expCount, stats.count());
       int numDocsWithoutField = (int) isNull(docValues).count();
       assertEquals(computeExpMissing(numDocsWithoutField, numDocs, reader), stats.missing());
       if (stats.count() > 0) {
-        assertEquals(nonNull(docValues).flatMap(Arrays::stream).min(BytesRef::compareTo).get(), stats.min());
-        assertEquals(nonNull(docValues).flatMap(Arrays::stream).max(BytesRef::compareTo).get(), stats.max());
+        assertEquals(
+            nonNull(docValues).flatMap(Arrays::stream).min(BytesRef::compareTo).get(),
+            stats.min());
+        assertEquals(
+            nonNull(docValues).flatMap(Arrays::stream).max(BytesRef::compareTo).get(),
+            stats.max());
       }
     }
   }
@@ -423,27 +434,28 @@
   }

   private static double computeVariance(long[] values, double mean, int count) {
-    return getPositiveValues(values).mapToDouble(v -> (v - mean) * (v-mean)).sum() / count;
+    return getPositiveValues(values).mapToDouble(v -> (v - mean) * (v - mean)).sum() / count;
   }

   private static double computeVariance(double[] values, double mean, int count) {
-    return getPositiveValues(values).map(v -> (v - mean) * (v-mean)).sum() / count;
+    return getPositiveValues(values).map(v -> (v - mean) * (v - mean)).sum() / count;
   }

   private static LongStream filterAndFlatValues(long[][] values, Predicate<long[]> p) {
     return nonNull(values).flatMapToLong(Arrays::stream);
   }

-  private static DoubleStream filterAndFlatValues(double[][] values, Predicate<double[]> p) {
+  private static DoubleStream filterAndFlatValues(
+      double[][] values, Predicate<double[]> p) {
     return nonNull(values).flatMapToDouble(Arrays::stream);
   }

   private static double computeVariance(LongStream values, double mean, int count) {
-    return values.mapToDouble(v -> (v - mean) * (v-mean)).sum() / count;
+    return values.mapToDouble(v -> (v - mean) * (v - mean)).sum() / count;
   }

   private static double computeVariance(DoubleStream values, double mean, int count) {
-    return values.map(v -> (v - mean) * (v-mean)).sum() / count;
+    return values.map(v -> (v - mean) * (v - mean)).sum() / count;
   }

   private static <T> Stream<T> nonNull(T[] values) {
@@ -458,9 +470,12 @@
     return Arrays.stream(values).filter(p);
   }

-  private static int computeExpMissing(int numDocsWithoutField, int numIndexedDocs, IndexReader reader) {
-    // The number of missing documents equals the number of docs without the field (not indexed with it, or were
-    // deleted). However, in case we deleted all documents in a segment before the reader was opened, there will be
+  private static int computeExpMissing(
+      int numDocsWithoutField, int numIndexedDocs, IndexReader reader) {
+    // The number of missing documents equals the number of docs without the field (not indexed with
+    // it, or were
+    // deleted). However, in case we deleted all documents in a segment before the reader was
+    // opened, there will be
     // a mismatch between numDocs (how many we indexed) to reader.maxDoc(), so compensate for that.
return numDocsWithoutField - reader.numDeletedDocs() - (numIndexedDocs - reader.maxDoc()); } diff --git a/lucene/misc/src/test/org/apache/lucene/misc/search/TestMemoryAccountingBitsetCollector.java b/lucene/misc/src/test/org/apache/lucene/misc/search/TestMemoryAccountingBitsetCollector.java index 9a5c4cdfcbb..2329525731e 100644 --- a/lucene/misc/src/test/org/apache/lucene/misc/search/TestMemoryAccountingBitsetCollector.java +++ b/lucene/misc/src/test/org/apache/lucene/misc/search/TestMemoryAccountingBitsetCollector.java @@ -22,13 +22,13 @@ import org.apache.lucene.document.Field; import org.apache.lucene.document.SortedDocValuesField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.misc.CollectorMemoryTracker; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MultiCollector; import org.apache.lucene.search.TotalHitCountCollector; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.misc.CollectorMemoryTracker; import org.apache.lucene.util.LuceneTestCase; public class TestMemoryAccountingBitsetCollector extends LuceneTestCase { @@ -61,13 +61,17 @@ public class TestMemoryAccountingBitsetCollector extends LuceneTestCase { public void testMemoryAccountingBitsetCollectorMemoryLimit() { long perCollectorMemoryLimit = 150; - CollectorMemoryTracker tracker = new CollectorMemoryTracker("testMemoryTracker", perCollectorMemoryLimit); + CollectorMemoryTracker tracker = + new CollectorMemoryTracker("testMemoryTracker", perCollectorMemoryLimit); MemoryAccountingBitsetCollector bitSetCollector = new MemoryAccountingBitsetCollector(tracker); TotalHitCountCollector hitCountCollector = new TotalHitCountCollector(); IndexSearcher searcher = new IndexSearcher(reader); - expectThrows(IllegalStateException.class, () -> { - searcher.search(new MatchAllDocsQuery(), MultiCollector.wrap(hitCountCollector, bitSetCollector)); - }); + expectThrows( + IllegalStateException.class, + () -> { + searcher.search( + new MatchAllDocsQuery(), MultiCollector.wrap(hitCountCollector, bitSetCollector)); + }); } } diff --git a/lucene/misc/src/test/org/apache/lucene/misc/search/similarity/TestLegacyBM25Similarity.java b/lucene/misc/src/test/org/apache/lucene/misc/search/similarity/TestLegacyBM25Similarity.java index 9552741754c..f596c73e1bc 100644 --- a/lucene/misc/src/test/org/apache/lucene/misc/search/similarity/TestLegacyBM25Similarity.java +++ b/lucene/misc/src/test/org/apache/lucene/misc/search/similarity/TestLegacyBM25Similarity.java @@ -18,7 +18,6 @@ package org.apache.lucene.misc.search.similarity; import java.util.Random; - import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.search.similarities.BaseSimilarityTestCase; import org.apache.lucene.search.similarities.Similarity; @@ -27,41 +26,62 @@ import org.apache.lucene.search.similarities.Similarity; public class TestLegacyBM25Similarity extends BaseSimilarityTestCase { public void testIllegalK1() { - IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> { - new LegacyBM25Similarity(Float.POSITIVE_INFINITY, 0.75f); - }); + IllegalArgumentException expected = + expectThrows( + IllegalArgumentException.class, + () -> { + new LegacyBM25Similarity(Float.POSITIVE_INFINITY, 0.75f); + }); assertTrue(expected.getMessage().contains("illegal k1 value")); - expected = expectThrows(IllegalArgumentException.class, () 
-> { - new LegacyBM25Similarity(-1, 0.75f); - }); + expected = + expectThrows( + IllegalArgumentException.class, + () -> { + new LegacyBM25Similarity(-1, 0.75f); + }); assertTrue(expected.getMessage().contains("illegal k1 value")); - expected = expectThrows(IllegalArgumentException.class, () -> { - new LegacyBM25Similarity(Float.NaN, 0.75f); - }); + expected = + expectThrows( + IllegalArgumentException.class, + () -> { + new LegacyBM25Similarity(Float.NaN, 0.75f); + }); assertTrue(expected.getMessage().contains("illegal k1 value")); } public void testIllegalB() { - IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> { - new LegacyBM25Similarity(1.2f, 2f); - }); + IllegalArgumentException expected = + expectThrows( + IllegalArgumentException.class, + () -> { + new LegacyBM25Similarity(1.2f, 2f); + }); assertTrue(expected.getMessage().contains("illegal b value")); - expected = expectThrows(IllegalArgumentException.class, () -> { - new LegacyBM25Similarity(1.2f, -1f); - }); + expected = + expectThrows( + IllegalArgumentException.class, + () -> { + new LegacyBM25Similarity(1.2f, -1f); + }); assertTrue(expected.getMessage().contains("illegal b value")); - expected = expectThrows(IllegalArgumentException.class, () -> { - new LegacyBM25Similarity(1.2f, Float.POSITIVE_INFINITY); - }); + expected = + expectThrows( + IllegalArgumentException.class, + () -> { + new LegacyBM25Similarity(1.2f, Float.POSITIVE_INFINITY); + }); assertTrue(expected.getMessage().contains("illegal b value")); - expected = expectThrows(IllegalArgumentException.class, () -> { - new LegacyBM25Similarity(1.2f, Float.NaN); - }); + expected = + expectThrows( + IllegalArgumentException.class, + () -> { + new LegacyBM25Similarity(1.2f, Float.NaN); + }); assertTrue(expected.getMessage().contains("illegal b value")); } diff --git a/lucene/misc/src/test/org/apache/lucene/misc/store/NativeLibEnableRule.java b/lucene/misc/src/test/org/apache/lucene/misc/store/NativeLibEnableRule.java index 33c4b573aed..5586b4f5665 100644 --- a/lucene/misc/src/test/org/apache/lucene/misc/store/NativeLibEnableRule.java +++ b/lucene/misc/src/test/org/apache/lucene/misc/store/NativeLibEnableRule.java @@ -17,11 +17,10 @@ package org.apache.lucene.misc.store; import com.carrotsearch.randomizedtesting.rules.TestRuleAdapter; +import java.util.Set; import org.apache.lucene.util.Constants; import org.junit.Assume; -import java.util.Set; - public class NativeLibEnableRule extends TestRuleAdapter { enum OperatingSystem { LINUX(Constants.LINUX), @@ -45,10 +44,12 @@ public class NativeLibEnableRule extends TestRuleAdapter { @Override protected void before() { - Assume.assumeTrue("Test ignored (tests.native is false)", + Assume.assumeTrue( + "Test ignored (tests.native is false)", Boolean.parseBoolean(System.getProperty("tests.native", "false"))); - Assume.assumeTrue("Test ignored, only applies to architectures: " + runOn, + Assume.assumeTrue( + "Test ignored, only applies to architectures: " + runOn, runOn.stream().anyMatch(os -> os.enabled)); } } diff --git a/lucene/misc/src/test/org/apache/lucene/misc/store/NativeUnixDirectoryTest.java b/lucene/misc/src/test/org/apache/lucene/misc/store/NativeUnixDirectoryTest.java index d275950aedd..b1bda893ad0 100644 --- a/lucene/misc/src/test/org/apache/lucene/misc/store/NativeUnixDirectoryTest.java +++ b/lucene/misc/src/test/org/apache/lucene/misc/store/NativeUnixDirectoryTest.java @@ -18,6 +18,8 @@ package org.apache.lucene.misc.store; import com.carrotsearch.randomizedtesting.LifecycleScope; 
import com.carrotsearch.randomizedtesting.RandomizedTest; +import java.io.IOException; +import java.util.EnumSet; import org.apache.lucene.store.ByteBuffersDirectory; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; @@ -26,21 +28,21 @@ import org.apache.lucene.util.LuceneTestCase; import org.junit.Rule; import org.junit.rules.TestRule; -import java.io.IOException; -import java.util.EnumSet; - public class NativeUnixDirectoryTest extends LuceneTestCase { @Rule - public static TestRule requiresNative = new NativeLibEnableRule( - EnumSet.of(NativeLibEnableRule.OperatingSystem.MAC, - NativeLibEnableRule.OperatingSystem.FREE_BSD, - NativeLibEnableRule.OperatingSystem.LINUX)); + public static TestRule requiresNative = + new NativeLibEnableRule( + EnumSet.of( + NativeLibEnableRule.OperatingSystem.MAC, + NativeLibEnableRule.OperatingSystem.FREE_BSD, + NativeLibEnableRule.OperatingSystem.LINUX)); public void testLibraryLoaded() throws IOException { try (ByteBuffersDirectory ramDir = new ByteBuffersDirectory(); - Directory dir = new NativeUnixDirectory(RandomizedTest.newTempDir(LifecycleScope.TEST), ramDir)) { + Directory dir = + new NativeUnixDirectory(RandomizedTest.newTempDir(LifecycleScope.TEST), ramDir)) { MergeInfo mergeInfo = new MergeInfo(1000, Integer.MAX_VALUE, true, 1); dir.createOutput("test", new IOContext(mergeInfo)).close(); } } -} \ No newline at end of file +} diff --git a/lucene/misc/src/test/org/apache/lucene/misc/store/TestHardLinkCopyDirectoryWrapper.java b/lucene/misc/src/test/org/apache/lucene/misc/store/TestHardLinkCopyDirectoryWrapper.java index 555029318ee..2ab154877fe 100644 --- a/lucene/misc/src/test/org/apache/lucene/misc/store/TestHardLinkCopyDirectoryWrapper.java +++ b/lucene/misc/src/test/org/apache/lucene/misc/store/TestHardLinkCopyDirectoryWrapper.java @@ -24,9 +24,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.attribute.BasicFileAttributes; import java.util.Collections; - import org.apache.lucene.codecs.CodecUtil; -import org.apache.lucene.misc.store.HardlinkCopyDirectoryWrapper; import org.apache.lucene.mockfile.FilterPath; import org.apache.lucene.mockfile.WindowsFS; import org.apache.lucene.store.BaseDirectoryTestCase; @@ -41,7 +39,8 @@ import org.apache.lucene.store.NIOFSDirectory; import org.apache.lucene.util.Constants; import org.apache.lucene.util.IOUtils; -// See: https://issues.apache.org/jira/browse/SOLR-12028 Tests cannot remove files on Windows machines occasionally +// See: https://issues.apache.org/jira/browse/SOLR-12028 Tests cannot remove files on Windows +// machines occasionally public class TestHardLinkCopyDirectoryWrapper extends BaseDirectoryTestCase { @Override @@ -55,9 +54,7 @@ public class TestHardLinkCopyDirectoryWrapper extends BaseDirectoryTestCase { return new HardlinkCopyDirectoryWrapper(open); } - /** - * Tests that we use hardlinks if possible on Directory#copyFrom - */ + /** Tests that we use hardlinks if possible on Directory#copyFrom */ public void testCopyHardLinks() throws IOException { Path tempDir = createTempDir(); Path dir_1 = tempDir.resolve("dir_1"); @@ -77,10 +74,13 @@ public class TestHardLinkCopyDirectoryWrapper extends BaseDirectoryTestCase { luceneDir_1.sync(Collections.singleton("foo.bar")); try { Files.createLink(tempDir.resolve("test"), dir_1.resolve("foo.bar")); - BasicFileAttributes destAttr = Files.readAttributes(tempDir.resolve("test"), BasicFileAttributes.class); - BasicFileAttributes sourceAttr = 
Files.readAttributes(dir_1.resolve("foo.bar"), BasicFileAttributes.class); - assumeTrue("hardlinks are not supported", destAttr.fileKey() != null - && destAttr.fileKey().equals(sourceAttr.fileKey())); + BasicFileAttributes destAttr = + Files.readAttributes(tempDir.resolve("test"), BasicFileAttributes.class); + BasicFileAttributes sourceAttr = + Files.readAttributes(dir_1.resolve("foo.bar"), BasicFileAttributes.class); + assumeTrue( + "hardlinks are not supported", + destAttr.fileKey() != null && destAttr.fileKey().equals(sourceAttr.fileKey())); } catch (UnsupportedOperationException ex) { assumeFalse("hardlinks are not supported", true); } @@ -88,10 +88,13 @@ public class TestHardLinkCopyDirectoryWrapper extends BaseDirectoryTestCase { HardlinkCopyDirectoryWrapper wrapper = new HardlinkCopyDirectoryWrapper(luceneDir_2); wrapper.copyFrom(luceneDir_1, "foo.bar", "bar.foo", IOContext.DEFAULT); assertTrue(Files.exists(dir_2.resolve("bar.foo"))); - BasicFileAttributes destAttr = Files.readAttributes(dir_2.resolve("bar.foo"), BasicFileAttributes.class); - BasicFileAttributes sourceAttr = Files.readAttributes(dir_1.resolve("foo.bar"), BasicFileAttributes.class); + BasicFileAttributes destAttr = + Files.readAttributes(dir_2.resolve("bar.foo"), BasicFileAttributes.class); + BasicFileAttributes sourceAttr = + Files.readAttributes(dir_1.resolve("foo.bar"), BasicFileAttributes.class); assertEquals(destAttr.fileKey(), sourceAttr.fileKey()); - try (ChecksumIndexInput indexInput = wrapper.openChecksumInput("bar.foo", IOContext.DEFAULT)) { + try (ChecksumIndexInput indexInput = + wrapper.openChecksumInput("bar.foo", IOContext.DEFAULT)) { CodecUtil.checkHeader(indexInput, "foo", 0, 0); assertEquals("hey man, nice shot!", indexInput.readString()); CodecUtil.checkFooter(indexInput); @@ -100,7 +103,6 @@ public class TestHardLinkCopyDirectoryWrapper extends BaseDirectoryTestCase { // close them in a finally block we might run into an assume here IOUtils.close(luceneDir_1, luceneDir_2); } - } public void testRenameWithHardLink() throws Exception { diff --git a/lucene/misc/src/test/org/apache/lucene/misc/store/TestRAFDirectory.java b/lucene/misc/src/test/org/apache/lucene/misc/store/TestRAFDirectory.java index f0ca0ec92d3..2d34f0a5c97 100644 --- a/lucene/misc/src/test/org/apache/lucene/misc/store/TestRAFDirectory.java +++ b/lucene/misc/src/test/org/apache/lucene/misc/store/TestRAFDirectory.java @@ -16,16 +16,14 @@ */ package org.apache.lucene.misc.store; +import java.io.IOException; +import java.nio.file.Path; import org.apache.lucene.store.BaseDirectoryTestCase; import org.apache.lucene.store.Directory; -import java.io.IOException; -import java.nio.file.Path; - -/** - * Tests RAFDirectory - */ -// See: https://issues.apache.org/jira/browse/SOLR-12028 Tests cannot remove files on Windows machines occasionally +/** Tests RAFDirectory */ +// See: https://issues.apache.org/jira/browse/SOLR-12028 Tests cannot remove files on Windows +// machines occasionally public class TestRAFDirectory extends BaseDirectoryTestCase { @Override diff --git a/lucene/misc/src/test/org/apache/lucene/misc/store/WindowsDirectoryTest.java b/lucene/misc/src/test/org/apache/lucene/misc/store/WindowsDirectoryTest.java index 2323c2adb1a..8994e691e2c 100644 --- a/lucene/misc/src/test/org/apache/lucene/misc/store/WindowsDirectoryTest.java +++ b/lucene/misc/src/test/org/apache/lucene/misc/store/WindowsDirectoryTest.java @@ -18,23 +18,22 @@ package org.apache.lucene.misc.store; import com.carrotsearch.randomizedtesting.LifecycleScope; import 
com.carrotsearch.randomizedtesting.RandomizedTest; +import java.io.IOException; +import java.util.EnumSet; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.util.LuceneTestCase; import org.junit.Rule; import org.junit.rules.TestRule; -import java.io.IOException; -import java.util.EnumSet; - public class WindowsDirectoryTest extends LuceneTestCase { @Rule - public static TestRule requiresNative = new NativeLibEnableRule( - EnumSet.of(NativeLibEnableRule.OperatingSystem.WINDOWS)); + public static TestRule requiresNative = + new NativeLibEnableRule(EnumSet.of(NativeLibEnableRule.OperatingSystem.WINDOWS)); public void testLibraryLoaded() throws IOException { try (Directory dir = new WindowsDirectory(RandomizedTest.newTempDir(LifecycleScope.TEST))) { dir.createOutput("test", IOContext.DEFAULT).close(); } } -} \ No newline at end of file +} diff --git a/lucene/misc/src/test/org/apache/lucene/misc/util/TestCollectorMemoryTracker.java b/lucene/misc/src/test/org/apache/lucene/misc/util/TestCollectorMemoryTracker.java index 402ce04aa19..5c39d56e20e 100644 --- a/lucene/misc/src/test/org/apache/lucene/misc/util/TestCollectorMemoryTracker.java +++ b/lucene/misc/src/test/org/apache/lucene/misc/util/TestCollectorMemoryTracker.java @@ -22,22 +22,26 @@ import org.apache.lucene.util.LuceneTestCase; public class TestCollectorMemoryTracker extends LuceneTestCase { public void testAdditionsAndDeletions() { - long perCollectorMemoryLimit = 100; //100 Bytes - CollectorMemoryTracker collectorMemoryTracker = new CollectorMemoryTracker("testMemoryTracker", - perCollectorMemoryLimit); + long perCollectorMemoryLimit = 100; // 100 Bytes + CollectorMemoryTracker collectorMemoryTracker = + new CollectorMemoryTracker("testMemoryTracker", perCollectorMemoryLimit); collectorMemoryTracker.updateBytes(50); assertEquals(collectorMemoryTracker.getBytes(), 50); collectorMemoryTracker.updateBytes(-30); assertEquals(collectorMemoryTracker.getBytes(), 20); - expectThrows(IllegalStateException.class, () -> { - collectorMemoryTracker.updateBytes(130); - }); + expectThrows( + IllegalStateException.class, + () -> { + collectorMemoryTracker.updateBytes(130); + }); collectorMemoryTracker.updateBytes(-110); assertEquals(collectorMemoryTracker.getBytes(), 40); - expectThrows(IllegalStateException.class, () -> { - collectorMemoryTracker.updateBytes(-90); - }); + expectThrows( + IllegalStateException.class, + () -> { + collectorMemoryTracker.updateBytes(-90); + }); } } diff --git a/lucene/misc/src/test/org/apache/lucene/misc/util/fst/TestFSTsMisc.java b/lucene/misc/src/test/org/apache/lucene/misc/util/fst/TestFSTsMisc.java index ba1944d96d1..540ce259488 100644 --- a/lucene/misc/src/test/org/apache/lucene/misc/util/fst/TestFSTsMisc.java +++ b/lucene/misc/src/test/org/apache/lucene/misc/util/fst/TestFSTsMisc.java @@ -23,14 +23,13 @@ import java.util.HashSet; import java.util.List; import java.util.Random; import java.util.Set; - +import org.apache.lucene.misc.util.fst.UpToTwoPositiveIntOutputs.TwoLongs; import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IntsRef; import org.apache.lucene.util.IntsRefBuilder; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; -import org.apache.lucene.misc.util.fst.UpToTwoPositiveIntOutputs.TwoLongs; import org.apache.lucene.util.fst.FST; import org.apache.lucene.util.fst.FSTCompiler; import org.apache.lucene.util.fst.FSTTester; @@ -56,20 +55,20 @@ 
public class TestFSTsMisc extends LuceneTestCase { public void testRandomWords() throws IOException { testRandomWords(1000, LuceneTestCase.atLeast(random(), 2)); - //testRandomWords(100, 1); + // testRandomWords(100, 1); } private void testRandomWords(int maxNumWords, int numIter) throws IOException { Random random = new Random(random().nextLong()); - for(int iter=0;iter termsSet = new HashSet<>(); IntsRef[] terms = new IntsRef[numWords]; - while(termsSet.size() < numWords) { + while (termsSet.size() < numWords) { final String term = FSTTester.getRandomString(random); termsSet.add(FSTTester.toIntsRef(term, inputMode)); } @@ -90,16 +89,16 @@ public class TestFSTsMisc extends LuceneTestCase { final UpToTwoPositiveIntOutputs outputs = UpToTwoPositiveIntOutputs.getSingleton(true); final List> pairs = new ArrayList<>(terms.length); long lastOutput = 0; - for(int idx=0;idx values = new ArrayList<>(); @@ -136,14 +135,14 @@ public class TestFSTsMisc extends LuceneTestCase { final ListOfOutputs outputs = new ListOfOutputs<>(_outputs); final List> pairs = new ArrayList<>(terms.length); long lastOutput = 0; - for(int idx=0;idx values = new ArrayList<>(); - for(int i=0;i fst = fstCompiler.compile(); Object output = Util.get(fst, new BytesRef("")); @@ -234,5 +233,3 @@ public class TestFSTsMisc extends LuceneTestCase { assertEquals(0L, outputList.get(0).longValue()); } } - - diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/CandidateMatcher.java b/lucene/monitor/src/java/org/apache/lucene/monitor/CandidateMatcher.java index 39b8ad598c5..711aff372f2 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/CandidateMatcher.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/CandidateMatcher.java @@ -23,19 +23,13 @@ import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; - import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; -/** - * Class used to match candidate queries selected by a Presearcher from a Monitor - * query index. - */ +/** Class used to match candidate queries selected by a Presearcher from a Monitor query index. 
*/ public abstract class CandidateMatcher { - /** - * The searcher to run candidate queries against - */ + /** The searcher to run candidate queries against */ protected final IndexSearcher searcher; private final Map errors = new HashMap<>(); @@ -62,15 +56,16 @@ public abstract class CandidateMatcher { } /** - * Runs the supplied query against this CandidateMatcher's set of documents, storing any - * resulting match, and recording the query in the presearcher hits + * Runs the supplied query against this CandidateMatcher's set of documents, storing any resulting + * match, and recording the query in the presearcher hits * - * @param queryId the query id + * @param queryId the query id * @param matchQuery the query to run - * @param metadata the query metadata + * @param metadata the query metadata * @throws IOException on IO errors */ - protected abstract void matchQuery(String queryId, Query matchQuery, Map metadata) throws IOException; + protected abstract void matchQuery(String queryId, Query matchQuery, Map metadata) + throws IOException; /** * Record a match @@ -79,12 +74,14 @@ public abstract class CandidateMatcher { */ protected final void addMatch(T match, int doc) { MatchHolder docMatches = matches.get(doc); - docMatches.matches.compute(match.getQueryId(), (key, oldValue) -> { - if (oldValue != null) { - return resolve(match, oldValue); - } - return match; - }); + docMatches.matches.compute( + match.getQueryId(), + (key, oldValue) -> { + if (oldValue != null) { + return resolve(match, oldValue); + } + return match; + }); } /** @@ -97,37 +94,30 @@ public abstract class CandidateMatcher { */ public abstract T resolve(T match1, T match2); - /** - * Called by the Monitor if running a query throws an Exception - */ + /** Called by the Monitor if running a query throws an Exception */ void reportError(String queryId, Exception e) { this.errors.put(queryId, e); } - /** - * @return the matches from this matcher - */ + /** @return the matches from this matcher */ final MultiMatchingQueries finish(long buildTime, int queryCount) { doFinish(); - this.searchTime = TimeUnit.MILLISECONDS.convert(System.nanoTime() - searchTime, TimeUnit.NANOSECONDS); + this.searchTime = + TimeUnit.MILLISECONDS.convert(System.nanoTime() - searchTime, TimeUnit.NANOSECONDS); List> results = new ArrayList<>(); for (MatchHolder matchHolder : matches) { results.add(matchHolder.matches); } - return new MultiMatchingQueries<>(results, errors, buildTime, searchTime, queryCount, matches.size()); + return new MultiMatchingQueries<>( + results, errors, buildTime, searchTime, queryCount, matches.size()); } - /** - * Called when all monitoring of a batch of documents is complete - */ - protected void doFinish() { } + /** Called when all monitoring of a batch of documents is complete */ + protected void doFinish() {} - /** - * Copy all matches from another CandidateMatcher - */ + /** Copy all matches from another CandidateMatcher */ protected void copyMatches(CandidateMatcher other) { this.matches.clear(); this.matches.addAll(other.matches); } - } diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/CollectingMatcher.java b/lucene/monitor/src/java/org/apache/lucene/monitor/CollectingMatcher.java index 40a1957e488..02d8e3b6ba2 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/CollectingMatcher.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/CollectingMatcher.java @@ -19,7 +19,6 @@ package org.apache.lucene.monitor; import java.io.IOException; import java.util.Map; - import 
org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.Scorable; @@ -36,7 +35,8 @@ abstract class CollectingMatcher extends CandidateMatcher< } @Override - protected void matchQuery(final String queryId, Query matchQuery, Map metadata) throws IOException { + protected void matchQuery(final String queryId, Query matchQuery, Map metadata) + throws IOException { searcher.search(matchQuery, new MatchCollector(queryId, scoreMode)); } @@ -44,8 +44,8 @@ abstract class CollectingMatcher extends CandidateMatcher< * Called when a query matches a Document * * @param queryId the query ID - * @param doc the index of the document in the DocumentBatch - * @param scorer the Scorer for this query + * @param doc the index of the document in the DocumentBatch + * @param scorer the Scorer for this query * @return a match object * @throws IOException on IO error */ @@ -80,5 +80,4 @@ abstract class CollectingMatcher extends CandidateMatcher< return scoreMode; } } - } diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/ConcurrentQueryLoader.java b/lucene/monitor/src/java/org/apache/lucene/monitor/ConcurrentQueryLoader.java index fafbd2290af..13bf0c659fc 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/ConcurrentQueryLoader.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/ConcurrentQueryLoader.java @@ -29,16 +29,16 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.TimeUnit; - import org.apache.lucene.util.NamedThreadFactory; /** * Utility class for concurrently loading queries into a Monitor. - *

- * This is useful to speed up startup times for a Monitor. You can use multiple
- * threads to parse and index queries before starting matches.
- * <p>
- * Use as follows:
+ *
+ * <p>This is useful to speed up startup times for a Monitor. You can use multiple threads to parse
+ * and index queries before starting matches.
+ *
+ * <p>Use as follows:
+ *
 * <pre>

      *     List<QueryError> errors = new ArrayList<>();
      *     try (ConcurrentQueryLoader loader = new ConcurrentQueryLoader(monitor, errors)) {
    @@ -47,8 +47,8 @@ import org.apache.lucene.util.NamedThreadFactory;
      *         }
      *     }
 * </pre>
- * <p>
- * The Monitor's MonitorQueryParser must be thread-safe for this to work correctly.
+ *
+ * <p>

    The Monitor's MonitorQueryParser must be thread-safe for this to work correctly. */ public class ConcurrentQueryLoader implements Closeable { @@ -74,8 +74,8 @@ public class ConcurrentQueryLoader implements Closeable { /** * Create a new ConcurrentQueryLoader * - * @param monitor the Monitor to load queries to - * @param threads the number of threads to use + * @param monitor the Monitor to load queries to + * @param threads the number of threads to use * @param queueSize the size of the buffer to hold queries in */ public ConcurrentQueryLoader(Monitor monitor, int threads, int queueSize) { @@ -90,16 +90,16 @@ public class ConcurrentQueryLoader implements Closeable { /** * Add a MonitorQuery to the loader's internal buffer - *

- * If the buffer is full, this will block until there is room to add
- * the MonitorQuery
+ *
+ * <p>

    If the buffer is full, this will block until there is room to add the MonitorQuery * * @param mq the monitor query * @throws InterruptedException if interrupted while waiting */ public void add(MonitorQuery mq) throws InterruptedException { if (shutdown) - throw new IllegalStateException("ConcurrentQueryLoader has been shutdown, cannot add new queries"); + throw new IllegalStateException( + "ConcurrentQueryLoader has been shutdown, cannot add new queries"); this.queue.put(mq); } @@ -136,8 +136,7 @@ public class ConcurrentQueryLoader implements Closeable { while (running) { workerQueue.clear(); drain(queue, workerQueue, queueSize, 100, TimeUnit.MILLISECONDS); - if (workerQueue.size() == 0 && shutdown) - running = false; + if (workerQueue.size() == 0 && shutdown) running = false; if (workerQueue.size() > 0) { monitor.register(workerQueue); } @@ -153,23 +152,27 @@ public class ConcurrentQueryLoader implements Closeable { } /** - * Drains the queue as {@link BlockingQueue#drainTo(Collection, int)}, but if the requested - * {@code numElements} elements are not available, it will wait for them up to the specified - * timeout. - *

- * Taken from Google Guava 18.0 Queues
+ * Drains the queue as {@link BlockingQueue#drainTo(Collection, int)}, but if the requested {@code
+ * numElements} elements are not available, it will wait for them up to the specified timeout.
 *
- * @param q the blocking queue to be drained
- * @param buffer where to add the transferred elements
+ * <p>

    Taken from Google Guava 18.0 Queues + * + * @param q the blocking queue to be drained + * @param buffer where to add the transferred elements * @param numElements the number of elements to be waited for - * @param timeout how long to wait before giving up, in units of {@code unit} - * @param unit a {@code TimeUnit} determining how to interpret the timeout parameter - * @param the type of the queue + * @param timeout how long to wait before giving up, in units of {@code unit} + * @param unit a {@code TimeUnit} determining how to interpret the timeout parameter + * @param the type of the queue * @return the number of elements transferred * @throws InterruptedException if interrupted while waiting */ - private static int drain(BlockingQueue q, Collection buffer, int numElements, - long timeout, TimeUnit unit) throws InterruptedException { + private static int drain( + BlockingQueue q, + Collection buffer, + int numElements, + long timeout, + TimeUnit unit) + throws InterruptedException { Objects.requireNonNull(buffer); /* * This code performs one System.nanoTime() more than necessary, and in return, the time to diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/CustomQueryHandler.java b/lucene/monitor/src/java/org/apache/lucene/monitor/CustomQueryHandler.java index 2e240432d63..9f3db0c8cf4 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/CustomQueryHandler.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/CustomQueryHandler.java @@ -23,23 +23,16 @@ import org.apache.lucene.search.Query; /** * Builds a {@link QueryTree} for a query that needs custom treatment * - * The default query analyzers will use the QueryVisitor API to extract - * terms from queries. If different handling is needed, implement a - * CustomQueryHandler and pass it to the presearcher + *

    The default query analyzers will use the QueryVisitor API to extract terms from queries. If + * different handling is needed, implement a CustomQueryHandler and pass it to the presearcher */ public interface CustomQueryHandler { - /** - * Builds a {@link QueryTree} node from a query - */ + /** Builds a {@link QueryTree} node from a query */ QueryTree handleQuery(Query query, TermWeightor termWeightor); - /** - * Adds additional processing to the {@link TokenStream} over a document's - * terms index - */ + /** Adds additional processing to the {@link TokenStream} over a document's terms index */ default TokenStream wrapTermStream(String field, TokenStream in) { return in; } - } diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/DocumentBatch.java b/lucene/monitor/src/java/org/apache/lucene/monitor/DocumentBatch.java index 0ca58c9e9dc..2bf0dc886b3 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/DocumentBatch.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/DocumentBatch.java @@ -21,7 +21,6 @@ import java.io.Closeable; import java.io.IOException; import java.util.Arrays; import java.util.function.Supplier; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; @@ -49,8 +48,8 @@ abstract class DocumentBatch implements Closeable, Supplier { /** * Create a DocumentBatch containing a set of InputDocuments * - * @param docs Collection of documents to add. There must be at least one - * document in the collection. + * @param docs Collection of documents to add. There must be at least one document in the + * collection. * @return the batch containing the input documents */ public static DocumentBatch of(Analyzer analyzer, Document... docs) { @@ -70,12 +69,12 @@ abstract class DocumentBatch implements Closeable, Supplier { private final LeafReader reader; MultiDocumentBatch(Analyzer analyzer, Document... docs) { - assert(docs.length > 0); + assert (docs.length > 0); IndexWriterConfig iwc = new IndexWriterConfig(analyzer); try (IndexWriter writer = new IndexWriter(directory, iwc)) { this.reader = build(writer, docs); } catch (IOException e) { - throw new RuntimeException(e); // This is a RAMDirectory, so should never happen... + throw new RuntimeException(e); // This is a RAMDirectory, so should never happen... 
} } @@ -97,7 +96,6 @@ abstract class DocumentBatch implements Closeable, Supplier { public void close() throws IOException { IOUtils.close(reader, directory); } - } // Specialized class for batches containing a single object - MemoryIndex benchmarks as @@ -125,5 +123,4 @@ abstract class DocumentBatch implements Closeable, Supplier { reader.close(); } } - } diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/ExplainingMatch.java b/lucene/monitor/src/java/org/apache/lucene/monitor/ExplainingMatch.java index adcc667d7b2..c36d3d4fb8d 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/ExplainingMatch.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/ExplainingMatch.java @@ -20,36 +20,38 @@ package org.apache.lucene.monitor; import java.io.IOException; import java.util.Map; import java.util.Objects; - import org.apache.lucene.search.Explanation; import org.apache.lucene.search.Query; -/** - * A query match containing the score explanation of the match - */ +/** A query match containing the score explanation of the match */ public class ExplainingMatch extends QueryMatch { - /** - * A MatcherFactory for producing ExplainingMatches - */ - public static final MatcherFactory MATCHER = searcher -> new CandidateMatcher(searcher) { - @Override - protected void matchQuery(String queryId, Query matchQuery, Map metadata) throws IOException { - int maxDocs = searcher.getIndexReader().maxDoc(); - for (int i = 0; i < maxDocs; i++) { - Explanation explanation = searcher.explain(matchQuery, i); - if (explanation.isMatch()) - addMatch(new ExplainingMatch(queryId, explanation), i); - } - } + /** A MatcherFactory for producing ExplainingMatches */ + public static final MatcherFactory MATCHER = + searcher -> + new CandidateMatcher(searcher) { + @Override + protected void matchQuery( + String queryId, Query matchQuery, Map metadata) throws IOException { + int maxDocs = searcher.getIndexReader().maxDoc(); + for (int i = 0; i < maxDocs; i++) { + Explanation explanation = searcher.explain(matchQuery, i); + if (explanation.isMatch()) addMatch(new ExplainingMatch(queryId, explanation), i); + } + } - @Override - public ExplainingMatch resolve(ExplainingMatch match1, ExplainingMatch match2) { - return new ExplainingMatch(match1.getQueryId(), - Explanation.match(match1.getExplanation().getValue().doubleValue() + match2.getExplanation().getValue().doubleValue(), - "sum of:", match1.getExplanation(), match2.getExplanation())); - } - }; + @Override + public ExplainingMatch resolve(ExplainingMatch match1, ExplainingMatch match2) { + return new ExplainingMatch( + match1.getQueryId(), + Explanation.match( + match1.getExplanation().getValue().doubleValue() + + match2.getExplanation().getValue().doubleValue(), + "sum of:", + match1.getExplanation(), + match2.getExplanation())); + } + }; private final Explanation explanation; @@ -58,9 +60,7 @@ public class ExplainingMatch extends QueryMatch { this.explanation = explanation; } - /** - * @return the Explanation - */ + /** @return the Explanation */ public Explanation getExplanation() { return explanation; } diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/ForceNoBulkScoringQuery.java b/lucene/monitor/src/java/org/apache/lucene/monitor/ForceNoBulkScoringQuery.java index 7b1078ad35f..2af23b4a561 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/ForceNoBulkScoringQuery.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/ForceNoBulkScoringQuery.java @@ -19,16 +19,12 @@ package org.apache.lucene.monitor; import 
java.io.IOException; import java.util.Objects; - import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.*; import org.apache.lucene.search.Matches; -/** - * Query wrapper that forces its wrapped Query to use the default doc-by-doc - * BulkScorer. - */ +/** Query wrapper that forces its wrapped Query to use the default doc-by-doc BulkScorer. */ class ForceNoBulkScoringQuery extends Query { private final Query inner; @@ -40,8 +36,7 @@ class ForceNoBulkScoringQuery extends Query { @Override public Query rewrite(IndexReader reader) throws IOException { Query rewritten = inner.rewrite(reader); - if (rewritten != inner) - return new ForceNoBulkScoringQuery(rewritten); + if (rewritten != inner) return new ForceNoBulkScoringQuery(rewritten); return super.rewrite(reader); } @@ -68,7 +63,8 @@ class ForceNoBulkScoringQuery extends Query { } @Override - public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) + throws IOException { final Weight innerWeight = inner.createWeight(searcher, scoreMode, boost); diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/HighlightsMatch.java b/lucene/monitor/src/java/org/apache/lucene/monitor/HighlightsMatch.java index 74b03325cc6..50cc859eac6 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/HighlightsMatch.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/HighlightsMatch.java @@ -19,7 +19,6 @@ package org.apache.lucene.monitor; import java.io.IOException; import java.util.*; - import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Matches; import org.apache.lucene.search.MatchesIterator; @@ -29,52 +28,66 @@ import org.apache.lucene.search.Weight; /** * QueryMatch object that contains the hit positions of a matching Query - *

- * If the Query does not support interval iteration (eg, if it gets re-written to
- * a Filter), then no hits will be reported, but an IntervalsQueryMatch will still
- * be returned from an IntervalsMatcher to indicate a match.
+ *
+ * <p>

    If the Query does not support interval iteration (eg, if it gets re-written to a Filter), then + * no hits will be reported, but an IntervalsQueryMatch will still be returned from an + * IntervalsMatcher to indicate a match. */ public class HighlightsMatch extends QueryMatch { - public static final MatcherFactory MATCHER = searcher -> new CandidateMatcher(searcher) { + public static final MatcherFactory MATCHER = + searcher -> + new CandidateMatcher(searcher) { - @Override - protected void matchQuery(String queryId, Query matchQuery, Map metadata) throws IOException { - Weight w = searcher.createWeight(searcher.rewrite(matchQuery), ScoreMode.COMPLETE_NO_SCORES, 1); - for (LeafReaderContext ctx : searcher.getIndexReader().leaves()) { - for (int i = 0; i < ctx.reader().maxDoc(); i++) { - Matches matches = w.matches(ctx, i); - if (matches != null) { - addMatch(buildMatch(matches, queryId), i); - } - } - } - } - - @Override - public HighlightsMatch resolve(HighlightsMatch match1, HighlightsMatch match2) { - return HighlightsMatch.merge(match1.getQueryId(), match1, match2); - } - - private HighlightsMatch buildMatch(Matches matches, String queryId) throws IOException { - HighlightsMatch m = new HighlightsMatch(queryId); - for (String field : matches) { - MatchesIterator mi = matches.getMatches(field); - while (mi.next()) { - MatchesIterator sub = mi.getSubMatches(); - if (sub != null) { - while (sub.next()) { - m.addHit(field, sub.startPosition(), sub.endPosition(), sub.startOffset(), sub.endOffset()); + @Override + protected void matchQuery( + String queryId, Query matchQuery, Map metadata) throws IOException { + Weight w = + searcher.createWeight( + searcher.rewrite(matchQuery), ScoreMode.COMPLETE_NO_SCORES, 1); + for (LeafReaderContext ctx : searcher.getIndexReader().leaves()) { + for (int i = 0; i < ctx.reader().maxDoc(); i++) { + Matches matches = w.matches(ctx, i); + if (matches != null) { + addMatch(buildMatch(matches, queryId), i); + } + } + } } - } - else { - m.addHit(field, mi.startPosition(), mi.endPosition(), mi.startOffset(), mi.endOffset()); - } - } - } - return m; - } - }; + + @Override + public HighlightsMatch resolve(HighlightsMatch match1, HighlightsMatch match2) { + return HighlightsMatch.merge(match1.getQueryId(), match1, match2); + } + + private HighlightsMatch buildMatch(Matches matches, String queryId) throws IOException { + HighlightsMatch m = new HighlightsMatch(queryId); + for (String field : matches) { + MatchesIterator mi = matches.getMatches(field); + while (mi.next()) { + MatchesIterator sub = mi.getSubMatches(); + if (sub != null) { + while (sub.next()) { + m.addHit( + field, + sub.startPosition(), + sub.endPosition(), + sub.startOffset(), + sub.endOffset()); + } + } else { + m.addHit( + field, + mi.startPosition(), + mi.endPosition(), + mi.startOffset(), + mi.endOffset()); + } + } + } + return m; + } + }; private final Map> hits; @@ -83,16 +96,12 @@ public class HighlightsMatch extends QueryMatch { this.hits = new TreeMap<>(); } - /** - * @return a map of hits per field - */ + /** @return a map of hits per field */ public Map> getHits() { return Collections.unmodifiableMap(this.hits); } - /** - * @return the fields in which matches have been found - */ + /** @return the fields in which matches have been found */ public Set getFields() { return Collections.unmodifiableSet(hits.keySet()); } @@ -105,14 +114,11 @@ public class HighlightsMatch extends QueryMatch { */ public Collection getHits(String field) { Collection found = hits.get(field); - if (found != null) 
- return Collections.unmodifiableCollection(found); + if (found != null) return Collections.unmodifiableCollection(found); return Collections.emptyList(); } - /** - * @return the total number of hits for the query - */ + /** @return the total number of hits for the query */ public int getHitCount() { int c = 0; for (Set fieldhits : hits.values()) { @@ -162,29 +168,19 @@ public class HighlightsMatch extends QueryMatch { hitSet.add(new Hit(startPos, startOffset, endPos, endOffset)); } - /** - * Represents an individual hit - */ + /** Represents an individual hit */ public static class Hit implements Comparable { - /** - * The start position - */ + /** The start position */ public final int startPosition; - /** - * The start offset - */ + /** The start offset */ public final int startOffset; - /** - * The end positions - */ + /** The end positions */ public final int endPosition; - /** - * The end offset - */ + /** The end offset */ public final int endOffset; public Hit(int startPosition, int startOffset, int endPosition, int endOffset) { @@ -197,13 +193,12 @@ public class HighlightsMatch extends QueryMatch { @Override public boolean equals(Object obj) { if (this == obj) return true; - if (!(obj instanceof Hit)) - return false; + if (!(obj instanceof Hit)) return false; Hit other = (Hit) obj; - return this.startOffset == other.startOffset && - this.endOffset == other.endOffset && - this.startPosition == other.startPosition && - this.endPosition == other.endPosition; + return this.startOffset == other.startOffset + && this.endOffset == other.endOffset + && this.startPosition == other.startPosition + && this.endPosition == other.endPosition; } @Override @@ -217,7 +212,8 @@ public class HighlightsMatch extends QueryMatch { @Override public String toString() { - return String.format(Locale.ROOT, "%d(%d)->%d(%d)", startPosition, startOffset, endPosition, endOffset); + return String.format( + Locale.ROOT, "%d(%d)->%d(%d)", startPosition, startOffset, endPosition, endOffset); } @Override diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/MatcherFactory.java b/lucene/monitor/src/java/org/apache/lucene/monitor/MatcherFactory.java index 5870c09a3bd..8cfef9a32da 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/MatcherFactory.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/MatcherFactory.java @@ -27,9 +27,8 @@ import org.apache.lucene.search.IndexSearcher; public interface MatcherFactory { /** - * Create a new {@link CandidateMatcher} object, to select - * queries to match against the passed-in IndexSearcher + * Create a new {@link CandidateMatcher} object, to select queries to match against the passed-in + * IndexSearcher */ CandidateMatcher createMatcher(IndexSearcher searcher); - } diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/MatchingQueries.java b/lucene/monitor/src/java/org/apache/lucene/monitor/MatchingQueries.java index 36cf0f19a4f..b490bad03cc 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/MatchingQueries.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/MatchingQueries.java @@ -34,8 +34,12 @@ public class MatchingQueries { private final long searchTime; private final int queriesRun; - MatchingQueries(Map matches, Map errors, - long queryBuildTime, long searchTime, int queriesRun) { + MatchingQueries( + Map matches, + Map errors, + long queryBuildTime, + long searchTime, + int queriesRun) { this.matches = Collections.unmodifiableMap(matches); this.errors = Collections.unmodifiableMap(errors); this.queryBuildTime 
= queryBuildTime; @@ -52,46 +56,33 @@ public class MatchingQueries { return matches.get(queryId); } - /** - * @return all matches - */ + /** @return all matches */ public Collection getMatches() { return matches.values(); } - /** - * @return the number of queries that matched - */ + /** @return the number of queries that matched */ public int getMatchCount() { return matches.size(); } - /** - * @return how long (in ns) it took to build the Presearcher query for the matcher run - */ + /** @return how long (in ns) it took to build the Presearcher query for the matcher run */ public long getQueryBuildTime() { return queryBuildTime; } - /** - * @return how long (in ms) it took to run the selected queries - */ + /** @return how long (in ms) it took to run the selected queries */ public long getSearchTime() { return searchTime; } - /** - * @return the number of queries passed to this CandidateMatcher during the matcher run - */ + /** @return the number of queries passed to this CandidateMatcher during the matcher run */ public int getQueriesRun() { return queriesRun; } - /** - * @return a List of any MatchErrors created during the matcher run - */ + /** @return a List of any MatchErrors created during the matcher run */ public Map getErrors() { return errors; } - } diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/Monitor.java b/lucene/monitor/src/java/org/apache/lucene/monitor/Monitor.java index edde451c1ad..2faf05d25ee 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/Monitor.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/Monitor.java @@ -28,7 +28,6 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.document.Document; import org.apache.lucene.index.LeafReader; @@ -42,8 +41,8 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.util.NamedThreadFactory; /** - * A Monitor contains a set of {@link Query} objects with associated IDs, and efficiently - * matches them against sets of {@link Document} objects. + * A Monitor contains a set of {@link Query} objects with associated IDs, and efficiently matches + * them against sets of {@link Document} objects. 
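A sketch of that workflow under the reformatted API (the analyzer, field names, and the single-argument Monitor constructor are illustrative assumptions; register(), match() and MatchingQueries.getMatchCount() all appear in this patch):

    try (Monitor monitor = new Monitor(new StandardAnalyzer())) {
      monitor.register(new MonitorQuery("q1", new TermQuery(new Term("body", "lucene"))));

      Document doc = new Document();
      doc.add(new TextField("body", "lucene is a search library", Field.Store.NO));

      MatchingQueries<QueryMatch> matches = monitor.match(doc, QueryMatch.SIMPLE_MATCHER);
      int count = matches.getMatchCount(); // 1: query "q1" was selected and matched
    }
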
*/ public class Monitor implements Closeable { @@ -83,7 +82,7 @@ public class Monitor implements Closeable { * Create a new Monitor instance with a specific configuration * * @param analyzer to analyze {@link Document}s at match time - * @param config the configuration + * @param config the configuration */ public Monitor(Analyzer analyzer, MonitorConfiguration config) throws IOException { this(analyzer, new TermFilteredPresearcher(), config); @@ -92,33 +91,38 @@ public class Monitor implements Closeable { /** * Create a new Monitor instance * - * @param analyzer to analyze {@link Document}s at match time - * @param presearcher the presearcher to use + * @param analyzer to analyze {@link Document}s at match time + * @param presearcher the presearcher to use * @param configuration the configuration */ - public Monitor(Analyzer analyzer, Presearcher presearcher, - MonitorConfiguration configuration) throws IOException { + public Monitor(Analyzer analyzer, Presearcher presearcher, MonitorConfiguration configuration) + throws IOException { this.analyzer = analyzer; this.presearcher = presearcher; this.queryIndex = new QueryIndex(configuration, presearcher); long purgeFrequency = configuration.getPurgeFrequency(); - this.purgeExecutor = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("cache-purge")); - this.purgeExecutor.scheduleAtFixedRate(() -> { - try { - purgeCache(); - } catch (Throwable e) { - listeners.forEach(l -> l.onPurgeError(e)); - } - }, purgeFrequency, purgeFrequency, configuration.getPurgeFrequencyUnits()); + this.purgeExecutor = + Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("cache-purge")); + this.purgeExecutor.scheduleAtFixedRate( + () -> { + try { + purgeCache(); + } catch (Throwable e) { + listeners.forEach(l -> l.onPurgeError(e)); + } + }, + purgeFrequency, + purgeFrequency, + configuration.getPurgeFrequencyUnits()); this.commitBatchSize = configuration.getQueryUpdateBufferSize(); } /** - * Register a {@link MonitorUpdateListener} that will be notified whenever changes - * are made to the Monitor's queryindex + * Register a {@link MonitorUpdateListener} that will be notified whenever changes are made to the + * Monitor's queryindex * * @param listener listener to register */ @@ -126,31 +130,21 @@ public class Monitor implements Closeable { listeners.add(listener); } - /** - * @return Statistics for the internal query index and cache - */ + /** @return Statistics for the internal query index and cache */ public QueryCacheStats getQueryCacheStats() { return new QueryCacheStats(queryIndex.numDocs(), queryIndex.cacheSize(), lastPurged); } - /** - * Statistics for the query cache and query index - */ + /** Statistics for the query cache and query index */ public static class QueryCacheStats { - /** - * Total number of queries in the query index - */ + /** Total number of queries in the query index */ public final int queries; - /** - * Total number of queries int the query cache - */ + /** Total number of queries int the query cache */ public final int cachedQueries; - /** - * Time the query cache was last purged - */ + /** Time the query cache was last purged */ public final long lastPurged; public QueryCacheStats(int queries, int cachedQueries, long lastPurged) { @@ -162,8 +156,8 @@ public class Monitor implements Closeable { /** * Remove unused queries from the query cache. - *

- * This is normally called from a background thread at a rate set by configurePurgeFrequency().
+ *
+ * <p>

    This is normally called from a background thread at a rate set by configurePurgeFrequency(). * * @throws IOException on IO errors */ @@ -205,7 +199,7 @@ public class Monitor implements Closeable { * Add new queries to the monitor * * @param queries the MonitorQueries to add - * @throws IOException on IO errors + * @throws IOException on IO errors */ public void register(MonitorQuery... queries) throws IOException { register(Arrays.asList(queries)); @@ -243,16 +237,18 @@ public class Monitor implements Closeable { } /** - * Match an array of {@link Document}s against the queryindex, calling a {@link CandidateMatcher} produced by the - * supplied {@link MatcherFactory} for each possible matching query. + * Match an array of {@link Document}s against the queryindex, calling a {@link CandidateMatcher} + * produced by the supplied {@link MatcherFactory} for each possible matching query. * - * @param docs the DocumentBatch to match - * @param factory a {@link MatcherFactory} to use to create a {@link CandidateMatcher} for the match run - * @param the type of {@link QueryMatch} to return + * @param docs the DocumentBatch to match + * @param factory a {@link MatcherFactory} to use to create a {@link CandidateMatcher} for the + * match run + * @param the type of {@link QueryMatch} to return * @return a {@link MatchingQueries} object summarizing the match run. * @throws IOException on IO errors */ - public MultiMatchingQueries match(Document[] docs, MatcherFactory factory) throws IOException { + public MultiMatchingQueries match( + Document[] docs, MatcherFactory factory) throws IOException { try (DocumentBatch batch = DocumentBatch.of(analyzer, docs)) { LeafReader reader = batch.get(); CandidateMatcher matcher = factory.createMatcher(new IndexSearcher(batch.get())); @@ -263,17 +259,19 @@ public class Monitor implements Closeable { } /** - * Match a single {@link Document} against the queryindex, calling a {@link CandidateMatcher} produced by the - * supplied {@link MatcherFactory} for each possible matching query. + * Match a single {@link Document} against the queryindex, calling a {@link CandidateMatcher} + * produced by the supplied {@link MatcherFactory} for each possible matching query. * - * @param doc the InputDocument to match - * @param factory a {@link MatcherFactory} to use to create a {@link CandidateMatcher} for the match run - * @param the type of {@link QueryMatch} to return + * @param doc the InputDocument to match + * @param factory a {@link MatcherFactory} to use to create a {@link CandidateMatcher} for the + * match run + * @param the type of {@link QueryMatch} to return * @return a {@link MatchingQueries} object summarizing the match run. 
* @throws IOException on IO errors */ - public MatchingQueries match(Document doc, MatcherFactory factory) throws IOException { - return match(new Document[]{ doc }, factory).singleton(); + public MatchingQueries match(Document doc, MatcherFactory factory) + throws IOException { + return match(new Document[] {doc}, factory).singleton(); } /** @@ -281,16 +279,14 @@ public class Monitor implements Closeable { * * @param queryId the id of the query to get * @return the MonitorQuery stored for this id, or null if not found - * @throws IOException on IO errors + * @throws IOException on IO errors * @throws IllegalStateException if queries are not stored in the queryindex */ public MonitorQuery getQuery(final String queryId) throws IOException { return queryIndex.getQuery(queryId); } - /** - * @return the number of queries (after decomposition) stored in this Monitor - */ + /** @return the number of queries (after decomposition) stored in this Monitor */ public int getDisjunctCount() { return queryIndex.numDocs(); } @@ -314,7 +310,8 @@ public class Monitor implements Closeable { } // For each query selected by the presearcher, pass on to a CandidateMatcher - private static class StandardQueryCollector implements QueryIndex.QueryCollector { + private static class StandardQueryCollector + implements QueryIndex.QueryCollector { final CandidateMatcher matcher; int queryCount = 0; @@ -324,9 +321,9 @@ public class Monitor implements Closeable { } @Override - public void matchQuery(String id, QueryCacheEntry query, QueryIndex.DataValues dataValues) throws IOException { - if (query == null) - return; + public void matchQuery(String id, QueryCacheEntry query, QueryIndex.DataValues dataValues) + throws IOException { + if (query == null) return; try { queryCount++; matcher.matchQuery(id, query.matchQuery, query.metadata); @@ -334,43 +331,48 @@ public class Monitor implements Closeable { matcher.reportError(id, e); } } - } /** * Match a DocumentBatch against the queries stored in the Monitor, also returning information * about which queries were selected by the presearcher, and why. 
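Continuing the sketch above, the debug() overloads defined here return PresearcherMatches rather than MatchingQueries, so callers can also see which queries the presearcher selected, and why:

    PresearcherMatches<QueryMatch> presearcherMatches =
        monitor.debug(doc, QueryMatch.SIMPLE_MATCHER);
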
* - * @param docs a DocumentBatch to match against the index - * @param factory a {@link MatcherFactory} to use to create a {@link CandidateMatcher} for the match run - * @param the type of QueryMatch produced by the CandidateMatcher + * @param docs a DocumentBatch to match against the index + * @param factory a {@link MatcherFactory} to use to create a {@link CandidateMatcher} for the + * match run + * @param the type of QueryMatch produced by the CandidateMatcher * @return a {@link PresearcherMatches} object containing debug information * @throws IOException on IO errors */ - public PresearcherMatches debug(Document[] docs, MatcherFactory factory) - throws IOException { + public PresearcherMatches debug( + Document[] docs, MatcherFactory factory) throws IOException { try (DocumentBatch batch = DocumentBatch.of(analyzer, docs)) { LeafReader reader = batch.get(); IndexSearcher searcher = new IndexSearcher(reader); searcher.setQueryCache(null); - PresearcherQueryCollector collector = new PresearcherQueryCollector<>(factory.createMatcher(searcher)); - long buildTime = queryIndex.search(t -> new ForceNoBulkScoringQuery(presearcher.buildQuery(reader, t)), collector); + PresearcherQueryCollector collector = + new PresearcherQueryCollector<>(factory.createMatcher(searcher)); + long buildTime = + queryIndex.search( + t -> new ForceNoBulkScoringQuery(presearcher.buildQuery(reader, t)), collector); return collector.getMatches(buildTime); } } /** - * Match a single {@link Document} against the queries stored in the Monitor, also returning information - * about which queries were selected by the presearcher, and why. + * Match a single {@link Document} against the queries stored in the Monitor, also returning + * information about which queries were selected by the presearcher, and why. 
* - * @param doc an InputDocument to match against the index - * @param factory a {@link MatcherFactory} to use to create a {@link CandidateMatcher} for the match run - * @param the type of QueryMatch produced by the CandidateMatcher + * @param doc an InputDocument to match against the index + * @param factory a {@link MatcherFactory} to use to create a {@link CandidateMatcher} for the + * match run + * @param the type of QueryMatch produced by the CandidateMatcher * @return a {@link PresearcherMatches} object containing debug information * @throws IOException on IO errors */ - public PresearcherMatches debug(Document doc, MatcherFactory factory) throws IOException { - return debug(new Document[]{doc}, factory); + public PresearcherMatches debug(Document doc, MatcherFactory factory) + throws IOException { + return debug(new Document[] {doc}, factory); } private class PresearcherQueryCollector extends StandardQueryCollector { @@ -391,19 +393,20 @@ public class Monitor implements Closeable { } @Override - public void matchQuery(final String id, QueryCacheEntry query, QueryIndex.DataValues dataValues) throws IOException { - Weight w = ((Scorer)dataValues.scorer).getWeight(); + public void matchQuery(final String id, QueryCacheEntry query, QueryIndex.DataValues dataValues) + throws IOException { + Weight w = ((Scorer) dataValues.scorer).getWeight(); Matches matches = w.matches(dataValues.ctx, dataValues.scorer.docID()); for (String field : matches) { MatchesIterator mi = matches.getMatches(field); while (mi.next()) { - matchingTerms.computeIfAbsent(id, i -> new StringBuilder()) - .append(" ").append(mi.getQuery()); + matchingTerms + .computeIfAbsent(id, i -> new StringBuilder()) + .append(" ") + .append(mi.getQuery()); } } super.matchQuery(id, query, dataValues); } - } - } diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/MonitorConfiguration.java b/lucene/monitor/src/java/org/apache/lucene/monitor/MonitorConfiguration.java index 48f0c9159b0..8a5735ca4e7 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/MonitorConfiguration.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/MonitorConfiguration.java @@ -20,7 +20,6 @@ package org.apache.lucene.monitor; import java.io.IOException; import java.nio.file.Path; import java.util.concurrent.TimeUnit; - import org.apache.lucene.analysis.core.KeywordAnalyzer; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; @@ -29,9 +28,7 @@ import org.apache.lucene.store.ByteBuffersDirectory; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FSDirectory; -/** - * Encapsulates various configuration settings for a Monitor's query index - */ +/** Encapsulates various configuration settings for a Monitor's query index */ public class MonitorConfiguration { private int queryUpdateBufferSize = 5000; @@ -57,7 +54,8 @@ public class MonitorConfiguration { } public IndexWriter buildIndexWriter() throws IOException { - Directory directory = indexPath == null ? new ByteBuffersDirectory() : FSDirectory.open(indexPath); + Directory directory = + indexPath == null ? 
new ByteBuffersDirectory() : FSDirectory.open(indexPath); return new IndexWriter(directory, getIndexWriterConfig()); } @@ -80,9 +78,7 @@ public class MonitorConfiguration { return this; } - /** - * @return the QueryDecomposer used by the Monitor - */ + /** @return the QueryDecomposer used by the Monitor */ public QueryDecomposer getQueryDecomposer() { return queryDecomposer; } @@ -91,7 +87,7 @@ public class MonitorConfiguration { * Set the frequency with with the Monitor's querycache will be garbage-collected * * @param frequency the frequency value - * @param units the frequency units + * @param units the frequency units * @return the current configuration */ public MonitorConfiguration setPurgeFrequency(long frequency, TimeUnit units) { @@ -100,16 +96,12 @@ public class MonitorConfiguration { return this; } - /** - * @return the value of Monitor's querycache garbage-collection frequency - */ + /** @return the value of Monitor's querycache garbage-collection frequency */ public long getPurgeFrequency() { return purgeFrequency; } - /** - * @return Get the units of the Monitor's querycache garbage-collection frequency - */ + /** @return Get the units of the Monitor's querycache garbage-collection frequency */ public TimeUnit getPurgeFrequencyUnits() { return purgeFrequencyUnits; } @@ -117,7 +109,8 @@ public class MonitorConfiguration { /** * Set how many queries will be buffered in memory before being committed to the queryindex * - * @param size how many queries will be buffered in memory before being committed to the queryindex + * @param size how many queries will be buffered in memory before being committed to the + * queryindex * @return the current configuration */ public MonitorConfiguration setQueryUpdateBufferSize(int size) { @@ -125,11 +118,8 @@ public class MonitorConfiguration { return this; } - /** - * @return the size of the queryindex's in-memory buffer - */ + /** @return the size of the queryindex's in-memory buffer */ public int getQueryUpdateBufferSize() { return queryUpdateBufferSize; } - } diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/MonitorQuery.java b/lucene/monitor/src/java/org/apache/lucene/monitor/MonitorQuery.java index 29308177a78..0f264f925b4 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/MonitorQuery.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/MonitorQuery.java @@ -21,12 +21,9 @@ import java.util.Collections; import java.util.Map; import java.util.Objects; import java.util.TreeMap; - import org.apache.lucene.search.Query; -/** - * Defines a query to be stored in a Monitor - */ +/** Defines a query to be stored in a Monitor */ public class MonitorQuery { private final String id; @@ -37,11 +34,11 @@ public class MonitorQuery { /** * Creates a new MonitorQuery * - * @param id the query ID - * @param query the query to store + * @param id the query ID + * @param query the query to store * @param queryString an optional string representation of the query, for persistent Monitors - * @param metadata metadata passed to {@link Presearcher#indexQuery(Query, Map)}. Must not - * have any null values + * @param metadata metadata passed to {@link Presearcher#indexQuery(Query, Map)}. 
Must not have + * any null values */ public MonitorQuery(String id, Query query, String queryString, Map metadata) { this.id = id; @@ -54,7 +51,7 @@ public class MonitorQuery { /** * Creates a new MonitorQuery with empty metadata and no string representation * - * @param id the ID + * @param id the ID * @param query the query */ public MonitorQuery(String id, Query query) { @@ -64,34 +61,27 @@ public class MonitorQuery { private static void checkNullEntries(Map metadata) { for (Map.Entry entry : metadata.entrySet()) { if (entry.getValue() == null) - throw new IllegalArgumentException("Null value for key " + entry.getKey() + " in metadata map"); + throw new IllegalArgumentException( + "Null value for key " + entry.getKey() + " in metadata map"); } } - /** - * @return this MonitorQuery's ID - */ + /** @return this MonitorQuery's ID */ public String getId() { return id; } - /** - * @return this MonitorQuery's query - */ + /** @return this MonitorQuery's query */ public Query getQuery() { return query; } - /** - * @return this MonitorQuery's string representation - */ + /** @return this MonitorQuery's string representation */ public String getQueryString() { return queryString; } - /** - * @return this MonitorQuery's metadata - */ + /** @return this MonitorQuery's metadata */ public Map getMetadata() { return metadata; } @@ -101,7 +91,9 @@ public class MonitorQuery { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; MonitorQuery that = (MonitorQuery) o; - return Objects.equals(id, that.id) && Objects.equals(query, that.query) && Objects.equals(metadata, that.metadata); + return Objects.equals(id, that.id) + && Objects.equals(query, that.query) + && Objects.equals(metadata, that.metadata); } @Override @@ -115,8 +107,7 @@ public class MonitorQuery { sb.append(": "); if (queryString == null) { sb.append(query.toString()); - } - else { + } else { sb.append(queryString); } if (metadata.size() != 0) { @@ -125,8 +116,7 @@ public class MonitorQuery { for (Map.Entry entry : metadata.entrySet()) { n--; sb.append(entry.getKey()).append(": ").append(entry.getValue()); - if (n > 0) - sb.append(", "); + if (n > 0) sb.append(", "); } sb.append(" }"); } diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/MonitorQuerySerializer.java b/lucene/monitor/src/java/org/apache/lucene/monitor/MonitorQuerySerializer.java index 292a647cb52..087ed6c1905 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/MonitorQuerySerializer.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/MonitorQuerySerializer.java @@ -23,7 +23,6 @@ import java.io.IOException; import java.util.HashMap; import java.util.Map; import java.util.function.Function; - import org.apache.lucene.search.Query; import org.apache.lucene.store.InputStreamDataInput; import org.apache.lucene.store.OutputStreamDataOutput; @@ -32,18 +31,14 @@ import org.apache.lucene.util.BytesRef; /** * Serializes and deserializes MonitorQuery objects into byte streams * - * Use this for persistent query indexes + *

    Use this for persistent query indexes */ public interface MonitorQuerySerializer { - /** - * Builds a MonitorQuery from a byte representation - */ + /** Builds a MonitorQuery from a byte representation */ MonitorQuery deserialize(BytesRef binaryValue); - /** - * Converts a MonitorQuery into a byte representation - */ + /** Converts a MonitorQuery into a byte representation */ BytesRef serialize(MonitorQuery query); /** @@ -55,7 +50,8 @@ public interface MonitorQuerySerializer { return new MonitorQuerySerializer() { @Override public MonitorQuery deserialize(BytesRef binaryValue) { - ByteArrayInputStream is = new ByteArrayInputStream(binaryValue.bytes, binaryValue.offset, binaryValue.length); + ByteArrayInputStream is = + new ByteArrayInputStream(binaryValue.bytes, binaryValue.offset, binaryValue.length); try (InputStreamDataInput data = new InputStreamDataInput(is)) { String id = data.readString(); String query = data.readString(); @@ -65,7 +61,7 @@ public interface MonitorQuerySerializer { } return new MonitorQuery(id, parser.apply(query), query, metadata); } catch (IOException e) { - throw new RuntimeException(e); // shouldn't happen, we're reading from a bytearray! + throw new RuntimeException(e); // shouldn't happen, we're reading from a bytearray! } } @@ -81,12 +77,10 @@ public interface MonitorQuerySerializer { data.writeString(entry.getValue()); } return new BytesRef(os.toByteArray()); - } - catch (IOException e) { - throw new RuntimeException(e); // All in memory, so no IOException should be thrown + } catch (IOException e) { + throw new RuntimeException(e); // All in memory, so no IOException should be thrown } } }; } - } diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/MonitorUpdateListener.java b/lucene/monitor/src/java/org/apache/lucene/monitor/MonitorUpdateListener.java index e3d26121c9d..a502dd71fe9 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/MonitorUpdateListener.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/MonitorUpdateListener.java @@ -19,34 +19,26 @@ package org.apache.lucene.monitor; import java.util.List; -/** - * For reporting events on a Monitor's query index - */ +/** For reporting events on a Monitor's query index */ public interface MonitorUpdateListener { - /** - * Called after a set of queries have been added to the Monitor's query index - */ - default void afterUpdate(List updates) {}; + /** Called after a set of queries have been added to the Monitor's query index */ + default void afterUpdate(List updates) {} + ; - /** - * Called after a set of queries have been deleted from the Monitor's query index - */ - default void afterDelete(List queryIds) {}; + /** Called after a set of queries have been deleted from the Monitor's query index */ + default void afterDelete(List queryIds) {} + ; - /** - * Called after all queries have been removed from the Monitor's query index - */ - default void afterClear() {}; + /** Called after all queries have been removed from the Monitor's query index */ + default void afterClear() {} + ; - /** - * Called after the Monitor's query cache has been purged of deleted queries - */ - default void onPurge() {}; - - /** - * Called if there was an error removing deleted queries from the Monitor's query cache - */ - default void onPurgeError(Throwable t) {}; + /** Called after the Monitor's query cache has been purged of deleted queries */ + default void onPurge() {} + ; + /** Called if there was an error removing deleted queries from the Monitor's query cache */ + default void 
onPurgeError(Throwable t) {} + ; } diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/MultiMatchingQueries.java b/lucene/monitor/src/java/org/apache/lucene/monitor/MultiMatchingQueries.java index 5c752c8686e..1777251a3c4 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/MultiMatchingQueries.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/MultiMatchingQueries.java @@ -38,8 +38,13 @@ public class MultiMatchingQueries { private final int queriesRun; private final int batchSize; - MultiMatchingQueries(List> matches, Map errors, - long queryBuildTime, long searchTime, int queriesRun, int batchSize) { + MultiMatchingQueries( + List> matches, + Map errors, + long queryBuildTime, + long searchTime, + int queriesRun, + int batchSize) { this.matches = Collections.unmodifiableList(matches); this.errors = Collections.unmodifiableMap(errors); this.queryBuildTime = queryBuildTime; @@ -52,13 +57,12 @@ public class MultiMatchingQueries { * Returns the QueryMatch for the given query and document, or null if it did not match * * @param queryId the query id - * @param docId the doc id + * @param docId the doc id * @return the QueryMatch for the given query and document, or null if it did not match */ public T matches(String queryId, int docId) { Map docMatches = matches.get(docId); - if (docMatches == null) - return null; + if (docMatches == null) return null; return docMatches.get(queryId); } @@ -76,42 +80,31 @@ public class MultiMatchingQueries { */ public int getMatchCount(int docId) { Map docMatches = matches.get(docId); - if (docMatches == null) - return 0; + if (docMatches == null) return 0; return docMatches.size(); } - /** - * @return how long (in ns) it took to build the Presearcher query for the matcher run - */ + /** @return how long (in ns) it took to build the Presearcher query for the matcher run */ public long getQueryBuildTime() { return queryBuildTime; } - /** - * @return how long (in ms) it took to run the selected queries - */ + /** @return how long (in ms) it took to run the selected queries */ public long getSearchTime() { return searchTime; } - /** - * @return the number of queries passed to this CandidateMatcher during the matcher run - */ + /** @return the number of queries passed to this CandidateMatcher during the matcher run */ public int getQueriesRun() { return queriesRun; } - /** - * @return the number of documents in the batch - */ + /** @return the number of documents in the batch */ public int getBatchSize() { return batchSize; } - /** - * @return a List of any MatchErrors created during the matcher run - */ + /** @return a List of any MatchErrors created during the matcher run */ public Map getErrors() { return errors; } diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/MultipassTermFilteredPresearcher.java b/lucene/monitor/src/java/org/apache/lucene/monitor/MultipassTermFilteredPresearcher.java index 8f25da91717..ed2a1856e68 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/MultipassTermFilteredPresearcher.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/MultipassTermFilteredPresearcher.java @@ -22,7 +22,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; - import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.search.BooleanClause; @@ -33,23 +32,23 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefHash; /** - * A TermFilteredPresearcher that indexes queries multiple 
times, with terms collected - * from different routes through a querytree. Each route will produce a set of terms - * that are *sufficient* to select the query, and are indexed into a separate, suffixed field. - *

- * Incoming documents are then converted to a set of Disjunction queries over each
- * suffixed field, and these queries are combined into a conjunction query, such that the
- * document's set of terms must match a term from each route.
- * <p>
- * This allows filtering out of documents that contain one half of a two-term phrase query, for
- * example. The query {@code "hello world"} will be indexed twice, once under 'hello' and once
- * under 'world'. A document containing the terms "hello there" would match the first field,
- * but not the second, and so would not be selected for matching.
- * <p>
- * The number of passes the presearcher makes is configurable. More passes will improve the
+ * A TermFilteredPresearcher that indexes queries multiple times, with terms collected from
+ * different routes through a querytree. Each route will produce a set of terms that are
+ * *sufficient* to select the query, and are indexed into a separate, suffixed field.
+ *
+ * <p>Incoming documents are then converted to a set of Disjunction queries over each suffixed
+ * field, and these queries are combined into a conjunction query, such that the document's set of
+ * terms must match a term from each route.
+ *
+ * <p>This allows filtering out of documents that contain one half of a two-term phrase query, for
+ * example. The query {@code "hello world"} will be indexed twice, once under 'hello' and once under
+ * 'world'. A document containing the terms "hello there" would match the first field, but not the
+ * second, and so would not be selected for matching.
+ *
+ * <p>The number of passes the presearcher makes is configurable. More passes will improve the
 * selected/matched ratio, but will take longer to index and will use more RAM.
- * <p>
- * A minimum weight can we set for terms to be chosen for the second and subsequent passes. This
+ *
+ * <p>

    A minimum weight can we set for terms to be chosen for the second and subsequent passes. This * allows users to avoid indexing stopwords, for example. */ public class MultipassTermFilteredPresearcher extends TermFilteredPresearcher { @@ -60,25 +59,30 @@ public class MultipassTermFilteredPresearcher extends TermFilteredPresearcher { /** * Construct a new MultipassTermFilteredPresearcher * - * @param passes the number of times a query should be indexed - * @param minWeight the minimum weight a querytree should be advanced over - * @param weightor the TreeWeightor to use + * @param passes the number of times a query should be indexed + * @param minWeight the minimum weight a querytree should be advanced over + * @param weightor the TreeWeightor to use * @param queryHandlers a list of custom query handlers - * @param filterFields a set of fields to use as filters + * @param filterFields a set of fields to use as filters */ - public MultipassTermFilteredPresearcher(int passes, float minWeight, TermWeightor weightor, - List queryHandlers, Set filterFields) { + public MultipassTermFilteredPresearcher( + int passes, + float minWeight, + TermWeightor weightor, + List queryHandlers, + Set filterFields) { super(weightor, queryHandlers, filterFields); this.passes = passes; this.minWeight = minWeight; } /** - * Construct a new MultipassTermFilteredPresearcher using {@link TermFilteredPresearcher#DEFAULT_WEIGHTOR} - *

    - * Note that this will be constructed with a minimum advance weight of zero + * Construct a new MultipassTermFilteredPresearcher using {@link + * TermFilteredPresearcher#DEFAULT_WEIGHTOR} * - * @param passes the number of times a query should be indexed + *

<p>Note that this will be constructed with a minimum advance weight of zero + * + * @param passes the number of times a query should be indexed */ public MultipassTermFilteredPresearcher(int passes) { this(passes, 0, DEFAULT_WEIGHTOR, Collections.emptyList(), Collections.emptySet()); } @@ -120,7 +124,9 @@ public class MultipassTermFilteredPresearcher extends TermFilteredPresearcher { for (int i = 0; i < passes; i++) { BooleanQuery.Builder child = new BooleanQuery.Builder(); for (String field : terms.keySet()) { - child.add(new TermInSetQuery(field(field, i), collectedTerms.get(field)), BooleanClause.Occur.SHOULD); + child.add( + new TermInSetQuery(field(field, i), collectedTerms.get(field)), + BooleanClause.Occur.SHOULD); } parent.add(child.build(), BooleanClause.Occur.MUST); } @@ -138,10 +144,16 @@ public class MultipassTermFilteredPresearcher extends TermFilteredPresearcher { for (Map.Entry entry : fieldTerms.entrySet()) { // we add the index terms once under a suffixed field for the multipass query, and // once under the plain field name for the TermsEnumTokenFilter - doc.add(new Field(field(entry.getKey(), i), - new TermsEnumTokenStream(new BytesRefHashIterator(entry.getValue())), QUERYFIELDTYPE)); - doc.add(new Field(entry.getKey(), - new TermsEnumTokenStream(new BytesRefHashIterator(entry.getValue())), QUERYFIELDTYPE)); + doc.add( + new Field( + field(entry.getKey(), i), + new TermsEnumTokenStream(new BytesRefHashIterator(entry.getValue())), + QUERYFIELDTYPE)); + doc.add( + new Field( + entry.getKey(), + new TermsEnumTokenStream(new BytesRefHashIterator(entry.getValue())), + QUERYFIELDTYPE)); } querytree.advancePhase(minWeight); } @@ -157,5 +169,4 @@ public class MultipassTermFilteredPresearcher extends TermFilteredPresearcher { } return terms; } - }
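The multipass behaviour described in the javadoc above is easiest to see end-to-end. The following sketch is illustrative only: it assumes the lucene-monitor API surface visible elsewhere in this patch (Monitor, MonitorQuery, MatchingQueries, QueryMatch.SIMPLE_MATCHER) plus Lucene's StandardAnalyzer and PhraseQuery; the Monitor(Analyzer, Presearcher) constructor is an assumption.

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.TextField;
    import org.apache.lucene.monitor.*;
    import org.apache.lucene.search.PhraseQuery;

    public class MultipassSketch {
      public static void main(String[] args) throws Exception {
        // Two passes: "hello world" is indexed under a 'hello' route and a 'world'
        // route, and a document must supply a term from each route to be selected.
        Presearcher presearcher = new MultipassTermFilteredPresearcher(2);
        try (Monitor monitor = new Monitor(new StandardAnalyzer(), presearcher)) {
          monitor.register(new MonitorQuery("1", new PhraseQuery("f", "hello", "world")));
          Document doc = new Document();
          doc.add(new TextField("f", "hello there", Field.Store.NO));
          // "hello there" satisfies the 'hello' route but not the 'world' route, so
          // the phrase query should be filtered out before it is executed at all.
          MatchingQueries<QueryMatch> matches = monitor.match(doc, QueryMatch.SIMPLE_MATCHER);
          System.out.println(matches.getQueriesRun()); // expected: 0
        }
      }
    }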

diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/ParallelMatcher.java b/lucene/monitor/src/java/org/apache/lucene/monitor/ParallelMatcher.java index 237c9e2b2a1..fd6d7efceb3 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/ParallelMatcher.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/ParallelMatcher.java @@ -28,20 +28,18 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import java.util.concurrent.LinkedBlockingQueue; - import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; /** * Matcher class that runs matching queries in parallel. - * <p> - * This class delegates the actual matching to separate CandidateMatcher classes, - * built from a passed in MatcherFactory. - * <p> - * Use this when individual queries can take a long time to run, and you want - * to minimize latency. The matcher distributes queries amongst its worker - * threads using a BlockingQueue, and synchronization overhead may affect performance - * if the individual queries are very fast. + * + * <p>This class delegates the actual matching to separate CandidateMatcher classes, built from a + * passed in MatcherFactory. + * + * <p>
    Use this when individual queries can take a long time to run, and you want to minimize + * latency. The matcher distributes queries amongst its worker threads using a BlockingQueue, and + * synchronization overhead may affect performance if the individual queries are very fast. * * @param the QueryMatch type returned * @see PartitionMatcher @@ -57,13 +55,16 @@ public class ParallelMatcher extends CandidateMatcher { /** * Create a new ParallelMatcher * - * @param searcher the IndexSearcher to match against - * @param executor an ExecutorService to use for parallel execution + * @param searcher the IndexSearcher to match against + * @param executor an ExecutorService to use for parallel execution * @param matcherFactory MatcherFactory to use to create CandidateMatchers - * @param threads the number of threads to execute on + * @param threads the number of threads to execute on */ - private ParallelMatcher(IndexSearcher searcher, ExecutorService executor, - MatcherFactory matcherFactory, int threads) { + private ParallelMatcher( + IndexSearcher searcher, + ExecutorService executor, + MatcherFactory matcherFactory, + int threads) { super(searcher); for (int i = 0; i < threads; i++) { MatcherWorker mw = new MatcherWorker(matcherFactory); @@ -73,7 +74,8 @@ public class ParallelMatcher extends CandidateMatcher { } @Override - protected void matchQuery(String queryId, Query matchQuery, Map metadata) throws IOException { + protected void matchQuery(String queryId, Query matchQuery, Map metadata) + throws IOException { try { queue.put(new MatcherTask(queryId, matchQuery, metadata)); } catch (InterruptedException e) { @@ -134,7 +136,6 @@ public class ParallelMatcher extends CandidateMatcher { } return matcher; } - } private static class MatcherTask { @@ -151,7 +152,7 @@ public class ParallelMatcher extends CandidateMatcher { } /* Marker object placed on the queue after all matches are done, to indicate to the - worker threads that they should finish */ + worker threads that they should finish */ private static final MatcherTask END = new MatcherTask("", null, Collections.emptyMap()); private static class ParallelMatcherFactory implements MatcherFactory { @@ -160,8 +161,8 @@ public class ParallelMatcher extends CandidateMatcher { private final MatcherFactory matcherFactory; private final int threads; - ParallelMatcherFactory(ExecutorService executor, MatcherFactory matcherFactory, - int threads) { + ParallelMatcherFactory( + ExecutorService executor, MatcherFactory matcherFactory, int threads) { this.executor = executor; this.matcherFactory = matcherFactory; this.threads = threads; @@ -176,30 +177,29 @@ public class ParallelMatcher extends CandidateMatcher { /** * Create a new MatcherFactory for a ParallelMatcher * - * @param executor the ExecutorService to use + * @param executor the ExecutorService to use * @param matcherFactory the MatcherFactory to use to create submatchers - * @param threads the number of threads to use - * @param the type of QueryMatch generated + * @param threads the number of threads to use + * @param the type of QueryMatch generated */ - public static MatcherFactory factory(ExecutorService executor, - MatcherFactory matcherFactory, int threads) { + public static MatcherFactory factory( + ExecutorService executor, MatcherFactory matcherFactory, int threads) { return new ParallelMatcherFactory<>(executor, matcherFactory, threads); } /** * Create a new MatcherFactory for a ParallelMatcher - *

<p> - * This factory will create a ParallelMatcher that uses as many threads as there are cores available - * to the JVM (as determined by {@code Runtime.getRuntime().availableProcessors()}). * - * @param executor the ExecutorService to use + *
<p>This factory will create a ParallelMatcher that uses as many threads as there are cores + * available to the JVM (as determined by {@code Runtime.getRuntime().availableProcessors()}). + * + * @param executor the ExecutorService to use * @param matcherFactory the MatcherFactory to use to create submatchers - * @param <T> the type of QueryMatch generated + * @param <T> the type of QueryMatch generated */ - public static <T extends QueryMatch> MatcherFactory<T> factory(ExecutorService executor, - MatcherFactory<T> matcherFactory) { + public static <T extends QueryMatch> MatcherFactory<T> factory( + ExecutorService executor, MatcherFactory<T> matcherFactory) { int threads = Runtime.getRuntime().availableProcessors(); return new ParallelMatcherFactory<>(executor, matcherFactory, threads); } - }
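For orientation, a hedged usage sketch of the factory just reformatted; monitor and document construction are elided, and the Monitor.match call is an assumption based on the API surface visible in this patch.

    import java.io.IOException;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.monitor.*;

    class ParallelMatchSketch {
      static MatchingQueries<QueryMatch> match(Monitor monitor, Document doc) throws IOException {
        ExecutorService executor = Executors.newCachedThreadPool();
        try {
          // One worker per core; worthwhile when individual queries are slow, since
          // each candidate query is handed to the workers through a BlockingQueue.
          return monitor.match(doc, ParallelMatcher.factory(executor, QueryMatch.SIMPLE_MATCHER));
        } finally {
          executor.shutdown();
        }
      }
    }

PartitionMatcher, next, trades the per-query queue handoff for a single up-front split of the collected queries, which its javadoc recommends when queries are individually cheap.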

diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/PartitionMatcher.java b/lucene/monitor/src/java/org/apache/lucene/monitor/PartitionMatcher.java index 42857c79d73..aaf1f576ecb 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/PartitionMatcher.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/PartitionMatcher.java @@ -25,21 +25,18 @@ import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; - import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; /** - * A multi-threaded matcher that collects all possible matches in one pass, and - * then partitions them amongst a number of worker threads to perform the actual - * matching. - * <p> - * This class delegates the matching to separate CandidateMatcher classes, - * built from a passed in MatcherFactory. - * <p> - * Use this if your query sets contain large numbers of very fast queries, where - * the synchronization overhead of {@link ParallelMatcher} - * can outweigh the benefit of multithreading. + * A multi-threaded matcher that collects all possible matches in one pass, and then partitions them + * amongst a number of worker threads to perform the actual matching. + * + * <p>This class delegates the matching to separate CandidateMatcher classes, built from a passed in + * MatcherFactory. + * + * <p>
    Use this if your query sets contain large numbers of very fast queries, where the + * synchronization overhead of {@link ParallelMatcher} can outweigh the benefit of multithreading. * * @param the type of QueryMatch to return * @see ParallelMatcher @@ -69,7 +66,11 @@ public class PartitionMatcher extends CandidateMatcher private final List tasks = new ArrayList<>(); - private PartitionMatcher(IndexSearcher searcher, ExecutorService executor, MatcherFactory matcherFactory, int threads) { + private PartitionMatcher( + IndexSearcher searcher, + ExecutorService executor, + MatcherFactory matcherFactory, + int threads) { super(searcher); this.executor = executor; this.matcherFactory = matcherFactory; @@ -109,7 +110,6 @@ public class PartitionMatcher extends CandidateMatcher } catch (InterruptedException | ExecutionException e) { throw new RuntimeException("Interrupted during match", e); } - } private class MatcherWorker implements Callable> { @@ -141,8 +141,8 @@ public class PartitionMatcher extends CandidateMatcher private final MatcherFactory matcherFactory; private final int threads; - PartitionMatcherFactory(ExecutorService executor, MatcherFactory matcherFactory, - int threads) { + PartitionMatcherFactory( + ExecutorService executor, MatcherFactory matcherFactory, int threads) { this.executor = executor; this.matcherFactory = matcherFactory; this.threads = threads; @@ -157,28 +157,28 @@ public class PartitionMatcher extends CandidateMatcher /** * Create a new MatcherFactory for a PartitionMatcher * - * @param executor the ExecutorService to use + * @param executor the ExecutorService to use * @param matcherFactory the MatcherFactory to use to create submatchers - * @param threads the number of threads to use - * @param the type of QueryMatch generated + * @param threads the number of threads to use + * @param the type of QueryMatch generated */ - public static MatcherFactory factory(ExecutorService executor, - MatcherFactory matcherFactory, int threads) { + public static MatcherFactory factory( + ExecutorService executor, MatcherFactory matcherFactory, int threads) { return new PartitionMatcherFactory<>(executor, matcherFactory, threads); } /** * Create a new MatcherFactory for a PartitionMatcher - *

<p> - * This factory will create a PartitionMatcher that uses as many threads as there are cores available - * to the JVM (as determined by {@code Runtime.getRuntime().availableProcessors()}). * - * @param executor the ExecutorService to use + * <p>
    This factory will create a PartitionMatcher that uses as many threads as there are cores + * available to the JVM (as determined by {@code Runtime.getRuntime().availableProcessors()}). + * + * @param executor the ExecutorService to use * @param matcherFactory the MatcherFactory to use to create submatchers - * @param the type of QueryMatch generated + * @param the type of QueryMatch generated */ - public static MatcherFactory factory(ExecutorService executor, - MatcherFactory matcherFactory) { + public static MatcherFactory factory( + ExecutorService executor, MatcherFactory matcherFactory) { int threads = Runtime.getRuntime().availableProcessors(); return new PartitionMatcherFactory<>(executor, matcherFactory, threads); } @@ -190,13 +190,11 @@ public class PartitionMatcher extends CandidateMatcher List> list = new ArrayList<>(slices); for (int i = 0; i < slices; i++) { int end = (int) Math.floor(accum + size); - if (i == slices - 1) - end = items.size(); + if (i == slices - 1) end = items.size(); list.add(items.subList(start, end)); accum += size; start = (int) Math.floor(accum); } return list; } - } diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/Presearcher.java b/lucene/monitor/src/java/org/apache/lucene/monitor/Presearcher.java index bc5a5f18d6b..7d08d916e39 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/Presearcher.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/Presearcher.java @@ -19,7 +19,6 @@ package org.apache.lucene.monitor; import java.util.Map; import java.util.function.BiPredicate; - import org.apache.lucene.document.Document; import org.apache.lucene.index.LeafReader; import org.apache.lucene.search.MatchAllDocsQuery; @@ -27,34 +26,31 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; /** - * A Presearcher is used by the Monitor to reduce the number of queries actually - * run against a Document. It defines how queries are stored in the monitor's - * internal index, and how a Document is converted to a query against that - * index. + * A Presearcher is used by the Monitor to reduce the number of queries actually run against a + * Document. It defines how queries are stored in the monitor's internal index, and how a Document + * is converted to a query against that index. */ public abstract class Presearcher { - /** - * A Presearcher implementation that does no query filtering, and runs all - * registered queries - */ - public static final Presearcher NO_FILTERING = new Presearcher() { - @Override - public Query buildQuery(LeafReader reader, BiPredicate termAcceptor) { - return new MatchAllDocsQuery(); - } + /** A Presearcher implementation that does no query filtering, and runs all registered queries */ + public static final Presearcher NO_FILTERING = + new Presearcher() { + @Override + public Query buildQuery(LeafReader reader, BiPredicate termAcceptor) { + return new MatchAllDocsQuery(); + } - @Override - public Document indexQuery(Query query, Map metadata) { - return new Document(); - } - }; + @Override + public Document indexQuery(Query query, Map metadata) { + return new Document(); + } + }; /** * Build a query for a Monitor's queryindex from a LeafReader over a set of documents to monitor. 
* - * @param reader a {@link LeafReader} over the input documents - * @param termAcceptor a predicate indicating if a term should be added to the query + * @param reader a {@link LeafReader} over the input documents + * @param termAcceptor a predicate indicating if a term should be added to the query * @return a Query to run over a Monitor's queryindex */ public abstract Query buildQuery(LeafReader reader, BiPredicate termAcceptor); @@ -62,10 +58,9 @@ public abstract class Presearcher { /** * Build a lucene Document to index the query in a Monitor's queryindex * - * @param query the Query to index + * @param query the Query to index * @param metadata a Map of arbitrary query metadata * @return a lucene Document to add to the queryindex */ public abstract Document indexQuery(Query query, Map metadata); - } diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/PresearcherMatch.java b/lucene/monitor/src/java/org/apache/lucene/monitor/PresearcherMatch.java index c74988b3b0f..b673550b0a5 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/PresearcherMatch.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/PresearcherMatch.java @@ -22,19 +22,13 @@ package org.apache.lucene.monitor; */ public class PresearcherMatch { - /** - * The presearcher hits - */ + /** The presearcher hits */ public final String presearcherMatches; - /** - * The QueryMatch - */ + /** The QueryMatch */ public final T queryMatch; - /** - * The query id - */ + /** The query id */ public final String queryId; PresearcherMatch(String id, String presearcherMatches, T queryMatch) { diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/PresearcherMatches.java b/lucene/monitor/src/java/org/apache/lucene/monitor/PresearcherMatches.java index 464cf886536..364510e4ef3 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/PresearcherMatches.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/PresearcherMatches.java @@ -29,22 +29,18 @@ public class PresearcherMatches { /** The wrapped Matches */ public final MultiMatchingQueries matcher; - /** - * Builds a new PresearcherMatches - */ - public PresearcherMatches(Map matchingTerms, MultiMatchingQueries matcher) { + /** Builds a new PresearcherMatches */ + public PresearcherMatches( + Map matchingTerms, MultiMatchingQueries matcher) { this.matcher = matcher; this.matchingTerms = matchingTerms; } - /** - * Returns match information for a given query - */ + /** Returns match information for a given query */ public PresearcherMatch match(String queryId, int doc) { StringBuilder found = matchingTerms.get(queryId); if (found != null) return new PresearcherMatch<>(queryId, found.toString(), matcher.matches(queryId, doc)); return null; } - } diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/QueryAnalyzer.java b/lucene/monitor/src/java/org/apache/lucene/monitor/QueryAnalyzer.java index 91b7a08ef57..9fe8c56a6eb 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/QueryAnalyzer.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/QueryAnalyzer.java @@ -21,7 +21,6 @@ import java.util.ArrayList; import java.util.List; import java.util.function.BiFunction; import java.util.function.Function; - import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; @@ -29,8 +28,8 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryVisitor; /** - * Class to analyze and extract terms from a lucene query, to be used by - * a {@link 
Presearcher} in indexing. + * Class to analyze and extract terms from a lucene query, to be used by a {@link Presearcher} in + * indexing. */ class QueryAnalyzer { @@ -44,7 +43,8 @@ class QueryAnalyzer { this.unknownQueryMapper = (q, w) -> null; } - private static BiFunction buildMapper(List mappers) { + private static BiFunction buildMapper( + List mappers) { return (q, w) -> { for (CustomQueryHandler mapper : mappers) { QueryTree qt = mapper.handleQuery(q, w); @@ -83,9 +83,10 @@ class QueryAnalyzer { // Check if we're in a pure negative disjunction if (parent instanceof BooleanQuery) { BooleanQuery bq = (BooleanQuery) parent; - long positiveCount = bq.clauses().stream() - .filter(c -> c.getOccur() != BooleanClause.Occur.MUST_NOT) - .count(); + long positiveCount = + bq.clauses().stream() + .filter(c -> c.getOccur() != BooleanClause.Occur.MUST_NOT) + .count(); if (positiveCount == 0) { children.add(w -> QueryTree.anyTerm("PURE NEGATIVE QUERY[" + parent + "]")); } @@ -96,9 +97,13 @@ class QueryAnalyzer { // ignore it if (parent instanceof BooleanQuery) { BooleanQuery bq = (BooleanQuery) parent; - long requiredCount = bq.clauses().stream() - .filter(c -> c.getOccur() == BooleanClause.Occur.MUST || c.getOccur() == BooleanClause.Occur.FILTER) - .count(); + long requiredCount = + bq.clauses().stream() + .filter( + c -> + c.getOccur() == BooleanClause.Occur.MUST + || c.getOccur() == BooleanClause.Occur.FILTER) + .count(); if (requiredCount > 0) { return QueryVisitor.EMPTY_VISITOR; } @@ -117,13 +122,14 @@ class QueryAnalyzer { @Override public void visitLeaf(Query query) { - children.add(w -> { - QueryTree q = unknownQueryMapper.apply(query, w); - if (q == null) { - return QueryTree.anyTerm(query.toString()); - } - return q; - }); + children.add( + w -> { + QueryTree q = unknownQueryMapper.apply(query, w); + if (q == null) { + return QueryTree.anyTerm(query.toString()); + } + return q; + }); } @Override @@ -139,5 +145,4 @@ class QueryAnalyzer { return QueryTree.disjunction(children, termWeightor); } } - } diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/QueryCacheEntry.java b/lucene/monitor/src/java/org/apache/lucene/monitor/QueryCacheEntry.java index cec278485d9..0e7b0df2dfd 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/QueryCacheEntry.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/QueryCacheEntry.java @@ -20,34 +20,28 @@ package org.apache.lucene.monitor; import java.util.ArrayList; import java.util.List; import java.util.Map; - import org.apache.lucene.search.Query; class QueryCacheEntry { - /** - * The (possibly partial due to decomposition) query - */ + /** The (possibly partial due to decomposition) query */ final Query matchQuery; - /** - * The id of this query - */ + /** The id of this query */ final String cacheId; /** * The id of the MonitorQuery that produced this entry * - * Note that this may be different to {@link #cacheId} due to decomposition + *

<p>Note that this may be different to {@link #cacheId} due to decomposition */ final String queryId; - /** - * The metadata from the entry's parent {@link MonitorQuery} - */ + /** The metadata from the entry's parent {@link MonitorQuery} */ final Map metadata; - private QueryCacheEntry(String cacheId, String queryId, Query matchQuery, Map metadata) { + private QueryCacheEntry( + String cacheId, String queryId, Query matchQuery, Map metadata) { this.cacheId = cacheId; this.queryId = queryId; this.matchQuery = matchQuery; @@ -58,7 +52,8 @@ class QueryCacheEntry { int upto = 0; List cacheEntries = new ArrayList<>(); for (Query subquery : decomposer.decompose(mq.getQuery())) { - cacheEntries.add(new QueryCacheEntry(mq.getId() + "_" + upto, mq.getId(), subquery, mq.getMetadata())); + cacheEntries.add( + new QueryCacheEntry(mq.getId() + "_" + upto, mq.getId(), subquery, mq.getMetadata())); upto++; } return cacheEntries; diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/QueryDecomposer.java b/lucene/monitor/src/java/org/apache/lucene/monitor/QueryDecomposer.java index 93053f6d678..ef1ed6a9836 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/QueryDecomposer.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/QueryDecomposer.java @@ -20,7 +20,6 @@ package org.apache.lucene.monitor; import java.util.Collections; import java.util.HashSet; import java.util.Set; - import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; @@ -28,8 +27,8 @@ import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.Query; /** - * Split a disjunction query into its consituent parts, so that they can be indexed - * and run separately in the Monitor. + * Split a disjunction query into its constituent parts, so that they can be indexed and run + * separately in the Monitor.
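A hedged sketch of that decomposition; the expected result set is an inference from the decomposeBoolean logic in the class body that follows.

    import java.util.Set;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.monitor.QueryDecomposer;
    import org.apache.lucene.search.BooleanClause;
    import org.apache.lucene.search.BooleanQuery;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.TermQuery;

    class DecomposeSketch {
      public static void main(String[] args) {
        Query disjunction =
            new BooleanQuery.Builder()
                .add(new TermQuery(new Term("f", "a")), BooleanClause.Occur.SHOULD)
                .add(new TermQuery(new Term("f", "b")), BooleanClause.Occur.SHOULD)
                .build();
        // A pure disjunction is split into its clauses so that each part can be
        // indexed, and matched, as a separate entry in the Monitor's query index.
        Set<Query> parts = new QueryDecomposer().decompose(disjunction); // f:a and f:b
      }
    }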
*/ public class QueryDecomposer { @@ -41,8 +40,7 @@ public class QueryDecomposer { */ public Set decompose(Query q) { - if (q instanceof BooleanQuery) - return decomposeBoolean((BooleanQuery) q); + if (q instanceof BooleanQuery) return decomposeBoolean((BooleanQuery) q); if (q instanceof DisjunctionMaxQuery) { Set subqueries = new HashSet<>(); @@ -60,8 +58,7 @@ public class QueryDecomposer { } public Set decomposeBoostQuery(BoostQuery q) { - if (q.getBoost() == 1.0) - return decompose(q.getQuery()); + if (q.getBoost() == 1.0) return decompose(q.getQuery()); Set boostedDecomposedQueries = new HashSet<>(); for (Query subq : decompose(q.getQuery())) { @@ -77,18 +74,16 @@ public class QueryDecomposer { * @return a collection of subqueries */ public Set decomposeBoolean(BooleanQuery q) { - if (q.getMinimumNumberShouldMatch() > 1) - return Collections.singleton(q); + if (q.getMinimumNumberShouldMatch() > 1) return Collections.singleton(q); Set subqueries = new HashSet<>(); Set exclusions = new HashSet<>(); Set mandatory = new HashSet<>(); for (BooleanClause clause : q) { - if (clause.getOccur() == BooleanClause.Occur.MUST || clause.getOccur() == BooleanClause.Occur.FILTER) - mandatory.add(clause.getQuery()); - else if (clause.getOccur() == BooleanClause.Occur.MUST_NOT) - exclusions.add(clause.getQuery()); + if (clause.getOccur() == BooleanClause.Occur.MUST + || clause.getOccur() == BooleanClause.Occur.FILTER) mandatory.add(clause.getQuery()); + else if (clause.getOccur() == BooleanClause.Occur.MUST_NOT) exclusions.add(clause.getQuery()); else { subqueries.addAll(decompose(clause.getQuery())); } @@ -104,8 +99,7 @@ public class QueryDecomposer { subqueries.addAll(decompose(mandatory.iterator().next())); } - if (exclusions.size() == 0) - return subqueries; + if (exclusions.size() == 0) return subqueries; // If there are exclusions, then we need to add them to all the decomposed // queries diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/QueryIndex.java b/lucene/monitor/src/java/org/apache/lucene/monitor/QueryIndex.java index 9a58f585f35..22e8cc488d4 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/QueryIndex.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/QueryIndex.java @@ -31,7 +31,6 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.BiPredicate; - import org.apache.lucene.document.BinaryDocValuesField; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; @@ -96,41 +95,45 @@ class QueryIndex implements Closeable { populateQueryCache(serializer, decomposer); } - private void populateQueryCache(MonitorQuerySerializer serializer, QueryDecomposer decomposer) throws IOException { + private void populateQueryCache(MonitorQuerySerializer serializer, QueryDecomposer decomposer) + throws IOException { if (serializer == null) { // No query serialization happening here - check that the cache is empty IndexSearcher searcher = manager.acquire(); try { if (searcher.count(new MatchAllDocsQuery()) != 0) { - throw new IllegalStateException("Attempting to open a non-empty monitor query index with no MonitorQuerySerializer"); + throw new IllegalStateException( + "Attempting to open a non-empty monitor query index with no MonitorQuerySerializer"); } - } - finally { + } finally { manager.release(searcher); } return; } Set ids = new HashSet<>(); List errors = new ArrayList<>(); - purgeCache(newCache -> scan((id, 
cacheEntry, dataValues) -> { - if (ids.contains(id)) { - // this is a branch of a query that has already been reconstructed, but - // then split by decomposition - we don't need to parse it again - return; - } - ids.add(id); - try { - MonitorQuery mq = serializer.deserialize(dataValues.mq.binaryValue()); - for (QueryCacheEntry entry : QueryCacheEntry.decompose(mq, decomposer)) { - newCache.put(entry.cacheId, entry); - } - } - catch (Exception e) { - errors.add(e); - } - })); + purgeCache( + newCache -> + scan( + (id, cacheEntry, dataValues) -> { + if (ids.contains(id)) { + // this is a branch of a query that has already been reconstructed, but + // then split by decomposition - we don't need to parse it again + return; + } + ids.add(id); + try { + MonitorQuery mq = serializer.deserialize(dataValues.mq.binaryValue()); + for (QueryCacheEntry entry : QueryCacheEntry.decompose(mq, decomposer)) { + newCache.put(entry.cacheId, entry); + } + } catch (Exception e) { + errors.add(e); + } + })); if (errors.size() > 0) { - IllegalStateException e = new IllegalStateException("Couldn't parse some queries from the index"); + IllegalStateException e = + new IllegalStateException("Couldn't parse some queries from the index"); for (Exception parseError : errors) { e.addSuppressed(parseError); } @@ -140,7 +143,8 @@ class QueryIndex implements Closeable { private class TermsHashBuilder extends SearcherFactory { @Override - public IndexSearcher newSearcher(IndexReader reader, IndexReader previousReader) throws IOException { + public IndexSearcher newSearcher(IndexReader reader, IndexReader previousReader) + throws IOException { IndexSearcher searcher = super.newSearcher(reader, previousReader); searcher.setQueryCache(null); termFilters.put(reader.getReaderCacheHelper().getKey(), new QueryTermFilter(reader)); @@ -193,7 +197,8 @@ class QueryIndex implements Closeable { List indexables = new ArrayList<>(); for (MonitorQuery mq : updates) { if (serializer != null && mq.getQueryString() == null) { - throw new IllegalArgumentException("Cannot add a MonitorQuery with a null string representation to a non-ephemeral Monitor"); + throw new IllegalArgumentException( + "Cannot add a MonitorQuery with a null string representation to a non-ephemeral Monitor"); } BytesRef serialized = serializer == null ? 
EMPTY : serializer.serialize(mq); for (QueryCacheEntry qce : QueryCacheEntry.decompose(mq, decomposer)) { @@ -244,10 +249,12 @@ class QueryIndex implements Closeable { MonitorQuery getQuery(String queryId) throws IOException { if (serializer == null) { - throw new IllegalStateException("Cannot get queries from an index with no MonitorQuerySerializer"); + throw new IllegalStateException( + "Cannot get queries from an index with no MonitorQuerySerializer"); } BytesRef[] bytesHolder = new BytesRef[1]; - search(new TermQuery(new Term(FIELDS.query_id, queryId)), + search( + new TermQuery(new Term(FIELDS.query_id, queryId)), (id, query, dataValues) -> bytesHolder[0] = dataValues.mq.binaryValue()); return serializer.deserialize(bytesHolder[0]); } @@ -276,7 +283,9 @@ class QueryIndex implements Closeable { MonitorQueryCollector collector = new MonitorQueryCollector(queries, matcher); long buildTime = System.nanoTime(); - Query query = queryBuilder.buildQuery(termFilters.get(searcher.getIndexReader().getReaderCacheHelper().getKey())); + Query query = + queryBuilder.buildQuery( + termFilters.get(searcher.getIndexReader().getReaderCacheHelper().getKey())); buildTime = System.nanoTime() - buildTime; searcher.search(query, collector); return buildTime; @@ -292,16 +301,18 @@ class QueryIndex implements Closeable { } void purgeCache() throws IOException { - purgeCache(newCache -> scan((id, query, dataValues) -> { - if (query != null) - newCache.put(query.cacheId, query); - })); + purgeCache( + newCache -> + scan( + (id, query, dataValues) -> { + if (query != null) newCache.put(query.cacheId, query); + })); } /** * Remove unused queries from the query cache. - *

<p> - * This is normally called from a background thread at a rate set by configurePurgeFrequency(). + * <p>
    This is normally called from a background thread at a rate set by configurePurgeFrequency(). * * @throws IOException on IO errors */ @@ -343,7 +354,6 @@ class QueryIndex implements Closeable { } } - // --------------------------------------------- // Proxy trivial operations... // --------------------------------------------- @@ -380,7 +390,6 @@ class QueryIndex implements Closeable { default ScoreMode scoreMode() { return ScoreMode.COMPLETE_NO_SCORES; } - } // --------------------------------------------- @@ -404,9 +413,7 @@ class QueryIndex implements Closeable { } } - /** - * A Collector that decodes the stored query for each document hit. - */ + /** A Collector that decodes the stored query for each document hit. */ static final class MonitorQueryCollector extends SimpleCollector { private final Map queries; @@ -444,6 +451,5 @@ class QueryIndex implements Closeable { public ScoreMode scoreMode() { return matcher.scoreMode(); } - } } diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/QueryMatch.java b/lucene/monitor/src/java/org/apache/lucene/monitor/QueryMatch.java index 8290213c7f0..ee4008e65fb 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/QueryMatch.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/QueryMatch.java @@ -18,14 +18,13 @@ package org.apache.lucene.monitor; import java.util.Objects; - import org.apache.lucene.search.Scorable; import org.apache.lucene.search.ScoreMode; /** * Represents a match for a specific query and document - *

<p> - * Derived classes may contain more information (such as scores, highlights, etc) + * <p>
    Derived classes may contain more information (such as scores, highlights, etc) * * @see ExplainingMatch * @see ScoringMatch @@ -36,17 +35,18 @@ public class QueryMatch { private final String queryId; public static final MatcherFactory SIMPLE_MATCHER = - searcher -> new CollectingMatcher(searcher, ScoreMode.COMPLETE_NO_SCORES) { - @Override - public QueryMatch resolve(QueryMatch match1, QueryMatch match2) { - return match1; - } + searcher -> + new CollectingMatcher(searcher, ScoreMode.COMPLETE_NO_SCORES) { + @Override + public QueryMatch resolve(QueryMatch match1, QueryMatch match2) { + return match1; + } - @Override - protected QueryMatch doMatch(String queryId, int doc, Scorable scorer) { - return new QueryMatch(queryId); - } - }; + @Override + protected QueryMatch doMatch(String queryId, int doc, Scorable scorer) { + return new QueryMatch(queryId); + } + }; /** * Creates a new QueryMatch for a specific query and document @@ -57,9 +57,7 @@ public class QueryMatch { this.queryId = Objects.requireNonNull(queryId); } - /** - * @return the queryid of this match - */ + /** @return the queryid of this match */ public String getQueryId() { return queryId; } diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/QueryTimeListener.java b/lucene/monitor/src/java/org/apache/lucene/monitor/QueryTimeListener.java index c198c8f5c34..a787724d181 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/QueryTimeListener.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/QueryTimeListener.java @@ -19,30 +19,28 @@ package org.apache.lucene.monitor; import java.io.IOException; import java.util.Map; - import org.apache.lucene.search.Query; -/** - * Notified of the time it takes to run individual queries against a set of documents - */ +/** Notified of the time it takes to run individual queries against a set of documents */ public interface QueryTimeListener { - /** - * How long it took to run a particular query - */ + /** How long it took to run a particular query */ void logQueryTime(String queryId, long timeInNanos); /** * A wrapping matcher factory to log query times to a QueryTimeListener - * @param factory a matcher factory to use for the actual matching - * @param listener the QueryTimeListener + * + * @param factory a matcher factory to use for the actual matching + * @param listener the QueryTimeListener */ - static MatcherFactory timingMatcher(MatcherFactory factory, QueryTimeListener listener) { + static MatcherFactory timingMatcher( + MatcherFactory factory, QueryTimeListener listener) { return searcher -> { CandidateMatcher matcher = factory.createMatcher(searcher); return new CandidateMatcher(searcher) { @Override - protected void matchQuery(String queryId, Query matchQuery, Map metadata) throws IOException { + protected void matchQuery(String queryId, Query matchQuery, Map metadata) + throws IOException { long t = System.nanoTime(); matcher.matchQuery(queryId, matchQuery, metadata); t = System.nanoTime() - t; diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/QueryTree.java b/lucene/monitor/src/java/org/apache/lucene/monitor/QueryTree.java index cfae8eca33d..0a827b3690a 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/QueryTree.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/QueryTree.java @@ -25,34 +25,29 @@ import java.util.Optional; import java.util.function.BiConsumer; import java.util.function.Function; import java.util.stream.Collectors; - import org.apache.lucene.index.Term; import org.apache.lucene.util.BytesRef; /** * 
A representation of a node in a query tree * - * Queries are analyzed and converted into an abstract tree, consisting - * of conjunction and disjunction nodes, and leaf nodes containing terms. + *

<p>Queries are analyzed and converted into an abstract tree, consisting of conjunction and + * disjunction nodes, and leaf nodes containing terms. * - * Terms may be collected from a node, which will use the weights of its - * sub-nodes to determine which paths are followed. The path may be changed - * by calling {@link #advancePhase(double)} + * <p>
    Terms may be collected from a node, which will use the weights of its sub-nodes to determine + * which paths are followed. The path may be changed by calling {@link #advancePhase(double)} */ public abstract class QueryTree { - /** - * The weight of this node - */ + /** The weight of this node */ public abstract double weight(); - /** - * Collect terms from the most highly-weighted path below this node - */ + /** Collect terms from the most highly-weighted path below this node */ public abstract void collectTerms(BiConsumer termCollector); /** * Find the next-most highly-weighted path below this node + * * @param minWeight do not advance if the next path has a weight below this value * @return {@code false} if there are no more paths above the minimum weight */ @@ -60,6 +55,7 @@ public abstract class QueryTree { /** * Returns a string representation of the node + * * @param depth the current depth of this node in the overall query tree */ public abstract String toString(int depth); @@ -69,9 +65,7 @@ public abstract class QueryTree { return toString(0); } - /** - * Returns a string of {@code width} spaces - */ + /** Returns a string of {@code width} spaces */ protected String space(int width) { StringBuilder sb = new StringBuilder(); for (int i = 0; i < width; i++) { @@ -80,9 +74,7 @@ public abstract class QueryTree { return sb.toString(); } - /** - * Returns a leaf node for a particular term - */ + /** Returns a leaf node for a particular term */ public static QueryTree term(Term term, TermWeightor weightor) { return term(term.field(), term.bytes(), weightor.applyAsDouble(term)); } @@ -90,7 +82,7 @@ public abstract class QueryTree { /** * Returns a leaf node for a particular term and weight * - * The weight must be greater than 0 + *

<p>The weight must be greater than 0 */ public static QueryTree term(Term term, double weight) { return term(term.field(), term.bytes(), weight); } @@ -99,7 +91,7 @@ /** * Returns a leaf node for a particular term and weight * - * The weight must be greater than 0 + * <p>
    The weight must be greater than 0 */ public static QueryTree term(String field, BytesRef term, double weight) { return new QueryTree() { @@ -128,9 +120,7 @@ public abstract class QueryTree { }; } - /** - * Returns a leaf node that will match any document - */ + /** Returns a leaf node that will match any document */ public static QueryTree anyTerm(String reason) { return new QueryTree() { @Override @@ -140,7 +130,8 @@ public abstract class QueryTree { @Override public void collectTerms(BiConsumer termCollector) { - termCollector.accept(TermFilteredPresearcher.ANYTOKEN_FIELD, new BytesRef(TermFilteredPresearcher.ANYTOKEN)); + termCollector.accept( + TermFilteredPresearcher.ANYTOKEN_FIELD, new BytesRef(TermFilteredPresearcher.ANYTOKEN)); } @Override @@ -155,19 +146,18 @@ public abstract class QueryTree { }; } - /** - * Returns a conjunction of a set of child nodes - */ - public static QueryTree conjunction(List> children, TermWeightor weightor) { + /** Returns a conjunction of a set of child nodes */ + public static QueryTree conjunction( + List> children, TermWeightor weightor) { if (children.size() == 0) { throw new IllegalArgumentException("Cannot build a conjunction with no children"); } if (children.size() == 1) { return children.get(0).apply(weightor); } - List qt = children.stream() - .map(f -> f.apply(weightor)).collect(Collectors.toList()); - List restricted = qt.stream().filter(t -> t.weight() > 0).collect(Collectors.toList()); + List qt = children.stream().map(f -> f.apply(weightor)).collect(Collectors.toList()); + List restricted = + qt.stream().filter(t -> t.weight() > 0).collect(Collectors.toList()); if (restricted.size() == 0) { // all children are ANY, so just return the first one return qt.get(0); @@ -179,18 +169,16 @@ public abstract class QueryTree { return new ConjunctionQueryTree(Arrays.asList(children)); } - /** - * Returns a disjunction of a set of child nodes - */ - public static QueryTree disjunction(List> children, TermWeightor weightor) { + /** Returns a disjunction of a set of child nodes */ + public static QueryTree disjunction( + List> children, TermWeightor weightor) { if (children.size() == 0) { throw new IllegalArgumentException("Cannot build a disjunction with no children"); } if (children.size() == 1) { return children.get(0).apply(weightor); } - List qt = children.stream() - .map(f -> f.apply(weightor)).collect(Collectors.toList()); + List qt = children.stream().map(f -> f.apply(weightor)).collect(Collectors.toList()); Optional firstAnyChild = qt.stream().filter(q -> q.weight() == 0).findAny(); // if any of the children is an ANY node, just return that, otherwise build the disjunction return firstAnyChild.orElseGet(() -> new DisjunctionQueryTree(qt)); @@ -202,7 +190,8 @@ public abstract class QueryTree { private static class ConjunctionQueryTree extends QueryTree { - private static final Comparator COMPARATOR = Comparator.comparingDouble(QueryTree::weight).reversed(); + private static final Comparator COMPARATOR = + Comparator.comparingDouble(QueryTree::weight).reversed(); final List children = new ArrayList<>(); @@ -239,11 +228,13 @@ public abstract class QueryTree { @Override public String toString(int depth) { - StringBuilder sb = new StringBuilder(space(depth)).append("Conjunction[") - .append(children.size()) - .append("]^") - .append(weight()) - .append("\n"); + StringBuilder sb = + new StringBuilder(space(depth)) + .append("Conjunction[") + .append(children.size()) + .append("]^") + .append(weight()) + .append("\n"); for (QueryTree child : 
children) { sb.append(child.toString(depth + 2)).append("\n"); } diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/RegexpQueryHandler.java b/lucene/monitor/src/java/org/apache/lucene/monitor/RegexpQueryHandler.java index cdc436d9337..c08f1256108 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/RegexpQueryHandler.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/RegexpQueryHandler.java @@ -20,7 +20,6 @@ package org.apache.lucene.monitor; import java.util.Collections; import java.util.Set; import java.util.function.BiConsumer; - import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.index.Term; import org.apache.lucene.search.Query; @@ -28,30 +27,23 @@ import org.apache.lucene.search.RegexpQuery; import org.apache.lucene.util.BytesRef; /** - * A query handler implementation that matches Regexp queries by indexing regex - * terms by their longest static substring, and generates ngrams from Document - * tokens to match them. - *

<p> - * This implementation will filter out more wildcard queries than TermFilteredPresearcher, - * at the expense of longer document build times. Which one is more performant will depend - * on the type and number of queries registered in the Monitor, and the size of documents - * to be monitored. Profiling is recommended. + * A query handler implementation that matches Regexp queries by indexing regex terms by their + * longest static substring, and generates ngrams from Document tokens to match them. + * <p>
    This implementation will filter out more wildcard queries than TermFilteredPresearcher, at the + * expense of longer document build times. Which one is more performant will depend on the type and + * number of queries registered in the Monitor, and the size of documents to be monitored. Profiling + * is recommended. */ public class RegexpQueryHandler implements CustomQueryHandler { - /** - * The default suffix with which to mark ngrams - */ + /** The default suffix with which to mark ngrams */ public static final String DEFAULT_NGRAM_SUFFIX = "XX"; - /** - * The default maximum length of an input token before ANYTOKENS are generated - */ + /** The default maximum length of an input token before ANYTOKENS are generated */ public static final int DEFAULT_MAX_TOKEN_SIZE = 30; - /** - * The default token to emit if a term is longer than MAX_TOKEN_SIZE - */ + /** The default token to emit if a term is longer than MAX_TOKEN_SIZE */ public static final String DEFAULT_WILDCARD_TOKEN = "__WILDCARD__"; private final String ngramSuffix; @@ -66,12 +58,13 @@ public class RegexpQueryHandler implements CustomQueryHandler { /** * Creates a new RegexpQueryHandler * - * @param ngramSuffix the suffix with which to mark ngrams - * @param maxTokenSize the maximum length of an input token before WILDCARD tokens are generated - * @param wildcardToken the token to emit if a token is longer than maxTokenSize in length + * @param ngramSuffix the suffix with which to mark ngrams + * @param maxTokenSize the maximum length of an input token before WILDCARD tokens are generated + * @param wildcardToken the token to emit if a token is longer than maxTokenSize in length * @param excludedFields a Set of fields to ignore when generating ngrams */ - public RegexpQueryHandler(String ngramSuffix, int maxTokenSize, String wildcardToken, Set excludedFields) { + public RegexpQueryHandler( + String ngramSuffix, int maxTokenSize, String wildcardToken, Set excludedFields) { this.ngramSuffix = ngramSuffix; this.maxTokenSize = maxTokenSize; this.wildcardTokenBytes = new BytesRef(wildcardToken); @@ -79,9 +72,7 @@ public class RegexpQueryHandler implements CustomQueryHandler { this.excludedFields = excludedFields == null ? 
Collections.emptySet() : excludedFields; } - /** - * Creates a new RegexpQueryHandler using default settings - */ + /** Creates a new RegexpQueryHandler using default settings */ public RegexpQueryHandler() { this(DEFAULT_NGRAM_SUFFIX, DEFAULT_MAX_TOKEN_SIZE, DEFAULT_WILDCARD_TOKEN, null); } @@ -97,8 +88,7 @@ public class RegexpQueryHandler implements CustomQueryHandler { @Override public TokenStream wrapTermStream(String field, TokenStream ts) { - if (excludedFields.contains(field)) - return ts; + if (excludedFields.contains(field)) return ts; return new SuffixingNGramTokenFilter(ts, ngramSuffix, wildcardToken, maxTokenSize); } diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/ScoringMatch.java b/lucene/monitor/src/java/org/apache/lucene/monitor/ScoringMatch.java index 80684fdcf55..56c48813adc 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/ScoringMatch.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/ScoringMatch.java @@ -18,15 +18,12 @@ package org.apache.lucene.monitor; import java.io.IOException; - import org.apache.lucene.search.Scorable; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.search.similarities.Similarity; -/** - * A QueryMatch that reports scores for each match - */ +/** A QueryMatch that reports scores for each match */ public class ScoringMatch extends QueryMatch { public static final MatcherFactory matchWithSimilarity(Similarity similarity) { @@ -34,10 +31,10 @@ public class ScoringMatch extends QueryMatch { searcher.setSimilarity(similarity); return new CollectingMatcher(searcher, ScoreMode.COMPLETE) { @Override - protected ScoringMatch doMatch(String queryId, int doc, Scorable scorer) throws IOException { + protected ScoringMatch doMatch(String queryId, int doc, Scorable scorer) + throws IOException { float score = scorer.score(); - if (score > 0) - return new ScoringMatch(queryId, score); + if (score > 0) return new ScoringMatch(queryId, score); return null; } @@ -49,7 +46,8 @@ public class ScoringMatch extends QueryMatch { }; } - public static final MatcherFactory DEFAULT_MATCHER = matchWithSimilarity(new BM25Similarity()); + public static final MatcherFactory DEFAULT_MATCHER = + matchWithSimilarity(new BM25Similarity()); private final float score; @@ -69,7 +67,6 @@ public class ScoringMatch extends QueryMatch { if (!super.equals(o)) return false; ScoringMatch that = (ScoringMatch) o; return Float.compare(that.score, score) == 0; - } @Override diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/SlowLog.java b/lucene/monitor/src/java/org/apache/lucene/monitor/SlowLog.java index c8a8618b1f8..f6dffa2b264 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/SlowLog.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/SlowLog.java @@ -21,20 +21,18 @@ import java.util.ArrayList; import java.util.Iterator; import java.util.List; -/** - * Reports on slow queries in a given match run - */ +/** Reports on slow queries in a given match run */ public class SlowLog implements Iterable { private final List slowQueries = new ArrayList<>(); /** * Add a query and time taken to the slow log. - *

<p> - * The query will only be recorded if the time is above the configured limit + * <p>
    The query will only be recorded if the time is above the configured limit * * @param query the query id - * @param time the time taken by the query in ns + * @param time the time taken by the query in ns */ void addQuery(String query, long time) { slowQueries.add(new Entry(query, time)); @@ -56,19 +54,13 @@ public class SlowLog implements Iterable { return slowQueries.iterator(); } - /** - * An individual entry in the slow log - */ + /** An individual entry in the slow log */ public static class Entry { - /** - * The query id - */ + /** The query id */ final String queryId; - /** - * The time taken to execute the query in ms - */ + /** The time taken to execute the query in ms */ final long time; Entry(String queryId, long time) { diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/SuffixingNGramTokenFilter.java b/lucene/monitor/src/java/org/apache/lucene/monitor/SuffixingNGramTokenFilter.java index f7068b9e2d6..0709f7999f9 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/SuffixingNGramTokenFilter.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/SuffixingNGramTokenFilter.java @@ -18,11 +18,10 @@ package org.apache.lucene.monitor; import java.io.IOException; - +import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.TokenFilter; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.*; -import org.apache.lucene.analysis.CharArraySet; final class SuffixingNGramTokenFilter extends TokenFilter { @@ -51,12 +50,13 @@ final class SuffixingNGramTokenFilter extends TokenFilter { /** * Creates SuffixingNGramTokenFilter. * - * @param input {@link org.apache.lucene.analysis.TokenStream} holding the input to be tokenized - * @param suffix a string to suffix to all ngrams - * @param wildcardToken a token to emit if the input token is longer than maxTokenLength + * @param input {@link org.apache.lucene.analysis.TokenStream} holding the input to be tokenized + * @param suffix a string to suffix to all ngrams + * @param wildcardToken a token to emit if the input token is longer than maxTokenLength * @param maxTokenLength tokens longer than this will not be ngrammed */ - public SuffixingNGramTokenFilter(TokenStream input, String suffix, String wildcardToken, int maxTokenLength) { + public SuffixingNGramTokenFilter( + TokenStream input, String suffix, String wildcardToken, int maxTokenLength) { super(input); this.suffix = suffix; @@ -65,12 +65,9 @@ final class SuffixingNGramTokenFilter extends TokenFilter { posIncAtt = addAttribute(PositionIncrementAttribute.class); posLenAtt = addAttribute(PositionLengthAttribute.class); - } - /** - * Returns the next token in the stream, or null at EOS. - */ + /** Returns the next token in the stream, or null at EOS. 
*/ @Override public final boolean incrementToken() throws IOException { while (true) { @@ -80,8 +77,7 @@ final class SuffixingNGramTokenFilter extends TokenFilter { return false; } - if (keywordAtt.isKeyword()) - return true; + if (keywordAtt.isKeyword()) return true; curTermBuffer = termAtt.buffer().clone(); curTermLength = termAtt.length(); @@ -92,9 +88,8 @@ final class SuffixingNGramTokenFilter extends TokenFilter { curPosLen = posLenAtt.getPositionLength(); tokStart = offsetAtt.startOffset(); tokEnd = offsetAtt.endOffset(); - //termAtt.setEmpty().append(suffix); + // termAtt.setEmpty().append(suffix); return true; - } if (curTermLength > maxTokenLength) { @@ -111,10 +106,12 @@ final class SuffixingNGramTokenFilter extends TokenFilter { if (curGramSize >= 0 && (curPos + curGramSize) <= curCodePointCount) { clearAttributes(); final int start = Character.offsetByCodePoints(curTermBuffer, 0, curTermLength, 0, curPos); - final int end = Character.offsetByCodePoints(curTermBuffer, 0, curTermLength, start, curGramSize); + final int end = + Character.offsetByCodePoints(curTermBuffer, 0, curTermLength, start, curGramSize); termAtt.copyBuffer(curTermBuffer, start, end - start); termAtt.append(suffix); - if ((curGramSize == curTermLength - curPos) && !seenSuffixes.add(termAtt.subSequence(0, termAtt.length()))) { + if ((curGramSize == curTermLength - curPos) + && !seenSuffixes.add(termAtt.subSequence(0, termAtt.length()))) { curTermBuffer = null; continue; } diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/TermFilteredPresearcher.java b/lucene/monitor/src/java/org/apache/lucene/monitor/TermFilteredPresearcher.java index fb1d24ddbb7..6ae8b20a4bd 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/TermFilteredPresearcher.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/TermFilteredPresearcher.java @@ -25,7 +25,6 @@ import java.util.List; import java.util.Map; import java.util.Set; import java.util.function.BiPredicate; - import org.apache.lucene.analysis.FilteringTokenFilter; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute; @@ -49,24 +48,20 @@ import org.apache.lucene.util.BytesRefHash; import org.apache.lucene.util.BytesRefIterator; /** - * Presearcher implementation that uses terms extracted from queries to index - * them in the Monitor, and builds a disjunction from terms in a document to match - * them. + * Presearcher implementation that uses terms extracted from queries to index them in the Monitor, + * and builds a disjunction from terms in a document to match them. * - * Handling of queries that do not support term extraction through the - * {@link org.apache.lucene.search.QueryVisitor} API can be configured by passing - * a list of {@link CustomQueryHandler} implementations. + *

<p>Handling of queries that do not support term extraction through the {@link + * org.apache.lucene.search.QueryVisitor} API can be configured by passing a list of {@link + * CustomQueryHandler} implementations. * - * Filtering by additional fields can be configured by passing a set of field names. - * Documents that contain values in those fields will only be checked against - * {@link MonitorQuery} instances that have the same fieldname-value mapping in - * their metadata. + * <p>
    Filtering by additional fields can be configured by passing a set of field names. Documents + * that contain values in those fields will only be checked against {@link MonitorQuery} instances + * that have the same fieldname-value mapping in their metadata. */ public class TermFilteredPresearcher extends Presearcher { - /** - * The default TermWeightor, weighting by token length - */ + /** The default TermWeightor, weighting by token length */ public static final TermWeightor DEFAULT_WEIGHTOR = TermWeightor.DEFAULT; private final QueryAnalyzer extractor; @@ -78,9 +73,7 @@ public class TermFilteredPresearcher extends Presearcher { static final String ANYTOKEN_FIELD = "__anytokenfield"; static final String ANYTOKEN = "__ANYTOKEN__"; - /** - * Creates a new TermFilteredPresearcher using the default term weighting - */ + /** Creates a new TermFilteredPresearcher using the default term weighting */ public TermFilteredPresearcher() { this(DEFAULT_WEIGHTOR, Collections.emptyList(), Collections.emptySet()); } @@ -88,11 +81,15 @@ public class TermFilteredPresearcher extends Presearcher { /** * Creates a new TermFilteredPresearcher * - * @param weightor the TermWeightor - * @param customQueryHandlers A list of custom query handlers to extract terms from non-core queries - * @param filterFields A set of fields to filter on + * @param weightor the TermWeightor + * @param customQueryHandlers A list of custom query handlers to extract terms from non-core + * queries + * @param filterFields A set of fields to filter on */ - public TermFilteredPresearcher(TermWeightor weightor, List customQueryHandlers, Set filterFields) { + public TermFilteredPresearcher( + TermWeightor weightor, + List customQueryHandlers, + Set filterFields) { this.extractor = new QueryAnalyzer(customQueryHandlers); this.filterFields = filterFields; this.queryHandlers.addAll(customQueryHandlers); @@ -115,20 +112,22 @@ public class TermFilteredPresearcher extends Presearcher { ts = handler.wrapTermStream(field.name, ts); } - ts = new FilteringTokenFilter(ts) { - TermToBytesRefAttribute termAtt = addAttribute(TermToBytesRefAttribute.class); - @Override - protected boolean accept() { - return filterFields.contains(field.name) == false && termAcceptor.test(field.name, termAtt.getBytesRef()); - } - }; + ts = + new FilteringTokenFilter(ts) { + TermToBytesRefAttribute termAtt = addAttribute(TermToBytesRefAttribute.class); + + @Override + protected boolean accept() { + return filterFields.contains(field.name) == false + && termAcceptor.test(field.name, termAtt.getBytesRef()); + } + }; TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class); while (ts.incrementToken()) { queryBuilder.addTerm(field.name, BytesRef.deepCopyOf(termAtt.getBytesRef())); } ts.close(); - } Query presearcherQuery = queryBuilder.build(); @@ -170,8 +169,7 @@ public class TermFilteredPresearcher extends Presearcher { private Query buildFilterClause(LeafReader reader, String field) throws IOException { Terms terms = reader.terms(field); - if (terms == null) - return null; + if (terms == null) return null; BooleanQuery.Builder bq = new BooleanQuery.Builder(); @@ -183,39 +181,32 @@ public class TermFilteredPresearcher extends Presearcher { // we need to check that every document in the batch has the same field values, otherwise // this filtering will not work if (te.docFreq() != docsInBatch) - throw new IllegalArgumentException("Some documents in this batch do not have a term value of " - + field + ":" + Term.toString(term)); + throw new 
IllegalArgumentException( + "Some documents in this batch do not have a term value of " + + field + + ":" + + Term.toString(term)); bq.add(new TermQuery(new Term(field, BytesRef.deepCopyOf(term))), BooleanClause.Occur.SHOULD); } BooleanQuery built = bq.build(); - if (built.clauses().size() == 0) - return null; + if (built.clauses().size() == 0) return null; return built; } - /** - * Constructs a document disjunction from a set of terms - */ + /** Constructs a document disjunction from a set of terms */ protected interface DocumentQueryBuilder { - /** - * Add a term from this document - */ + /** Add a term from this document */ void addTerm(String field, BytesRef term) throws IOException; - /** - * @return the final Query - */ + /** @return the final Query */ Query build(); - } - /** - * Returns a {@link DocumentQueryBuilder} for this presearcher - */ + /** Returns a {@link DocumentQueryBuilder} for this presearcher */ protected DocumentQueryBuilder getQueryBuilder() { return new DocumentQueryBuilder() { @@ -231,7 +222,8 @@ public class TermFilteredPresearcher extends Presearcher { public Query build() { BooleanQuery.Builder builder = new BooleanQuery.Builder(); for (Map.Entry> entry : terms.entrySet()) { - builder.add(new TermInSetQuery(entry.getKey(), entry.getValue()), BooleanClause.Occur.SHOULD); + builder.add( + new TermInSetQuery(entry.getKey(), entry.getValue()), BooleanClause.Occur.SHOULD); } return builder.build(); } @@ -258,34 +250,32 @@ public class TermFilteredPresearcher extends Presearcher { return doc; } - /** - * Builds a {@link Document} from the terms extracted from a query - */ + /** Builds a {@link Document} from the terms extracted from a query */ protected Document buildQueryDocument(QueryTree querytree) { Map fieldTerms = collectTerms(querytree); Document doc = new Document(); for (Map.Entry entry : fieldTerms.entrySet()) { - doc.add(new Field(entry.getKey(), - new TermsEnumTokenStream(new BytesRefHashIterator(entry.getValue())), QUERYFIELDTYPE)); + doc.add( + new Field( + entry.getKey(), + new TermsEnumTokenStream(new BytesRefHashIterator(entry.getValue())), + QUERYFIELDTYPE)); } return doc; } - /** - * Collects terms from a {@link QueryTree} and maps them per-field - */ + /** Collects terms from a {@link QueryTree} and maps them per-field */ protected Map collectTerms(QueryTree querytree) { Map fieldTerms = new HashMap<>(); - querytree.collectTerms((field, term) -> { - BytesRefHash tt = fieldTerms.computeIfAbsent(field, f -> new BytesRefHash()); - tt.add(term); - }); + querytree.collectTerms( + (field, term) -> { + BytesRefHash tt = fieldTerms.computeIfAbsent(field, f -> new BytesRefHash()); + tt.add(term); + }); return fieldTerms; } - /** - * Implements a {@link BytesRefIterator} over a {@link BytesRefHash} - */ + /** Implements a {@link BytesRefIterator} over a {@link BytesRefHash} */ protected class BytesRefHashIterator implements BytesRefIterator { final BytesRef scratch = new BytesRef(); @@ -293,7 +283,6 @@ public class TermFilteredPresearcher extends Presearcher { final int[] sortedTerms; int upto = -1; - BytesRefHashIterator(BytesRefHash terms) { this.terms = terms; this.sortedTerms = terms.sort(); @@ -301,14 +290,11 @@ public class TermFilteredPresearcher extends Presearcher { @Override public BytesRef next() { - if (upto >= sortedTerms.length) - return null; + if (upto >= sortedTerms.length) return null; upto++; - if (sortedTerms[upto] == -1) - return null; + if (sortedTerms[upto] == -1) return null; this.terms.get(sortedTerms[upto], scratch); return 
scratch; } } - } diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/TermWeightor.java b/lucene/monitor/src/java/org/apache/lucene/monitor/TermWeightor.java index 54c995be5a3..f99ab97de87 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/TermWeightor.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/TermWeightor.java @@ -22,23 +22,16 @@ import java.util.HashSet; import java.util.Map; import java.util.Set; import java.util.function.ToDoubleFunction; - import org.apache.lucene.index.Term; import org.apache.lucene.util.BytesRef; -/** - * Calculates the weight of a {@link Term} - */ +/** Calculates the weight of a {@link Term} */ public interface TermWeightor extends ToDoubleFunction { - /** - * A default TermWeightor based on token length - */ + /** A default TermWeightor based on token length */ TermWeightor DEFAULT = lengthWeightor(3, 0.3f); - /** - * Combine weightors by multiplication - */ + /** Combine weightors by multiplication */ static TermWeightor combine(TermWeightor... weightors) { return value -> { double r = 1; @@ -49,9 +42,7 @@ public interface TermWeightor extends ToDoubleFunction { }; } - /** - * QueryTerms with a field from the selected set will be assigned the given weight - */ + /** QueryTerms with a field from the selected set will be assigned the given weight */ static TermWeightor fieldWeightor(double weight, Set fields) { return value -> { if (fields.contains(value.field())) { @@ -61,16 +52,12 @@ public interface TermWeightor extends ToDoubleFunction { }; } - /** - * QueryTerms with a field from the selected set will be assigned the given weight - */ + /** QueryTerms with a field from the selected set will be assigned the given weight */ static TermWeightor fieldWeightor(double weight, String... fields) { return fieldWeightor(weight, new HashSet<>(Arrays.asList(fields))); } - /** - * QueryTerms with a term value from the selected set will be assigned the given weight - */ + /** QueryTerms with a term value from the selected set will be assigned the given weight */ static TermWeightor termWeightor(double weight, Set terms) { return value -> { if (terms.contains(value.bytes())) { @@ -80,9 +67,7 @@ public interface TermWeightor extends ToDoubleFunction { }; } - /** - * QueryTerms with a term value from the selected set will be assigned the given weight - */ + /** QueryTerms with a term value from the selected set will be assigned the given weight */ static TermWeightor termWeightor(double weight, BytesRef... terms) { return termWeightor(weight, new HashSet<>(Arrays.asList(terms))); } @@ -109,19 +94,18 @@ public interface TermWeightor extends ToDoubleFunction { /** * QueryTerms will be assigned a weight based on their term frequency * - * More infrequent terms are weighted higher. Terms are weighted according - * to the function {@code w = (n / freq) + k}. Terms with no associated - * frequency receive a weight of value {@code 1} + *

    More infrequent terms are weighted higher. Terms are weighted according to the function + * {@code w = (n / freq) + k}. Terms with no associated frequency receive a weight of value {@code + * 1} * * @param frequencies a map of terms to frequencies - * @param n a scaling factor - * @param k the minimum weight to scale to + * @param n a scaling factor + * @param k the minimum weight to scale to */ static TermWeightor termFreqWeightor(Map frequencies, double n, double k) { return value -> { Integer mapVal = frequencies.get(value.text()); - if (mapVal != null) - return (n / mapVal) + k; + if (mapVal != null) return (n / mapVal) + k; return 1; }; } @@ -129,9 +113,8 @@ public interface TermWeightor extends ToDoubleFunction { /** * QueryTerms will be assigned a weight based on their term length * - * Weights are assigned by the function {@code a * e ^ (-k * length)}. Longer - * terms are weighted higher. Terms of length greater than 32 all receive the - * same weight. + *
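To make the formula concrete: with n = 1000 and k = 0.5, a term seen 2000 times weighs 1000/2000 + 0.5 = 1.0, while a term seen only 100 times weighs 10.5. A minimal sketch of wiring this up (not part of this patch; the class name and frequency values are illustrative):

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.lucene.monitor.TermWeightor;

    class FreqWeightorSketch {
      static TermWeightor freqWeightor() {
        Map<String, Integer> frequencies = new HashMap<>();
        frequencies.put("the", 2000);  // common:   1000 / 2000 + 0.5 = 1.0
        frequencies.put("whale", 100); // uncommon: 1000 / 100  + 0.5 = 10.5
        // Terms absent from the map fall through to the default weight of 1
        return TermWeightor.termFreqWeightor(frequencies, 1000, 0.5);
      }
    }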

    Weights are assigned by the function {@code a * e ^ (-k * length)}. Longer terms are + * weighted higher. Terms of length greater than 32 all receive the same weight. * * @param a a * @param k k @@ -148,5 +131,4 @@ public interface TermWeightor extends ToDoubleFunction { return 4 - lengthNorms[value.bytes().length]; }; } - } diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/TermsEnumTokenStream.java b/lucene/monitor/src/java/org/apache/lucene/monitor/TermsEnumTokenStream.java index 66c66d17b51..523cdc40f49 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/TermsEnumTokenStream.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/TermsEnumTokenStream.java @@ -18,15 +18,12 @@ package org.apache.lucene.monitor; import java.io.IOException; - import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; -/** - * A TokenStream created from a {@link org.apache.lucene.index.TermsEnum} - */ +/** A TokenStream created from a {@link org.apache.lucene.index.TermsEnum} */ class TermsEnumTokenStream extends TokenStream { private final BytesRefIterator termsEnum; @@ -45,8 +42,7 @@ class TermsEnumTokenStream extends TokenStream { public final boolean incrementToken() throws IOException { clearAttributes(); BytesRef bytes = termsEnum.next(); - if (bytes == null) - return false; + if (bytes == null) return false; charTerm.setEmpty(); charTerm.append(bytes.utf8ToString()); return true; diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/package-info.java b/lucene/monitor/src/java/org/apache/lucene/monitor/package-info.java index 695007648e3..9ee06fed075 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/package-info.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/package-info.java @@ -16,83 +16,96 @@ */ /** + * + * *

    Monitoring framework

    * - * This package contains classes to allow the monitoring of a stream of - * documents with a set of queries. + * This package contains classes to allow the monitoring of a stream of documents with a set of + * queries. * - * To use, instantiate a {@link org.apache.lucene.monitor.Monitor} object, - * register queries with it via - * {@link org.apache.lucene.monitor.Monitor#register(org.apache.lucene.monitor.MonitorQuery...)}, - * and then match documents against it either individually via - * {@link org.apache.lucene.monitor.Monitor#match(org.apache.lucene.document.Document, org.apache.lucene.monitor.MatcherFactory)} - * or in batches via - * {@link org.apache.lucene.monitor.Monitor#match(org.apache.lucene.document.Document[], org.apache.lucene.monitor.MatcherFactory)} + *

To use, instantiate a {@link org.apache.lucene.monitor.Monitor} object, register queries with + * it via {@link + * org.apache.lucene.monitor.Monitor#register(org.apache.lucene.monitor.MonitorQuery...)}, and then + * match documents against it either individually via {@link + * org.apache.lucene.monitor.Monitor#match(org.apache.lucene.document.Document, + * org.apache.lucene.monitor.MatcherFactory)} or in batches via {@link + * org.apache.lucene.monitor.Monitor#match(org.apache.lucene.document.Document[], + * org.apache.lucene.monitor.MatcherFactory)}. * *
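For orientation, a minimal end-to-end sketch of that flow (not part of this patch; the class, field, and query names are illustrative):

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.TextField;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.monitor.MatchingQueries;
    import org.apache.lucene.monitor.Monitor;
    import org.apache.lucene.monitor.MonitorQuery;
    import org.apache.lucene.monitor.QueryMatch;
    import org.apache.lucene.search.TermQuery;

    public class MonitorUsageSketch {
      public static void main(String[] args) throws Exception {
        try (Monitor monitor = new Monitor(new StandardAnalyzer())) {
          // Register a query under the id "q1"
          monitor.register(new MonitorQuery("q1", new TermQuery(new Term("text", "lucene"))));

          // Match a single document; SIMPLE_MATCHER reports only the ids of matching queries
          Document doc = new Document();
          doc.add(new TextField("text", "a streamed document mentioning lucene", Field.Store.NO));
          MatchingQueries<QueryMatch> matches = monitor.match(doc, QueryMatch.SIMPLE_MATCHER);
          System.out.println(matches.getMatchCount()); // prints 1
        }
      }
    }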

    Matcher types

    * * A number of matcher types are included: + * *
      - *
    • {@link org.apache.lucene.monitor.QueryMatch#SIMPLE_MATCHER} — just returns the set of query ids that a Document has matched
    • - *
    • {@link org.apache.lucene.monitor.ScoringMatch#matchWithSimilarity(org.apache.lucene.search.similarities.Similarity)} - * — returns the set of matching queries, with the score that each one records against a Document
    • - *
    • {@link org.apache.lucene.monitor.ExplainingMatch#MATCHER — similar to ScoringMatch, but include the full Explanation}
    • - *
    • {@link org.apache.lucene.monitor.HighlightsMatch#MATCHER — return the matching queries along with the matching terms for each query}
    • + *
    • {@link org.apache.lucene.monitor.QueryMatch#SIMPLE_MATCHER} — just returns the set of + * query ids that a Document has matched + *
    • {@link + * org.apache.lucene.monitor.ScoringMatch#matchWithSimilarity(org.apache.lucene.search.similarities.Similarity)} + * — returns the set of matching queries, with the score that each one records against a + * Document + *
• {@link org.apache.lucene.monitor.ExplainingMatch#MATCHER} — similar to ScoringMatch, + * but includes the full Explanation
• {@link org.apache.lucene.monitor.HighlightsMatch#MATCHER} — returns the matching + * queries along with the matching terms for each query
    * - * Matchers can be wrapped in {@link org.apache.lucene.monitor.PartitionMatcher} or {@link org.apache.lucene.monitor.ParallelMatcher} to increase - * performance in low-concurrency systems. + * Matchers can be wrapped in {@link org.apache.lucene.monitor.PartitionMatcher} or {@link + * org.apache.lucene.monitor.ParallelMatcher} to increase performance in low-concurrency systems. * *

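A sketch of such wrapping, assuming a ParallelMatcher.factory(executor, delegateFactory, threads) overload of the kind exercised by the ConcurrentMatcherTestBase changes later in this patch (monitor and doc as in the earlier sketch):

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.monitor.MatchingQueries;
    import org.apache.lucene.monitor.Monitor;
    import org.apache.lucene.monitor.ParallelMatcher;
    import org.apache.lucene.monitor.QueryMatch;

    class ParallelMatchSketch {
      static MatchingQueries<QueryMatch> matchInParallel(Monitor monitor, Document doc)
          throws Exception {
        ExecutorService executor = Executors.newFixedThreadPool(4);
        try {
          // The document's candidate queries are matched across 4 worker threads
          return monitor.match(
              doc, ParallelMatcher.factory(executor, QueryMatch.SIMPLE_MATCHER, 4));
        } finally {
          executor.shutdown();
        }
      }
    }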
    Pre-filtering of queries

    * * Monitoring is done efficiently by extracting minimal sets of terms from queries, and using these - * to build a query index. When a document is passed to - * {@link org.apache.lucene.monitor.Monitor#match(org.apache.lucene.document.Document, org.apache.lucene.monitor.MatcherFactory)}, - * it is converted into a small index, and the terms dictionary from that index is then used to build - * a disjunction query to run against the query index. Queries that match this disjunction are then run - * against the document. In this way, the Monitor can avoid running queries that have no chance of - * matching. The process of extracting terms and building document disjunctions is handled by a - * {@link org.apache.lucene.monitor.Presearcher} + * to build a query index. When a document is passed to {@link + * org.apache.lucene.monitor.Monitor#match(org.apache.lucene.document.Document, + * org.apache.lucene.monitor.MatcherFactory)}, it is converted into a small index, and the terms + * dictionary from that index is then used to build a disjunction query to run against the query + * index. Queries that match this disjunction are then run against the document. In this way, the + * Monitor can avoid running queries that have no chance of matching. The process of extracting + * terms and building document disjunctions is handled by a {@link + * org.apache.lucene.monitor.Presearcher} * - * In addition, extra per-field filtering can be specified by passing a set of keyword fields to - * filter on. When queries are registered with the monitor, field-value pairs can be added as - * optional metadata for each query, and these can then be used to restrict which queries a - * document is checked against. For example, you can specify a language that each query should - * apply to, and documents containing a value in their language field would only be checked against - * queries that have that same value in their language metadata. Note that when matching documents - * in batches, all documents in the batch must have the same values in their filter fields. + *

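The presearcher is supplied when the Monitor is constructed; a sketch using the two-argument Monitor constructor and the no-argument TermFilteredPresearcher constructor, both of which appear elsewhere in this patch:

    import java.io.IOException;
    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.monitor.Monitor;
    import org.apache.lucene.monitor.Presearcher;
    import org.apache.lucene.monitor.TermFilteredPresearcher;

    class PresearcherChoiceSketch {
      // Term-filtering presearcher: a document is only run against plausible queries
      static Monitor filtered() throws IOException {
        return new Monitor(new StandardAnalyzer(), new TermFilteredPresearcher());
      }

      // No pre-filtering: every registered query runs against every document
      static Monitor unfiltered() throws IOException {
        return new Monitor(new StandardAnalyzer(), Presearcher.NO_FILTERING);
      }
    }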
    In addition, extra per-field filtering can be specified by passing a set of keyword fields to + * filter on. When queries are registered with the monitor, field-value pairs can be added as + * optional metadata for each query, and these can then be used to restrict which queries a document + * is checked against. For example, you can specify a language that each query should apply to, and + * documents containing a value in their language field would only be checked against queries that + * have that same value in their language metadata. Note that when matching documents in batches, + * all documents in the batch must have the same values in their filter fields. * - * Query analysis uses the {@link org.apache.lucene.search.QueryVisitor} API to extract terms, which will work - * for all basic term-based queries shipped with Lucene. The analyzer builds a representation of the query - * called a {@link org.apache.lucene.monitor.QueryTree}, and then selects a minimal set of terms, one of which - * must be present in a document for that document to match. Individual terms are weighted using a - * {@link org.apache.lucene.monitor.TermWeightor}, which allows some selectivity when building the term set. - * For example, given a conjunction of terms (a boolean query with several MUST clauses, or a phrase, span or interval - * query), we need only extract one term. The TermWeightor can be configured in a number of ways; by default - * it will weight longer terms more highly. + *
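A sketch of that setup, combining the three-argument TermFilteredPresearcher constructor with query metadata, both shown elsewhere in this patch (the field name, query, and language values are illustrative):

    import java.io.IOException;
    import java.util.Collections;
    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.monitor.Monitor;
    import org.apache.lucene.monitor.MonitorQuery;
    import org.apache.lucene.monitor.TermFilteredPresearcher;
    import org.apache.lucene.monitor.TermWeightor;
    import org.apache.lucene.search.TermQuery;

    class LanguageFilterSketch {
      static Monitor languageFilteredMonitor() throws IOException {
        // Filter on the "language" keyword field in addition to the extracted terms
        TermFilteredPresearcher presearcher =
            new TermFilteredPresearcher(
                TermWeightor.DEFAULT, Collections.emptyList(), Collections.singleton("language"));
        Monitor monitor = new Monitor(new StandardAnalyzer(), presearcher);
        // Only documents with "en" in their language field are checked against this query
        monitor.register(
            new MonitorQuery(
                "1",
                new TermQuery(new Term("text", "whale")),
                null,
                Collections.singletonMap("language", "en")));
        return monitor;
      }
    }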

    Query analysis uses the {@link org.apache.lucene.search.QueryVisitor} API to extract terms, + * which will work for all basic term-based queries shipped with Lucene. The analyzer builds a + * representation of the query called a {@link org.apache.lucene.monitor.QueryTree}, and then + * selects a minimal set of terms, one of which must be present in a document for that document to + * match. Individual terms are weighted using a {@link org.apache.lucene.monitor.TermWeightor}, + * which allows some selectivity when building the term set. For example, given a conjunction of + * terms (a boolean query with several MUST clauses, or a phrase, span or interval query), we need + * only extract one term. The TermWeightor can be configured in a number of ways; by default it will + * weight longer terms more highly. * - * For query sets that contain many conjunctions, it can be useful to extract and index different - * minimal term combinations. For example, a phrase query on 'the quick brown fox' could index - * both 'quick' and 'brown', and avoid being run against documents that contain only one of these - * terms. The {@link org.apache.lucene.monitor.MultipassTermFilteredPresearcher} allows this sort - * of indexing, taking a minimum term weight so that very common terms such as 'the' can be avoided. + *
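For instance, a weightor that keeps the default length bias but favours terms from a title field can be built with the combine and fieldWeightor factories shown earlier in this patch (a sketch; the field name and boost are illustrative):

    import org.apache.lucene.monitor.TermWeightor;

    class CombinedWeightorSketch {
      // Multiply the default length-based weight by 2 for terms in the "title" field
      static final TermWeightor WEIGHTOR =
          TermWeightor.combine(TermWeightor.DEFAULT, TermWeightor.fieldWeightor(2.0, "title"));
    }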

    For query sets that contain many conjunctions, it can be useful to extract and index different + * minimal term combinations. For example, a phrase query on 'the quick brown fox' could index both + * 'quick' and 'brown', and avoid being run against documents that contain only one of these terms. + * The {@link org.apache.lucene.monitor.MultipassTermFilteredPresearcher} allows this sort of + * indexing, taking a minimum term weight so that very common terms such as 'the' can be avoided. * - * Custom Query implementations that are based on term matching, and that implement - * {@link org.apache.lucene.search.Query#visit(org.apache.lucene.search.QueryVisitor)} will work with no - * extra configuration; for more complicated custom queries, you can register a - * {@link org.apache.lucene.monitor.CustomQueryHandler} with the presearcher. Included in this package - * is a {@link org.apache.lucene.monitor.RegexpQueryHandler}, which gives an example of a different method - * of indexing automaton-based queries by extracting fixed substrings from a regular expression, and then - * using ngram filtering to build the document disjunction. + *
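A sketch of that configuration, using the five-argument constructor that TestFieldFilteredMultipassPresearcher calls later in this patch (the pass count and minimum weight are illustrative):

    import java.util.Collections;
    import org.apache.lucene.monitor.MultipassTermFilteredPresearcher;
    import org.apache.lucene.monitor.Presearcher;
    import org.apache.lucene.monitor.TermWeightor;

    class MultipassSketch {
      // Two extraction passes; terms weighted below 0.5 (for example very short
      // stopwords under the default length weighting) are never selected
      static final Presearcher PRESEARCHER =
          new MultipassTermFilteredPresearcher(
              2, 0.5f, TermWeightor.DEFAULT, Collections.emptyList(), Collections.emptySet());
    }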

    Custom Query implementations that are based on term matching, and that implement {@link + * org.apache.lucene.search.Query#visit(org.apache.lucene.search.QueryVisitor)} will work with no + * extra configuration; for more complicated custom queries, you can register a {@link + * org.apache.lucene.monitor.CustomQueryHandler} with the presearcher. Included in this package is a + * {@link org.apache.lucene.monitor.RegexpQueryHandler}, which gives an example of a different + * method of indexing automaton-based queries by extracting fixed substrings from a regular + * expression, and then using ngram filtering to build the document disjunction. * *
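Handlers are registered through the presearcher constructor; a sketch (assuming a no-argument RegexpQueryHandler constructor with default ngram settings):

    import java.util.Collections;
    import org.apache.lucene.monitor.Presearcher;
    import org.apache.lucene.monitor.RegexpQueryHandler;
    import org.apache.lucene.monitor.TermFilteredPresearcher;
    import org.apache.lucene.monitor.TermWeightor;

    class RegexpHandlerSketch {
      // Index fixed substrings extracted from regular expressions, so that
      // automaton-based queries can still be pre-filtered by term
      static final Presearcher PRESEARCHER =
          new TermFilteredPresearcher(
              TermWeightor.DEFAULT,
              Collections.singletonList(new RegexpQueryHandler()),
              Collections.emptySet());
    }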

    Persistent query sets

    * - * By default, {@link org.apache.lucene.monitor.Monitor} instances are ephemeral, storing their query - * indexes in memory. To make a persistent monitor, build a {@link org.apache.lucene.monitor.MonitorConfiguration} - * object and call {@link org.apache.lucene.monitor.MonitorConfiguration#setIndexPath(java.nio.file.Path, org.apache.lucene.monitor.MonitorQuerySerializer)} - * to tell the Monitor to store its query index on disk. All queries registered with this Monitor will - * need to have a string representation that is also stored, and can be re-parsed by the associated - * {@link org.apache.lucene.monitor.MonitorQuerySerializer} when the index is loaded by a new Monitor + * By default, {@link org.apache.lucene.monitor.Monitor} instances are ephemeral, storing their + * query indexes in memory. To make a persistent monitor, build a {@link + * org.apache.lucene.monitor.MonitorConfiguration} object and call {@link + * org.apache.lucene.monitor.MonitorConfiguration#setIndexPath(java.nio.file.Path, + * org.apache.lucene.monitor.MonitorQuerySerializer)} to tell the Monitor to store its query index + * on disk. All queries registered with this Monitor will need to have a string representation that + * is also stored, and can be re-parsed by the associated {@link + * org.apache.lucene.monitor.MonitorQuerySerializer} when the index is loaded by a new Monitor * instance. */ -package org.apache.lucene.monitor; \ No newline at end of file +package org.apache.lucene.monitor; diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/ConcurrentMatcherTestBase.java b/lucene/monitor/src/test/org/apache/lucene/monitor/ConcurrentMatcherTestBase.java index 7a120d07a91..872571ad7fd 100644 --- a/lucene/monitor/src/test/org/apache/lucene/monitor/ConcurrentMatcherTestBase.java +++ b/lucene/monitor/src/test/org/apache/lucene/monitor/ConcurrentMatcherTestBase.java @@ -21,7 +21,6 @@ import java.util.ArrayList; import java.util.List; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.Document; @@ -33,8 +32,8 @@ public abstract class ConcurrentMatcherTestBase extends LuceneTestCase { private static final Analyzer ANALYZER = new StandardAnalyzer(); - protected abstract MatcherFactory matcherFactory(ExecutorService executor, - MatcherFactory factory, int threads); + protected abstract MatcherFactory matcherFactory( + ExecutorService executor, MatcherFactory factory, int threads); public void testAllMatchesAreCollected() throws Exception { @@ -49,12 +48,11 @@ public abstract class ConcurrentMatcherTestBase extends LuceneTestCase { Document doc = new Document(); doc.add(newTextField("field", "test", Field.Store.NO)); - MatchingQueries matches - = monitor.match(doc, matcherFactory(executor, QueryMatch.SIMPLE_MATCHER, 10)); + MatchingQueries matches = + monitor.match(doc, matcherFactory(executor, QueryMatch.SIMPLE_MATCHER, 10)); assertEquals(1000, matches.getMatchCount()); - } - finally { + } finally { executor.shutdown(); } } @@ -66,7 +64,8 @@ public abstract class ConcurrentMatcherTestBase extends LuceneTestCase { try (Monitor monitor = new Monitor(ANALYZER)) { List queries = new ArrayList<>(); for (int i = 0; i < 10; i++) { - queries.add(new MonitorQuery(Integer.toString(i), MonitorTestBase.parse("test^10 doc " + i))); + queries.add( + new MonitorQuery(Integer.toString(i), MonitorTestBase.parse("test^10 doc " + i))); } 
monitor.register(queries); assertEquals(30, monitor.getDisjunctCount()); @@ -74,8 +73,8 @@ public abstract class ConcurrentMatcherTestBase extends LuceneTestCase { Document doc = new Document(); doc.add(newTextField("field", "test doc doc", Field.Store.NO)); - MatchingQueries matches - = monitor.match(doc, matcherFactory(executor, ScoringMatch.DEFAULT_MATCHER, 10)); + MatchingQueries matches = + monitor.match(doc, matcherFactory(executor, ScoringMatch.DEFAULT_MATCHER, 10)); assertEquals(20, matches.getQueriesRun()); assertEquals(10, matches.getMatchCount()); @@ -87,10 +86,8 @@ public abstract class ConcurrentMatcherTestBase extends LuceneTestCase { // up with the sum of the scores for the 'test' and 'doc' parts assertEquals(1.4874471f, match.getScore(), 0); } - } - finally { + } finally { executor.shutdown(); } } - } diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/FieldFilterPresearcherComponentTestBase.java b/lucene/monitor/src/test/org/apache/lucene/monitor/FieldFilterPresearcherComponentTestBase.java index da059b8dffb..db167f5c9af 100644 --- a/lucene/monitor/src/test/org/apache/lucene/monitor/FieldFilterPresearcherComponentTestBase.java +++ b/lucene/monitor/src/test/org/apache/lucene/monitor/FieldFilterPresearcherComponentTestBase.java @@ -17,15 +17,14 @@ package org.apache.lucene.monitor; +import static org.hamcrest.CoreMatchers.containsString; + import java.io.IOException; import java.util.Collections; - import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.search.MatchAllDocsQuery; -import static org.hamcrest.CoreMatchers.containsString; - public abstract class FieldFilterPresearcherComponentTestBase extends PresearcherTestBase { public void testBatchFiltering() throws IOException { @@ -47,7 +46,8 @@ public abstract class FieldFilterPresearcherComponentTestBase extends Presearche doc3.add(newTextField(TEXTFIELD, "wahl is a misspelling of whale", Field.Store.NO)); doc3.add(newTextField("language", "en", Field.Store.NO)); - MultiMatchingQueries matches = monitor.match(new Document[]{ doc1, doc2, doc3 }, QueryMatch.SIMPLE_MATCHER); + MultiMatchingQueries matches = + monitor.match(new Document[] {doc1, doc2, doc3}, QueryMatch.SIMPLE_MATCHER); assertEquals(1, matches.getMatchCount(0)); assertNotNull(matches.matches("1", 0)); assertEquals(1, matches.getMatchCount(1)); @@ -67,8 +67,10 @@ public abstract class FieldFilterPresearcherComponentTestBase extends Presearche doc2.add(newTextField("language", "de", Field.Store.NO)); try (Monitor monitor = newMonitor()) { - IllegalArgumentException e - = expectThrows(IllegalArgumentException.class, () -> monitor.match(new Document[]{ doc1, doc2 }, QueryMatch.SIMPLE_MATCHER)); + IllegalArgumentException e = + expectThrows( + IllegalArgumentException.class, + () -> monitor.match(new Document[] {doc1, doc2}, QueryMatch.SIMPLE_MATCHER)); assertThat(e.getMessage(), containsString("language:")); } } @@ -114,7 +116,9 @@ public abstract class FieldFilterPresearcherComponentTestBase extends Presearche public void testFilteringOnMatchAllQueries() throws IOException { try (Monitor monitor = newMonitor()) { - monitor.register(new MonitorQuery("1", new MatchAllDocsQuery(), null, Collections.singletonMap("language", "de"))); + monitor.register( + new MonitorQuery( + "1", new MatchAllDocsQuery(), null, Collections.singletonMap("language", "de"))); Document enDoc = new Document(); enDoc.add(newTextField(TEXTFIELD, "this is a test", Field.Store.NO)); @@ -127,7 +131,8 @@ public abstract class 
FieldFilterPresearcherComponentTestBase extends Presearche public void testDebugQueries() throws Exception { try (Monitor monitor = newMonitor()) { - monitor.register(new MonitorQuery("1", parse("test"), null, Collections.singletonMap("language", "en"))); + monitor.register( + new MonitorQuery("1", parse("test"), null, Collections.singletonMap("language", "en"))); Document enDoc = new Document(); enDoc.add(newTextField(TEXTFIELD, "this is a test", Field.Store.NO)); @@ -137,5 +142,4 @@ public abstract class FieldFilterPresearcherComponentTestBase extends Presearche assertFalse(matches.match("1", 0).presearcherMatches.isEmpty()); } } - } diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/MonitorTestBase.java b/lucene/monitor/src/test/org/apache/lucene/monitor/MonitorTestBase.java index a6e18fcf5d7..b8fff7e0c77 100644 --- a/lucene/monitor/src/test/org/apache/lucene/monitor/MonitorTestBase.java +++ b/lucene/monitor/src/test/org/apache/lucene/monitor/MonitorTestBase.java @@ -20,7 +20,6 @@ package org.apache.lucene.monitor; import java.io.IOException; import java.util.HashMap; import java.util.Map; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.index.IndexReader; diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/PresearcherTestBase.java b/lucene/monitor/src/test/org/apache/lucene/monitor/PresearcherTestBase.java index 5fff66743c4..c3ec9312e74 100644 --- a/lucene/monitor/src/test/org/apache/lucene/monitor/PresearcherTestBase.java +++ b/lucene/monitor/src/test/org/apache/lucene/monitor/PresearcherTestBase.java @@ -18,7 +18,6 @@ package org.apache.lucene.monitor; import java.io.IOException; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.core.WhitespaceAnalyzer; @@ -58,15 +57,15 @@ public abstract class PresearcherTestBase extends MonitorTestBase { try (Monitor monitor = newMonitor()) { monitor.register(new MonitorQuery("1", parse("field_1:test"))); - assertEquals(0, - monitor.match(buildDoc("field_2", "test"), QueryMatch.SIMPLE_MATCHER).getMatchCount()); + assertEquals( + 0, monitor.match(buildDoc("field_2", "test"), QueryMatch.SIMPLE_MATCHER).getMatchCount()); } - } public void testEmptyMonitorHandling() throws IOException { try (Monitor monitor = newMonitor()) { - MatchingQueries matches = monitor.match(buildDoc("field_2", "test"), QueryMatch.SIMPLE_MATCHER); + MatchingQueries matches = + monitor.match(buildDoc("field_2", "test"), QueryMatch.SIMPLE_MATCHER); assertEquals(0, matches.getMatchCount()); assertEquals(0, matches.getQueriesRun()); } @@ -75,22 +74,24 @@ public abstract class PresearcherTestBase extends MonitorTestBase { public void testMatchAllQueryHandling() throws IOException { try (Monitor monitor = newMonitor()) { monitor.register(new MonitorQuery("1", new MatchAllDocsQuery())); - assertEquals(1, - monitor.match(buildDoc("f", "wibble"), QueryMatch.SIMPLE_MATCHER).getMatchCount()); + assertEquals( + 1, monitor.match(buildDoc("f", "wibble"), QueryMatch.SIMPLE_MATCHER).getMatchCount()); } } public void testNegativeQueryHandling() throws IOException { - Query q = new BooleanQuery.Builder() - .add(new MatchAllDocsQuery(), BooleanClause.Occur.SHOULD) - .add(new TermQuery(new Term("f", "foo")), BooleanClause.Occur.MUST_NOT) - .build(); + Query q = + new BooleanQuery.Builder() + .add(new MatchAllDocsQuery(), BooleanClause.Occur.SHOULD) + .add(new TermQuery(new Term("f", "foo")), 
BooleanClause.Occur.MUST_NOT) + .build(); try (Monitor monitor = newMonitor()) { monitor.register(new MonitorQuery("1", q)); - MultiMatchingQueries matches = monitor.match(new Document[]{ - buildDoc("f", "bar"), buildDoc("f", "foo") - }, QueryMatch.SIMPLE_MATCHER); + MultiMatchingQueries matches = + monitor.match( + new Document[] {buildDoc("f", "bar"), buildDoc("f", "foo")}, + QueryMatch.SIMPLE_MATCHER); assertEquals(1, matches.getMatchCount(0)); assertEquals(0, matches.getMatchCount(1)); } @@ -100,13 +101,14 @@ public abstract class PresearcherTestBase extends MonitorTestBase { try (Monitor monitor = newMonitor()) { monitor.register(new MonitorQuery("1", new MatchAllDocsQuery())); - MatchingQueries matches = monitor.match(buildDoc("f", "wibble"), QueryMatch.SIMPLE_MATCHER); + MatchingQueries matches = + monitor.match(buildDoc("f", "wibble"), QueryMatch.SIMPLE_MATCHER); assertEquals(1, matches.getMatchCount()); assertEquals(1, matches.getQueriesRun()); } } - private static final BytesRef NON_STRING_TERM = new BytesRef(new byte[]{60, 8, 0, 0, 0, 9}); + private static final BytesRef NON_STRING_TERM = new BytesRef(new byte[] {60, 8, 0, 0, 0, 9}); static class BytesRefAttribute extends AttributeImpl implements TermToBytesRefAttribute { @@ -116,19 +118,13 @@ public abstract class PresearcherTestBase extends MonitorTestBase { } @Override - public void clear() { - - } + public void clear() {} @Override - public void reflectWith(AttributeReflector attributeReflector) { - - } + public void reflectWith(AttributeReflector attributeReflector) {} @Override - public void copyTo(AttributeImpl attribute) { - - } + public void copyTo(AttributeImpl attribute) {} } static final class NonStringTokenStream extends TokenStream { @@ -143,8 +139,7 @@ public abstract class PresearcherTestBase extends MonitorTestBase { @Override public boolean incrementToken() { - if (done) - return false; + if (done) return false; return done = true; } } @@ -164,7 +159,6 @@ public abstract class PresearcherTestBase extends MonitorTestBase { assertEquals(1, m.getMatchCount()); assertEquals(1, m.getQueriesRun()); } - } public static BooleanClause must(Query q) { @@ -174,5 +168,4 @@ public abstract class PresearcherTestBase extends MonitorTestBase { public static BooleanClause should(Query q) { return new BooleanClause(q, BooleanClause.Occur.SHOULD); } - } diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/TestBooleanClauseWeightings.java b/lucene/monitor/src/test/org/apache/lucene/monitor/TestBooleanClauseWeightings.java index 87d1a7a5455..83646ffcb9c 100644 --- a/lucene/monitor/src/test/org/apache/lucene/monitor/TestBooleanClauseWeightings.java +++ b/lucene/monitor/src/test/org/apache/lucene/monitor/TestBooleanClauseWeightings.java @@ -20,7 +20,6 @@ package org.apache.lucene.monitor; import java.util.Collections; import java.util.HashSet; import java.util.Set; - import org.apache.lucene.document.LongPoint; import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; @@ -34,13 +33,16 @@ public class TestBooleanClauseWeightings extends LuceneTestCase { private static QueryAnalyzer treeBuilder = new QueryAnalyzer(); public void testExactClausesPreferred() { - Query bq = new BooleanQuery.Builder() - .add(LongPoint.newRangeQuery("field2", 1, 2), BooleanClause.Occur.MUST) - .add(new BooleanQuery.Builder() - .add(new TermQuery(new Term("field1", "term1")), BooleanClause.Occur.SHOULD) - .add(new TermQuery(new Term("field1", "term2")), BooleanClause.Occur.SHOULD) - .build(), BooleanClause.Occur.MUST) - 
.build(); + Query bq = + new BooleanQuery.Builder() + .add(LongPoint.newRangeQuery("field2", 1, 2), BooleanClause.Occur.MUST) + .add( + new BooleanQuery.Builder() + .add(new TermQuery(new Term("field1", "term1")), BooleanClause.Occur.SHOULD) + .add(new TermQuery(new Term("field1", "term2")), BooleanClause.Occur.SHOULD) + .build(), + BooleanClause.Occur.MUST) + .build(); QueryTree tree = treeBuilder.buildTree(bq, TermWeightor.DEFAULT); Set terms = new HashSet<>(); tree.collectTerms((f, b) -> terms.add(new Term(f, b))); @@ -48,17 +50,19 @@ public class TestBooleanClauseWeightings extends LuceneTestCase { } public void testLongerTermsPreferred() { - Query q = new BooleanQuery.Builder() - .add(new TermQuery(new Term("field1", "a")), BooleanClause.Occur.MUST) - .add(new TermQuery(new Term("field1", "supercalifragilisticexpialidocious")), BooleanClause.Occur.MUST) - .add(new TermQuery(new Term("field1", "b")), BooleanClause.Occur.MUST) - .build(); - Set expected - = Collections.singleton(new Term("field1", "supercalifragilisticexpialidocious")); + Query q = + new BooleanQuery.Builder() + .add(new TermQuery(new Term("field1", "a")), BooleanClause.Occur.MUST) + .add( + new TermQuery(new Term("field1", "supercalifragilisticexpialidocious")), + BooleanClause.Occur.MUST) + .add(new TermQuery(new Term("field1", "b")), BooleanClause.Occur.MUST) + .build(); + Set expected = + Collections.singleton(new Term("field1", "supercalifragilisticexpialidocious")); QueryTree tree = treeBuilder.buildTree(q, TermWeightor.DEFAULT); Set terms = new HashSet<>(); tree.collectTerms((f, b) -> terms.add(new Term(f, b))); assertEquals(expected, terms); } - } diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/TestBooleanTermExtractor.java b/lucene/monitor/src/test/org/apache/lucene/monitor/TestBooleanTermExtractor.java index 98c5a9aa504..d1b1f9dd58d 100644 --- a/lucene/monitor/src/test/org/apache/lucene/monitor/TestBooleanTermExtractor.java +++ b/lucene/monitor/src/test/org/apache/lucene/monitor/TestBooleanTermExtractor.java @@ -21,7 +21,6 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.Set; - import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; @@ -45,11 +44,9 @@ public class TestBooleanTermExtractor extends LuceneTestCase { Query bq = MonitorTestBase.parse("field1:term1 field1:term2"); Set terms = collectTerms(bq); - Set expected = new HashSet<>(Arrays.asList( - new Term("field1", "term1"), - new Term("field1", "term2"))); + Set expected = + new HashSet<>(Arrays.asList(new Term("field1", "term1"), new Term("field1", "term2"))); assertEquals(expected, terms); - } public void testAllNestedDisjunctionClausesAreIncluded() { @@ -84,7 +81,7 @@ public class TestBooleanTermExtractor extends LuceneTestCase { // Set up - single MatchAllDocsQuery clause in a BooleanQuery Query q = MonitorTestBase.parse("+*:*"); assertTrue(q instanceof BooleanQuery); - BooleanClause clause = ((BooleanQuery)q).iterator().next(); + BooleanClause clause = ((BooleanQuery) q).iterator().next(); assertTrue(clause.getQuery() instanceof MatchAllDocsQuery); assertEquals(BooleanClause.Occur.MUST, clause.getOccur()); @@ -116,7 +113,8 @@ public class TestBooleanTermExtractor extends LuceneTestCase { public void testMatchAllDocsMustWithKeywordShouldAndKeywordNot() throws Exception { Query q = MonitorTestBase.parse("+*:* field1:term1 -field2:notterm"); - // Because field1:notterm is negated and field1:term1 is optional, only 
the mandatory MatchAllDocsQuery is collected. + // Because field1:notterm is negated and field1:term1 is optional, only the mandatory + // MatchAllDocsQuery is collected. Set terms = collectTerms(q); assertEquals(1, terms.size()); Term t = terms.iterator().next(); @@ -126,10 +124,10 @@ public class TestBooleanTermExtractor extends LuceneTestCase { public void testMatchAllDocsMustAndOtherMustWithKeywordShouldAndKeywordNot() throws Exception { Query q = MonitorTestBase.parse("+*:* +field9:term9 field1:term1 -field2:notterm"); - // The queryterm collected by weight is the non-anynode, so field9:term9 shows up before MatchAllDocsQuery. + // The queryterm collected by weight is the non-anynode, so field9:term9 shows up before + // MatchAllDocsQuery. Set terms = collectTerms(q); Set expected = Collections.singleton(new Term("field9", "term9")); assertEquals(expected, terms); } - } diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/TestCachePurging.java b/lucene/monitor/src/test/org/apache/lucene/monitor/TestCachePurging.java index fb5f0d5f827..c77f622ed02 100644 --- a/lucene/monitor/src/test/org/apache/lucene/monitor/TestCachePurging.java +++ b/lucene/monitor/src/test/org/apache/lucene/monitor/TestCachePurging.java @@ -17,37 +17,38 @@ package org.apache.lucene.monitor; +import static org.hamcrest.core.Is.is; + import java.io.IOException; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.util.NamedThreadFactory; -import static org.hamcrest.core.Is.is; - public class TestCachePurging extends MonitorTestBase { public void testQueryCacheCanBePurged() throws IOException { final AtomicInteger purgeCount = new AtomicInteger(); - MonitorUpdateListener listener = new MonitorUpdateListener() { - @Override - public void onPurge() { - purgeCount.incrementAndGet(); - } - }; + MonitorUpdateListener listener = + new MonitorUpdateListener() { + @Override + public void onPurge() { + purgeCount.incrementAndGet(); + } + }; try (Monitor monitor = new Monitor(ANALYZER)) { - MonitorQuery[] queries = new MonitorQuery[]{ - new MonitorQuery("1", parse("test1 test4")), - new MonitorQuery("2", parse("test2")), - new MonitorQuery("3", parse("test3")) - }; + MonitorQuery[] queries = + new MonitorQuery[] { + new MonitorQuery("1", parse("test1 test4")), + new MonitorQuery("2", parse("test2")), + new MonitorQuery("3", parse("test3")) + }; monitor.addQueryIndexUpdateListener(listener); monitor.register(queries); assertThat(monitor.getQueryCount(), is(3)); @@ -85,19 +86,21 @@ public class TestCachePurging extends MonitorTestBase { final CountDownLatch finishUpdating = new CountDownLatch(1); try (final Monitor monitor = new Monitor(ANALYZER)) { - Runnable updaterThread = () -> { - try { - startUpdating.await(); - for (int i = 200; i < 400; i++) { - monitor.register(newMonitorQuery(i)); - } - finishUpdating.countDown(); - } catch (Exception e) { - throw new RuntimeException(e); - } - }; + Runnable updaterThread = + () -> { + try { + startUpdating.await(); + for (int i = 200; i < 400; i++) { + monitor.register(newMonitorQuery(i)); + } + finishUpdating.countDown(); + } catch (Exception e) { + throw new RuntimeException(e); + } + }; - ExecutorService executor = Executors.newFixedThreadPool(1, new NamedThreadFactory("updaters")); + ExecutorService 
executor = + Executors.newFixedThreadPool(1, new NamedThreadFactory("updaters")); try { executor.submit(updaterThread); @@ -132,7 +135,8 @@ public class TestCachePurging extends MonitorTestBase { public void testBackgroundPurges() throws IOException, InterruptedException { - MonitorConfiguration config = new MonitorConfiguration().setPurgeFrequency(50, TimeUnit.MILLISECONDS); + MonitorConfiguration config = + new MonitorConfiguration().setPurgeFrequency(50, TimeUnit.MILLISECONDS); try (Monitor monitor = new Monitor(ANALYZER, Presearcher.NO_FILTERING, config)) { assertEquals(-1, monitor.getQueryCacheStats().lastPurged); @@ -146,14 +150,14 @@ public class TestCachePurging extends MonitorTestBase { assertEquals(99, monitor.getQueryCacheStats().queries); CountDownLatch latch = new CountDownLatch(1); - monitor.addQueryIndexUpdateListener(new MonitorUpdateListener() { - @Override - public void onPurge() { - // It can sometimes take a couple of purge runs to get everything in sync - if (monitor.getQueryCacheStats().cachedQueries == 99) - latch.countDown(); - } - }); + monitor.addQueryIndexUpdateListener( + new MonitorUpdateListener() { + @Override + public void onPurge() { + // It can sometimes take a couple of purge runs to get everything in sync + if (monitor.getQueryCacheStats().cachedQueries == 99) latch.countDown(); + } + }); assertTrue(latch.await(5, TimeUnit.SECONDS)); assertEquals(99, monitor.getQueryCacheStats().queries); diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/TestConcurrentQueryLoader.java b/lucene/monitor/src/test/org/apache/lucene/monitor/TestConcurrentQueryLoader.java index 20da4c74a2a..c310d186559 100644 --- a/lucene/monitor/src/test/org/apache/lucene/monitor/TestConcurrentQueryLoader.java +++ b/lucene/monitor/src/test/org/apache/lucene/monitor/TestConcurrentQueryLoader.java @@ -31,5 +31,4 @@ public class TestConcurrentQueryLoader extends MonitorTestBase { assertEquals(numQueries, monitor.getQueryCount()); } } - } diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/TestDocumentBatch.java b/lucene/monitor/src/test/org/apache/lucene/monitor/TestDocumentBatch.java index b82a0ede430..b21e3fe015e 100644 --- a/lucene/monitor/src/test/org/apache/lucene/monitor/TestDocumentBatch.java +++ b/lucene/monitor/src/test/org/apache/lucene/monitor/TestDocumentBatch.java @@ -17,16 +17,15 @@ package org.apache.lucene.monitor; -import java.io.IOException; +import static org.hamcrest.CoreMatchers.containsString; +import java.io.IOException; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.util.LuceneTestCase; import org.junit.Test; -import static org.hamcrest.CoreMatchers.containsString; - public class TestDocumentBatch extends LuceneTestCase { public static final Analyzer ANALYZER = new StandardAnalyzer(); @@ -36,20 +35,22 @@ public class TestDocumentBatch extends LuceneTestCase { DocumentBatch.of(ANALYZER); } - public void testSingleDocumentAndArrayOfOneDocumentResultInSameDocumentBatch() throws IOException { + public void testSingleDocumentAndArrayOfOneDocumentResultInSameDocumentBatch() + throws IOException { Document doc = new Document(); try (DocumentBatch batchDoc = DocumentBatch.of(ANALYZER, doc); - DocumentBatch batchArr = DocumentBatch.of(ANALYZER, new Document[] {doc})) { + DocumentBatch batchArr = DocumentBatch.of(ANALYZER, new Document[] {doc})) { assertThat(batchDoc.getClass().getName(), containsString("SingletonDocumentBatch")); 
assertEquals(batchDoc.getClass(), batchArr.getClass()); } } - public void testDocumentBatchClassDiffersWhetherItContainsOneOrMoreDocuments() throws IOException { + public void testDocumentBatchClassDiffersWhetherItContainsOneOrMoreDocuments() + throws IOException { Document doc = new Document(); try (DocumentBatch batch1 = DocumentBatch.of(ANALYZER, new Document[] {doc}); - DocumentBatch batch2 = DocumentBatch.of(ANALYZER, doc, doc); - DocumentBatch batch3 = DocumentBatch.of(ANALYZER, doc, doc, doc)) { + DocumentBatch batch2 = DocumentBatch.of(ANALYZER, doc, doc); + DocumentBatch batch3 = DocumentBatch.of(ANALYZER, doc, doc, doc)) { assertNotEquals(batch1.getClass(), batch2.getClass()); assertEquals(batch2.getClass(), batch3.getClass()); assertThat(batch3.getClass().getName(), containsString("MultiDocumentBatch")); diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/TestExplainingMatcher.java b/lucene/monitor/src/test/org/apache/lucene/monitor/TestExplainingMatcher.java index 7362f952375..60d24ca23f3 100644 --- a/lucene/monitor/src/test/org/apache/lucene/monitor/TestExplainingMatcher.java +++ b/lucene/monitor/src/test/org/apache/lucene/monitor/TestExplainingMatcher.java @@ -18,7 +18,6 @@ package org.apache.lucene.monitor; import java.io.IOException; - import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.search.Explanation; @@ -28,7 +27,8 @@ public class TestExplainingMatcher extends MonitorTestBase { public void testExplainingMatcher() throws IOException { try (Monitor monitor = newMonitor()) { - monitor.register(new MonitorQuery("1", parse("test")), new MonitorQuery("2", parse("wibble"))); + monitor.register( + new MonitorQuery("1", parse("test")), new MonitorQuery("2", parse("wibble"))); Document doc = new Document(); doc.add(newTextField("field", "test", Field.Store.NO)); diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/TestExtractors.java b/lucene/monitor/src/test/org/apache/lucene/monitor/TestExtractors.java index 1b2ac8cb3ba..934e0299904 100644 --- a/lucene/monitor/src/test/org/apache/lucene/monitor/TestExtractors.java +++ b/lucene/monitor/src/test/org/apache/lucene/monitor/TestExtractors.java @@ -21,7 +21,6 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.Set; - import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; @@ -53,7 +52,6 @@ public class TestExtractors extends LuceneTestCase { Query csqWithQuery = new ConstantScoreQuery(bq.build()); Set expected = Collections.singleton(new Term("f", "q1")); assertEquals(expected, collectTerms(csqWithQuery)); - } public void testPhraseQueryExtractor() { @@ -64,7 +62,6 @@ public class TestExtractors extends LuceneTestCase { Set expected = Collections.singleton(new Term("f", "encyclopedia")); assertEquals(expected, collectTerms(pq.build())); - } public void testBoostQueryExtractor() { @@ -80,24 +77,21 @@ public class TestExtractors extends LuceneTestCase { public void testDisjunctionMaxExtractor() { - Query query = new DisjunctionMaxQuery( - Arrays.asList(new TermQuery(new Term("f", "t1")), new TermQuery(new Term("f", "t2"))), 0.1f - ); - Set expected = new HashSet<>(Arrays.asList( - new Term("f", "t1"), - new Term("f", "t2") - )); + Query query = + new DisjunctionMaxQuery( + Arrays.asList(new TermQuery(new Term("f", "t1")), new TermQuery(new Term("f", "t2"))), + 0.1f); + Set expected = new HashSet<>(Arrays.asList(new Term("f", "t1"), new 
Term("f", "t2"))); assertEquals(expected, collectTerms(query)); } public void testBooleanExtractsFilter() { - Query q = new BooleanQuery.Builder() - .add(new TermQuery(new Term("f", "must")), BooleanClause.Occur.MUST) - .add(new TermQuery(new Term("f", "filter")), BooleanClause.Occur.FILTER) - .build(); + Query q = + new BooleanQuery.Builder() + .add(new TermQuery(new Term("f", "must")), BooleanClause.Occur.MUST) + .add(new TermQuery(new Term("f", "filter")), BooleanClause.Occur.FILTER) + .build(); Set expected = Collections.singleton(new Term("f", "filter")); // it's longer, so it wins assertEquals(expected, collectTerms(q)); } - - } diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/TestFieldFilteredMultipassPresearcher.java b/lucene/monitor/src/test/org/apache/lucene/monitor/TestFieldFilteredMultipassPresearcher.java index 75e708eaad0..5b725ae68a7 100644 --- a/lucene/monitor/src/test/org/apache/lucene/monitor/TestFieldFilteredMultipassPresearcher.java +++ b/lucene/monitor/src/test/org/apache/lucene/monitor/TestFieldFilteredMultipassPresearcher.java @@ -23,7 +23,7 @@ public class TestFieldFilteredMultipassPresearcher extends FieldFilterPresearche @Override protected Presearcher createPresearcher() { - return new MultipassTermFilteredPresearcher(2, 0, TermWeightor.DEFAULT, - Collections.emptyList(), Collections.singleton("language")); + return new MultipassTermFilteredPresearcher( + 2, 0, TermWeightor.DEFAULT, Collections.emptyList(), Collections.singleton("language")); } } diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/TestFieldTermFilteredPresearcher.java b/lucene/monitor/src/test/org/apache/lucene/monitor/TestFieldTermFilteredPresearcher.java index 38518ee450e..68ac8793d84 100644 --- a/lucene/monitor/src/test/org/apache/lucene/monitor/TestFieldTermFilteredPresearcher.java +++ b/lucene/monitor/src/test/org/apache/lucene/monitor/TestFieldTermFilteredPresearcher.java @@ -23,6 +23,7 @@ public class TestFieldTermFilteredPresearcher extends FieldFilterPresearcherComp @Override protected Presearcher createPresearcher() { - return new TermFilteredPresearcher(TermWeightor.DEFAULT, Collections.emptyList(), Collections.singleton("language")); + return new TermFilteredPresearcher( + TermWeightor.DEFAULT, Collections.emptyList(), Collections.singleton("language")); } } diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/TestForceNoBulkScoringQuery.java b/lucene/monitor/src/test/org/apache/lucene/monitor/TestForceNoBulkScoringQuery.java index 7a2297294aa..3c7ab1a7da1 100644 --- a/lucene/monitor/src/test/org/apache/lucene/monitor/TestForceNoBulkScoringQuery.java +++ b/lucene/monitor/src/test/org/apache/lucene/monitor/TestForceNoBulkScoringQuery.java @@ -18,7 +18,6 @@ package org.apache.lucene.monitor; import java.io.IOException; - import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; @@ -47,13 +46,14 @@ public class TestForceNoBulkScoringQuery extends LuceneTestCase { assertNotEquals(new ForceNoBulkScoringQuery(tq1), new ForceNoBulkScoringQuery(tq2)); assertEquals(new ForceNoBulkScoringQuery(tq2), new ForceNoBulkScoringQuery(tq3)); - assertEquals(new ForceNoBulkScoringQuery(tq2).hashCode(), new ForceNoBulkScoringQuery(tq3).hashCode()); + assertEquals( + new ForceNoBulkScoringQuery(tq2).hashCode(), new ForceNoBulkScoringQuery(tq3).hashCode()); } public void testRewrite() throws IOException { try (Directory dir = new ByteBuffersDirectory(); - IndexWriter iw = new 
            IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
+        IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
 
       Document doc = new Document();
       doc.add(new TextField("field", "term1 term2 term3 term4", Field.Store.NO));
@@ -72,11 +72,6 @@ public class TestForceNoBulkScoringQuery extends LuceneTestCase {
 
       Query inner = ((ForceNoBulkScoringQuery) rewritten).getWrappedQuery();
       assertNotEquals(inner, pq);
-
-
     }
-
-
   }
-
 }
diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/TestHighlightingMatcher.java b/lucene/monitor/src/test/org/apache/lucene/monitor/TestHighlightingMatcher.java
index eb13db4d942..79370b873dc 100644
--- a/lucene/monitor/src/test/org/apache/lucene/monitor/TestHighlightingMatcher.java
+++ b/lucene/monitor/src/test/org/apache/lucene/monitor/TestHighlightingMatcher.java
@@ -19,7 +19,6 @@ package org.apache.lucene.monitor;
 
 import java.io.IOException;
 import java.util.Arrays;
-
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
 import org.apache.lucene.analysis.core.WhitespaceTokenizer;
@@ -41,7 +40,6 @@ import org.apache.lucene.search.spans.SpanNearQuery;
 import org.apache.lucene.search.spans.SpanQuery;
 import org.apache.lucene.search.spans.SpanTermQuery;
-
 
 public class TestHighlightingMatcher extends MonitorTestBase {
 
   private static final Analyzer WHITESPACE = new WhitespaceAnalyzer();
@@ -58,8 +56,8 @@ public class TestHighlightingMatcher extends MonitorTestBase {
       MonitorQuery mq = new MonitorQuery("query1", parse("test"));
       monitor.register(mq);
 
-      MatchingQueries<HighlightsMatch> matches = monitor.match(buildDoc("this is a test document"),
-          HighlightsMatch.MATCHER);
+      MatchingQueries<HighlightsMatch> matches =
+          monitor.match(buildDoc("this is a test document"), HighlightsMatch.MATCHER);
       assertEquals(1, matches.getMatchCount());
       HighlightsMatch match = matches.matches("query1");
       assertTrue(match.getHits(FIELD).contains(new HighlightsMatch.Hit(3, 10, 3, 14)));
@@ -72,13 +70,12 @@ public class TestHighlightingMatcher extends MonitorTestBase {
       MonitorQuery mq = new MonitorQuery("query1", parse("\"test document\""));
       monitor.register(mq);
 
-      MatchingQueries<HighlightsMatch> matches = monitor.match(buildDoc("this is a test document"),
-          HighlightsMatch.MATCHER);
+      MatchingQueries<HighlightsMatch> matches =
+          monitor.match(buildDoc("this is a test document"), HighlightsMatch.MATCHER);
       assertEquals(1, matches.getMatchCount());
       HighlightsMatch m = matches.matches("query1");
       assertTrue(m.getHits(FIELD).contains(new HighlightsMatch.Hit(3, 10, 4, 23)));
     }
-
   }
 
   public void testToString() {
@@ -88,7 +85,9 @@ public class TestHighlightingMatcher extends MonitorTestBase {
     match.addHit("field", 0, 1, -1, -1);
     match.addHit("afield", 0, 1, 0, 4);
 
-    assertEquals("Match(query=1){hits={afield=[0(0)->1(4)], field=[0(-1)->1(-1), 2(-1)->3(-1)]}}", match.toString());
+    assertEquals(
+        "Match(query=1){hits={afield=[0(0)->1(4)], field=[0(-1)->1(-1), 2(-1)->3(-1)]}}",
+        match.toString());
   }
 
   public void testMultiFieldQueryMatches() throws IOException {
@@ -109,19 +108,20 @@ public class TestHighlightingMatcher extends MonitorTestBase {
       assertTrue(m.getHits("field1").contains(new HighlightsMatch.Hit(3, 10, 3, 14)));
       assertTrue(m.getHits("field2").contains(new HighlightsMatch.Hit(5, 26, 5, 30)));
     }
-
   }
 
   public void testQueryErrors() throws IOException {
     try (Monitor monitor = new Monitor(ANALYZER, Presearcher.NO_FILTERING)) {
-      monitor.register(new MonitorQuery("1", parse("test")),
+      monitor.register(
+          new MonitorQuery("1", parse("test")),
           new MonitorQuery("2", new ThrowOnRewriteQuery()),
           new MonitorQuery("3", parse("document")),
           new MonitorQuery("4", parse("foo")));
 
-      MatchingQueries<HighlightsMatch> matches = monitor.match(buildDoc("this is a test document"), HighlightsMatch.MATCHER);
+      MatchingQueries<HighlightsMatch> matches =
+          monitor.match(buildDoc("this is a test document"), HighlightsMatch.MATCHER);
       assertEquals(4, matches.getQueriesRun());
       assertEquals(2, matches.getMatchCount());
       assertEquals(1, matches.getErrors().size());
@@ -134,7 +134,8 @@ public class TestHighlightingMatcher extends MonitorTestBase {
 
       monitor.register(new MonitorQuery("1", new RegexpQuery(new Term(FIELD, "he.*"))));
 
-      MatchingQueries<HighlightsMatch> matches = monitor.match(buildDoc("hello world"), HighlightsMatch.MATCHER);
+      MatchingQueries<HighlightsMatch> matches =
+          monitor.match(buildDoc("hello world"), HighlightsMatch.MATCHER);
       assertEquals(1, matches.getQueriesRun());
       assertEquals(1, matches.getMatchCount());
       assertEquals(1, matches.matches("1").getHitCount());
@@ -143,76 +144,79 @@ public class TestHighlightingMatcher extends MonitorTestBase {
 
   public void testWildcardCombinations() throws Exception {
 
-    final BooleanQuery bq = new BooleanQuery.Builder()
-        .add(new TermQuery(new Term(FIELD, "term1")), BooleanClause.Occur.MUST)
-        .add(new PrefixQuery(new Term(FIELD, "term2")), BooleanClause.Occur.MUST)
-        .add(new TermQuery(new Term(FIELD, "term3")), BooleanClause.Occur.MUST_NOT)
-        .build();
+    final BooleanQuery bq =
+        new BooleanQuery.Builder()
+            .add(new TermQuery(new Term(FIELD, "term1")), BooleanClause.Occur.MUST)
+            .add(new PrefixQuery(new Term(FIELD, "term2")), BooleanClause.Occur.MUST)
+            .add(new TermQuery(new Term(FIELD, "term3")), BooleanClause.Occur.MUST_NOT)
+            .build();
 
     try (Monitor monitor = newMonitor()) {
       monitor.register(new MonitorQuery("1", bq));
 
-      MatchingQueries<HighlightsMatch> matches = monitor.match(buildDoc("term1 term22 term4"), HighlightsMatch.MATCHER);
+      MatchingQueries<HighlightsMatch> matches =
+          monitor.match(buildDoc("term1 term22 term4"), HighlightsMatch.MATCHER);
       HighlightsMatch m = matches.matches("1");
       assertNotNull(m);
       assertEquals(2, m.getHitCount());
     }
-
   }
 
   public void testDisjunctionMaxQuery() throws IOException {
 
-    final DisjunctionMaxQuery query = new DisjunctionMaxQuery(Arrays.asList(
-        new TermQuery(new Term(FIELD, "term1")), new PrefixQuery(new Term(FIELD, "term2"))
-    ), 1.0f);
+    final DisjunctionMaxQuery query =
+        new DisjunctionMaxQuery(
+            Arrays.asList(
+                new TermQuery(new Term(FIELD, "term1")), new PrefixQuery(new Term(FIELD, "term2"))),
+            1.0f);
 
     try (Monitor monitor = newMonitor()) {
       monitor.register(new MonitorQuery("1", query));
 
-      MatchingQueries<HighlightsMatch> matches = monitor.match(buildDoc("term1 term2 term3"), HighlightsMatch.MATCHER);
+      MatchingQueries<HighlightsMatch> matches =
+          monitor.match(buildDoc("term1 term2 term3"), HighlightsMatch.MATCHER);
       HighlightsMatch m = matches.matches("1");
       assertNotNull(m);
       assertEquals(2, m.getHitCount());
     }
-
   }
 
   public void testIdenticalMatches() throws Exception {
 
-    final BooleanQuery bq = new BooleanQuery.Builder()
-        .add(new TermQuery(new Term(FIELD, "term1")), BooleanClause.Occur.MUST)
-        .add(new TermQuery(new Term(FIELD, "term1")), BooleanClause.Occur.SHOULD)
-        .build();
+    final BooleanQuery bq =
+        new BooleanQuery.Builder()
+            .add(new TermQuery(new Term(FIELD, "term1")), BooleanClause.Occur.MUST)
+            .add(new TermQuery(new Term(FIELD, "term1")), BooleanClause.Occur.SHOULD)
+            .build();
 
     try (Monitor monitor = new Monitor(ANALYZER)) {
       monitor.register(new MonitorQuery("1", bq));
 
-      MatchingQueries<HighlightsMatch> matches = monitor.match(buildDoc("term1 term2"), HighlightsMatch.MATCHER);
+      MatchingQueries<HighlightsMatch> matches =
+          monitor.match(buildDoc("term1 term2"), HighlightsMatch.MATCHER);
       HighlightsMatch m = matches.matches("1");
       assertNotNull(m);
       assertEquals(1, m.getHitCount());
     }
-
   }
 
   public void testWildcardBooleanRewrites() throws Exception {
 
     final Query wc = new PrefixQuery(new Term(FIELD, "term1"));
 
-    final Query wrapper = new BooleanQuery.Builder()
-        .add(wc, BooleanClause.Occur.MUST)
-        .build();
+    final Query wrapper = new BooleanQuery.Builder().add(wc, BooleanClause.Occur.MUST).build();
 
-    final Query wrapper2 = new BooleanQuery.Builder()
-        .add(wrapper, BooleanClause.Occur.MUST)
-        .build();
+    final Query wrapper2 =
+        new BooleanQuery.Builder().add(wrapper, BooleanClause.Occur.MUST).build();
 
-    final BooleanQuery bq = new BooleanQuery.Builder()
-        .add(new PrefixQuery(new Term(FIELD, "term2")), BooleanClause.Occur.MUST)
-        .add(wrapper2, BooleanClause.Occur.MUST_NOT)
-        .build();
+    final BooleanQuery bq =
+        new BooleanQuery.Builder()
+            .add(new PrefixQuery(new Term(FIELD, "term2")), BooleanClause.Occur.MUST)
+            .add(wrapper2, BooleanClause.Occur.MUST_NOT)
+            .build();
 
     try (Monitor monitor = new Monitor(ANALYZER)) {
       monitor.register(new MonitorQuery("1", bq));
 
-      MatchingQueries<HighlightsMatch> matches = monitor.match(buildDoc("term2 term"), HighlightsMatch.MATCHER);
+      MatchingQueries<HighlightsMatch> matches =
+          monitor.match(buildDoc("term2 term"), HighlightsMatch.MATCHER);
       HighlightsMatch m = matches.matches("1");
       assertNotNull(m);
       assertEquals(1, m.getHitCount());
@@ -225,16 +229,18 @@ public class TestHighlightingMatcher extends MonitorTestBase {
   }
 
   public void testWildcardProximityRewrites() throws Exception {
-    final SpanNearQuery snq = SpanNearQuery.newOrderedNearQuery(FIELD)
-        .addClause(new SpanMultiTermQueryWrapper<>(new WildcardQuery(new Term(FIELD, "term*"))))
-        .addClause(new SpanTermQuery(new Term(FIELD, "foo")))
-        .build();
+    final SpanNearQuery snq =
+        SpanNearQuery.newOrderedNearQuery(FIELD)
+            .addClause(new SpanMultiTermQueryWrapper<>(new WildcardQuery(new Term(FIELD, "term*"))))
+            .addClause(new SpanTermQuery(new Term(FIELD, "foo")))
+            .build();
 
     try (Monitor monitor = newMonitor()) {
       monitor.register(new MonitorQuery("1", snq));
 
-      MatchingQueries<HighlightsMatch> matches = monitor.match(buildDoc("term1 foo"), HighlightsMatch.MATCHER);
+      MatchingQueries<HighlightsMatch> matches =
+          monitor.match(buildDoc("term1 foo"), HighlightsMatch.MATCHER);
       HighlightsMatch m = matches.matches("1");
       assertNotNull(m);
       assertEquals(2, m.getHitCount());
@@ -243,18 +249,22 @@ public class TestHighlightingMatcher extends MonitorTestBase {
 
   public void testDisjunctionWithOrderedNearSpans() throws Exception {
 
-    final Query bq = new BooleanQuery.Builder()
-        .add(new TermQuery(new Term(FIELD, "a")), BooleanClause.Occur.SHOULD)
-        .add(SpanNearQuery.newOrderedNearQuery(FIELD)
-            .addClause(new SpanTermQuery(new Term(FIELD, "b")))
-            .addClause(new SpanTermQuery(new Term(FIELD, "c")))
-            .setSlop(1)
-            .build(), BooleanClause.Occur.SHOULD)
-        .build();
-    final Query parent = new BooleanQuery.Builder()
-        .add(new TermQuery(new Term(FIELD, "a")), BooleanClause.Occur.MUST)
-        .add(bq, BooleanClause.Occur.MUST)
-        .build();
+    final Query bq =
+        new BooleanQuery.Builder()
+            .add(new TermQuery(new Term(FIELD, "a")), BooleanClause.Occur.SHOULD)
+            .add(
+                SpanNearQuery.newOrderedNearQuery(FIELD)
+                    .addClause(new SpanTermQuery(new Term(FIELD, "b")))
+                    .addClause(new SpanTermQuery(new Term(FIELD, "c")))
+                    .setSlop(1)
+                    .build(),
+                BooleanClause.Occur.SHOULD)
+            .build();
+    final Query parent =
+        new BooleanQuery.Builder()
+            .add(new TermQuery(new Term(FIELD, "a")), BooleanClause.Occur.MUST)
+            .add(bq, BooleanClause.Occur.MUST)
+            .build();
 
     try (Monitor monitor = new Monitor(ANALYZER)) {
       monitor.register(new MonitorQuery("1", parent));
@@ -266,23 +276,26 @@ public class TestHighlightingMatcher extends MonitorTestBase {
       assertNotNull(m);
       assertEquals(1, m.getHitCount());
     }
-
   }
 
   public void testDisjunctionWithUnorderedNearSpans() throws Exception {
 
-    final Query bq = new BooleanQuery.Builder()
-        .add(new TermQuery(new Term(FIELD, "a")), BooleanClause.Occur.SHOULD)
-        .add(SpanNearQuery.newUnorderedNearQuery(FIELD)
-            .addClause(new SpanTermQuery(new Term(FIELD, "b")))
-            .addClause(new SpanTermQuery(new Term(FIELD, "c")))
-            .setSlop(1)
-            .build(), BooleanClause.Occur.SHOULD)
-        .build();
-    final Query parent = new BooleanQuery.Builder()
-        .add(new TermQuery(new Term(FIELD, "a")), BooleanClause.Occur.MUST)
-        .add(bq, BooleanClause.Occur.MUST)
-        .build();
+    final Query bq =
+        new BooleanQuery.Builder()
+            .add(new TermQuery(new Term(FIELD, "a")), BooleanClause.Occur.SHOULD)
+            .add(
+                SpanNearQuery.newUnorderedNearQuery(FIELD)
+                    .addClause(new SpanTermQuery(new Term(FIELD, "b")))
+                    .addClause(new SpanTermQuery(new Term(FIELD, "c")))
+                    .setSlop(1)
+                    .build(),
+                BooleanClause.Occur.SHOULD)
+            .build();
+    final Query parent =
+        new BooleanQuery.Builder()
+            .add(new TermQuery(new Term(FIELD, "a")), BooleanClause.Occur.MUST)
+            .add(bq, BooleanClause.Occur.MUST)
+            .build();
 
     try (Monitor monitor = new Monitor(ANALYZER)) {
       monitor.register(new MonitorQuery("1", parent));
@@ -294,7 +307,6 @@ public class TestHighlightingMatcher extends MonitorTestBase {
       assertNotNull(m);
       assertEquals(1, m.getHitCount());
     }
-
   }
 
   public void testEquality() {
@@ -319,22 +331,23 @@ public class TestHighlightingMatcher extends MonitorTestBase {
 
   public void testMutliValuedFieldWithNonDefaultGaps() throws IOException {
 
-    Analyzer analyzer = new Analyzer() {
-      @Override
-      public int getPositionIncrementGap(String fieldName) {
-        return 1000;
-      }
+    Analyzer analyzer =
+        new Analyzer() {
+          @Override
+          public int getPositionIncrementGap(String fieldName) {
+            return 1000;
+          }
 
-      @Override
-      public int getOffsetGap(String fieldName) {
-        return 2000;
-      }
+          @Override
+          public int getOffsetGap(String fieldName) {
+            return 2000;
+          }
 
-      @Override
-      protected TokenStreamComponents createComponents(String fieldName) {
-        return new TokenStreamComponents(new WhitespaceTokenizer());
-      }
-    };
+          @Override
+          protected TokenStreamComponents createComponents(String fieldName) {
+            return new TokenStreamComponents(new WhitespaceTokenizer());
+          }
+        };
 
     MonitorQuery mq = new MonitorQuery("query", parse(FIELD + ":\"hello world\"~5"));
     try (Monitor monitor = newMonitor(analyzer)) {
@@ -368,23 +381,26 @@ public class TestHighlightingMatcher extends MonitorTestBase {
       assertTrue(m3.getHits(FIELD).contains(new HighlightsMatch.Hit(0, 0, 1, 11)));
       assertTrue(m3.getHits(FIELD).contains(new HighlightsMatch.Hit(1002, 2011, 1004, 2030)));
     }
-
   }
 
   public void testDisjunctionWithOrderedNearMatch() throws Exception {
 
-    final Query bq = new BooleanQuery.Builder()
-        .add(new TermQuery(new Term(FIELD, "a")), BooleanClause.Occur.SHOULD)
-        .add(SpanNearQuery.newOrderedNearQuery(FIELD)
-            .addClause(new SpanTermQuery(new Term(FIELD, "b")))
-            .addClause(new SpanTermQuery(new Term(FIELD, "c")))
-            .setSlop(1)
-            .build(), BooleanClause.Occur.SHOULD)
-        .build();
-    final Query parent = new BooleanQuery.Builder()
-        .add(new TermQuery(new Term(FIELD, "a")), BooleanClause.Occur.MUST)
-        .add(bq, BooleanClause.Occur.MUST)
-        .build();
+    final Query bq =
+        new BooleanQuery.Builder()
+            .add(new TermQuery(new Term(FIELD, "a")), BooleanClause.Occur.SHOULD)
+            .add(
+                SpanNearQuery.newOrderedNearQuery(FIELD)
+                    .addClause(new SpanTermQuery(new Term(FIELD, "b")))
+                    .addClause(new SpanTermQuery(new Term(FIELD, "c")))
+                    .setSlop(1)
+                    .build(),
+                BooleanClause.Occur.SHOULD)
+            .build();
+    final Query parent =
+        new BooleanQuery.Builder()
+            .add(new TermQuery(new Term(FIELD, "a")), BooleanClause.Occur.MUST)
+            .add(bq, BooleanClause.Occur.MUST)
+            .build();
 
     try (Monitor monitor = newMonitor()) {
       monitor.register(new MonitorQuery("1", parent));
@@ -400,44 +416,48 @@ public class TestHighlightingMatcher extends MonitorTestBase {
       assertTrue(m.getHits(FIELD).contains(new HighlightsMatch.Hit(1, 2, 1, 3)));
       assertTrue(m.getHits(FIELD).contains(new HighlightsMatch.Hit(2, 4, 2, 5)));
     }
-
   }
 
   public void testUnorderedNearWithinOrderedNear() throws Exception {
 
-    final SpanQuery spanPhrase = SpanNearQuery.newOrderedNearQuery(FIELD)
-        .addClause(new SpanTermQuery(new Term(FIELD, "time")))
-        .addClause(new SpanTermQuery(new Term(FIELD, "men")))
-        .setSlop(1)
-        .build();
+    final SpanQuery spanPhrase =
+        SpanNearQuery.newOrderedNearQuery(FIELD)
+            .addClause(new SpanTermQuery(new Term(FIELD, "time")))
+            .addClause(new SpanTermQuery(new Term(FIELD, "men")))
+            .setSlop(1)
+            .build();
 
-    final SpanQuery unorderedNear = SpanNearQuery.newUnorderedNearQuery(FIELD)
-        .addClause(spanPhrase)
-        .addClause(new SpanTermQuery(new Term(FIELD, "all")))
-        .setSlop(5)
-        .build();
+    final SpanQuery unorderedNear =
+        SpanNearQuery.newUnorderedNearQuery(FIELD)
+            .addClause(spanPhrase)
+            .addClause(new SpanTermQuery(new Term(FIELD, "all")))
+            .setSlop(5)
+            .build();
 
-    final SpanQuery orderedNear = SpanNearQuery.newOrderedNearQuery(FIELD)
-        .addClause(new SpanTermQuery(new Term(FIELD, "the")))
-        .addClause(unorderedNear)
-        .setSlop(10)
-        .build();
+    final SpanQuery orderedNear =
+        SpanNearQuery.newOrderedNearQuery(FIELD)
+            .addClause(new SpanTermQuery(new Term(FIELD, "the")))
+            .addClause(unorderedNear)
+            .setSlop(10)
+            .build();
 
-    final Query innerConjunct = new BooleanQuery.Builder()
-        .add(new TermQuery(new Term(FIELD, "is")), BooleanClause.Occur.MUST)
-        .add(orderedNear, BooleanClause.Occur.MUST)
-        .build();
+    final Query innerConjunct =
+        new BooleanQuery.Builder()
+            .add(new TermQuery(new Term(FIELD, "is")), BooleanClause.Occur.MUST)
+            .add(orderedNear, BooleanClause.Occur.MUST)
+            .build();
 
-    final Query disjunct = new BooleanQuery.Builder()
-        .add(new TermQuery(new Term(FIELD, "now")), BooleanClause.Occur.SHOULD)
-        .add(innerConjunct, BooleanClause.Occur.SHOULD)
-        .build();
-
-    final Query outerConjunct = new BooleanQuery.Builder()
-        .add(disjunct, BooleanClause.Occur.MUST)
-        .add(new TermQuery(new Term(FIELD, "good")), BooleanClause.Occur.MUST)
-        .build();
+    final Query disjunct =
+        new BooleanQuery.Builder()
+            .add(new TermQuery(new Term(FIELD, "now")), BooleanClause.Occur.SHOULD)
+            .add(innerConjunct, BooleanClause.Occur.SHOULD)
+            .build();
+    final Query outerConjunct =
+        new BooleanQuery.Builder()
+            .add(disjunct, BooleanClause.Occur.MUST)
+            .add(new TermQuery(new Term(FIELD, "good")), BooleanClause.Occur.MUST)
+            .build();
 
     try (Monitor monitor = newMonitor()) {
       monitor.register(new MonitorQuery("1", outerConjunct));
@@ -449,23 +469,24 @@ public class TestHighlightingMatcher extends MonitorTestBase {
       assertTrue(m.getHits(FIELD).contains(new HighlightsMatch.Hit(0, 0, 0, 3)));
      assertTrue(m.getHits(FIELD).contains(new HighlightsMatch.Hit(6, 24, 6, 28)));
     }
-
   }
 
   public void testMinShouldMatchQuery() throws Exception {
 
-    final Query minq = new BooleanQuery.Builder()
-        .add(new TermQuery(new Term(FIELD, "x")), BooleanClause.Occur.SHOULD)
-        .add(new TermQuery(new Term(FIELD, "y")), BooleanClause.Occur.SHOULD)
-        .add(new TermQuery(new Term(FIELD, "z")), BooleanClause.Occur.SHOULD)
-        .setMinimumNumberShouldMatch(2)
-        .build();
+    final Query minq =
+        new BooleanQuery.Builder()
+            .add(new TermQuery(new Term(FIELD, "x")), BooleanClause.Occur.SHOULD)
+            .add(new TermQuery(new Term(FIELD, "y")), BooleanClause.Occur.SHOULD)
+            .add(new TermQuery(new Term(FIELD, "z")), BooleanClause.Occur.SHOULD)
+            .setMinimumNumberShouldMatch(2)
+            .build();
 
-    final Query bq = new BooleanQuery.Builder()
-        .add(new TermQuery(new Term(FIELD, "a")), BooleanClause.Occur.MUST)
-        .add(new TermQuery(new Term(FIELD, "b")), BooleanClause.Occur.MUST)
-        .add(minq, BooleanClause.Occur.SHOULD)
-        .build();
+    final Query bq =
+        new BooleanQuery.Builder()
+            .add(new TermQuery(new Term(FIELD, "a")), BooleanClause.Occur.MUST)
+            .add(new TermQuery(new Term(FIELD, "b")), BooleanClause.Occur.MUST)
+            .add(minq, BooleanClause.Occur.SHOULD)
+            .build();
 
     try (Monitor monitor = newMonitor()) {
       monitor.register(new MonitorQuery("1", bq));
@@ -478,7 +499,6 @@ public class TestHighlightingMatcher extends MonitorTestBase {
       assertTrue(m.getHits(FIELD).contains(new HighlightsMatch.Hit(0, 0, 0, 1)));
       assertTrue(m.getHits(FIELD).contains(new HighlightsMatch.Hit(1, 2, 1, 3)));
     }
-
   }
 
   public void testComplexPhraseQueryParser() throws Exception {
@@ -494,7 +514,6 @@ public class TestHighlightingMatcher extends MonitorTestBase {
       assertEquals(2, m.getHitCount());
       assertTrue(m.getFields().contains(FIELD));
     }
-
   }
 
   public void testHighlightBatches() throws Exception {
@@ -513,7 +532,8 @@ public class TestHighlightingMatcher extends MonitorTestBase {
       Document doc3 = new Document();
       doc3.add(newTextField(FIELD, "biology text", Field.Store.NO));
 
-      MultiMatchingQueries<HighlightsMatch> matches = monitor.match(new Document[]{doc1, doc2, doc3}, HighlightsMatch.MATCHER);
+      MultiMatchingQueries<HighlightsMatch> matches =
+          monitor.match(new Document[] {doc1, doc2, doc3}, HighlightsMatch.MATCHER);
       assertEquals(2, matches.getMatchCount(0));
       assertEquals(0, matches.getMatchCount(1));
       assertEquals(1, matches.getMatchCount(2));
@@ -523,5 +543,4 @@ public class TestHighlightingMatcher extends MonitorTestBase {
       assertTrue(m2.getHits(FIELD).contains(new HighlightsMatch.Hit(0, 0, 0, 7)));
     }
   }
-
 }
diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/TestMatchAllPresearcher.java b/lucene/monitor/src/test/org/apache/lucene/monitor/TestMatchAllPresearcher.java
index cf1bf41b354..ab0336e2f8d 100644
--- a/lucene/monitor/src/test/org/apache/lucene/monitor/TestMatchAllPresearcher.java
+++ b/lucene/monitor/src/test/org/apache/lucene/monitor/TestMatchAllPresearcher.java
@@ -23,5 +23,4 @@ public class TestMatchAllPresearcher extends PresearcherTestBase {
   protected Presearcher createPresearcher() {
     return Presearcher.NO_FILTERING;
   }
-
 }
diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/TestMonitor.java b/lucene/monitor/src/test/org/apache/lucene/monitor/TestMonitor.java
index ca4da71d7e3..aa5f3452764 100644
--- a/lucene/monitor/src/test/org/apache/lucene/monitor/TestMonitor.java
+++ b/lucene/monitor/src/test/org/apache/lucene/monitor/TestMonitor.java
@@ -23,7 +22,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicInteger;
-
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
 import org.apache.lucene.analysis.core.WhitespaceTokenizer;
@@ -59,7 +58,8 @@ public class TestMonitor extends MonitorTestBase {
     doc.add(newTextField(FIELD, "This is a test document", Field.Store.NO));
 
     try (Monitor monitor = newMonitor()) {
-      monitor.register(new MonitorQuery("query1", new TermQuery(new Term(MonitorTestBase.FIELD, "test"))));
+      monitor.register(
+          new MonitorQuery("query1", new TermQuery(new Term(MonitorTestBase.FIELD, "test"))));
 
       MatchingQueries<QueryMatch> matches = monitor.match(doc, QueryMatch.SIMPLE_MATCHER);
       assertEquals(1, matches.getQueriesRun());
@@ -74,8 +74,10 @@ public class TestMonitor extends MonitorTestBase {
     doc.add(newTextField(FIELD, "that", Field.Store.NO));
 
     try (Monitor monitor = newMonitor()) {
-      monitor.register(new MonitorQuery("query1", new TermQuery(new Term(MonitorTestBase.FIELD, "this"))));
-      monitor.register(new MonitorQuery("query1", new TermQuery(new Term(MonitorTestBase.FIELD, "that"))));
+      monitor.register(
+          new MonitorQuery("query1", new TermQuery(new Term(MonitorTestBase.FIELD, "this"))));
+      monitor.register(
+          new MonitorQuery("query1", new TermQuery(new Term(MonitorTestBase.FIELD, "that"))));
 
       MatchingQueries<QueryMatch> matches = monitor.match(doc, QueryMatch.SIMPLE_MATCHER);
       assertNotNull(matches.matches("query1"));
@@ -89,7 +91,8 @@ public class TestMonitor extends MonitorTestBase {
     doc.add(newTextField(FIELD, "other things", Field.Store.NO));
 
     try (Monitor monitor = newMonitor()) {
-      monitor.register(new MonitorQuery("query1", new TermQuery(new Term(MonitorTestBase.FIELD, "this"))));
+      monitor.register(
+          new MonitorQuery("query1", new TermQuery(new Term(MonitorTestBase.FIELD, "this"))));
       monitor.register(
           new MonitorQuery("query2", new TermQuery(new Term(MonitorTestBase.FIELD, "that"))),
           new MonitorQuery("query3", new TermQuery(new Term(MonitorTestBase.FIELD, "other"))));
@@ -102,7 +105,6 @@ public class TestMonitor extends MonitorTestBase {
       assertEquals(1, matches.getQueriesRun());
       assertNotNull(matches.matches("query3"));
     }
-
   }
 
   public void testCanClearTheMonitor() throws IOException {
@@ -127,7 +129,6 @@ public class TestMonitor extends MonitorTestBase {
       MatchingQueries<QueryMatch> matches = monitor.match(doc, QueryMatch.SIMPLE_MATCHER);
       assertEquals(0, matches.getQueriesRun());
     }
-
   }
 
   // takes huge amounts of ram. TODO: what is this test doing?
@@ -139,19 +140,20 @@ public class TestMonitor extends MonitorTestBase {
       queries.add(new MonitorQuery(Integer.toString(i), MonitorTestBase.parse("test")));
     }
 
-    final int[] expectedSizes = new int[]{5001, 5001, 353};
+    final int[] expectedSizes = new int[] {5001, 5001, 353};
     final AtomicInteger callCount = new AtomicInteger();
     final AtomicInteger updateCount = new AtomicInteger();
 
-    MonitorUpdateListener listener = new MonitorUpdateListener() {
+    MonitorUpdateListener listener =
+        new MonitorUpdateListener() {
 
-      @Override
-      public void afterUpdate(List<MonitorQuery> updates) {
-        int calls = callCount.getAndIncrement();
-        updateCount.addAndGet(updates.size());
-        assertEquals(expectedSizes[calls], updates.size());
-      }
-    };
+          @Override
+          public void afterUpdate(List<MonitorQuery> updates) {
+            int calls = callCount.getAndIncrement();
+            updateCount.addAndGet(updates.size());
+            assertEquals(expectedSizes[calls], updates.size());
+          }
+        };
 
     try (Monitor monitor = new Monitor(ANALYZER)) {
       monitor.addQueryIndexUpdateListener(listener);
@@ -165,22 +167,27 @@ public class TestMonitor extends MonitorTestBase {
     HashMap<String, String> metadataMap = new HashMap<>();
     metadataMap.put("key", "value");
 
-    monitor.register(new MonitorQuery(Integer.toString(1), MonitorTestBase.parse("+test " + 1), null, metadataMap));
+    monitor.register(
+        new MonitorQuery(
+            Integer.toString(1), MonitorTestBase.parse("+test " + 1), null, metadataMap));
 
     Document doc = new Document();
     doc.add(newTextField(FIELD, "This is a test document", Field.Store.NO));
 
-    MatcherFactory<QueryMatch> testMatcherFactory = docs -> new CandidateMatcher<QueryMatch>(docs) {
-      @Override
-      protected void matchQuery(String queryId, Query matchQuery, Map<String, String> metadata) {
-        assertEquals("value", metadata.get("key"));
-      }
+    MatcherFactory<QueryMatch> testMatcherFactory =
+        docs ->
+            new CandidateMatcher<QueryMatch>(docs) {
+              @Override
+              protected void matchQuery(
+                  String queryId, Query matchQuery, Map<String, String> metadata) {
+                assertEquals("value", metadata.get("key"));
+              }
 
-      @Override
-      public QueryMatch resolve(QueryMatch match1, QueryMatch match2) {
-        return null;
-      }
-    };
+              @Override
+              public QueryMatch resolve(QueryMatch match1, QueryMatch match2) {
+                return null;
+              }
+            };
 
     monitor.match(doc, testMatcherFactory);
   }
@@ -193,35 +200,39 @@ public class TestMonitor extends MonitorTestBase {
     Document doc2 = new Document();
     doc2.add(newTextField(FIELD, "This is a kangaroo document", Field.Store.NO));
 
-
     try (Monitor monitor = new Monitor(ANALYZER)) {
-      monitor.register(new MonitorQuery("1", new TermQuery(new Term(MonitorTestBase.FIELD, "kangaroo"))));
+      monitor.register(
+          new MonitorQuery("1", new TermQuery(new Term(MonitorTestBase.FIELD, "kangaroo"))));
 
-      MultiMatchingQueries<QueryMatch> response = monitor.match(new Document[]{ doc1, doc2 }, QueryMatch.SIMPLE_MATCHER);
+      MultiMatchingQueries<QueryMatch> response =
+          monitor.match(new Document[] {doc1, doc2}, QueryMatch.SIMPLE_MATCHER);
       assertEquals(2, response.getBatchSize());
     }
   }
 
   public void testMutliValuedFieldWithNonDefaultGaps() throws IOException {
 
-    Analyzer analyzer = new Analyzer() {
-      @Override
-      public int getPositionIncrementGap(String fieldName) {
-        return 1000;
-      }
+    Analyzer analyzer =
+        new Analyzer() {
+          @Override
+          public int getPositionIncrementGap(String fieldName) {
+            return 1000;
+          }
 
-      @Override
-      public int getOffsetGap(String fieldName) {
-        return 2000;
-      }
+          @Override
+          public int getOffsetGap(String fieldName) {
+            return 2000;
+          }
 
-      @Override
-      protected TokenStreamComponents createComponents(String fieldName) {
-        return new TokenStreamComponents(new WhitespaceTokenizer());
-      }
-    };
+          @Override
+          protected TokenStreamComponents createComponents(String fieldName) {
+            return new TokenStreamComponents(new WhitespaceTokenizer());
+          }
+        };
 
-    MonitorQuery mq = new MonitorQuery("query", MonitorTestBase.parse(MonitorTestBase.FIELD + ":\"hello world\"~5"));
+    MonitorQuery mq =
+        new MonitorQuery(
+            "query", MonitorTestBase.parse(MonitorTestBase.FIELD + ":\"hello world\"~5"));
     try (Monitor monitor = new Monitor(analyzer)) {
       monitor.register(mq);
@@ -241,7 +252,5 @@ public class TestMonitor extends MonitorTestBase {
       matches = monitor.match(doc2, QueryMatch.SIMPLE_MATCHER);
       assertEquals(0, matches.getMatchCount());
     }
-
   }
-
 }
diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/TestMonitorErrorHandling.java b/lucene/monitor/src/test/org/apache/lucene/monitor/TestMonitorErrorHandling.java
index 5e2863c7c89..aabf1cfa3d5 100644
--- a/lucene/monitor/src/test/org/apache/lucene/monitor/TestMonitorErrorHandling.java
+++ b/lucene/monitor/src/test/org/apache/lucene/monitor/TestMonitorErrorHandling.java
@@ -19,7 +19,6 @@ package org.apache.lucene.monitor;
 
 import java.util.HashMap;
 import java.util.Map;
-
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.search.MatchAllDocsQuery;
@@ -46,12 +45,14 @@ public class TestMonitorErrorHandling extends MonitorTestBase {
   }
 
   public void testMonitorQueryNullValues() {
-    IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> {
-      Map<String, String> metadata2 = new HashMap<>();
-      metadata2.put("key", null);
-      new MonitorQuery("id", new MatchAllDocsQuery(), null, metadata2);
-    });
+    IllegalArgumentException e =
+        expectThrows(
+            IllegalArgumentException.class,
+            () -> {
+              Map<String, String> metadata2 = new HashMap<>();
+              metadata2.put("key", null);
+              new MonitorQuery("id", new MatchAllDocsQuery(), null, metadata2);
+            });
     assertEquals("Null value for key key in metadata map", e.getMessage());
   }
-
 }
diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/TestMonitorPersistence.java b/lucene/monitor/src/test/org/apache/lucene/monitor/TestMonitorPersistence.java
index 821ab508255..945abcd3293 100644
--- a/lucene/monitor/src/test/org/apache/lucene/monitor/TestMonitorPersistence.java
+++ b/lucene/monitor/src/test/org/apache/lucene/monitor/TestMonitorPersistence.java
@@ -20,7 +19,6 @@ package org.apache.lucene.monitor;
 import java.io.IOException;
 import java.nio.file.Path;
 import java.util.Collections;
-
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.search.MatchAllDocsQuery;
@@ -33,8 +32,10 @@ public class TestMonitorPersistence extends MonitorTestBase {
     Document doc = new Document();
     doc.add(newTextField(FIELD, "test", Field.Store.NO));
 
-    MonitorConfiguration config = new MonitorConfiguration()
-        .setIndexPath(indexDirectory, MonitorQuerySerializer.fromParser(MonitorTestBase::parse));
+    MonitorConfiguration config =
+        new MonitorConfiguration()
+            .setIndexPath(
+                indexDirectory, MonitorQuerySerializer.fromParser(MonitorTestBase::parse));
 
     try (Monitor monitor = new Monitor(ANALYZER, config)) {
       monitor.register(
@@ -45,9 +46,16 @@ public class TestMonitorPersistence extends MonitorTestBase {
 
       assertEquals(4, monitor.match(doc, QueryMatch.SIMPLE_MATCHER).getMatchCount());
 
-      IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
-          () -> monitor.register(new MonitorQuery("5", new MatchAllDocsQuery(), null, Collections.emptyMap())));
-      assertEquals("Cannot add a MonitorQuery with a null string representation to a non-ephemeral Monitor", e.getMessage());
+      IllegalArgumentException e =
+          expectThrows(
+              IllegalArgumentException.class,
+              () ->
+                  monitor.register(
+                      new MonitorQuery(
+                          "5", new MatchAllDocsQuery(), null, Collections.emptyMap())));
+      assertEquals(
+          "Cannot add a MonitorQuery with a null string representation to a non-ephemeral Monitor",
+          e.getMessage());
     }
 
     try (Monitor monitor2 = new Monitor(ANALYZER, config)) {
@@ -57,16 +65,15 @@ public class TestMonitorPersistence extends MonitorTestBase {
       MonitorQuery mq = monitor2.getQuery("4");
       assertEquals("quack", mq.getMetadata().get("wibble"));
     }
-
   }
 
   public void testEphemeralMonitorDoesNotStoreQueries() throws IOException {
 
     try (Monitor monitor2 = new Monitor(ANALYZER)) {
-      IllegalStateException e = expectThrows(IllegalStateException.class, () -> monitor2.getQuery("query"));
-      assertEquals("Cannot get queries from an index with no MonitorQuerySerializer", e.getMessage());
+      IllegalStateException e =
+          expectThrows(IllegalStateException.class, () -> monitor2.getQuery("query"));
+      assertEquals(
+          "Cannot get queries from an index with no MonitorQuerySerializer", e.getMessage());
     }
-
   }
-
 }
diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/TestMultipassPresearcher.java b/lucene/monitor/src/test/org/apache/lucene/monitor/TestMultipassPresearcher.java
index 18afe73d306..54ac305d3e5 100644
--- a/lucene/monitor/src/test/org/apache/lucene/monitor/TestMultipassPresearcher.java
+++ b/lucene/monitor/src/test/org/apache/lucene/monitor/TestMultipassPresearcher.java
@@ -18,7 +17,6 @@ package org.apache.lucene.monitor;
 
 import java.io.IOException;
-
 import org.apache.lucene.analysis.core.KeywordAnalyzer;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
@@ -50,8 +49,8 @@ public class TestMultipassPresearcher extends PresearcherTestBase {
           new MonitorQuery("3", parse("field:\"hello there world\"")),
           new MonitorQuery("4", parse("field:\"this and that\"")));
 
-      MatchingQueries<QueryMatch> matches = monitor.match(buildDoc("field", "hello world and goodbye"),
-          QueryMatch.SIMPLE_MATCHER);
+      MatchingQueries<QueryMatch> matches =
+          monitor.match(buildDoc("field", "hello world and goodbye"), QueryMatch.SIMPLE_MATCHER);
       assertEquals(2, matches.getQueriesRun());
       assertNotNull(matches.matches("1"));
     }
@@ -62,8 +61,8 @@ public class TestMultipassPresearcher extends PresearcherTestBase {
     try (Monitor monitor = newMonitor()) {
       monitor.register(new MonitorQuery("1", parse("field:(+foo +bar +(badger cormorant))")));
 
-      MatchingQueries<QueryMatch> matches
-          = monitor.match(buildDoc("field", "a badger walked into a bar"), QueryMatch.SIMPLE_MATCHER);
+      MatchingQueries<QueryMatch> matches =
+          monitor.match(buildDoc("field", "a badger walked into a bar"), QueryMatch.SIMPLE_MATCHER);
       assertEquals(0, matches.getMatchCount());
       assertEquals(0, matches.getQueriesRun());
 
@@ -74,7 +73,6 @@ public class TestMultipassPresearcher extends PresearcherTestBase {
       matches = monitor.match(buildDoc("field", "bar badger foo"), QueryMatch.SIMPLE_MATCHER);
       assertEquals(1, matches.getMatchCount());
     }
-
   }
 
   public void testQueryBuilder() throws IOException {
@@ -84,12 +82,13 @@ public class TestMultipassPresearcher extends PresearcherTestBase {
 
     Directory dir = new ByteBuffersDirectory();
     IndexWriter writer = new IndexWriter(dir, iwc);
-    MonitorConfiguration config = new MonitorConfiguration(){
-      @Override
-      public IndexWriter buildIndexWriter() {
-        return writer;
-      }
-    };
+    MonitorConfiguration config =
+        new MonitorConfiguration() {
+          @Override
+          public IndexWriter buildIndexWriter() {
+            return writer;
+          }
+        };
 
     try (Monitor monitor = new Monitor(ANALYZER, presearcher, config)) {
 
       monitor.register(new MonitorQuery("1", parse("f:test")));
@@ -103,21 +102,41 @@ public class TestMultipassPresearcher extends PresearcherTestBase {
         QueryIndex.QueryTermFilter termFilter = new QueryIndex.QueryTermFilter(reader);
 
         BooleanQuery q = (BooleanQuery) presearcher.buildQuery(docsReader, termFilter);
-        BooleanQuery expected = new BooleanQuery.Builder()
-            .add(should(new BooleanQuery.Builder()
-                .add(must(new BooleanQuery.Builder().add(should(new TermInSetQuery("f_0", new BytesRef("test")))).build()))
-                .add(must(new BooleanQuery.Builder().add(should(new TermInSetQuery("f_1", new BytesRef("test")))).build()))
-                .add(must(new BooleanQuery.Builder().add(should(new TermInSetQuery("f_2", new BytesRef("test")))).build()))
-                .add(must(new BooleanQuery.Builder().add(should(new TermInSetQuery("f_3", new BytesRef("test")))).build()))
-                .build()))
-            .add(should(new TermQuery(new Term("__anytokenfield", "__ANYTOKEN__"))))
-            .build();
+        BooleanQuery expected =
+            new BooleanQuery.Builder()
+                .add(
+                    should(
+                        new BooleanQuery.Builder()
+                            .add(
+                                must(
+                                    new BooleanQuery.Builder()
+                                        .add(
+                                            should(new TermInSetQuery("f_0", new BytesRef("test"))))
+                                        .build()))
+                            .add(
+                                must(
+                                    new BooleanQuery.Builder()
+                                        .add(
+                                            should(new TermInSetQuery("f_1", new BytesRef("test"))))
+                                        .build()))
+                            .add(
+                                must(
+                                    new BooleanQuery.Builder()
+                                        .add(
+                                            should(new TermInSetQuery("f_2", new BytesRef("test"))))
+                                        .build()))
+                            .add(
+                                must(
+                                    new BooleanQuery.Builder()
+                                        .add(
+                                            should(new TermInSetQuery("f_3", new BytesRef("test"))))
+                                        .build()))
+                            .build()))
+                .add(should(new TermQuery(new Term("__anytokenfield", "__ANYTOKEN__"))))
+                .build();
 
         assertEquals(expected, q);
       }
-
     }
-
   }
-
 }
diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/TestParallelMatcher.java b/lucene/monitor/src/test/org/apache/lucene/monitor/TestParallelMatcher.java
index 2ede9421593..2f1e187b1b4 100644
--- a/lucene/monitor/src/test/org/apache/lucene/monitor/TestParallelMatcher.java
+++ b/lucene/monitor/src/test/org/apache/lucene/monitor/TestParallelMatcher.java
@@ -22,7 +22,8 @@ import java.util.concurrent.ExecutorService;
 public class TestParallelMatcher extends ConcurrentMatcherTestBase {
 
   @Override
-  protected <T extends QueryMatch> MatcherFactory<T> matcherFactory(ExecutorService executor, MatcherFactory<T> factory, int threads) {
+  protected <T extends QueryMatch> MatcherFactory<T> matcherFactory(
+      ExecutorService executor, MatcherFactory<T> factory, int threads) {
     return ParallelMatcher.factory(executor, factory, threads);
   }
 }
diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/TestPartitionMatcher.java b/lucene/monitor/src/test/org/apache/lucene/monitor/TestPartitionMatcher.java
index cc953daa9f0..2354043bb94 100644
--- a/lucene/monitor/src/test/org/apache/lucene/monitor/TestPartitionMatcher.java
+++ b/lucene/monitor/src/test/org/apache/lucene/monitor/TestPartitionMatcher.java
@@ -25,7 +25,8 @@ import java.util.concurrent.ExecutorService;
 public class TestPartitionMatcher extends ConcurrentMatcherTestBase {
 
   @Override
-  protected <T extends QueryMatch> MatcherFactory<T> matcherFactory(ExecutorService executor, MatcherFactory<T> factory, int threads) {
+  protected <T extends QueryMatch> MatcherFactory<T> matcherFactory(
+      ExecutorService executor, MatcherFactory<T> factory, int threads) {
     return PartitionMatcher.factory(executor, factory, threads);
   }
 
@@ -55,6 +56,5 @@ public class TestPartitionMatcher extends ConcurrentMatcherTestBase {
     assertTrue(partitions.contains(Collections.singletonList("6")));
     assertTrue(partitions.contains(Arrays.asList("7", "8")));
    assertTrue(partitions.contains(Arrays.asList("9", "10")));
-
   }
 }
diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/TestPresearcherMatchCollector.java b/lucene/monitor/src/test/org/apache/lucene/monitor/TestPresearcherMatchCollector.java
index f5365c176c6..47d24d85220 100644
--- a/lucene/monitor/src/test/org/apache/lucene/monitor/TestPresearcherMatchCollector.java
+++ b/lucene/monitor/src/test/org/apache/lucene/monitor/TestPresearcherMatchCollector.java
@@ -17,13 +17,12 @@
 
 package org.apache.lucene.monitor;
 
-import java.io.IOException;
+import static org.hamcrest.CoreMatchers.containsString;
 
+import java.io.IOException;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 
-import static org.hamcrest.CoreMatchers.containsString;
-
 public class TestPresearcherMatchCollector extends MonitorTestBase {
 
   public void testMatchCollectorShowMatches() throws IOException {
@@ -57,5 +56,4 @@ public class TestPresearcherMatchCollector extends MonitorTestBase {
       assertNull(matches.match("4", 0));
     }
   }
-
 }
diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/TestQueryAnalyzer.java b/lucene/monitor/src/test/org/apache/lucene/monitor/TestQueryAnalyzer.java
index f74361abff4..e15533f4c34 100644
--- a/lucene/monitor/src/test/org/apache/lucene/monitor/TestQueryAnalyzer.java
+++ b/lucene/monitor/src/test/org/apache/lucene/monitor/TestQueryAnalyzer.java
@@ -21,7 +20,6 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.Set;
-
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.BytesRef;
@@ -56,7 +55,6 @@ public class TestQueryAnalyzer extends LuceneTestCase {
     assertFalse(querytree.advancePhase(0));
 
     assertEquals(expected, collectTerms(querytree));
-
   }
 
   public void testDisjunctionsWithAnyClausesOnlyReturnANYTOKEN() {
@@ -67,7 +65,6 @@ public class TestQueryAnalyzer extends LuceneTestCase {
     Set<Term> terms = collectTerms(q);
     assertEquals(1, terms.size());
     assertEquals(TermFilteredPresearcher.ANYTOKEN_FIELD, terms.iterator().next().field());
-
   }
 
   public void testConjunctionsDoNotAdvanceOverANYTOKENs() {
@@ -84,14 +81,14 @@ public class TestQueryAnalyzer extends LuceneTestCase {
     assertFalse(tree.advancePhase(0));
 
     assertEquals(expected, collectTerms(tree));
-
   }
 
   public void testConjunctionsCannotAdvanceOverMinWeightedTokens() {
 
-    TermWeightor weightor = TermWeightor.combine(
-        TermWeightor.termWeightor(0.1, new BytesRef("startterm")),
-        TermWeightor.lengthWeightor(1, 1));
+    TermWeightor weightor =
+        TermWeightor.combine(
+            TermWeightor.termWeightor(0.1, new BytesRef("startterm")),
+            TermWeightor.lengthWeightor(1, 1));
 
     QueryAnalyzer analyzer = new QueryAnalyzer();
 
@@ -106,7 +103,6 @@ public class TestQueryAnalyzer extends LuceneTestCase {
     assertEquals(expected, collectTerms(tree));
 
     assertFalse(tree.advancePhase(0.5));
-
   }
 
   public void testNestedConjunctions() {
@@ -129,7 +125,6 @@ public class TestQueryAnalyzer extends LuceneTestCase {
     expected = Collections.singleton(new Term("field", "d"));
     assertEquals(expected, collectTerms(tree));
     assertFalse(tree.advancePhase(0));
-
   }
 
   public void testNestedDisjunctions() {
@@ -137,58 +132,45 @@ public class TestQueryAnalyzer extends LuceneTestCase {
     Query q = MonitorTestBase.parse("+(+((+aaaa +cc) (+dd +bbb +f)))");
     QueryTree tree = analyzer.buildTree(q, TermWeightor.DEFAULT);
 
-    Set<Term> expected = new HashSet<>(Arrays.asList(
-        new Term("field", "aaaa"),
-        new Term("field", "bbb"
-    )));
+    Set<Term> expected =
+        new HashSet<>(Arrays.asList(new Term("field", "aaaa"), new Term("field", "bbb")));
 
     assertEquals(expected, collectTerms(tree));
     assertTrue(tree.advancePhase(0));
 
-    expected = new HashSet<>(Arrays.asList(
-        new Term("field", "cc"),
-        new Term("field", "dd")
-    ));
+    expected = new HashSet<>(Arrays.asList(new Term("field", "cc"), new Term("field", "dd")));
 
     assertEquals(expected, collectTerms(tree));
     assertTrue(tree.advancePhase(0));
 
-    expected = new HashSet<>(Arrays.asList(
-        new Term("field", "cc"),
-        new Term("field", "f")
-    ));
+    expected = new HashSet<>(Arrays.asList(new Term("field", "cc"), new Term("field", "f")));
 
     assertEquals(expected, collectTerms(tree));
     assertFalse(tree.advancePhase(0));
   }
 
   public void testMinWeightAdvances() {
-    QueryTree tree = QueryTree.disjunction(
-        QueryTree.conjunction(
-            QueryTree.term(new Term("field", "term1"), 1),
-            QueryTree.term(new Term("field", "term2"), 0.1),
-            QueryTree.anyTerm("*:*")
-        ),
-        QueryTree.conjunction(
-            QueryTree.disjunction(
-                QueryTree.term(new Term("field", "term4"), 0.2),
-                QueryTree.term(new Term("field", "term5"), 1)
-            ),
-            QueryTree.term(new Term("field", "term3"), 0.5)
-        )
-    );
+    QueryTree tree =
+        QueryTree.disjunction(
+            QueryTree.conjunction(
+                QueryTree.term(new Term("field", "term1"), 1),
+                QueryTree.term(new Term("field", "term2"), 0.1),
+                QueryTree.anyTerm("*:*")),
+            QueryTree.conjunction(
+                QueryTree.disjunction(
+                    QueryTree.term(new Term("field", "term4"), 0.2),
+                    QueryTree.term(new Term("field", "term5"), 1)),
+                QueryTree.term(new Term("field", "term3"), 0.5)));
 
-    Set<Term> expected = new HashSet<>(Arrays.asList(
-        new Term("field", "term1"),
-        new Term("field", "term3")
-    ));
+    Set<Term> expected =
+        new HashSet<>(Arrays.asList(new Term("field", "term1"), new Term("field", "term3")));
 
     assertEquals(expected, collectTerms(tree));
     assertTrue(tree.advancePhase(0.1f));
 
-    expected = new HashSet<>(Arrays.asList(
-        new Term("field", "term1"),
-        new Term("field", "term4"),
-        new Term("field", "term5")
-    ));
+    expected =
+        new HashSet<>(
+            Arrays.asList(
+                new Term("field", "term1"),
+                new Term("field", "term4"),
+                new Term("field", "term5")));
 
     assertEquals(expected, collectTerms(tree));
     assertFalse(tree.advancePhase(0.1f));
   }
-
 }
diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/TestQueryDecomposer.java b/lucene/monitor/src/test/org/apache/lucene/monitor/TestQueryDecomposer.java
index 1de4d86ac2a..0086bbb978e 100644
--- a/lucene/monitor/src/test/org/apache/lucene/monitor/TestQueryDecomposer.java
+++ b/lucene/monitor/src/test/org/apache/lucene/monitor/TestQueryDecomposer.java
@@ -21,7 +20,6 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.Set;
-
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
@@ -29,7 +28,7 @@ import org.apache.lucene.search.DisjunctionMaxQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
 
-public class TestQueryDecomposer extends MonitorTestBase { 
+public class TestQueryDecomposer extends MonitorTestBase {
 
   private static final QueryDecomposer decomposer = new QueryDecomposer();
 
@@ -47,18 +46,22 @@ public class TestQueryDecomposer extends MonitorTestBase {
 
   public void testNestedDisjunctions() {
     Query q = parse("(hello goodbye) world");
-    Set<Query> expected = new HashSet<>(Arrays.asList(parse("hello"), parse("goodbye"), parse("world")));
+    Set<Query> expected =
+        new HashSet<>(Arrays.asList(parse("hello"), parse("goodbye"), parse("world")));
     assertEquals(expected, decomposer.decompose(q));
   }
 
   public void testExclusions() {
-    Set<Query> expected = new HashSet<>(Arrays.asList(parse("+hello -goodbye"), parse("+world -goodbye")));
+    Set<Query> expected =
+        new HashSet<>(Arrays.asList(parse("+hello -goodbye"), parse("+world -goodbye")));
     assertEquals(expected, decomposer.decompose(parse("hello world -goodbye")));
   }
 
   public void testNestedExclusions() {
-    Set<Query> expected
-        = new HashSet<>(Arrays.asList(parse("+(+hello -goodbye) -greeting"), parse("+(+world -goodbye) -greeting")));
+    Set<Query> expected =
+        new HashSet<>(
+            Arrays.asList(
+                parse("+(+hello -goodbye) -greeting"), parse("+(+world -goodbye) -greeting")));
     assertEquals(expected, decomposer.decompose(parse("((hello world) -goodbye) -greeting")));
   }
 
@@ -68,42 +71,45 @@ public class TestQueryDecomposer extends MonitorTestBase {
   }
 
   public void testSingleValuedConjunctWithExclusions() {
-    Set<Query> expected = new HashSet<>(Arrays.asList(parse("+hello -goodbye"), parse("+world -goodbye")));
+    Set<Query> expected =
+        new HashSet<>(Arrays.asList(parse("+hello -goodbye"), parse("+world -goodbye")));
     assertEquals(expected, decomposer.decompose(parse("+(hello world) -goodbye")));
   }
 
   public void testBoostsArePreserved() {
     Set<Query> expected = new HashSet<>(Arrays.asList(parse("hello^0.7"), parse("world^0.7")));
     assertEquals(expected, decomposer.decompose(parse("+(hello world)^0.7")));
-    expected = new HashSet<>(Arrays.asList(parse("+hello^0.7 -goodbye"), parse("+world^0.7 -goodbye")));
+    expected =
+        new HashSet<>(Arrays.asList(parse("+hello^0.7 -goodbye"), parse("+world^0.7 -goodbye")));
     assertEquals(expected, decomposer.decompose(parse("+(hello world)^0.7 -goodbye")));
     expected = new HashSet<>(Arrays.asList(parse("(hello^0.5)^0.8"), parse("world^0.8")));
     assertEquals(expected, decomposer.decompose(parse("+(hello^0.5 world)^0.8")));
   }
 
   public void testDisjunctionMaxDecomposition() {
-    Query q = new DisjunctionMaxQuery(
-        Arrays.asList(new TermQuery(new Term("f", "t1")), new TermQuery(new Term("f", "t2"))), 0.1f
-    );
+    Query q =
+        new DisjunctionMaxQuery(
+            Arrays.asList(new TermQuery(new Term("f", "t1")), new TermQuery(new Term("f", "t2"))),
+            0.1f);
     Set<Query> expected = new HashSet<>(Arrays.asList(parse("f:t1"), parse("f:t2")));
     assertEquals(expected, decomposer.decompose(q));
   }
 
   public void testNestedDisjunctionMaxDecomposition() {
-    Query q = new DisjunctionMaxQuery(
-        Arrays.asList(parse("hello goodbye"), parse("world")), 0.1f
-    );
-    Set<Query> expected = new HashSet<>(Arrays.asList(parse("hello"), parse("goodbye"), parse("world")));
+    Query q = new DisjunctionMaxQuery(Arrays.asList(parse("hello goodbye"), parse("world")), 0.1f);
+    Set<Query> expected =
+        new HashSet<>(Arrays.asList(parse("hello"), parse("goodbye"), parse("world")));
     assertEquals(expected, decomposer.decompose(q));
   }
 
   public void testFilterAndShouldClause() {
     final Query shouldTermQuery = new TermQuery(new Term("f", "should"));
     final Query filterTermQuery = new TermQuery(new Term("f", "filter"));
-    Query q = new BooleanQuery.Builder()
-        .add(shouldTermQuery, BooleanClause.Occur.SHOULD)
-        .add(filterTermQuery, BooleanClause.Occur.FILTER)
-        .build();
+    Query q =
+        new BooleanQuery.Builder()
+            .add(shouldTermQuery, BooleanClause.Occur.SHOULD)
+            .add(filterTermQuery, BooleanClause.Occur.FILTER)
+            .build();
 
     assertEquals(Collections.singleton(q), decomposer.decompose(q));
   }
diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/TestQueryTermComparators.java b/lucene/monitor/src/test/org/apache/lucene/monitor/TestQueryTermComparators.java
index c2d0c5e9ac8..d411e60d516 100644
--- a/lucene/monitor/src/test/org/apache/lucene/monitor/TestQueryTermComparators.java
+++ b/lucene/monitor/src/test/org/apache/lucene/monitor/TestQueryTermComparators.java
@@ -22,7 +22,6 @@ import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
-
 import org.apache.lucene.index.Term;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
@@ -58,15 +57,12 @@ public class TestQueryTermComparators extends LuceneTestCase {
 
     Term term = new Term("f", "foobar");
     QueryTree node1 = QueryTree.term(term, 1);
-    QueryTree node2 = QueryTree.disjunction(
-        QueryTree.term(term, 1),
-        QueryTree.term(term, 1));
+    QueryTree node2 = QueryTree.disjunction(QueryTree.term(term, 1), QueryTree.term(term, 1));
 
     QueryTree conjunction = QueryTree.conjunction(node1, node2);
     Set<Term> terms = new HashSet<>();
     conjunction.collectTerms((f, b) -> terms.add(new Term(f, b)));
     assertEquals(1, terms.size());
-
   }
 
   public void testFieldWeights() {
@@ -87,17 +83,14 @@ public class TestQueryTermComparators extends LuceneTestCase {
     termfreqs.put("s", 47088);
     TermWeightor weight = TermWeightor.termFreqWeightor(termfreqs, 100, 0.8);
 
-    assertTrue(weight.applyAsDouble(new Term("f", "france")) >
-        weight.applyAsDouble(new Term("f", "s")));
-
+    assertTrue(
+        weight.applyAsDouble(new Term("f", "france")) > weight.applyAsDouble(new Term("f", "s")));
   }
 
   public void testFieldSpecificTermWeightNorms() {
 
-    TermWeightor weight = TermWeightor.termAndFieldWeightor(0.1,
-        new Term("field1", "f"),
-        new Term("field1", "g"));
+    TermWeightor weight =
+        TermWeightor.termAndFieldWeightor(0.1, new Term("field1", "f"), new Term("field1", "g"));
 
     assertEquals(0.1, weight.applyAsDouble(new Term("field1", "f")), 0);
     assertEquals(1, weight.applyAsDouble(new Term("field2", "f")), 0);
   }
-
 }
diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/TestQueryTermFilter.java b/lucene/monitor/src/test/org/apache/lucene/monitor/TestQueryTermFilter.java
index 9a5b96628e7..b9ef232489e 100644
--- a/lucene/monitor/src/test/org/apache/lucene/monitor/TestQueryTermFilter.java
+++ b/lucene/monitor/src/test/org/apache/lucene/monitor/TestQueryTermFilter.java
@@ -20,7 +19,6 @@ package org.apache.lucene.monitor;
 import java.io.IOException;
 import java.util.Collections;
 import java.util.function.BiPredicate;
-
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.util.BytesRef;
@@ -32,14 +31,18 @@ public class TestQueryTermFilter extends LuceneTestCase {
 
   public void testFiltersAreRemoved() throws IOException {
 
-    try (QueryIndex qi = new QueryIndex(new MonitorConfiguration(), new TermFilteredPresearcher())) {
-      qi.commit(Collections.singletonList(new MonitorQuery("1", new TermQuery(new Term(FIELD, "term")))));
+    try (QueryIndex qi =
+        new QueryIndex(new MonitorConfiguration(), new TermFilteredPresearcher())) {
+      qi.commit(
+          Collections.singletonList(new MonitorQuery("1", new TermQuery(new Term(FIELD, "term")))));
       assertEquals(1, qi.termFilters.size());
       BiPredicate<String, BytesRef> filter = qi.termFilters.values().iterator().next();
       assertTrue(filter.test(FIELD, new BytesRef("term")));
       assertFalse(filter.test(FIELD, new BytesRef("term2")));
 
-      qi.commit(Collections.singletonList(new MonitorQuery("2", new TermQuery(new Term(FIELD, "term2")))));
+      qi.commit(
+          Collections.singletonList(
+              new MonitorQuery("2", new TermQuery(new Term(FIELD, "term2")))));
       assertEquals(1, qi.termFilters.size());
       filter = qi.termFilters.values().iterator().next();
@@ -48,5 +51,4 @@ public class TestQueryTermFilter extends LuceneTestCase {
       assertFalse(filter.test(FIELD, new BytesRef("term3")));
     }
   }
-
 }
diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/TestRegexpQueryHandler.java b/lucene/monitor/src/test/org/apache/lucene/monitor/TestRegexpQueryHandler.java
index 8836b88c86b..a30cde1b66a 100644
--- a/lucene/monitor/src/test/org/apache/lucene/monitor/TestRegexpQueryHandler.java
+++ b/lucene/monitor/src/test/org/apache/lucene/monitor/TestRegexpQueryHandler.java
@@ -22,7 +21,6 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.Set;
-
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
@@ -34,28 +33,52 @@ public class TestRegexpQueryHandler extends BaseTokenStreamTestCase {
 
   public void testTermStreamWrapping() throws IOException {
 
-    CustomQueryHandler handler
-        = new RegexpQueryHandler("FOO", 10, "__wibble__", Collections.singleton("field1"));
+    CustomQueryHandler handler =
+        new RegexpQueryHandler("FOO", 10, "__wibble__", Collections.singleton("field1"));
 
     try (Analyzer input = new WhitespaceAnalyzer()) {
 
       // field1 is in the excluded set, so nothing should happen
-      assertTokenStreamContents(handler.wrapTermStream("field1", input.tokenStream("field1", "hello world")),
-          new String[]{ "hello", "world" });
+      assertTokenStreamContents(
+          handler.wrapTermStream("field1", input.tokenStream("field1", "hello world")),
+          new String[] {"hello", "world"});
 
       // field2 is not excluded
-      assertTokenStreamContents(handler.wrapTermStream("field2", input.tokenStream("field2", "harm alarm asdasasdasdasd")),
-          new String[]{
-              "harm", "harmFOO", "harFOO", "haFOO", "hFOO", "armFOO", "arFOO", "aFOO", "rmFOO", "rFOO", "mFOO", "FOO",
-              "alarm", "alarmFOO", "alarFOO", "alaFOO", "alFOO", "larmFOO", "larFOO", "laFOO", "lFOO",
-              "asdasasdasdasd", "__wibble__"
+      assertTokenStreamContents(
+          handler.wrapTermStream(
+              "field2", input.tokenStream("field2", "harm alarm asdasasdasdasd")),
+          new String[] {
+            "harm",
+            "harmFOO",
+            "harFOO",
+            "haFOO",
+            "hFOO",
+            "armFOO",
+            "arFOO",
+            "aFOO",
+            "rmFOO",
+            "rFOO",
+            "mFOO",
+            "FOO",
+            "alarm",
+            "alarmFOO",
+            "alarFOO",
+            "alaFOO",
+            "alFOO",
+            "larmFOO",
+            "larFOO",
+            "laFOO",
+            "lFOO",
+            "asdasasdasdasd",
+            "__wibble__"
           });
     }
   }
 
   private Set<Term> collectTerms(Query q) {
-    QueryAnalyzer builder = new QueryAnalyzer(Collections.singletonList(
-        new RegexpQueryHandler("XX", 30, "WILDCARD", null)));
+    QueryAnalyzer builder =
+        new QueryAnalyzer(
+            Collections.singletonList(new RegexpQueryHandler("XX", 30, "WILDCARD", null)));
     QueryTree tree = builder.buildTree(q, TermWeightor.DEFAULT);
     Set<Term> terms = new HashSet<>();
     tree.collectTerms((f, b) -> terms.add(new Term(f, b)));
@@ -64,21 +87,18 @@ public class TestRegexpQueryHandler extends BaseTokenStreamTestCase {
 
   public void testRegexpExtractor() {
 
-    Set<Term> expected = new HashSet<>(Arrays.asList(
-        new Term("field", "califragilisticXX"),
-        new Term("field", "WILDCARD")));
-    assertEquals(expected, collectTerms(new RegexpQuery(new Term("field", "super.*califragilistic"))));
+    Set<Term> expected =
+        new HashSet<>(
+            Arrays.asList(new Term("field", "califragilisticXX"), new Term("field", "WILDCARD")));
+    assertEquals(
+        expected, collectTerms(new RegexpQuery(new Term("field", "super.*califragilistic"))));
 
-    expected = new HashSet<>(Arrays.asList(
-        new Term("field", "hellXX"),
-        new Term("field", "WILDCARD")));
+    expected =
+        new HashSet<>(Arrays.asList(new Term("field", "hellXX"), new Term("field", "WILDCARD")));
     assertEquals(expected, collectTerms(new RegexpQuery(new Term("field", "hell."))));
 
-    expected = new HashSet<>(Arrays.asList(
-        new Term("field", "heXX"),
-        new Term("field", "WILDCARD")));
+    expected =
+        new HashSet<>(Arrays.asList(new Term("field", "heXX"), new Term("field", "WILDCARD")));
     assertEquals(expected, collectTerms(new RegexpQuery(new Term("field", "hel?o"))));
-
   }
-
 }
diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/TestSimilarities.java b/lucene/monitor/src/test/org/apache/lucene/monitor/TestSimilarities.java
index 64204150e0b..926053f9557 100644
--- a/lucene/monitor/src/test/org/apache/lucene/monitor/TestSimilarities.java
+++ b/lucene/monitor/src/test/org/apache/lucene/monitor/TestSimilarities.java
@@ -29,18 +29,21 @@ public class TestSimilarities extends MonitorTestBase {
     try (Monitor monitor = newMonitor()) {
       monitor.register(new MonitorQuery("1", MonitorTestBase.parse("test")));
 
-      Similarity similarity = new ClassicSimilarity() {
-        @Override
-        public float tf(float freq) {
-          return 1000f;
-        }
-      };
+      Similarity similarity =
+          new ClassicSimilarity() {
+            @Override
+            public float tf(float freq) {
+              return 1000f;
+            }
+          };
 
       Document doc = new Document();
       doc.add(newTextField("field", "this is a test", Field.Store.NO));
 
-      MatchingQueries<ScoringMatch> standard = monitor.match(doc, ScoringMatch.matchWithSimilarity(new ClassicSimilarity()));
-      MatchingQueries<ScoringMatch> withSim = monitor.match(doc, ScoringMatch.matchWithSimilarity(similarity));
+      MatchingQueries<ScoringMatch> standard =
+          monitor.match(doc, ScoringMatch.matchWithSimilarity(new ClassicSimilarity()));
+      MatchingQueries<ScoringMatch> withSim =
+          monitor.match(doc, ScoringMatch.matchWithSimilarity(similarity));
 
       float standScore = standard.getMatches().iterator().next().getScore();
       float simScore = withSim.getMatches().iterator().next().getScore();
diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/TestSimpleMatcher.java b/lucene/monitor/src/test/org/apache/lucene/monitor/TestSimpleMatcher.java
index 5f37116fa84..0fbaa450e22 100644
--- a/lucene/monitor/src/test/org/apache/lucene/monitor/TestSimpleMatcher.java
+++ b/lucene/monitor/src/test/org/apache/lucene/monitor/TestSimpleMatcher.java
@@ -18,7 +18,6 @@
 package org.apache.lucene.monitor;
 
 import java.io.IOException;
-
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 
@@ -28,8 +27,7 @@ public class TestSimpleMatcher extends MonitorTestBase {
 
     try (Monitor monitor = newMonitor()) {
       monitor.register(
-          new MonitorQuery("1", parse("test")),
-          new MonitorQuery("2", parse("wibble")));
+          new MonitorQuery("1", parse("test")), new MonitorQuery("2", parse("wibble")));
 
       Document doc = new Document();
       doc.add(newTextField(FIELD, "test", Field.Store.NO));
diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/TestSpanExtractors.java b/lucene/monitor/src/test/org/apache/lucene/monitor/TestSpanExtractors.java
index 7f4e36bb665..362fe3b4f29 100644
--- a/lucene/monitor/src/test/org/apache/lucene/monitor/TestSpanExtractors.java
+++ b/lucene/monitor/src/test/org/apache/lucene/monitor/TestSpanExtractors.java
@@ -21,7 +20,6 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.Set;
-
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.RegexpQuery;
@@ -37,7 +36,7 @@ import org.apache.lucene.search.spans.SpanTermQuery;
 import org.apache.lucene.search.spans.SpanWithinQuery;
 import org.apache.lucene.util.LuceneTestCase;
 
-public class TestSpanExtractors extends LuceneTestCase { 
+public class TestSpanExtractors extends LuceneTestCase {
 
   private static final QueryAnalyzer treeBuilder = new QueryAnalyzer();
 
@@ -49,32 +48,40 @@ public class TestSpanExtractors extends LuceneTestCase {
   }
 
   public void testOrderedNearExtractor() {
-    SpanNearQuery q = new SpanNearQuery(new SpanQuery[]{
-        new SpanTermQuery(new Term("field1", "term1")),
-        new SpanTermQuery(new Term("field1", "term"))
-    }, 0, true);
+    SpanNearQuery q =
+        new SpanNearQuery(
+            new SpanQuery[] {
+              new SpanTermQuery(new Term("field1", "term1")),
+              new SpanTermQuery(new Term("field1", "term"))
+            },
+            0,
+            true);
 
     Set<Term> expected = Collections.singleton(new Term("field1", "term1"));
     assertEquals(expected, collectTerms(q));
   }
 
   public void testOrderedNearWithWildcardExtractor() {
-    SpanNearQuery q = new SpanNearQuery(new SpanQuery[]{
-        new SpanMultiTermQueryWrapper<>(new RegexpQuery(new Term("field", "super.*cali.*"))),
-        new SpanTermQuery(new Term("field", "is"))
-    }, 0, true);
+    SpanNearQuery q =
+        new SpanNearQuery(
+            new SpanQuery[] {
+              new SpanMultiTermQueryWrapper<>(new RegexpQuery(new Term("field", "super.*cali.*"))),
+              new SpanTermQuery(new Term("field", "is"))
+            },
+            0,
+            true);
 
     Set<Term> expected = Collections.singleton(new Term("field", "is"));
     assertEquals(expected, collectTerms(q));
   }
 
   public void testSpanOrExtractor() {
-    SpanOrQuery or = new SpanOrQuery(new SpanTermQuery(new Term("field", "term1")),
-        new SpanTermQuery(new Term("field", "term2")));
-    Set<Term> expected = new HashSet<>(Arrays.asList(
-        new Term("field", "term1"),
-        new Term("field", "term2")
-    ));
+    SpanOrQuery or =
+        new SpanOrQuery(
+            new SpanTermQuery(new Term("field", "term1")),
+            new SpanTermQuery(new Term("field", "term2")));
+    Set<Term> expected =
+        new HashSet<>(Arrays.asList(new Term("field", "term1"), new Term("field", "term2")));
     assertEquals(expected, collectTerms(or));
   }
 
@@ -89,12 +96,13 @@ public class TestSpanExtractors extends LuceneTestCase {
     Term t1 = new Term("field", "term1");
     Term t2 = new Term("field", "term22");
     Term t3 = new Term("field", "term333");
-    SpanWithinQuery swq = new SpanWithinQuery(
-        SpanNearQuery.newOrderedNearQuery("field")
-            .addClause(new SpanTermQuery(t1))
-            .addClause(new SpanTermQuery(t2))
-            .build(),
-        new SpanTermQuery(t3));
+    SpanWithinQuery swq =
+        new SpanWithinQuery(
+            SpanNearQuery.newOrderedNearQuery("field")
+                .addClause(new SpanTermQuery(t1))
+                .addClause(new SpanTermQuery(t2))
+                .build(),
+            new SpanTermQuery(t3));
 
     assertEquals(Collections.singleton(t3), collectTerms(swq));
   }
@@ -103,12 +111,13 @@ public class TestSpanExtractors extends LuceneTestCase {
     Term t1 = new Term("field", "term1");
     Term t2 = new Term("field", "term22");
     Term t3 = new Term("field", "term333");
-    SpanContainingQuery swq = new SpanContainingQuery(
-        SpanNearQuery.newOrderedNearQuery("field")
-            .addClause(new SpanTermQuery(t1))
-            .addClause(new SpanTermQuery(t2))
-            .build(),
-        new SpanTermQuery(t3));
+    SpanContainingQuery swq =
+        new SpanContainingQuery(
+            SpanNearQuery.newOrderedNearQuery("field")
+                .addClause(new SpanTermQuery(t1))
+                .addClause(new SpanTermQuery(t2))
+                .build(),
+            new SpanTermQuery(t3));
 
     assertEquals(Collections.singleton(t3), collectTerms(swq));
   }
@@ -130,5 +139,4 @@ public class TestSpanExtractors extends LuceneTestCase {
     Query q = new SpanFirstQuery(new SpanTermQuery(t1), 10);
     assertEquals(Collections.singleton(t1), collectTerms(q));
   }
-
 }
diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/TestSuffixingNGramTokenizer.java b/lucene/monitor/src/test/org/apache/lucene/monitor/TestSuffixingNGramTokenizer.java
index 5bb2e7551f2..0a19bedcea4 100644
--- a/lucene/monitor/src/test/org/apache/lucene/monitor/TestSuffixingNGramTokenizer.java
+++ b/lucene/monitor/src/test/org/apache/lucene/monitor/TestSuffixingNGramTokenizer.java
@@ -18,7 +18,6 @@
 package org.apache.lucene.monitor;
 
 import java.io.IOException;
-
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.TokenStream;
@@ -27,41 +26,51 @@ import org.apache.lucene.analysis.core.WhitespaceTokenizer;
 
 public class TestSuffixingNGramTokenizer extends BaseTokenStreamTestCase {
 
-  private Analyzer analyzer = new Analyzer() {
-    @Override
-    protected TokenStreamComponents createComponents(String fieldName) {
-      Tokenizer source = new WhitespaceTokenizer();
-      TokenStream sink = new SuffixingNGramTokenFilter(source, "XX", "ANY", 10);
-      return new TokenStreamComponents(source, sink);
-    }
-  };
+  private Analyzer analyzer =
+      new Analyzer() {
+        @Override
+        protected TokenStreamComponents createComponents(String fieldName) {
+          Tokenizer source = new WhitespaceTokenizer();
+          TokenStream sink = new SuffixingNGramTokenFilter(source, "XX", "ANY", 10);
+          return new TokenStreamComponents(source, sink);
+        }
+      };
 
   public void testTokensAreSuffixed() throws IOException {
-    assertAnalyzesTo(analyzer, "term", new String[]{
-        "term", "termXX", "terXX", "teXX", "tXX", "ermXX", "erXX", "eXX", "rmXX", "rXX", "mXX", "XX"
-    });
+    assertAnalyzesTo(
+        analyzer,
+        "term",
+        new String[] {
+          "term", "termXX", "terXX", "teXX", "tXX", "ermXX", "erXX", "eXX", "rmXX", "rXX", "mXX",
+          "XX"
+        });
   }
 
   public void testRepeatedSuffixesAreNotEmitted() throws IOException {
-    assertAnalyzesTo(analyzer, "arm harm term", new String[]{
-        "arm", "armXX", "arXX", "aXX", "rmXX", "rXX", "mXX", "XX",
-        "harm", "harmXX", "harXX", "haXX", "hXX",
-        "term", "termXX", "terXX", "teXX", "tXX", "ermXX", "erXX", "eXX"
-    });
+    assertAnalyzesTo(
+        analyzer,
+        "arm harm term",
+        new String[] {
+          "arm", "armXX", "arXX", "aXX", "rmXX", "rXX", "mXX", "XX", "harm", "harmXX", "harXX",
+          "haXX", "hXX", "term", "termXX", "terXX", "teXX", "tXX", "ermXX", "erXX", "eXX"
+        });
   }
 
   public void testRepeatedInfixesAreNotEmitted() throws IOException {
-    assertAnalyzesTo(analyzer, "alarm alas harm", new String[]{
-        "alarm", "alarmXX", "alarXX", "alaXX", "alXX", "aXX",
-        "larmXX", "larXX", "laXX", "lXX", "armXX", "arXX", "rmXX", "rXX", "mXX", "XX",
-        "alas", "alasXX", "lasXX", "asXX", "sXX", "harm", "harmXX", "harXX", "haXX", "hXX"
-    });
+    assertAnalyzesTo(
+        analyzer,
+        "alarm alas harm",
+        new String[] {
+          "alarm", "alarmXX", "alarXX", "alaXX", "alXX", "aXX", "larmXX", "larXX", "laXX", "lXX",
+          "armXX", "arXX", "rmXX", "rXX", "mXX", "XX", "alas", "alasXX", "lasXX", "asXX", "sXX",
+          "harm", "harmXX", "harXX", "haXX", "hXX"
+        });
   }
 
   public void testLengthyTokensAreNotNgrammed() throws IOException {
-    assertAnalyzesTo(analyzer, "alongtermthatshouldntbengrammed", new String[]{
-        "alongtermthatshouldntbengrammed", "ANY"
-    });
+    assertAnalyzesTo(
+        analyzer,
+        "alongtermthatshouldntbengrammed",
+        new String[] {"alongtermthatshouldntbengrammed", "ANY"});
   }
-
 }
diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/TestTermPresearcher.java b/lucene/monitor/src/test/org/apache/lucene/monitor/TestTermPresearcher.java
index 819e217819b..f2bcaceda2a 100644
--- a/lucene/monitor/src/test/org/apache/lucene/monitor/TestTermPresearcher.java
+++ b/lucene/monitor/src/test/org/apache/lucene/monitor/TestTermPresearcher.java
@@ -20,7 +19,6 @@ package org.apache.lucene.monitor;
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
-
 import org.apache.lucene.analysis.core.KeywordAnalyzer;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
@@ -42,20 +41,22 @@ public class TestTermPresearcher extends PresearcherTestBase {
 
   public void testFiltersOnTermQueries() throws IOException {
 
-    MonitorQuery query1
-        = new MonitorQuery("1", parse("furble"));
-    MonitorQuery query2
-        = new MonitorQuery("2", parse("document"));
-    MonitorQuery query3 = new MonitorQuery("3", parse("\"a document\"")); // will be selected but not match
+    MonitorQuery query1 = new MonitorQuery("1", parse("furble"));
+    MonitorQuery query2 = new MonitorQuery("2", parse("document"));
+    MonitorQuery query3 =
+        new MonitorQuery("3", parse("\"a document\"")); // will be selected but not match
 
     try (Monitor monitor = newMonitor()) {
       monitor.register(query1, query2, query3);
 
       Map<String, Long> timings = new HashMap<>();
       QueryTimeListener timeListener =
-          (queryId, timeInNanos) -> timings.compute(queryId, (q, t) -> t == null ? timeInNanos : t + timeInNanos);
-      MatchingQueries<QueryMatch> matches = monitor.match(buildDoc(TEXTFIELD, "this is a test document"),
-          QueryTimeListener.timingMatcher(QueryMatch.SIMPLE_MATCHER, timeListener));
+          (queryId, timeInNanos) ->
+              timings.compute(queryId, (q, t) -> t == null ? timeInNanos : t + timeInNanos);
+      MatchingQueries<QueryMatch> matches =
+          monitor.match(
+              buildDoc(TEXTFIELD, "this is a test document"),
+              QueryTimeListener.timingMatcher(QueryMatch.SIMPLE_MATCHER, timeListener));
       assertEquals(1, matches.getMatchCount());
       assertNotNull(matches.matches("2"));
      assertEquals(2, matches.getQueriesRun());
@@ -70,7 +71,8 @@ public class TestTermPresearcher extends PresearcherTestBase {
     try (Monitor monitor = newMonitor()) {
       monitor.register(new MonitorQuery("1", parse("document -test")));
 
-      MatchingQueries<QueryMatch> matches = monitor.match(buildDoc(TEXTFIELD, "this is a test document"), QueryMatch.SIMPLE_MATCHER);
+      MatchingQueries<QueryMatch> matches =
+          monitor.match(buildDoc(TEXTFIELD, "this is a test document"), QueryMatch.SIMPLE_MATCHER);
       assertEquals(0, matches.getMatchCount());
       assertEquals(1, matches.getQueriesRun());
 
@@ -78,7 +80,6 @@ public class TestTermPresearcher extends PresearcherTestBase {
       assertEquals(0, matches.getMatchCount());
       assertEquals(0, matches.getQueriesRun());
     }
-
   }
 
   public void testMatchesAnyQueries() throws IOException {
@@ -86,11 +87,11 @@ public class TestTermPresearcher extends PresearcherTestBase {
     try (Monitor monitor = newMonitor()) {
       monitor.register(new MonitorQuery("1", parse("/hell./")));
 
-      MatchingQueries<QueryMatch> matches = monitor.match(buildDoc(TEXTFIELD, "hello"), QueryMatch.SIMPLE_MATCHER);
+      MatchingQueries<QueryMatch> matches =
+          monitor.match(buildDoc(TEXTFIELD, "hello"), QueryMatch.SIMPLE_MATCHER);
       assertEquals(1, matches.getMatchCount());
       assertEquals(1, matches.getQueriesRun());
     }
-
   }
 
   @Override
@@ -101,12 +102,12 @@ public class TestTermPresearcher extends PresearcherTestBase {
 
   public void testAnyTermsAreCorrectlyAnalyzed() {
     QueryAnalyzer analyzer = new QueryAnalyzer();
-    QueryTree qt = analyzer.buildTree(new MatchAllDocsQuery(), TermFilteredPresearcher.DEFAULT_WEIGHTOR);
+    QueryTree qt =
+        analyzer.buildTree(new MatchAllDocsQuery(), TermFilteredPresearcher.DEFAULT_WEIGHTOR);
 
     TermFilteredPresearcher presearcher = new TermFilteredPresearcher();
     Map extractedTerms = presearcher.collectTerms(qt);
     assertEquals(1, extractedTerms.size());
-
   }
 
   public void testQueryBuilder() throws IOException {
@@ -116,12 +117,13 @@ public class TestTermPresearcher extends PresearcherTestBase {
     TermFilteredPresearcher presearcher = new TermFilteredPresearcher();
 
     IndexWriterConfig iwc = new IndexWriterConfig(new KeywordAnalyzer());
     Directory dir = new ByteBuffersDirectory();
     IndexWriter writer = new IndexWriter(dir, iwc);
-    MonitorConfiguration config = new MonitorConfiguration(){
-      @Override
-      public IndexWriter buildIndexWriter() {
-        return writer;
-      }
-    };
+    MonitorConfiguration config =
+        new MonitorConfiguration() {
+          @Override
+          public IndexWriter buildIndexWriter() {
+            return writer;
+          }
+        };
 
     try (Monitor monitor = new Monitor(ANALYZER, presearcher, config)) {
 
@@ -137,18 +139,18 @@ public class TestTermPresearcher extends PresearcherTestBase {
        QueryIndex.QueryTermFilter termFilter = new QueryIndex.QueryTermFilter(reader);
 
        BooleanQuery q = (BooleanQuery) presearcher.buildQuery(docsReader, termFilter);
-        BooleanQuery expected = new BooleanQuery.Builder()
-            .add(should(new BooleanQuery.Builder()
-                .add(should(new TermInSetQuery("f", new BytesRef("test")))).build()))
-            .add(should(new TermQuery(new Term("__anytokenfield", "__ANYTOKEN__"))))
-            .build();
+        BooleanQuery expected =
+            new BooleanQuery.Builder()
+                .add(
+                    should(
+                        new BooleanQuery.Builder()
+                            .add(should(new TermInSetQuery("f", new BytesRef("test"))))
+                            .build()))
+                .add(should(new TermQuery(new Term("__anytokenfield", "__ANYTOKEN__"))))
+                .build();
 
         assertEquals(expected, q);
-
       }
-
     }
-
   }
-
 }
diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/TestTermsEnumTokenFilter.java b/lucene/monitor/src/test/org/apache/lucene/monitor/TestTermsEnumTokenFilter.java
index 93ff578fc2e..3322e6fc391 100644
--- a/lucene/monitor/src/test/org/apache/lucene/monitor/TestTermsEnumTokenFilter.java
+++ b/lucene/monitor/src/test/org/apache/lucene/monitor/TestTermsEnumTokenFilter.java
@@ -18,7 +18,6 @@
 package org.apache.lucene.monitor;
 
 import java.io.IOException;
-
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
@@ -48,19 +47,18 @@ public class TestTermsEnumTokenFilter extends LuceneTestCase {
     final BytesRef foo = new BytesRef("foo");
     final BytesRef bar = new BytesRef("bar");
 
-    BytesRefIterator terms = new BytesRefIterator() {
+    BytesRefIterator terms =
+        new BytesRefIterator() {
 
-      long count = 1000;
+          long count = 1000;
 
-      @Override
-      public BytesRef next() throws IOException {
-        if (count-- > 100)
-          return foo;
-        if (count-- > 0)
-          return bar;
-        return null;
-      }
-    };
+          @Override
+          public BytesRef next() throws IOException {
+            if (count-- > 100) return foo;
+            if (count-- > 0) return bar;
+            return null;
+          }
+        };
 
     try (TokenStream ts = new LeapfrogTokenFilter(new TermsEnumTokenStream(terms))) {
       while (ts.incrementToken()) {
@@ -69,5 +67,4 @@ public class TestTermsEnumTokenFilter extends LuceneTestCase {
       }
     }
   }
-
 }
diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/TestWildcardTermPresearcher.java b/lucene/monitor/src/test/org/apache/lucene/monitor/TestWildcardTermPresearcher.java
index 57fcd00958d..1a217f47dd9 100644
--- a/lucene/monitor/src/test/org/apache/lucene/monitor/TestWildcardTermPresearcher.java
+++ b/lucene/monitor/src/test/org/apache/lucene/monitor/TestWildcardTermPresearcher.java
@@ -19,7 +19,6 @@ package org.apache.lucene.monitor;
 
 import java.io.IOException;
 import java.util.Collections;
-
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 
@@ -28,16 +27,27 @@
public class TestWildcardTermPresearcher extends PresearcherTestBase { public void testFiltersWildcards() throws IOException { try (Monitor monitor = newMonitor()) { monitor.register(new MonitorQuery("1", parse("/hell.*/"))); - assertEquals(1, - monitor.match(buildDoc(TEXTFIELD, "well hello there"), QueryMatch.SIMPLE_MATCHER).getMatchCount()); - assertEquals(0, monitor.match(buildDoc(TEXTFIELD, "hi there"), QueryMatch.SIMPLE_MATCHER).getQueriesRun()); + assertEquals( + 1, + monitor + .match(buildDoc(TEXTFIELD, "well hello there"), QueryMatch.SIMPLE_MATCHER) + .getMatchCount()); + assertEquals( + 0, + monitor + .match(buildDoc(TEXTFIELD, "hi there"), QueryMatch.SIMPLE_MATCHER) + .getQueriesRun()); } } public void testNgramsOnlyMatchWildcards() throws IOException { try (Monitor monitor = newMonitor()) { monitor.register(new MonitorQuery("1", parse("hello"))); - assertEquals(0, monitor.match(buildDoc(TEXTFIELD, "hellopolis"), QueryMatch.SIMPLE_MATCHER).getQueriesRun()); + assertEquals( + 0, + monitor + .match(buildDoc(TEXTFIELD, "hellopolis"), QueryMatch.SIMPLE_MATCHER) + .getQueriesRun()); } } @@ -55,26 +65,32 @@ public class TestWildcardTermPresearcher extends PresearcherTestBase { monitor.register(new MonitorQuery("1", parse("/a.*/"))); Document doc = new Document(); - doc.add(newTextField(TEXTFIELD, repeat("a", RegexpQueryHandler.DEFAULT_MAX_TOKEN_SIZE + 1), Field.Store.NO)); + doc.add( + newTextField( + TEXTFIELD, + repeat("a", RegexpQueryHandler.DEFAULT_MAX_TOKEN_SIZE + 1), + Field.Store.NO)); MatchingQueries matches = monitor.match(doc, QueryMatch.SIMPLE_MATCHER); assertEquals(1, matches.getQueriesRun()); assertNotNull(matches.matches("1")); } - } public void testCaseSensitivity() throws IOException { try (Monitor monitor = newMonitor()) { monitor.register(new MonitorQuery("1", parse("foo"))); - assertEquals(1, + assertEquals( + 1, monitor.match(buildDoc(TEXTFIELD, "Foo foo"), QueryMatch.SIMPLE_MATCHER).getMatchCount()); } } @Override protected Presearcher createPresearcher() { - return new TermFilteredPresearcher(TermWeightor.DEFAULT, Collections.singletonList(new RegexpQueryHandler()), Collections.emptySet()); + return new TermFilteredPresearcher( + TermWeightor.DEFAULT, + Collections.singletonList(new RegexpQueryHandler()), + Collections.emptySet()); } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/charstream/CharStream.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/charstream/CharStream.java index 1a293afd985..0c40523195c 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/charstream/CharStream.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/charstream/CharStream.java @@ -17,93 +17,80 @@ package org.apache.lucene.queryparser.charstream; /** - * This interface describes a character stream that maintains line and - * column number positions of the characters. It also has the capability - * to backup the stream to some extent. An implementation of this - * interface is used in the TokenManager implementation generated by - * JavaCCParser. + * This interface describes a character stream that maintains line and column number positions of + * the characters. It also has the capability to backup the stream to some extent. An implementation + * of this interface is used in the TokenManager implementation generated by JavaCCParser. * - * All the methods except backup can be implemented in any fashion. backup - * needs to be implemented correctly for the correct operation of the lexer. 
- * Rest of the methods are all used to get information like line number, - * column number and the String that constitutes a token and are not used - * by the lexer. Hence their implementation won't affect the generated lexer's - * operation. + *

All the methods except backup can be implemented in any fashion. backup needs to be + * implemented correctly for the correct operation of the lexer. Rest of the methods are all used to + * get information like line number, column number and the String that constitutes a token and are + * not used by the lexer. Hence their implementation won't affect the generated lexer's operation. */ public interface CharStream { /** - * Returns the next character from the selected input. The method - * of selecting the input is the responsibility of the class - * implementing this interface. Can throw any java.io.IOException. + * Returns the next character from the selected input. The method of selecting the input is the + * responsibility of the class implementing this interface. Can throw any java.io.IOException. */ char readChar() throws java.io.IOException; /** - * Returns the column number of the last character for current token (being - * matched after the last call to BeginTOken). + * Returns the column number of the last character for current token (being matched after the last + * call to BeginToken). */ int getEndColumn(); /** - * Returns the line number of the last character for current token (being - * matched after the last call to BeginTOken). + * Returns the line number of the last character for current token (being matched after the last + * call to BeginToken). */ int getEndLine(); /** - * Returns the column number of the first character for current token (being - * matched after the last call to BeginTOken). + * Returns the column number of the first character for current token (being matched after the + * last call to BeginToken). */ int getBeginColumn(); /** - * Returns the line number of the first character for current token (being - * matched after the last call to BeginTOken). + * Returns the line number of the first character for current token (being matched after the last + * call to BeginToken). */ int getBeginLine(); /** - * Backs up the input stream by amount steps. Lexer calls this method if it - * had already read some characters, but could not use them to match a - * (longer) token. So, they will be used again as the prefix of the next - * token and it is the implementation's responsibility to do this right. + * Backs up the input stream by amount steps. Lexer calls this method if it had already read some + * characters, but could not use them to match a (longer) token. So, they will be used again as + * the prefix of the next token and it is the implementation's responsibility to do this right. */ void backup(int amount); /** - * Returns the next character that marks the beginning of the next token. - * All characters must remain in the buffer between two successive calls - * to this method to implement backup correctly. + * Returns the next character that marks the beginning of the next token. All characters must + * remain in the buffer between two successive calls to this method to implement backup correctly. */ char BeginToken() throws java.io.IOException; /** - * Returns a string made up of characters from the marked token beginning - * to the current buffer position. Implementations have the choice of returning - * anything that they want to. For example, for efficiency, one might decide - * to just return null, which is a valid implementation. + * Returns a string made up of characters from the marked token beginning to the current buffer + * position. Implementations have the choice of returning anything that they want to.
For example, + * for efficiency, one might decide to just return null, which is a valid implementation. */ String GetImage(); /** - * Returns an array of characters that make up the suffix of length 'len' for - * the currently matched token. This is used to build up the matched string - * for use in actions in the case of MORE. A simple and inefficient - * implementation of this is as follows : + * Returns an array of characters that make up the suffix of length 'len' for the currently + * matched token. This is used to build up the matched string for use in actions in the case of + * MORE. A simple and inefficient implementation of this is as follows : * - * { - * String t = GetImage(); - * return t.substring(t.length() - len, t.length()).toCharArray(); - * } + *

    { String t = GetImage(); return t.substring(t.length() - len, t.length()).toCharArray(); } */ char[] GetSuffix(int len); /** - * The lexer calls this function to indicate that it is done with the stream - * and hence implementations can free any resources held by this class. - * Again, the body of this function can be just empty and it will not - * affect the lexer's operation. + * The lexer calls this function to indicate that it is done with the stream and hence + * implementations can free any resources held by this class. Again, the body of this function can + * be just empty and it will not affect the lexer's operation. */ void Done(); } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/charstream/FastCharStream.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/charstream/FastCharStream.java index f48996fed29..649ecf362be 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/charstream/FastCharStream.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/charstream/FastCharStream.java @@ -20,65 +20,60 @@ import java.io.*; /** * An efficient implementation of JavaCC's CharStream interface. - *

    - * Note that this does not do line-number counting, but instead keeps track of the - * character position of the token in the input, as required by Lucene's - * {@link org.apache.lucene.analysis.tokenattributes.OffsetAttribute} API. + * + *

    Note that this does not do line-number counting, but instead keeps track of the character + * position of the token in the input, as required by Lucene's {@link + * org.apache.lucene.analysis.tokenattributes.OffsetAttribute} API. */ public final class FastCharStream implements CharStream { // See SOLR-11314 - private final static IOException READ_PAST_EOF = new IOException("Read past EOF."); + private static final IOException READ_PAST_EOF = new IOException("Read past EOF."); char[] buffer = null; - int bufferLength = 0; // end of valid chars - int bufferPosition = 0; // next char to read + int bufferLength = 0; // end of valid chars + int bufferPosition = 0; // next char to read - int tokenStart = 0; // offset in buffer - int bufferStart = 0; // position in file of buffer + int tokenStart = 0; // offset in buffer + int bufferStart = 0; // position in file of buffer - Reader input; // source of chars + Reader input; // source of chars - /** - * Constructs from a Reader. - */ + /** Constructs from a Reader. */ public FastCharStream(Reader r) { input = r; } @Override public final char readChar() throws IOException { - if (bufferPosition >= bufferLength) - refill(); + if (bufferPosition >= bufferLength) refill(); return buffer[bufferPosition++]; } private void refill() throws IOException { int newPosition = bufferLength - tokenStart; - if (tokenStart == 0) { // token won't fit in buffer - if (buffer == null) { // first time: alloc buffer + if (tokenStart == 0) { // token won't fit in buffer + if (buffer == null) { // first time: alloc buffer buffer = new char[2048]; } else if (bufferLength == buffer.length) { // grow buffer char[] newBuffer = new char[buffer.length * 2]; System.arraycopy(buffer, 0, newBuffer, 0, bufferLength); buffer = newBuffer; } - } else { // shift token to front + } else { // shift token to front System.arraycopy(buffer, tokenStart, buffer, 0, newPosition); } - bufferLength = newPosition; // update state + bufferLength = newPosition; // update state bufferPosition = newPosition; bufferStart += tokenStart; tokenStart = 0; - int charsRead = // fill space in buffer + int charsRead = // fill space in buffer input.read(buffer, newPosition, buffer.length - newPosition); - if (charsRead == -1) - throw READ_PAST_EOF; - else - bufferLength += charsRead; + if (charsRead == -1) throw READ_PAST_EOF; + else bufferLength += charsRead; } @Override diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/charstream/package-info.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/charstream/package-info.java index dc30e767015..84dbf33ce90 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/charstream/package-info.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/charstream/package-info.java @@ -15,10 +15,8 @@ * limitations under the License. */ - /** - * This package contains reusable parts for javacc-generated - * grammars (query parsers). + * This package contains reusable parts for javacc-generated grammars (query parsers). 
* * @see org.apache.lucene.queryparser.charstream.CharStream * @see org.apache.lucene.queryparser.charstream.FastCharStream diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/MultiFieldQueryParser.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/MultiFieldQueryParser.java index 3ee9c6ced0c..a269901bb42 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/MultiFieldQueryParser.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/MultiFieldQueryParser.java @@ -19,7 +19,6 @@ package org.apache.lucene.queryparser.classic; import java.util.ArrayList; import java.util.List; import java.util.Map; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; @@ -28,72 +27,58 @@ import org.apache.lucene.search.MultiPhraseQuery; import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.Query; -/** - * A QueryParser which constructs queries to search multiple fields. - * - */ -public class MultiFieldQueryParser extends QueryParser -{ +/** A QueryParser which constructs queries to search multiple fields. */ +public class MultiFieldQueryParser extends QueryParser { protected String[] fields; - protected Map boosts; + protected Map boosts; /** - * Creates a MultiFieldQueryParser. - * Allows passing of a map with term to Boost, and the boost to apply to each term. + * Creates a MultiFieldQueryParser. Allows passing of a map with term to Boost, and the boost to + * apply to each term. * - *

    It will, when parse(String query) - * is called, construct a query like this (assuming the query consists of - * two terms and you specify the two fields title and body):

    - * + *

    It will, when parse(String query) is called, construct a query like this (assuming the query + * consists of two terms and you specify the two fields title and body): * * (title:term1 body:term1) (title:term2 body:term2) * * - *

    When setDefaultOperator(AND_OPERATOR) is set, the result will be:

    - * - * + *

    When setDefaultOperator(AND_OPERATOR) is set, the result will be: * +(title:term1 body:term1) +(title:term2 body:term2) * - * - *

    When you pass a boost (title=>5 body=>10) you can get

    - * - * + * + *

    When you pass a boost (title=>5 body=>10) you can get * +(title:term1^5.0 body:term1^10.0) +(title:term2^5.0 body:term2^10.0) * * - *

    In other words, all the query's terms must appear, but it doesn't matter in - * what fields they appear.

    + *

    In other words, all the query's terms must appear, but it doesn't matter in what fields they + * appear. */ - public MultiFieldQueryParser(String[] fields, Analyzer analyzer, Map boosts) { + public MultiFieldQueryParser(String[] fields, Analyzer analyzer, Map boosts) { this(fields, analyzer); this.boosts = boosts; } - + /** * Creates a MultiFieldQueryParser. * - *

    It will, when parse(String query) - * is called, construct a query like this (assuming the query consists of - * two terms and you specify the two fields title and body):

    - * + *

    It will, when parse(String query) is called, construct a query like this (assuming the query + * consists of two terms and you specify the two fields title and body): * * (title:term1 body:term1) (title:term2 body:term2) * * - *

    When setDefaultOperator(AND_OPERATOR) is set, the result will be:

    - * - * + *

    When setDefaultOperator(AND_OPERATOR) is set, the result will be: * +(title:term1 body:term1) +(title:term2 body:term2) * - * - *

    In other words, all the query's terms must appear, but it doesn't matter in - * what fields they appear.

    + * + *

    In other words, all the query's terms must appear, but it doesn't matter in what fields they + * appear. */ public MultiFieldQueryParser(String[] fields, Analyzer analyzer) { super(null, analyzer); this.fields = fields; } - + @Override protected Query getFieldQuery(String field, String queryText, int slop) throws ParseException { if (field == null) { @@ -101,24 +86,24 @@ public class MultiFieldQueryParser extends QueryParser for (int i = 0; i < fields.length; i++) { Query q = super.getFieldQuery(fields[i], queryText, true); if (q != null) { - //If the user passes a map of boosts + // If the user passes a map of boosts if (boosts != null) { - //Get the boost from the map and apply them + // Get the boost from the map and apply them Float boost = boosts.get(fields[i]); if (boost != null) { q = new BoostQuery(q, boost.floatValue()); } } - q = applySlop(q,slop); + q = applySlop(q, slop); clauses.add(q); } } - if (clauses.size() == 0) // happens for stopwords - return null; + if (clauses.size() == 0) // happens for stopwords + return null; return getMultiFieldQuery(clauses); } Query q = super.getFieldQuery(field, queryText, true); - q = applySlop(q,slop); + q = applySlop(q, slop); return q; } @@ -134,18 +119,18 @@ public class MultiFieldQueryParser extends QueryParser } q = builder.build(); } else if (q instanceof MultiPhraseQuery) { - MultiPhraseQuery mpq = (MultiPhraseQuery)q; - + MultiPhraseQuery mpq = (MultiPhraseQuery) q; + if (slop != mpq.getSlop()) { q = new MultiPhraseQuery.Builder(mpq).setSlop(slop).build(); } } return q; } - @Override - protected Query getFieldQuery(String field, String queryText, boolean quoted) throws ParseException { + protected Query getFieldQuery(String field, String queryText, boolean quoted) + throws ParseException { if (field == null) { List clauses = new ArrayList<>(); Query[] fieldQueries = new Query[fields.length]; @@ -154,7 +139,7 @@ public class MultiFieldQueryParser extends QueryParser Query q = super.getFieldQuery(fields[i], queryText, quoted); if (q != null) { if (q instanceof BooleanQuery) { - maxTerms = Math.max(maxTerms, ((BooleanQuery)q).clauses().size()); + maxTerms = Math.max(maxTerms, ((BooleanQuery) q).clauses().size()); } else { maxTerms = Math.max(1, maxTerms); } @@ -167,7 +152,7 @@ public class MultiFieldQueryParser extends QueryParser if (fieldQueries[i] != null) { Query q = null; if (fieldQueries[i] instanceof BooleanQuery) { - List nestedClauses = ((BooleanQuery)fieldQueries[i]).clauses(); + List nestedClauses = ((BooleanQuery) fieldQueries[i]).clauses(); if (termNum < nestedClauses.size()) { q = nestedClauses.get(termNum).getQuery(); } @@ -176,7 +161,7 @@ public class MultiFieldQueryParser extends QueryParser } if (q != null) { if (boosts != null) { - //Get the boost from the map and apply them + // Get the boost from the map and apply them Float boost = boosts.get(fields[i]); if (boost != null) { q = new BoostQuery(q, boost); @@ -198,18 +183,17 @@ public class MultiFieldQueryParser extends QueryParser clauses.addAll(termClauses); } } - if (clauses.size() == 0) // happens for stopwords - return null; + if (clauses.size() == 0) // happens for stopwords + return null; return getMultiFieldQuery(clauses); } Query q = super.getFieldQuery(field, queryText, quoted); return q; } - @Override - protected Query getFuzzyQuery(String field, String termStr, float minSimilarity) throws ParseException - { + protected Query getFuzzyQuery(String field, String termStr, float minSimilarity) + throws ParseException { if (field == null) { List clauses = 
new ArrayList<>(); for (int i = 0; i < fields.length; i++) { @@ -221,8 +205,7 @@ public class MultiFieldQueryParser extends QueryParser } @Override - protected Query getPrefixQuery(String field, String termStr) throws ParseException - { + protected Query getPrefixQuery(String field, String termStr) throws ParseException { if (field == null) { List clauses = new ArrayList<>(); for (int i = 0; i < fields.length; i++) { @@ -245,9 +228,10 @@ public class MultiFieldQueryParser extends QueryParser return super.getWildcardQuery(field, termStr); } - @Override - protected Query getRangeQuery(String field, String part1, String part2, boolean startInclusive, boolean endInclusive) throws ParseException { + protected Query getRangeQuery( + String field, String part1, String part2, boolean startInclusive, boolean endInclusive) + throws ParseException { if (field == null) { List clauses = new ArrayList<>(); for (int i = 0; i < fields.length; i++) { @@ -257,12 +241,9 @@ public class MultiFieldQueryParser extends QueryParser } return super.getRangeQuery(field, part1, part2, startInclusive, endInclusive); } - - @Override - protected Query getRegexpQuery(String field, String termStr) - throws ParseException { + protected Query getRegexpQuery(String field, String termStr) throws ParseException { if (field == null) { List clauses = new ArrayList<>(); for (int i = 0; i < fields.length; i++) { @@ -272,7 +253,7 @@ public class MultiFieldQueryParser extends QueryParser } return super.getRegexpQuery(field, termStr); } - + /** Creates a multifield query */ // TODO: investigate more general approach by default, e.g. DisjunctionMaxQuery? protected Query getMultiFieldQuery(List queries) throws ParseException { @@ -288,30 +269,33 @@ public class MultiFieldQueryParser extends QueryParser /** * Parses a query which searches on the fields specified. - *

    - * If x fields are specified, this effectively constructs: + * + *

    If x fields are specified, this effectively constructs: + * *

        * 
        * (field1:query1) (field2:query2) (field3:query3)...(fieldx:queryx)
        * 
        * 
    + * * @param queries Queries strings to parse * @param fields Fields to search on * @param analyzer Analyzer to use * @throws ParseException if query parsing fails - * @throws IllegalArgumentException if the length of the queries array differs - * from the length of the fields array + * @throws IllegalArgumentException if the length of the queries array differs from the length of + * the fields array */ - public static Query parse(String[] queries, String[] fields, Analyzer analyzer) throws ParseException { + public static Query parse(String[] queries, String[] fields, Analyzer analyzer) + throws ParseException { if (queries.length != fields.length) throw new IllegalArgumentException("queries.length != fields.length"); BooleanQuery.Builder bQuery = new BooleanQuery.Builder(); - for (int i = 0; i < fields.length; i++) - { + for (int i = 0; i < fields.length; i++) { QueryParser qp = new QueryParser(fields[i], analyzer); Query q = qp.parse(queries[i]); - if (q!=null && // q never null, just being defensive - (!(q instanceof BooleanQuery) || ((BooleanQuery)q).clauses().size()>0)) { + if (q != null + && // q never null, just being defensive + (!(q instanceof BooleanQuery) || ((BooleanQuery) q).clauses().size() > 0)) { bQuery.add(q, BooleanClause.Occur.SHOULD); } } @@ -319,11 +303,11 @@ public class MultiFieldQueryParser extends QueryParser } /** - * Parses a query, searching on the fields specified. - * Use this if you need to specify certain fields as required, - * and others as prohibited. - *

    - * Usage: + * Parses a query, searching on the fields specified. Use this if you need to specify certain + * fields as required, and others as prohibited. + * + *

    Usage: + * *

        * 
        * String[] fields = {"filename", "contents", "description"};
    @@ -333,8 +317,9 @@ public class MultiFieldQueryParser extends QueryParser
        * MultiFieldQueryParser.parse("query", fields, flags, analyzer);
        * 
        * 
    - *

    - * The code above would construct a query: + * + *

    The code above would construct a query: + * *

        * 
        * (filename:query) +(contents:query) -(description:query)
    @@ -346,19 +331,21 @@ public class MultiFieldQueryParser extends QueryParser
        * @param flags Flags describing the fields
        * @param analyzer Analyzer to use
        * @throws ParseException if query parsing fails
    -   * @throws IllegalArgumentException if the length of the fields array differs
    -   *  from the length of the flags array
    +   * @throws IllegalArgumentException if the length of the fields array differs from the length of
    +   *     the flags array
        */
    -  public static Query parse(String query, String[] fields,
    -      BooleanClause.Occur[] flags, Analyzer analyzer) throws ParseException {
    +  public static Query parse(
    +      String query, String[] fields, BooleanClause.Occur[] flags, Analyzer analyzer)
    +      throws ParseException {
         if (fields.length != flags.length)
           throw new IllegalArgumentException("fields.length != flags.length");
         BooleanQuery.Builder bQuery = new BooleanQuery.Builder();
         for (int i = 0; i < fields.length; i++) {
           QueryParser qp = new QueryParser(fields[i], analyzer);
           Query q = qp.parse(query);
    -      if (q!=null && // q never null, just being defensive 
    -          (!(q instanceof BooleanQuery) || ((BooleanQuery)q).clauses().size()>0)) {
    +      if (q != null
    +          && // q never null, just being defensive
    +          (!(q instanceof BooleanQuery) || ((BooleanQuery) q).clauses().size() > 0)) {
             bQuery.add(q, flags[i]);
           }
         }
    @@ -366,11 +353,11 @@ public class MultiFieldQueryParser extends QueryParser
       }
     
       /**
    -   * Parses a query, searching on the fields specified.
    -   * Use this if you need to specify certain fields as required,
    -   * and others as prohibited.
    -   * 

    - * Usage: + * Parses a query, searching on the fields specified. Use this if you need to specify certain + * fields as required, and others as prohibited. + * + *

    Usage: + * *

        * 
        * String[] query = {"query1", "query2", "query3"};
    @@ -381,8 +368,9 @@ public class MultiFieldQueryParser extends QueryParser
        * MultiFieldQueryParser.parse(query, fields, flags, analyzer);
        * 
        * 
    - *

    - * The code above would construct a query: + * + *

    The code above would construct a query: + * *

        * 
        * (filename:query1) +(contents:query2) -(description:query3)
    @@ -394,25 +382,24 @@ public class MultiFieldQueryParser extends QueryParser
        * @param flags Flags describing the fields
        * @param analyzer Analyzer to use
        * @throws ParseException if query parsing fails
    -   * @throws IllegalArgumentException if the length of the queries, fields,
    -   *  and flags array differ
+   * @throws IllegalArgumentException if the lengths of the queries, fields, and flags arrays differ
        */
    -  public static Query parse(String[] queries, String[] fields, BooleanClause.Occur[] flags,
    -      Analyzer analyzer) throws ParseException
    -  {
    +  public static Query parse(
    +      String[] queries, String[] fields, BooleanClause.Occur[] flags, Analyzer analyzer)
    +      throws ParseException {
         if (!(queries.length == fields.length && queries.length == flags.length))
    -      throw new IllegalArgumentException("queries, fields, and flags array have have different length");
    +      throw new IllegalArgumentException(
    +          "queries, fields, and flags array have have different length");
         BooleanQuery.Builder bQuery = new BooleanQuery.Builder();
    -    for (int i = 0; i < fields.length; i++)
    -    {
    +    for (int i = 0; i < fields.length; i++) {
           QueryParser qp = new QueryParser(fields[i], analyzer);
           Query q = qp.parse(queries[i]);
    -      if (q!=null && // q never null, just being defensive
    -          (!(q instanceof BooleanQuery) || ((BooleanQuery)q).clauses().size()>0)) {
    +      if (q != null
    +          && // q never null, just being defensive
    +          (!(q instanceof BooleanQuery) || ((BooleanQuery) q).clauses().size() > 0)) {
             bQuery.add(q, flags[i]);
           }
         }
         return bQuery.build();
       }
    -
     }
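For orientation, a minimal sketch of how the two parse styles documented above are typically invoked; the class name, field names, analyzer choice, and query strings below are illustrative, not taken from this patch, and assume the standard analyzer module is on the classpath:

import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryparser.classic.MultiFieldQueryParser;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.Query;

public class MultiFieldParseSketch {
  public static void main(String[] args) throws Exception {
    String[] fields = {"title", "body"};

    // Instance form: per-field boosts are applied to every term parsed for that field.
    Map<String, Float> boosts = new HashMap<>();
    boosts.put("title", 5f);
    boosts.put("body", 10f);
    MultiFieldQueryParser parser =
        new MultiFieldQueryParser(fields, new StandardAnalyzer(), boosts);
    Query q1 = parser.parse("term1 term2");
    // Prints something like: (title:term1^5.0 body:term1^10.0) (title:term2^5.0 body:term2^10.0)
    System.out.println(q1);

    // Static form: one BooleanClause.Occur flag per field marks it required or prohibited.
    BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT};
    Query q2 = MultiFieldQueryParser.parse("query", fields, flags, new StandardAnalyzer());
    // Prints something like: +(title:query) -(body:query)
    System.out.println(q2);
  }
}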
    diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParserBase.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParserBase.java
    index 6afe913d0b1..3aaa2c61f60 100644
    --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParserBase.java
    +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParserBase.java
    @@ -16,12 +16,13 @@
      */
     package org.apache.lucene.queryparser.classic;
     
    +import static org.apache.lucene.util.automaton.Operations.DEFAULT_MAX_DETERMINIZED_STATES;
    +
     import java.io.StringReader;
     import java.text.DateFormat;
     import java.util.*;
     import java.util.regex.Matcher;
     import java.util.regex.Pattern;
    -
     import org.apache.lucene.analysis.Analyzer;
     import org.apache.lucene.document.DateTools;
     import org.apache.lucene.index.Term;
    @@ -37,20 +38,20 @@ import org.apache.lucene.util.BytesRefBuilder;
     import org.apache.lucene.util.QueryBuilder;
     import org.apache.lucene.util.automaton.RegExp;
     
    -import static org.apache.lucene.util.automaton.Operations.DEFAULT_MAX_DETERMINIZED_STATES;
    -
    -/** This class is overridden by QueryParser in QueryParser.jj
    - * and acts to separate the majority of the Java code from the .jj grammar file. 
    +/**
    + * This class is overridden by QueryParser in QueryParser.jj and acts to separate the majority of
    + * the Java code from the .jj grammar file.
      */
    -public abstract class QueryParserBase extends QueryBuilder implements CommonQueryParserConfiguration {
    +public abstract class QueryParserBase extends QueryBuilder
    +    implements CommonQueryParserConfiguration {
     
    -  static final int CONJ_NONE   = 0;
    -  static final int CONJ_AND    = 1;
    -  static final int CONJ_OR     = 2;
    +  static final int CONJ_NONE = 0;
    +  static final int CONJ_AND = 1;
    +  static final int CONJ_OR = 2;
     
    -  static final int MOD_NONE    = 0;
    -  static final int MOD_NOT     = 10;
    -  static final int MOD_REQ     = 11;
    +  static final int MOD_NONE = 0;
    +  static final int MOD_NOT = 10;
    +  static final int MOD_REQ = 11;
     
       // make it possible to call setDefaultOperator() without accessing
       // the nested class:
    @@ -75,7 +76,7 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer
       // the default date resolution
       DateTools.Resolution dateResolution = null;
       // maps field names to date resolutions
    -  Map fieldToDateResolution = null;
    +  Map fieldToDateResolution = null;
     
       boolean autoGeneratePhraseQueries;
       int maxDeterminizedStates = DEFAULT_MAX_DETERMINIZED_STATES;
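The configuration fields above are backed by public setters later in this class; a brief sketch of wiring them up (the field name, resolution, and state limit are illustrative values):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.DateTools;
import org.apache.lucene.queryparser.classic.QueryParser;

public class ParserConfigSketch {
  public static void main(String[] args) throws Exception {
    QueryParser parser = new QueryParser("body", new StandardAnalyzer());
    // Resolution used when a range endpoint on this field parses as a date.
    parser.setDateResolution("date", DateTools.Resolution.DAY);
    // Emit phrase queries when one whitespace-delimited token analyzes to several terms.
    parser.setAutoGeneratePhraseQueries(true);
    // Cap automaton size for regexp queries; exceeding it throws TooComplexToDeterminizeException.
    parser.setMaxDeterminizedStates(10000);
    System.out.println(parser.parse("date:[20200101 TO 20201231]"));
  }
}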
    @@ -85,9 +86,11 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer
         super(null);
       }
     
    -  /** Initializes a query parser.  Called by the QueryParser constructor
    -   *  @param f  the default field for query terms.
    -   *  @param a   used to find terms in the query text.
    +  /**
    +   * Initializes a query parser. Called by the QueryParser constructor
    +   *
    +   * @param f the default field for query terms.
    +   * @param a used to find terms in the query text.
        */
       public void init(String f, Analyzer a) {
         setAnalyzer(a);
    @@ -97,78 +100,70 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer
     
       // the generated parser will create these in QueryParser
       public abstract void ReInit(CharStream stream);
    +
       public abstract Query TopLevelQuery(String field) throws ParseException;
     
    -
    -  /** Parses a query string, returning a {@link org.apache.lucene.search.Query}.
    -   *  @param query  the query string to be parsed.
    -   *  @throws ParseException if the parsing fails
    +  /**
    +   * Parses a query string, returning a {@link org.apache.lucene.search.Query}.
    +   *
    +   * @param query the query string to be parsed.
    +   * @throws ParseException if the parsing fails
        */
       public Query parse(String query) throws ParseException {
         ReInit(new FastCharStream(new StringReader(query)));
         try {
           // TopLevelQuery is a Query followed by the end-of-input (EOF)
           Query res = TopLevelQuery(field);
    -      return res!=null ? res : newBooleanQuery().build();
    -    }
    -    catch (ParseException | TokenMgrError tme) {
    +      return res != null ? res : newBooleanQuery().build();
    +    } catch (ParseException | TokenMgrError tme) {
           // rethrow to include the original query:
    -      ParseException e = new ParseException("Cannot parse '" +query+ "': " + tme.getMessage());
    +      ParseException e = new ParseException("Cannot parse '" + query + "': " + tme.getMessage());
           e.initCause(tme);
           throw e;
         } catch (TooManyClauses tmc) {
    -      ParseException e = new ParseException("Cannot parse '" +query+ "': too many boolean clauses");
    +      ParseException e =
    +          new ParseException("Cannot parse '" + query + "': too many boolean clauses");
           e.initCause(tmc);
           throw e;
         }
       }
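As the catch blocks above show, a failed parse is rethrown as a ParseException that quotes the offending query and keeps the original ParseException or TokenMgrError as its cause; a minimal caller sketch (the query string is illustrative):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.classic.QueryParser;

public class ParseErrorSketch {
  public static void main(String[] args) {
    QueryParser parser = new QueryParser("body", new StandardAnalyzer());
    try {
      parser.parse("title:(unbalanced"); // unclosed group -> ParseException
    } catch (ParseException e) {
      // Message starts with: Cannot parse 'title:(unbalanced': ...
      System.err.println(e.getMessage());
      System.err.println("cause: " + e.getCause());
    }
  }
}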
     
    -  /**
    -   * @return Returns the default field.
    -   */
    +  /** @return Returns the default field. */
       public String getField() {
         return field;
       }
     
    -  /**
    -   * @see #setAutoGeneratePhraseQueries(boolean)
    -   */
    +  /** @see #setAutoGeneratePhraseQueries(boolean) */
       public final boolean getAutoGeneratePhraseQueries() {
         return autoGeneratePhraseQueries;
       }
     
       /**
    -   * Set to true if phrase queries will be automatically generated
    -   * when the analyzer returns more than one term from whitespace
    -   * delimited text.
    -   * NOTE: this behavior may not be suitable for all languages.
    -   * 

    - * Set to false if phrase queries should only be generated when - * surrounded by double quotes. + * Set to true if phrase queries will be automatically generated when the analyzer returns more + * than one term from whitespace delimited text. NOTE: this behavior may not be suitable for all + * languages. + * + *

    Set to false if phrase queries should only be generated when surrounded by double quotes. */ public void setAutoGeneratePhraseQueries(boolean value) { this.autoGeneratePhraseQueries = value; } - /** - * Get the minimal similarity for fuzzy queries. - */ + /** Get the minimal similarity for fuzzy queries. */ @Override public float getFuzzyMinSim() { - return fuzzyMinSim; + return fuzzyMinSim; + } + + /** Set the minimum similarity for fuzzy queries. Default is 2f. */ + @Override + public void setFuzzyMinSim(float fuzzyMinSim) { + this.fuzzyMinSim = fuzzyMinSim; } /** - * Set the minimum similarity for fuzzy queries. - * Default is 2f. - */ - @Override - public void setFuzzyMinSim(float fuzzyMinSim) { - this.fuzzyMinSim = fuzzyMinSim; - } - - /** * Get the prefix length for fuzzy queries. + * * @return Returns the fuzzyPrefixLength. */ @Override @@ -178,6 +173,7 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer /** * Set the prefix length for fuzzy queries. Default is 0. + * * @param fuzzyPrefixLength The fuzzyPrefixLength to set. */ @Override @@ -186,123 +182,101 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer } /** - * Sets the default slop for phrases. If zero, then exact phrase matches - * are required. Default value is zero. + * Sets the default slop for phrases. If zero, then exact phrase matches are required. Default + * value is zero. */ @Override public void setPhraseSlop(int phraseSlop) { this.phraseSlop = phraseSlop; } - /** - * Gets the default slop for phrases. - */ + /** Gets the default slop for phrases. */ @Override public int getPhraseSlop() { return phraseSlop; } - /** * Set to true to allow leading wildcard characters. - *

    - * When set, * or ? are allowed as - * the first character of a PrefixQuery and WildcardQuery. - * Note that this can produce very slow - * queries on big indexes. - *

    - * Default: false. + * + *

    When set, * or ? are allowed as the first character of a + * PrefixQuery and WildcardQuery. Note that this can produce very slow queries on big indexes. + * + *

    Default: false. */ @Override public void setAllowLeadingWildcard(boolean allowLeadingWildcard) { this.allowLeadingWildcard = allowLeadingWildcard; } - /** - * @see #setAllowLeadingWildcard(boolean) - */ + /** @see #setAllowLeadingWildcard(boolean) */ @Override public boolean getAllowLeadingWildcard() { return allowLeadingWildcard; } /** - * Sets the boolean operator of the QueryParser. - * In default mode (OR_OPERATOR) terms without any modifiers - * are considered optional: for example capital of Hungary is equal to - * capital OR of OR Hungary.
    - * In AND_OPERATOR mode terms are considered to be in conjunction: the - * above mentioned query is parsed as capital AND of AND Hungary + * Sets the boolean operator of the QueryParser. In default mode (OR_OPERATOR) terms + * without any modifiers are considered optional: for example capital of Hungary is + * equal to capital OR of OR Hungary.
    + * In AND_OPERATOR mode terms are considered to be in conjunction: the above + * mentioned query is parsed as capital AND of AND Hungary */ public void setDefaultOperator(Operator op) { this.operator = op; } - - /** - * Gets implicit operator setting, which will be either AND_OPERATOR - * or OR_OPERATOR. - */ + /** Gets implicit operator setting, which will be either AND_OPERATOR or OR_OPERATOR. */ public Operator getDefaultOperator() { return operator; } - /** - * By default QueryParser uses {@link org.apache.lucene.search.MultiTermQuery#CONSTANT_SCORE_REWRITE} - * when creating a {@link PrefixQuery}, {@link WildcardQuery} or {@link TermRangeQuery}. This implementation is generally preferable because it - * a) Runs faster b) Does not have the scarcity of terms unduly influence score - * c) avoids any {@link TooManyClauses} exception. - * However, if your application really needs to use the - * old-fashioned {@link BooleanQuery} expansion rewriting and the above - * points are not relevant then use this to change - * the rewrite method. + * By default QueryParser uses {@link + * org.apache.lucene.search.MultiTermQuery#CONSTANT_SCORE_REWRITE} when creating a {@link + * PrefixQuery}, {@link WildcardQuery} or {@link TermRangeQuery}. This implementation is generally + * preferable because it a) Runs faster b) Does not have the scarcity of terms unduly influence + * score c) avoids any {@link TooManyClauses} exception. However, if your application really needs + * to use the old-fashioned {@link BooleanQuery} expansion rewriting and the above points are not + * relevant then use this to change the rewrite method. */ @Override public void setMultiTermRewriteMethod(MultiTermQuery.RewriteMethod method) { multiTermRewriteMethod = method; } - - /** - * @see #setMultiTermRewriteMethod - */ + /** @see #setMultiTermRewriteMethod */ @Override public MultiTermQuery.RewriteMethod getMultiTermRewriteMethod() { return multiTermRewriteMethod; } - /** - * Set locale used by date range parsing, lowercasing, and other - * locale-sensitive operations. - */ + /** Set locale used by date range parsing, lowercasing, and other locale-sensitive operations. */ @Override public void setLocale(Locale locale) { this.locale = locale; } - /** - * Returns current locale, allowing access by subclasses. - */ + /** Returns current locale, allowing access by subclasses. */ @Override public Locale getLocale() { return locale; } - + @Override public void setTimeZone(TimeZone timeZone) { this.timeZone = timeZone; } - + @Override public TimeZone getTimeZone() { return timeZone; } /** - * Sets the default date resolution used by RangeQueries for fields for which no - * specific date resolutions has been set. Field specific resolutions can be set - * with {@link #setDateResolution(String, org.apache.lucene.document.DateTools.Resolution)}. + * Sets the default date resolution used by RangeQueries for fields for which no specific date + * resolutions has been set. Field specific resolutions can be set with {@link + * #setDateResolution(String, org.apache.lucene.document.DateTools.Resolution)}. * * @param dateResolution the default date resolution to set */ @@ -331,10 +305,8 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer } /** - * Returns the date resolution that is used by RangeQueries for the given field. - * Returns null, if no default or field specific date resolution has been set - * for the given field. - * + * Returns the date resolution that is used by RangeQueries for the given field. 
Returns null, if + * no default or field specific date resolution has been set for the given field. */ public DateTools.Resolution getDateResolution(String fieldName) { if (fieldName == null) { @@ -356,18 +328,17 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer } /** - * @param maxDeterminizedStates the maximum number of states that - * determinizing a regexp query can result in. If the query results in any - * more states a TooComplexToDeterminizeException is thrown. + * @param maxDeterminizedStates the maximum number of states that determinizing a regexp query can + * result in. If the query results in any more states a TooComplexToDeterminizeException is + * thrown. */ public void setMaxDeterminizedStates(int maxDeterminizedStates) { this.maxDeterminizedStates = maxDeterminizedStates; } /** - * @return the maximum number of states that determinizing a regexp query - * can result in. If the query results in any more states a - * TooComplexToDeterminizeException is thrown. + * @return the maximum number of states that determinizing a regexp query can result in. If the + * query results in any more states a TooComplexToDeterminizeException is thrown. */ public int getMaxDeterminizedStates() { return maxDeterminizedStates; @@ -379,7 +350,7 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer // If this term is introduced by AND, make the preceding term required, // unless it's already prohibited if (clauses.size() > 0 && conj == CONJ_AND) { - BooleanClause c = clauses.get(clauses.size()-1); + BooleanClause c = clauses.get(clauses.size() - 1); if (!c.isProhibited()) clauses.set(clauses.size() - 1, new BooleanClause(c.getQuery(), Occur.MUST)); } @@ -389,15 +360,14 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer // unless it's prohibited (that means we leave -a OR b but +a OR b-->a OR b) // notice if the input is a OR b, first term is parsed as required; without // this modification a OR b would parsed as +a OR b - BooleanClause c = clauses.get(clauses.size()-1); + BooleanClause c = clauses.get(clauses.size() - 1); if (!c.isProhibited()) clauses.set(clauses.size() - 1, new BooleanClause(c.getQuery(), Occur.SHOULD)); } // We might have been passed a null query; the term might have been // filtered away by the analyzer. 
- if (q == null) - return; + if (q == null) return; if (operator == OR_OPERATOR) { // We set REQUIRED if we're introduced by AND or +; PROHIBITED if @@ -411,26 +381,23 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer // We set PROHIBITED if we're introduced by NOT or -; We set REQUIRED // if not PROHIBITED and not introduced by OR prohibited = (mods == MOD_NOT); - required = (!prohibited && conj != CONJ_OR); + required = (!prohibited && conj != CONJ_OR); } - if (required && !prohibited) - clauses.add(newBooleanClause(q, BooleanClause.Occur.MUST)); - else if (!required && !prohibited) - clauses.add(newBooleanClause(q, BooleanClause.Occur.SHOULD)); + if (required && !prohibited) clauses.add(newBooleanClause(q, BooleanClause.Occur.MUST)); + else if (!required && !prohibited) clauses.add(newBooleanClause(q, BooleanClause.Occur.SHOULD)); else if (!required && prohibited) clauses.add(newBooleanClause(q, BooleanClause.Occur.MUST_NOT)); - else - throw new RuntimeException("Clause cannot be both required and prohibited"); + else throw new RuntimeException("Clause cannot be both required and prohibited"); } /** - * Adds clauses generated from analysis over text containing whitespace. - * There are no operators, so the query's clauses can either be MUST (if the - * default operator is AND) or SHOULD (default OR). + * Adds clauses generated from analysis over text containing whitespace. There are no operators, + * so the query's clauses can either be MUST (if the default operator is AND) or SHOULD (default + * OR). * - * If all of the clauses in the given Query are TermQuery-s, this method flattens the result - * by adding the TermQuery-s individually to the output clause list; otherwise, the given Query - * is added as a single clause including its nested clauses. + *

    If all of the clauses in the given Query are TermQuery-s, this method flattens the result by + * adding the TermQuery-s individually to the output clause list; otherwise, the given Query is + * added as a single clause including its nested clauses. */ protected void addMultiTermClauses(List clauses, Query q) { // We might have been passed a null query; the term might have been @@ -441,19 +408,20 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer boolean allNestedTermQueries = false; if (q instanceof BooleanQuery) { allNestedTermQueries = true; - for (BooleanClause clause : ((BooleanQuery)q).clauses()) { - if ( ! (clause.getQuery() instanceof TermQuery)) { + for (BooleanClause clause : ((BooleanQuery) q).clauses()) { + if (!(clause.getQuery() instanceof TermQuery)) { allNestedTermQueries = false; break; } } } if (allNestedTermQueries) { - clauses.addAll(((BooleanQuery)q).clauses()); + clauses.addAll(((BooleanQuery) q).clauses()); } else { - BooleanClause.Occur occur = operator == OR_OPERATOR ? BooleanClause.Occur.SHOULD : BooleanClause.Occur.MUST; + BooleanClause.Occur occur = + operator == OR_OPERATOR ? BooleanClause.Occur.SHOULD : BooleanClause.Occur.MUST; if (q instanceof BooleanQuery) { - for (BooleanClause clause : ((BooleanQuery)q).clauses()) { + for (BooleanClause clause : ((BooleanQuery) q).clauses()) { clauses.add(newBooleanClause(clause.getQuery(), occur)); } } else { @@ -463,36 +431,41 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer } /** - * @exception org.apache.lucene.queryparser.classic.ParseException throw in overridden method to disallow + * @exception org.apache.lucene.queryparser.classic.ParseException throw in overridden method to + * disallow */ - protected Query getFieldQuery(String field, String queryText, boolean quoted) throws ParseException { + protected Query getFieldQuery(String field, String queryText, boolean quoted) + throws ParseException { return newFieldQuery(getAnalyzer(), field, queryText, quoted); } - - /** - * @exception org.apache.lucene.queryparser.classic.ParseException throw in overridden method to disallow - */ - protected Query newFieldQuery(Analyzer analyzer, String field, String queryText, boolean quoted) throws ParseException { - BooleanClause.Occur occur = operator == Operator.AND ? BooleanClause.Occur.MUST : BooleanClause.Occur.SHOULD; - return createFieldQuery(analyzer, occur, field, queryText, quoted || autoGeneratePhraseQueries, phraseSlop); - } /** - * Base implementation delegates to {@link #getFieldQuery(String,String,boolean)}. - * This method may be overridden, for example, to return - * a SpanNearQuery instead of a PhraseQuery. - * - * @exception org.apache.lucene.queryparser.classic.ParseException throw in overridden method to disallow + * @exception org.apache.lucene.queryparser.classic.ParseException throw in overridden method to + * disallow */ - protected Query getFieldQuery(String field, String queryText, int slop) - throws ParseException { + protected Query newFieldQuery(Analyzer analyzer, String field, String queryText, boolean quoted) + throws ParseException { + BooleanClause.Occur occur = + operator == Operator.AND ? BooleanClause.Occur.MUST : BooleanClause.Occur.SHOULD; + return createFieldQuery( + analyzer, occur, field, queryText, quoted || autoGeneratePhraseQueries, phraseSlop); + } + + /** + * Base implementation delegates to {@link #getFieldQuery(String,String,boolean)}. 
This method may + * be overridden, for example, to return a SpanNearQuery instead of a PhraseQuery. + * + * @exception org.apache.lucene.queryparser.classic.ParseException throw in overridden method to + * disallow + */ + protected Query getFieldQuery(String field, String queryText, int slop) throws ParseException { Query query = getFieldQuery(field, queryText, true); if (query instanceof PhraseQuery) { query = addSlopToPhrase((PhraseQuery) query, slop); } else if (query instanceof MultiPhraseQuery) { - MultiPhraseQuery mpq = (MultiPhraseQuery)query; - + MultiPhraseQuery mpq = (MultiPhraseQuery) query; + if (slop != mpq.getSlop()) { query = new MultiPhraseQuery.Builder(mpq).setSlop(slop).build(); } @@ -501,9 +474,7 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer return query; } - /** - * Rebuild a phrase query with a slop value - */ + /** Rebuild a phrase query with a slop value */ private PhraseQuery addSlopToPhrase(PhraseQuery query, int slop) { PhraseQuery.Builder builder = new PhraseQuery.Builder(); builder.setSlop(slop); @@ -516,19 +487,17 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer return builder.build(); } - protected Query getRangeQuery(String field, - String part1, - String part2, - boolean startInclusive, - boolean endInclusive) throws ParseException - { + protected Query getRangeQuery( + String field, String part1, String part2, boolean startInclusive, boolean endInclusive) + throws ParseException { DateFormat df = DateFormat.getDateInstance(DateFormat.SHORT, locale); df.setLenient(true); DateTools.Resolution resolution = getDateResolution(field); - + try { part1 = DateTools.dateToString(df.parse(part1), resolution); - } catch (Exception e) { } + } catch (Exception e) { + } try { Date d2 = df.parse(part2); @@ -545,27 +514,30 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer d2 = cal.getTime(); } part2 = DateTools.dateToString(d2, resolution); - } catch (Exception e) { } + } catch (Exception e) { + } return newRangeQuery(field, part1, part2, startInclusive, endInclusive); } - /** - * Builds a new BooleanClause instance - * @param q sub query - * @param occur how this clause should occur when matching documents - * @return new BooleanClause instance - */ + /** + * Builds a new BooleanClause instance + * + * @param q sub query + * @param occur how this clause should occur when matching documents + * @return new BooleanClause instance + */ protected BooleanClause newBooleanClause(Query q, BooleanClause.Occur occur) { return new BooleanClause(q, occur); } /** * Builds a new PrefixQuery instance + * * @param prefix Prefix term * @return new PrefixQuery instance */ - protected Query newPrefixQuery(Term prefix){ + protected Query newPrefixQuery(Term prefix) { PrefixQuery query = new PrefixQuery(prefix); query.setRewriteMethod(multiTermRewriteMethod); return query; @@ -573,18 +545,19 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer /** * Builds a new RegexpQuery instance + * * @param regexp Regexp term * @return new RegexpQuery instance */ protected Query newRegexpQuery(Term regexp) { - RegexpQuery query = new RegexpQuery(regexp, RegExp.ALL, - maxDeterminizedStates); + RegexpQuery query = new RegexpQuery(regexp, RegExp.ALL, maxDeterminizedStates); query.setRewriteMethod(multiTermRewriteMethod); return query; } /** * Builds a new FuzzyQuery instance + * * @param term Term * @param minimumSimilarity minimum similarity * @param prefixLength prefix 
length @@ -593,13 +566,14 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer protected Query newFuzzyQuery(Term term, float minimumSimilarity, int prefixLength) { // FuzzyQuery doesn't yet allow constant score rewrite String text = term.text(); - int numEdits = FuzzyQuery.floatToEdits(minimumSimilarity, - text.codePointCount(0, text.length())); - return new FuzzyQuery(term,numEdits,prefixLength); + int numEdits = + FuzzyQuery.floatToEdits(minimumSimilarity, text.codePointCount(0, text.length())); + return new FuzzyQuery(term, numEdits, prefixLength); } /** * Builds a new {@link TermRangeQuery} instance + * * @param field Field * @param part1 min * @param part2 max @@ -607,23 +581,25 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer * @param endInclusive true if the end of the range is inclusive * @return new {@link TermRangeQuery} instance */ - protected Query newRangeQuery(String field, String part1, String part2, boolean startInclusive, boolean endInclusive) { + protected Query newRangeQuery( + String field, String part1, String part2, boolean startInclusive, boolean endInclusive) { final BytesRef start; final BytesRef end; - + if (part1 == null) { start = null; } else { start = getAnalyzer().normalize(field, part1); } - + if (part2 == null) { end = null; } else { end = getAnalyzer().normalize(field, part2); } - - final TermRangeQuery query = new TermRangeQuery(field, start, end, startInclusive, endInclusive); + + final TermRangeQuery query = + new TermRangeQuery(field, start, end, startInclusive, endInclusive); query.setRewriteMethod(multiTermRewriteMethod); return query; @@ -631,6 +607,7 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer /** * Builds a new MatchAllDocsQuery instance + * * @return new MatchAllDocsQuery instance */ protected Query newMatchAllDocsQuery() { @@ -639,6 +616,7 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer /** * Builds a new WildcardQuery instance + * * @param t wildcard term * @return new WildcardQuery instance */ @@ -649,52 +627,48 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer } /** - * Factory method for generating query, given a set of clauses. - * By default creates a boolean query composed of clauses passed in. + * Factory method for generating query, given a set of clauses. By default creates a boolean query + * composed of clauses passed in. * - * Can be overridden by extending classes, to modify query being - * returned. - * - * @param clauses List that contains {@link org.apache.lucene.search.BooleanClause} instances - * to join. + *

    Can be overridden by extending classes, to modify query being returned. * + * @param clauses List that contains {@link org.apache.lucene.search.BooleanClause} instances to + * join. * @return Resulting {@link org.apache.lucene.search.Query} object. - * @exception org.apache.lucene.queryparser.classic.ParseException throw in overridden method to disallow + * @exception org.apache.lucene.queryparser.classic.ParseException throw in overridden method to + * disallow */ protected Query getBooleanQuery(List clauses) throws ParseException { - if (clauses.size()==0) { + if (clauses.size() == 0) { return null; // all clause words were filtered away by the analyzer. } BooleanQuery.Builder query = newBooleanQuery(); - for(final BooleanClause clause: clauses) { + for (final BooleanClause clause : clauses) { query.add(clause); } return query.build(); } /** - * Factory method for generating a query. Called when parser - * parses an input term token that contains one or more wildcard - * characters (? and *), but is not a prefix term token (one - * that has just a single * character at the end) - *

    - * Depending on settings, prefix term may be lower-cased - * automatically. It will not go through the default Analyzer, - * however, since normal Analyzers are unlikely to work properly - * with wildcard templates. - *

    - * Can be overridden by extending classes, to provide custom handling for - * wildcard queries, which may be necessary due to missing analyzer calls. + * Factory method for generating a query. Called when parser parses an input term token that + * contains one or more wildcard characters (? and *), but is not a prefix term token (one that + * has just a single * character at the end) + * + *

    Depending on settings, prefix term may be lower-cased automatically. It will not go through + * the default Analyzer, however, since normal Analyzers are unlikely to work properly with + * wildcard templates. + * + *

    Can be overridden by extending classes, to provide custom handling for wildcard queries, + * which may be necessary due to missing analyzer calls. * * @param field Name of the field query will use. - * @param termStr Term token that contains one or more wild card - * characters (? or *), but is not simple prefix term - * + * @param termStr Term token that contains one or more wild card characters (? or *), but is not + * simple prefix term * @return Resulting {@link org.apache.lucene.search.Query} built for the term - * @exception org.apache.lucene.queryparser.classic.ParseException throw in overridden method to disallow + * @exception org.apache.lucene.queryparser.classic.ParseException throw in overridden method to + * disallow */ - protected Query getWildcardQuery(String field, String termStr) throws ParseException - { + protected Query getWildcardQuery(String field, String termStr) throws ParseException { if ("*".equals(field)) { if ("*".equals(termStr)) return newMatchAllDocsQuery(); } @@ -713,18 +687,18 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer BytesRefBuilder sb = new BytesRefBuilder(); int last = 0; - while (wildcardMatcher.find()){ + while (wildcardMatcher.find()) { if (wildcardMatcher.start() > 0) { String chunk = termStr.substring(last, wildcardMatcher.start()); BytesRef normalized = getAnalyzer().normalize(field, chunk); sb.append(normalized); } - //append the matched group - without normalizing + // append the matched group - without normalizing sb.append(new BytesRef(wildcardMatcher.group())); last = wildcardMatcher.end(); } - if (last < termStr.length()){ + if (last < termStr.length()) { String chunk = termStr.substring(last); BytesRef normalized = getAnalyzer().normalize(field, chunk); sb.append(normalized); @@ -733,27 +707,23 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer } /** - * Factory method for generating a query. Called when parser - * parses an input term token that contains a regular expression - * query. - *

    - * Depending on settings, pattern term may be lower-cased - * automatically. It will not go through the default Analyzer, - * however, since normal Analyzers are unlikely to work properly - * with regular expression templates. - *

    - * Can be overridden by extending classes, to provide custom handling for - * regular expression queries, which may be necessary due to missing analyzer - * calls. + * Factory method for generating a query. Called when parser parses an input term token that + * contains a regular expression query. + * + *

    Depending on settings, pattern term may be lower-cased automatically. It will not go through + * the default Analyzer, however, since normal Analyzers are unlikely to work properly with + * regular expression templates. + * + *

    Can be overridden by extending classes, to provide custom handling for regular expression + * queries, which may be necessary due to missing analyzer calls. * * @param field Name of the field query will use. * @param termStr Term token that contains a regular expression - * * @return Resulting {@link org.apache.lucene.search.Query} built for the term - * @exception org.apache.lucene.queryparser.classic.ParseException throw in overridden method to disallow + * @exception org.apache.lucene.queryparser.classic.ParseException throw in overridden method to + * disallow */ - protected Query getRegexpQuery(String field, String termStr) throws ParseException - { + protected Query getRegexpQuery(String field, String termStr) throws ParseException { // We need to pass the whole string to #normalize, which will not work with // custom attribute factories for the binary term impl, and may not work // with some analyzers @@ -763,30 +733,27 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer } /** - * Factory method for generating a query (similar to - * {@link #getWildcardQuery}). Called when parser parses an input term - * token that uses prefix notation; that is, contains a single '*' wildcard - * character as its last character. Since this is a special case - * of generic wildcard term, and such a query can be optimized easily, - * this usually results in a different query object. - *

    - * Depending on settings, a prefix term may be lower-cased - * automatically. It will not go through the default Analyzer, - * however, since normal Analyzers are unlikely to work properly + * Factory method for generating a query (similar to {@link #getWildcardQuery}). Called when + * parser parses an input term token that uses prefix notation; that is, contains a single '*' + * wildcard character as its last character. Since this is a special case of generic wildcard + * term, and such a query can be optimized easily, this usually results in a different query + * object. + * + *

    Depending on settings, a prefix term may be lower-cased automatically. It will not go + * through the default Analyzer, however, since normal Analyzers are unlikely to work properly * with wildcard templates. - *

    - * Can be overridden by extending classes, to provide custom handling for - * wild card queries, which may be necessary due to missing analyzer calls. + * + *

    Can be overridden by extending classes, to provide custom handling for wild card queries, + * which may be necessary due to missing analyzer calls. * * @param field Name of the field query will use. - * @param termStr Term token to use for building term for the query - * (without trailing '*' character!) - * + * @param termStr Term token to use for building term for the query (without trailing '*' + * character!) * @return Resulting {@link org.apache.lucene.search.Query} built for the term - * @exception org.apache.lucene.queryparser.classic.ParseException throw in overridden method to disallow + * @exception org.apache.lucene.queryparser.classic.ParseException throw in overridden method to + * disallow */ - protected Query getPrefixQuery(String field, String termStr) throws ParseException - { + protected Query getPrefixQuery(String field, String termStr) throws ParseException { if (!allowLeadingWildcard && termStr.startsWith("*")) throw new ParseException("'*' not allowed as first character in PrefixQuery"); BytesRef term = getAnalyzer().normalize(field, termStr); @@ -794,38 +761,44 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer return newPrefixQuery(t); } - /** - * Factory method for generating a query (similar to - * {@link #getWildcardQuery}). Called when parser parses - * an input term token that has the fuzzy suffix (~) appended. + /** + * Factory method for generating a query (similar to {@link #getWildcardQuery}). Called when + * parser parses an input term token that has the fuzzy suffix (~) appended. * * @param field Name of the field query will use. * @param termStr Term token to use for building term for the query - * * @return Resulting {@link org.apache.lucene.search.Query} built for the term - * @exception org.apache.lucene.queryparser.classic.ParseException throw in overridden method to disallow + * @exception org.apache.lucene.queryparser.classic.ParseException throw in overridden method to + * disallow */ - protected Query getFuzzyQuery(String field, String termStr, float minSimilarity) throws ParseException - { + protected Query getFuzzyQuery(String field, String termStr, float minSimilarity) + throws ParseException { BytesRef term = getAnalyzer().normalize(field, termStr); Term t = new Term(field, term); return newFuzzyQuery(t, minSimilarity, fuzzyPrefixLength); } - - // extracted from the .jj grammar - Query handleBareTokenQuery(String qfield, Token term, Token fuzzySlop, boolean prefix, boolean wildcard, boolean fuzzy, boolean regexp) throws ParseException { + // extracted from the .jj grammar + Query handleBareTokenQuery( + String qfield, + Token term, + Token fuzzySlop, + boolean prefix, + boolean wildcard, + boolean fuzzy, + boolean regexp) + throws ParseException { Query q; - String termImage=discardEscapeChar(term.image); + String termImage = discardEscapeChar(term.image); if (wildcard) { q = getWildcardQuery(qfield, term.image); } else if (prefix) { - q = getPrefixQuery(qfield, - discardEscapeChar(term.image.substring - (0, term.image.length()-1))); + q = + getPrefixQuery( + qfield, discardEscapeChar(term.image.substring(0, term.image.length() - 1))); } else if (regexp) { - q = getRegexpQuery(qfield, term.image.substring(1, term.image.length()-1)); + q = getRegexpQuery(qfield, term.image.substring(1, term.image.length() - 1)); } else if (fuzzy) { q = handleBareFuzzy(qfield, fuzzySlop, termImage); } else { @@ -834,15 +807,16 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer return q; } - 
Query handleBareFuzzy(String qfield, Token fuzzySlop, String termImage) - throws ParseException { + Query handleBareFuzzy(String qfield, Token fuzzySlop, String termImage) throws ParseException { Query q; float fms = fuzzyMinSim; try { fms = Float.parseFloat(fuzzySlop.image.substring(1)); - } catch (Exception ignored) { } - if(fms < 0.0f){ - throw new ParseException("Minimum similarity for a FuzzyQuery has to be between 0.0f and 1.0f !"); + } catch (Exception ignored) { + } + if (fms < 0.0f) { + throw new ParseException( + "Minimum similarity for a FuzzyQuery has to be between 0.0f and 1.0f !"); } else if (fms >= 1.0f && fms != (int) fms) { throw new ParseException("Fractional edit distances are not allowed!"); } @@ -852,14 +826,15 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer // extracted from the .jj grammar Query handleQuotedTerm(String qfield, Token term, Token fuzzySlop) throws ParseException { - int s = phraseSlop; // default + int s = phraseSlop; // default if (fuzzySlop != null) { try { - s = (int)Float.parseFloat(fuzzySlop.image.substring(1)); + s = (int) Float.parseFloat(fuzzySlop.image.substring(1)); + } catch (Exception ignored) { } - catch (Exception ignored) { } } - return getFieldQuery(qfield, discardEscapeChar(term.image.substring(1, term.image.length()-1)), s); + return getFieldQuery( + qfield, discardEscapeChar(term.image.substring(1, term.image.length() - 1)), s); } // extracted from the .jj grammar @@ -868,11 +843,10 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer float f = (float) 1.0; try { f = Float.parseFloat(boost.image); - } - catch (Exception ignored) { - /* Should this be handled somehow? (defaults to "no boost", if - * boost number is invalid) - */ + } catch (Exception ignored) { + /* Should this be handled somehow? (defaults to "no boost", if + * boost number is invalid) + */ } // avoid boosting null queries, such as those caused by stop words @@ -883,15 +857,12 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer return q; } - - /** - * Returns a String where the escape char has been - * removed, or kept only once if there was a double escape. - * - * Supports escaped unicode characters, e. g. translates - * \\u0041 to A. + * Returns a String where the escape char has been removed, or kept only once if there was a + * double escape. * + *

    Supports escaped unicode characters, e. g. translates \\u0041 to A + * . */ String discardEscapeChar(String input) throws ParseException { // Create char array to hold unescaped char sequence @@ -919,7 +890,7 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer codePoint += hexToInt(curChar) * codePointMultiplier; codePointMultiplier >>>= 4; if (codePointMultiplier == 0) { - output[length++] = (char)codePoint; + output[length++] = (char) codePoint; codePoint = 0; } } else if (lastCharWasEscapeChar) { @@ -957,7 +928,7 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer static final int hexToInt(char c) throws ParseException { if ('0' <= c && c <= '9') { return c - '0'; - } else if ('a' <= c && c <= 'f'){ + } else if ('a' <= c && c <= 'f') { return c - 'a' + 10; } else if ('A' <= c && c <= 'F') { return c - 'A' + 10; @@ -967,8 +938,8 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer } /** - * Returns a String where those characters that QueryParser - * expects to be escaped are escaped by a preceding \. + * Returns a String where those characters that QueryParser expects to be escaped are escaped by a + * preceding \. */ public static String escape(String s) { StringBuilder sb = new StringBuilder(); @@ -976,13 +947,12 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer char c = s.charAt(i); // These characters are part of the query syntax and must be escaped if (c == '\\' || c == '+' || c == '-' || c == '!' || c == '(' || c == ')' || c == ':' - || c == '^' || c == '[' || c == ']' || c == '\"' || c == '{' || c == '}' || c == '~' - || c == '*' || c == '?' || c == '|' || c == '&' || c == '/') { + || c == '^' || c == '[' || c == ']' || c == '\"' || c == '{' || c == '}' || c == '~' + || c == '*' || c == '?' || c == '|' || c == '&' || c == '/') { sb.append('\\'); } sb.append(c); } return sb.toString(); } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/package-info.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/package-info.java index 9f77eb9b7e1..cd446f42cee 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/package-info.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/package-info.java @@ -15,291 +15,382 @@ * limitations under the License. */ - /** * A simple query parser implemented with JavaCC. * - *

    Note that JavaCC defines lots of public classes, methods and fields - * that do not need to be public.  These clutter the documentation.  - * Sorry. - *

    Note that because JavaCC defines a class named Token, org.apache.lucene.analysis.Token - * must always be fully qualified in source code in this package. + *

    Note that JavaCC defines lots of public classes, methods and fields that do not need to be + * public.  These clutter the documentation.  Sorry. * - *

    NOTE: {@link org.apache.lucene.queryparser.flexible.standard} has an alternative queryparser that matches the syntax of this one, but is more modular, - * enabling substantial customization to how a query is created. + *

    Note that because JavaCC defines a class named Token, + * org.apache.lucene.analysis.Token must always be fully qualified in source code in this + * package. + * + *

    NOTE: {@link org.apache.lucene.queryparser.flexible.standard} has an alternative + * queryparser that matches the syntax of this one, but is more modular, enabling substantial + * customization to how a query is created. * *

    Query Parser Syntax


    + * *

    Overview

    + * *
    - *

    Although Lucene provides the ability to create your own - * queries through its API, it also provides a rich query - * language through the Query Parser, a lexer which - * interprets a string into a Lucene Query using JavaCC. - *

    Generally, the query parser syntax may change from - * release to release. This page describes the syntax as of - * the current release. If you are using a different - * version of Lucene, please consult the copy of - * docs/queryparsersyntax.html that was distributed - * with the version you are using. - *

    - * Before choosing to use the provided Query Parser, please consider the following: - *

      - * - *
1. If you are programmatically generating a query string and then - * parsing it with the query parser then you should seriously consider building - * your queries directly with the query API. In other words, the query - * parser is designed for human-entered text, not for program-generated - * text.
2. Untokenized fields are best added directly to queries, and not - * through the query parser. If a field's values are generated programmatically - * by the application, then so should query clauses for this field. - * An analyzer, which the query parser uses, is designed to convert human-entered - * text to terms. Program-generated values, like dates, keywords, etc., - * should be consistently program-generated.
3. In a query form, fields which are general text should use the query - * parser. All others, such as date ranges, keywords, etc. are better added - * directly through the query API. A field with a limit set of values, - * that can be specified with a pull-down menu should not be added to a - * query string which is subsequently parsed, but rather added as a - * TermQuery clause.

      Although Lucene provides the ability to create your own queries through its API, it also + * provides a rich query language through the Query Parser, a lexer which interprets a string into a + * Lucene Query using JavaCC. + * + *

      Generally, the query parser syntax may change from release to release. This page describes the + * syntax as of the current release. If you are using a different version of Lucene, please consult + * the copy of docs/queryparsersyntax.html that was distributed with + * the version you are using. + * + *

      Before choosing to use the provided Query Parser, please consider the following: + * + *

        + *
1. If you are programmatically generating a query string and then parsing it with the query + * parser, then you should seriously consider building your queries directly with the query + * API. In other words, the query parser is designed for human-entered text, not for + * program-generated text.
      2. Untokenized fields are best added directly to queries, and not through the query parser. If + * a field's values are generated programmatically by the application, then so should query + * clauses for this field. An analyzer, which the query parser uses, is designed to convert + * human-entered text to terms. Program-generated values, like dates, keywords, etc., should + * be consistently program-generated. + *
3. In a query form, fields which are general text should use the query parser. All others, + * such as date ranges, keywords, etc. are better added directly through the query API. A + * field with a limited set of values that can be specified with a pull-down menu should not be + * added to a query string which is subsequently parsed, but rather added as a TermQuery + * clause.
      - * + * *
    - * - * - * + * + *

    + * *

    Terms

    + * *
    - *

    A query is broken up into terms and operators. There are two types of terms: Single Terms and Phrases. + * + *

    A query is broken up into terms and operators. There are two types of terms: Single Terms and + * Phrases. + * *

    A Single Term is a single word such as "test" or "hello". + * *

    A Phrase is a group of words surrounded by double quotes such as "hello dolly". - *

    Multiple terms can be combined together with Boolean operators to form a more complex query (see below). - *

    Note: The analyzer used to create the index will be used on the terms and phrases in the query string. - * So it is important to choose an analyzer that will not interfere with the terms used in the query string. - *

    - * - * - * + * + *

    Multiple terms can be combined together with Boolean operators to form a more complex query + * (see below). + * + *

    Note: The analyzer used to create the index will be used on the terms and phrases in the query + * string. So it is important to choose an analyzer that will not interfere with the terms used in + * the query string. + * + *

    + * *

    Fields

    + * *
    - *

    Lucene supports fielded data. When performing a search you can either specify a field, or use the default field. The field names and default field is implementation specific. - *

    You can search any field by typing the field name followed by a colon ":" and then the term you are looking for. - *

    As an example, let's assume a Lucene index contains two fields, title and text and text is the default field. - * If you want to find the document entitled "The Right Way" which contains the text "don't go this way", you can enter: + * + *

Lucene supports fielded data. When performing a search you can either specify a field, or use + * the default field. The field names and default field are implementation specific. + * + *

    You can search any field by typing the field name followed by a colon ":" and then the term + * you are looking for. + * + *

As an example, let's assume a Lucene index contains two fields, title and text, and text is the + * default field. If you want to find the document entitled "The Right Way" which contains the text + * "don't go this way", you can enter: + *

    title:"The Right Way" AND text:go
    + * *

    or + * *

    title:"The Right Way" AND go
    + * *

    Since text is the default field, the field indicator is not required. + * *

    Note: The field is only valid for the term that it directly precedes, so the query + * *

    title:The Right Way
    - *

    Will only find "The" in the title field. It will find "Right" and "Way" in the default field (in this case the text field). - *

    - * - * - * + * + *

    Will only find "The" in the title field. It will find "Right" and "Way" in the default field + * (in this case the text field). + * + *
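For illustration, a minimal sketch of handing such a fielded query string to the classic parser; the "text" default field and the StandardAnalyzer are assumptions here, not something this patch prescribes:

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.queryparser.classic.QueryParser;
    import org.apache.lucene.search.Query;

    // "text" is the assumed default field, so the bare term "go" searches it.
    QueryParser parser = new QueryParser("text", new StandardAnalyzer());
    Query q = parser.parse("title:\"The Right Way\" AND go"); // throws ParseException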

    + * *

    Term Modifiers

    + * *
    - *

    Lucene supports modifying query terms to provide a wide range of searching options. - * + * + *

    Lucene supports modifying query terms to provide a wide range of searching options. + * *

    Wildcard Searches

    - *

    Lucene supports single and multiple character wildcard searches within single terms - * (not within phrase queries). + * + *

    Lucene supports single and multiple character wildcard searches within single terms (not + * within phrase queries). + * *

    To perform a single character wildcard search use the "?" symbol. + * *

    To perform a multiple character wildcard search use the "*" symbol. - *

    The single character wildcard search looks for terms that match that with the single character replaced. For example, to search for "text" or "test" you can use the search: + * + *

The single character wildcard search looks for terms that match the term with the single character + * replaced. For example, to search for "text" or "test" you can use the search: + *

    te?t
    - *

    Multiple character wildcard searches looks for 0 or more characters. For example, to search for test, tests or tester, you can use the search: + * + *

Multiple character wildcard searches look for 0 or more characters. For example, to search + * for test, tests or tester, you can use the search: + *

    test*
    + * *

    You can also use the wildcard searches in the middle of a term. + * *

    te*t
    - *

    Note: You cannot use a * or ? symbol as the first character of a search. - * + * + *

    Note: You cannot use a * or ? symbol as the first character of a search. + * *
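A hedged sketch of the query object such a wildcard term resolves to; the "text" field name is an assumption:

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.WildcardQuery;

    // te?t / te*t style terms end up as WildcardQuery instances; a leading
    // wildcard is rejected unless setAllowLeadingWildcard(true) is configured.
    WildcardQuery wq = new WildcardQuery(new Term("text", "te?t"));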

    Regular Expression Searches

    - *

    Lucene supports regular expression searches matching a pattern between forward slashes "/". The syntax may change across releases, but the current supported - * syntax is documented in the {@link org.apache.lucene.util.automaton.RegExp RegExp} class. For example to find documents containing "moat" or "boat": - * + * + *

Lucene supports regular expression searches matching a pattern between forward slashes "/". + * The syntax may change across releases, but the current supported syntax is documented in the + * {@link org.apache.lucene.util.automaton.RegExp RegExp} class. For example, to find documents + * containing "moat" or "boat": + *

    /[mb]oat/
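As a sketch, the equivalent programmatic query; the "text" field is assumed:

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.RegexpQuery;

    // /[mb]oat/ is parsed into a RegexpQuery over the given term pattern.
    RegexpQuery rq = new RegexpQuery(new Term("text", "[mb]oat"));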
    + * * + * *

    Fuzzy Searches

    - *

    Lucene supports fuzzy searches based on Damerau-Levenshtein Distance. To do a fuzzy search use the tilde, "~", symbol at the end of a Single word Term. For example to search for a term similar in spelling to "roam" use the fuzzy search: + * + *

    Lucene supports fuzzy searches based on Damerau-Levenshtein Distance. To do a fuzzy search use + * the tilde, "~", symbol at the end of a Single word Term. For example to search for a term similar + * in spelling to "roam" use the fuzzy search: + * *

    roam~
    + * *

    This search will find terms like foam and roams. - *

    An additional (optional) parameter can specify the maximum number of edits allowed. The value is between 0 and 2, For example: + * + *

An additional (optional) parameter can specify the maximum number of edits allowed. The value + * is between 0 and 2. For example: + *

    roam~1
    + * *

    The default that is used if the parameter is not given is 2 edit distances. - *

    Previously, a floating point value was allowed here. This syntax is considered deprecated and will be removed in Lucene 5.0 - * + * + *

Previously, a floating point value was allowed here. This syntax is considered deprecated and + * will be removed in Lucene 5.0. + *
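A minimal sketch of the corresponding query object, assuming the "text" field:

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.FuzzyQuery;

    // roam~1 corresponds to maxEdits = 1; the bare roam~ form uses
    // FuzzyQuery.defaultMaxEdits, i.e. 2.
    FuzzyQuery fq = new FuzzyQuery(new Term("text", "roam"), 1);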

    Proximity Searches

    - *

    Lucene supports finding words are a within a specific distance away. To do a proximity search use the tilde, "~", symbol at the end of a Phrase. For example to search for a "apache" and "jakarta" within 10 words of each other in a document use the search: + * + *

Lucene supports finding words that are within a specific distance of each other. To do a proximity + * search use the tilde, "~", symbol at the end of a Phrase. For example, to search for "apache" and + * "jakarta" within 10 words of each other in a document use the search: + *

    "jakarta apache"~10
    + * * + * *
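Programmatically, the same proximity constraint is expressed as a sloppy PhraseQuery; a sketch, with the "text" field assumed:

    import org.apache.lucene.search.PhraseQuery;

    // "jakarta apache"~10: the slop of 10 allows up to 10 positions of movement.
    PhraseQuery pq = new PhraseQuery(10, "text", "jakarta", "apache");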

    Range Searches

    - *

    Range Queries allow one to match documents whose field(s) values - * are between the lower and upper bound specified by the Range Query. - * Range Queries can be inclusive or exclusive of the upper and lower bounds. - * Sorting is done lexicographically. + * + *

    Range Queries allow one to match documents whose field(s) values are between the lower and + * upper bound specified by the Range Query. Range Queries can be inclusive or exclusive of the + * upper and lower bounds. Sorting is done lexicographically. + * *

    mod_date:[20020101 TO 20030101]
    - *

    This will find documents whose mod_date fields have values between 20020101 and 20030101, inclusive. - * Note that Range Queries are not reserved for date fields. You could also use range queries with non-date fields: + * + *

    This will find documents whose mod_date fields have values between 20020101 and 20030101, + * inclusive. Note that Range Queries are not reserved for date fields. You could also use range + * queries with non-date fields: + * *

    title:{Aida TO Carmen}
    - *

    This will find all documents whose titles are between Aida and Carmen, but not including Aida and Carmen. - *

    Inclusive range queries are denoted by square brackets. Exclusive range queries are denoted by - * curly brackets. - * + * + *

    This will find all documents whose titles are between Aida and Carmen, but not including Aida + * and Carmen. + * + *

    Inclusive range queries are denoted by square brackets. Exclusive range queries are denoted by + * curly brackets. + * *
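A sketch of the equivalent API call; TermRangeQuery.newStringRange takes the inclusiveness of each bound as its final two arguments:

    import org.apache.lucene.search.TermRangeQuery;

    // mod_date:[20020101 TO 20030101]: square brackets mean both flags are true;
    // curly brackets would pass false for the corresponding bound.
    TermRangeQuery range =
        TermRangeQuery.newStringRange("mod_date", "20020101", "20030101", true, true);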

    Boosting a Term

    - *

    Lucene provides the relevance level of matching documents based on the terms found. To boost a term use the caret, "^", symbol with a boost factor (a number) at the end of the term you are searching. The higher the boost factor, the more relevant the term will be. - *

    Boosting allows you to control the relevance of a document by boosting its term. For example, if you are searching for + * + *

    Lucene provides the relevance level of matching documents based on the terms found. To boost a + * term use the caret, "^", symbol with a boost factor (a number) at the end of the term you are + * searching. The higher the boost factor, the more relevant the term will be. + * + *

    Boosting allows you to control the relevance of a document by boosting its term. For example, + * if you are searching for + * *

    jakarta apache
    - *

    and you want the term "jakarta" to be more relevant boost it using the ^ symbol along with the boost factor next to the term. - * You would type: + * + *

and you want the term "jakarta" to be more relevant, boost it using the ^ symbol along with the + * boost factor next to the term. You would type: + *

    jakarta^4 apache
    - *

    This will make documents with the term jakarta appear more relevant. You can also boost Phrase Terms as in the example: + * + *

    This will make documents with the term jakarta appear more relevant. You can also boost Phrase + * Terms as in the example: + * *

    "jakarta apache"^4 "Apache Lucene"
    - *

    By default, the boost factor is 1. Although the boost factor must be positive, it can be less than 1 (e.g. 0.2) - *

    - * - * - * - * + * + *

By default, the boost factor is 1. Although the boost factor must be positive, it can be less + * than 1 (e.g. 0.2). + *
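A sketch of what jakarta^4 becomes once parsed, assuming the "text" field:

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.BoostQuery;
    import org.apache.lucene.search.TermQuery;

    // The caret syntax wraps the parsed clause in a BoostQuery with the factor.
    BoostQuery boosted = new BoostQuery(new TermQuery(new Term("text", "jakarta")), 4f);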

    + * *

    Boolean Operators

    + * *
    - *

    Boolean operators allow terms to be combined through logic operators. - * Lucene supports AND, "+", OR, NOT and "-" as Boolean operators(Note: Boolean operators must be ALL CAPS). - * + * + *

Boolean operators allow terms to be combined through logic operators. Lucene supports AND, + * "+", OR, NOT and "-" as Boolean operators (Note: Boolean operators must be ALL CAPS). + *

    OR

    - *

    The OR operator is the default conjunction operator. This means that if there is no Boolean operator between two terms, the OR operator is used. - * The OR operator links two terms and finds a matching document if either of the terms exist in a document. This is equivalent to a union using sets. - * The symbol || can be used in place of the word OR. + * + *

    The OR operator is the default conjunction operator. This means that if there is no Boolean + * operator between two terms, the OR operator is used. The OR operator links two terms and finds a + * matching document if either of the terms exist in a document. This is equivalent to a union using + * sets. The symbol || can be used in place of the word OR. + * *

    To search for documents that contain either "jakarta apache" or just "jakarta" use the query: + * *

    "jakarta apache" jakarta
    + * *

    or + * *

    "jakarta apache" OR jakarta
    + * * + * *
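If AND is preferred as the implicit operator between bare terms, the parser can be reconfigured; a one-line sketch, where parser is a QueryParser instance as in the earlier example:

    // Bare terms are then combined with AND instead of the default OR.
    parser.setDefaultOperator(QueryParser.AND_OPERATOR);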

    AND

    - *

    The AND operator matches documents where both terms exist anywhere in the text of a single document. - * This is equivalent to an intersection using sets. The symbol && can be used in place of the word AND. - *

    To search for documents that contain "jakarta apache" and "Apache Lucene" use the query: + * + *

    The AND operator matches documents where both terms exist anywhere in the text of a single + * document. This is equivalent to an intersection using sets. The symbol && can be used in + * place of the word AND. + * + *

    To search for documents that contain "jakarta apache" and "Apache Lucene" use the query: + * *

    "jakarta apache" AND "Apache Lucene"
    + * * + * *

    +

    - *

    The "+" or required operator requires that the term after the "+" symbol exist somewhere in a the field of a single document. + * + *

The "+" or required operator requires that the term after the "+" symbol exist somewhere in the + * field of a single document. + *

    To search for documents that must contain "jakarta" and may contain "lucene" use the query: + * *

    +jakarta lucene
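Programmatically, +jakarta lucene maps onto a BooleanQuery with one required and one optional clause; a sketch, with the "text" field assumed:

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.BooleanClause.Occur;
    import org.apache.lucene.search.BooleanQuery;
    import org.apache.lucene.search.TermQuery;

    // MUST for the required "+" term, SHOULD for the optional one.
    BooleanQuery bq =
        new BooleanQuery.Builder()
            .add(new TermQuery(new Term("text", "jakarta")), Occur.MUST)
            .add(new TermQuery(new Term("text", "lucene")), Occur.SHOULD)
            .build();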
    + * * + * *

    NOT

    - *

    The NOT operator excludes documents that contain the term after NOT. - * This is equivalent to a difference using sets. The symbol ! can be used in place of the word NOT. - *

    To search for documents that contain "jakarta apache" but not "Apache Lucene" use the query: + * + *

    The NOT operator excludes documents that contain the term after NOT. This is equivalent to a + * difference using sets. The symbol ! can be used in place of the word NOT. + * + *

    To search for documents that contain "jakarta apache" but not "Apache Lucene" use the query: + * *

    "jakarta apache" NOT "Apache Lucene"
    - *

    Note: The NOT operator cannot be used with just one term. For example, the following search will return no results: + * + *

    Note: The NOT operator cannot be used with just one term. For example, the following search + * will return no results: + * *

    NOT "jakarta apache"
    + * * + * *

    -

    + * *

    The "-" or prohibit operator excludes documents that contain the term after the "-" symbol. - *

    To search for documents that contain "jakarta apache" but not "Apache Lucene" use the query: + * + *

    To search for documents that contain "jakarta apache" but not "Apache Lucene" use the query: + * *

    "jakarta apache" -"Apache Lucene"
    + * *
    - * - * - * + * + *

    + * *

    Grouping

    + * *
    - *

    Lucene supports using parentheses to group clauses to form sub queries. This can be very useful if you want to control the boolean logic for a query. + * + *

    Lucene supports using parentheses to group clauses to form sub queries. This can be very + * useful if you want to control the boolean logic for a query. + * *

    To search for either "jakarta" or "apache" and "website" use the query: + * *

    (jakarta OR apache) AND website
    - *

    This eliminates any confusion and makes sure you that website must exist and either term jakarta or apache may exist. - *

    - * - * - * + * + *

This eliminates any confusion and makes sure that website must exist and either term + * jakarta or apache may exist. + *
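As a sketch, the grouped form is simply passed through the parser and the parentheses become a nested BooleanQuery; parser here is assumed to be the instance from the earlier field example:

    import org.apache.lucene.search.Query;

    // The OR group is evaluated as a nested clause of the top-level AND.
    Query grouped = parser.parse("(jakarta OR apache) AND website");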

    + * *

    Field Grouping

    + * *
    + * *

    Lucene supports using parentheses to group multiple clauses to a single field. - *

    To search for a title that contains both the word "return" and the phrase "pink panther" use the query: + * + *

    To search for a title that contains both the word "return" and the phrase "pink panther" use + * the query: + * *

    title:(+return +"pink panther")
    + * *
    - * - * - * + * + *

    + * *

    Escaping Special Characters

    + * *
    - *

    Lucene supports escaping special characters that are part of the query syntax. The current list special characters are + * + *

Lucene supports escaping special characters that are part of the query syntax. The current + * list of special characters is + *

    + - && || ! ( ) { } [ ] ^ " ~ * ? : \ / - *

    To escape these character use the \ before the character. For example to search for (1+1):2 use the query: + * + *

To escape these characters, use the \ before the character. For example, to search for (1+1):2 + * use the query: + *

    \(1\+1\)\:2
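The escaping can also be done programmatically with the static escape helper that appears earlier in this patch; a short sketch:

    import org.apache.lucene.queryparser.classic.QueryParser;

    // Backslash-escapes every reserved character: (1+1):2 -> \(1\+1\)\:2
    String escaped = QueryParser.escape("(1+1):2");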
    + * *
    */ package org.apache.lucene.queryparser.classic; diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/complexPhrase/ComplexPhraseQueryParser.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/complexPhrase/ComplexPhraseQueryParser.java index 3be0a5467fb..1c1ef80b048 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/complexPhrase/ComplexPhraseQueryParser.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/complexPhrase/ComplexPhraseQueryParser.java @@ -21,7 +21,6 @@ import java.util.ArrayList; import java.util.Iterator; import java.util.List; import java.util.Objects; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; @@ -47,25 +46,19 @@ import org.apache.lucene.search.spans.SpanQuery; import org.apache.lucene.search.spans.SpanTermQuery; /** - * QueryParser which permits complex phrase query syntax eg "(john jon - * jonathan~) peters*". - *

    - * Performs potentially multiple passes over Query text to parse any nested - * logic in PhraseQueries. - First pass takes any PhraseQuery content between - * quotes and stores for subsequent pass. All other query content is parsed as - * normal - Second pass parses any stored PhraseQuery content, checking all - * embedded clauses are referring to the same field and therefore can be - * rewritten as Span queries. All PhraseQuery clauses are expressed as - * ComplexPhraseQuery objects - *

    - *

    - * This could arguably be done in one pass using a new QueryParser but here I am - * working within the constraints of the existing parser as a base class. This - * currently simply feeds all phrase content through an analyzer to select - * phrase terms - any "special" syntax such as * ~ * etc are not given special - * status - *

    - * + * QueryParser which permits complex phrase query syntax eg "(john jon jonathan~) peters*". + * + *

Performs potentially multiple passes over the query text to parse any nested logic in + * PhraseQueries. The first pass takes any PhraseQuery content between quotes and stores it for a + * subsequent pass; all other query content is parsed as normal. The second pass parses any stored + * PhraseQuery content, checking that all embedded clauses refer to the same field and can therefore + * be rewritten as Span queries. All PhraseQuery clauses are expressed as ComplexPhraseQuery + * objects.

    This could arguably be done in one pass using a new QueryParser but here I am working within + * the constraints of the existing parser as a base class. This currently simply feeds all phrase + * content through an analyzer to select phrase terms - any "special" syntax such as * ~ * etc are + * not given special status */ public class ComplexPhraseQueryParser extends QueryParser { private ArrayList complexPhrases = null; @@ -75,8 +68,8 @@ public class ComplexPhraseQueryParser extends QueryParser { private boolean inOrder = true; /** - * When inOrder is true, the search terms must - * exists in the documents as the same order as in query. + * When inOrder is true, the search terms must exists in the documents as the same + * order as in query. * * @param inOrder parameter to choose between ordered or un-ordered proximity search */ @@ -130,7 +123,8 @@ public class ComplexPhraseQueryParser extends QueryParser { // set of syntax restrictions (i.e. all fields must be same) isPass2ResolvingPhrases = true; try { - for (Iterator iterator = complexPhrases.iterator(); iterator.hasNext();) { + for (Iterator iterator = complexPhrases.iterator(); + iterator.hasNext(); ) { currentPhraseQuery = iterator.next(); // in each phrase, now parse the contents between quotes as a // separate parse operation @@ -159,18 +153,20 @@ public class ComplexPhraseQueryParser extends QueryParser { } // Helper method used to report on any clauses that appear in query syntax - private void checkPhraseClauseIsForSameField(String field) - throws ParseException { + private void checkPhraseClauseIsForSameField(String field) throws ParseException { if (!field.equals(currentPhraseQuery.field)) { - throw new ParseException("Cannot have clause for field \"" + field - + "\" nested in phrase " + " for field \"" + currentPhraseQuery.field - + "\""); + throw new ParseException( + "Cannot have clause for field \"" + + field + + "\" nested in phrase " + + " for field \"" + + currentPhraseQuery.field + + "\""); } } @Override - protected Query getWildcardQuery(String field, String termStr) - throws ParseException { + protected Query getWildcardQuery(String field, String termStr) throws ParseException { if (isPass2ResolvingPhrases) { checkPhraseClauseIsForSameField(field); } @@ -178,8 +174,9 @@ public class ComplexPhraseQueryParser extends QueryParser { } @Override - protected Query getRangeQuery(String field, String part1, String part2, - boolean startInclusive, boolean endInclusive) throws ParseException { + protected Query getRangeQuery( + String field, String part1, String part2, boolean startInclusive, boolean endInclusive) + throws ParseException { if (isPass2ResolvingPhrases) { checkPhraseClauseIsForSameField(field); } @@ -187,8 +184,8 @@ public class ComplexPhraseQueryParser extends QueryParser { } @Override - protected Query newRangeQuery(String field, String part1, String part2, - boolean startInclusive, boolean endInclusive) { + protected Query newRangeQuery( + String field, String part1, String part2, boolean startInclusive, boolean endInclusive) { RewriteMethod originalRewriteMethod = getMultiTermRewriteMethod(); try { if (isPass2ResolvingPhrases) { @@ -201,8 +198,8 @@ public class ComplexPhraseQueryParser extends QueryParser { } @Override - protected Query getFuzzyQuery(String field, String termStr, - float minSimilarity) throws ParseException { + protected Query getFuzzyQuery(String field, String termStr, float minSimilarity) + throws ParseException { if (isPass2ResolvingPhrases) { checkPhraseClauseIsForSameField(field); 
} @@ -225,8 +222,8 @@ public class ComplexPhraseQueryParser extends QueryParser { private final Query[] contents = new Query[1]; - public ComplexPhraseQuery(String field, String phrasedQueryStringContents, - int slopFactor, boolean inOrder) { + public ComplexPhraseQuery( + String field, String phrasedQueryStringContents, int slopFactor, boolean inOrder) { this.field = Objects.requireNonNull(field); this.phrasedQueryStringContents = Objects.requireNonNull(phrasedQueryStringContents); this.slopFactor = slopFactor; @@ -245,11 +242,11 @@ public class ComplexPhraseQueryParser extends QueryParser { String oldDefaultParserField = qp.field; try { - //temporarily set the QueryParser to be parsing the default field for this phrase e.g author:"fred* smith" + // temporarily set the QueryParser to be parsing the default field for this phrase e.g + // author:"fred* smith" qp.field = this.field; contents[0] = qp.parse(phrasedQueryStringContents); - } - finally { + } finally { qp.field = oldDefaultParserField; } } @@ -263,10 +260,9 @@ public class ComplexPhraseQueryParser extends QueryParser { public Query rewrite(IndexReader reader) throws IOException { final Query contents = this.contents[0]; // ArrayList spanClauses = new ArrayList(); - if (contents instanceof TermQuery + if (contents instanceof TermQuery || contents instanceof MultiTermQuery - || contents instanceof SynonymQuery - ) { + || contents instanceof SynonymQuery) { return contents; } // Build a sequence of Span clauses arranged in a SpanNear - child @@ -274,10 +270,12 @@ public class ComplexPhraseQueryParser extends QueryParser { // Booleans e.g. nots and ors etc int numNegatives = 0; if (!(contents instanceof BooleanQuery)) { - throw new IllegalArgumentException("Unknown query type \"" - + contents.getClass().getName() - + "\" found in phrase query string \"" + phrasedQueryStringContents - + "\""); + throw new IllegalArgumentException( + "Unknown query type \"" + + contents.getClass().getName() + + "\" found in phrase query string \"" + + phrasedQueryStringContents + + "\""); } BooleanQuery bq = (BooleanQuery) contents; SpanQuery[] allSpanClauses = new SpanQuery[bq.clauses().size()]; @@ -298,8 +296,8 @@ public class ComplexPhraseQueryParser extends QueryParser { if (qc instanceof BooleanQuery || qc instanceof SynonymQuery) { ArrayList sc = new ArrayList<>(); - BooleanQuery booleanCaluse = qc instanceof BooleanQuery ? - (BooleanQuery) qc : convert((SynonymQuery) qc); + BooleanQuery booleanCaluse = + qc instanceof BooleanQuery ? (BooleanQuery) qc : convert((SynonymQuery) qc); addComplexPhraseClause(sc, booleanCaluse); if (sc.size() > 0) { allSpanClauses[i] = sc.get(0); @@ -307,24 +305,28 @@ public class ComplexPhraseQueryParser extends QueryParser { // Insert fake term e.g. phrase query was for "Fred Smithe*" and // there were no "Smithe*" terms - need to // prevent match on just "Fred". - allSpanClauses[i] = new SpanTermQuery(new Term(field, - "Dummy clause because no terms found - must match nothing")); + allSpanClauses[i] = + new SpanTermQuery( + new Term(field, "Dummy clause because no terms found - must match nothing")); } } else if (qc instanceof MatchNoDocsQuery) { // Insert fake term e.g. phrase query was for "Fred Smithe*" and // there were no "Smithe*" terms - need to // prevent match on just "Fred". 
- allSpanClauses[i] = new SpanTermQuery(new Term(field, - "Dummy clause because no terms found - must match nothing")); + allSpanClauses[i] = + new SpanTermQuery( + new Term(field, "Dummy clause because no terms found - must match nothing")); } else { if (qc instanceof TermQuery) { TermQuery tq = (TermQuery) qc; allSpanClauses[i] = new SpanTermQuery(tq.getTerm()); - } else { - throw new IllegalArgumentException("Unknown query type \"" - + qc.getClass().getName() - + "\" found in phrase query string \"" - + phrasedQueryStringContents + "\""); + } else { + throw new IllegalArgumentException( + "Unknown query type \"" + + qc.getClass().getName() + + "\" found in phrase query string \"" + + phrasedQueryStringContents + + "\""); } } @@ -346,8 +348,7 @@ public class ComplexPhraseQueryParser extends QueryParser { i += 1; } - SpanQuery[] includeClauses = positiveClauses - .toArray(new SpanQuery[positiveClauses.size()]); + SpanQuery[] includeClauses = positiveClauses.toArray(new SpanQuery[positiveClauses.size()]); SpanQuery include = null; if (includeClauses.length == 1) { @@ -355,19 +356,17 @@ public class ComplexPhraseQueryParser extends QueryParser { } else { // need to increase slop factor based on gaps introduced by // negatives - include = new SpanNearQuery(includeClauses, slopFactor + numNegatives, - inOrder); + include = new SpanNearQuery(includeClauses, slopFactor + numNegatives, inOrder); } // Use sequence of positive and negative values as the exclude. - SpanNearQuery exclude = new SpanNearQuery(allSpanClauses, slopFactor, - inOrder); + SpanNearQuery exclude = new SpanNearQuery(allSpanClauses, slopFactor, inOrder); SpanNotQuery snot = new SpanNotQuery(include, exclude); return snot; } private BooleanQuery convert(SynonymQuery qc) { BooleanQuery.Builder bqb = new BooleanQuery.Builder(); - for (Term t : qc.getTerms()){ + for (Term t : qc.getTerms()) { bqb.add(new BooleanClause(new TermQuery(t), Occur.SHOULD)); } return bqb.build(); @@ -408,25 +407,24 @@ public class ComplexPhraseQueryParser extends QueryParser { // Insert fake term e.g. phrase query was for "Fred Smithe*" and // there were no "Smithe*" terms - need to // prevent match on just "Fred". - SpanQuery stq = new SpanTermQuery(new Term(field, - "Dummy clause because no terms found - must match nothing")); + SpanQuery stq = + new SpanTermQuery( + new Term(field, "Dummy clause because no terms found - must match nothing")); chosenList.add(stq); } else { // TODO alternatively could call extract terms here? 
- throw new IllegalArgumentException("Unknown query type:" - + childQuery.getClass().getName()); + throw new IllegalArgumentException( + "Unknown query type:" + childQuery.getClass().getName()); } } if (ors.size() == 0) { return; } - SpanOrQuery soq = new SpanOrQuery(ors - .toArray(new SpanQuery[ors.size()])); + SpanOrQuery soq = new SpanOrQuery(ors.toArray(new SpanQuery[ors.size()])); if (nots.size() == 0) { spanClauses.add(soq); } else { - SpanOrQuery snqs = new SpanOrQuery(nots - .toArray(new SpanQuery[nots.size()])); + SpanOrQuery snqs = new SpanOrQuery(nots.toArray(new SpanQuery[nots.size()])); SpanNotQuery snq = new SpanNotQuery(soq, snqs); spanClauses.add(snq); } @@ -458,15 +456,14 @@ public class ComplexPhraseQueryParser extends QueryParser { @Override public boolean equals(Object other) { - return sameClassAs(other) && - equalsTo(getClass().cast(other)); + return sameClassAs(other) && equalsTo(getClass().cast(other)); } private boolean equalsTo(ComplexPhraseQuery other) { - return field.equals(other.field) && - phrasedQueryStringContents.equals(other.phrasedQueryStringContents) && - slopFactor == other.slopFactor && - inOrder == other.inOrder; + return field.equals(other.field) + && phrasedQueryStringContents.equals(other.phrasedQueryStringContents) + && slopFactor == other.slopFactor + && inOrder == other.inOrder; } } } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/complexPhrase/package-info.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/complexPhrase/package-info.java index fbe1ccec679..1b76d7d8e5d 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/complexPhrase/package-info.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/complexPhrase/package-info.java @@ -14,9 +14,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -/** - * QueryParser which permits complex phrase query syntax eg "(john jon jonathan~) peters*" - */ -package org.apache.lucene.queryparser.complexPhrase; +/** QueryParser which permits complex phrase query syntax eg "(john jon jonathan~) peters*" */ +package org.apache.lucene.queryparser.complexPhrase; diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/ext/ExtendableQueryParser.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/ext/ExtendableQueryParser.java index b71846dfdd3..7a79c850b35 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/ext/ExtendableQueryParser.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/ext/ExtendableQueryParser.java @@ -17,53 +17,48 @@ package org.apache.lucene.queryparser.ext; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.queryparser.ext.Extensions.Pair; import org.apache.lucene.queryparser.classic.ParseException; import org.apache.lucene.queryparser.classic.QueryParser; +import org.apache.lucene.queryparser.ext.Extensions.Pair; import org.apache.lucene.search.Query; /** - * The {@link ExtendableQueryParser} enables arbitrary query parser extension - * based on a customizable field naming scheme. The lucene query syntax allows - * implicit and explicit field definitions as query prefix followed by a colon - * (':') character. The {@link ExtendableQueryParser} allows to encode extension - * keys into the field symbol associated with a registered instance of - * {@link ParserExtension}. A customizable separation character separates the - * extension key from the actual field symbol. 
The {@link ExtendableQueryParser} - * splits (@see {@link Extensions#splitExtensionField(String, String)}) the - * extension key from the field symbol and tries to resolve the associated - * {@link ParserExtension}. If the parser can't resolve the key or the field - * token does not contain a separation character, {@link ExtendableQueryParser} - * yields the same behavior as its super class {@link QueryParser}. Otherwise, - * if the key is associated with a {@link ParserExtension} instance, the parser - * builds an instance of {@link ExtensionQuery} to be processed by - * {@link ParserExtension#parse(ExtensionQuery)}.If a extension field does not - * contain a field part the default field for the query will be used. - *

- * To guarantee that an extension field is processed with its associated - * extension, the extension query part must escape any special characters like - * '*' or '['. If the extension query contains any whitespace characters, the - * extension query part must be enclosed in quotes. - * Example ('_' used as separation character): + * The {@link ExtendableQueryParser} enables arbitrary query parser extension based on a + * customizable field naming scheme. The Lucene query syntax allows implicit and explicit field + * definitions as query prefix followed by a colon (':') character. The {@link + * ExtendableQueryParser} allows encoding extension keys into the field symbol associated with a + * registered instance of {@link ParserExtension}. A customizable separation character separates the + * extension key from the actual field symbol. The {@link ExtendableQueryParser} splits (see {@link + * Extensions#splitExtensionField(String, String)}) the extension key from the field symbol and + * tries to resolve the associated {@link ParserExtension}. If the parser can't resolve the key or + * the field token does not contain a separation character, {@link ExtendableQueryParser} yields the + * same behavior as its superclass {@link QueryParser}. Otherwise, if the key is associated with a + * {@link ParserExtension} instance, the parser builds an instance of {@link ExtensionQuery} to be + * processed by {@link ParserExtension#parse(ExtensionQuery)}. If an extension field does not + * contain a field part, the default field for the query will be used. + * + *

    To guarantee that an extension field is processed with its associated extension, the extension + * query part must escape any special characters like '*' or '['. If the extension query contains + * any whitespace characters, the extension query part must be enclosed in quotes. Example ('_' used + * as separation character): + * *

      * <pre>
      *   title_customExt:"Apache Lucene\?" OR content_customExt:prefix\*
      * </pre>
- *
+ * Search on the default field:
+ *
      * <pre>
      *   _customExt:"Apache Lucene\?" OR _customExt:prefix\*
      * </pre>
    - *

    - * The {@link ExtendableQueryParser} itself does not implement the logic how - * field and extension key are separated or ordered. All logic regarding the - * extension key and field symbol parsing is located in {@link Extensions}. - * Customized extension schemes should be implemented by sub-classing + * + *

+ * <p>The {@link ExtendableQueryParser} itself does not implement the logic of how field and
+ * extension key are separated or ordered. All logic regarding the extension key and field symbol
+ * parsing is located in {@link Extensions}. Customized extension schemes should be implemented by
+ * subclassing
 * {@link Extensions}.
- *

    - *

    - * For details about the default encoding scheme see {@link Extensions}. - *

    - * + * + *

    For details about the default encoding scheme see {@link Extensions}. + * * @see Extensions * @see ParserExtension * @see ExtensionQuery @@ -73,18 +68,14 @@ public class ExtendableQueryParser extends QueryParser { private final String defaultField; private final Extensions extensions; - /** - * Default empty extensions instance - */ + /** Default empty extensions instance */ private static final Extensions DEFAULT_EXTENSION = new Extensions(); /** * Creates a new {@link ExtendableQueryParser} instance - * - * @param f - * the default query field - * @param a - * the analyzer used to find terms in a query string + * + * @param f the default query field + * @param a the analyzer used to find terms in a query string */ public ExtendableQueryParser(final String f, final Analyzer a) { this(f, a, DEFAULT_EXTENSION); @@ -92,16 +83,12 @@ public class ExtendableQueryParser extends QueryParser { /** * Creates a new {@link ExtendableQueryParser} instance - * - * @param f - * the default query field - * @param a - * the analyzer used to find terms in a query string - * @param ext - * the query parser extensions + * + * @param f the default query field + * @param a the analyzer used to find terms in a query string + * @param ext the query parser extensions */ - public ExtendableQueryParser(final String f, - final Analyzer a, final Extensions ext) { + public ExtendableQueryParser(final String f, final Analyzer a, final Extensions ext) { super(f, a); this.defaultField = f; this.extensions = ext; @@ -109,7 +96,7 @@ public class ExtendableQueryParser extends QueryParser { /** * Returns the extension field delimiter character. - * + * * @return the extension field delimiter character. */ public char getExtensionFieldDelimiter() { @@ -119,15 +106,12 @@ public class ExtendableQueryParser extends QueryParser { @Override protected Query getFieldQuery(final String field, final String queryText, boolean quoted) throws ParseException { - final Pair splitExtensionField = this.extensions - .splitExtensionField(defaultField, field); - final ParserExtension extension = this.extensions - .getExtension(splitExtensionField.cud); + final Pair splitExtensionField = + this.extensions.splitExtensionField(defaultField, field); + final ParserExtension extension = this.extensions.getExtension(splitExtensionField.cud); if (extension != null) { - return extension.parse(new ExtensionQuery(this, splitExtensionField.cur, - queryText)); + return extension.parse(new ExtensionQuery(this, splitExtensionField.cur, queryText)); } return super.getFieldQuery(field, queryText, quoted); } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/ext/ExtensionQuery.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/ext/ExtensionQuery.java index 3c5e4c3a0a3..8e4acd17531 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/ext/ExtensionQuery.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/ext/ExtensionQuery.java @@ -19,9 +19,9 @@ package org.apache.lucene.queryparser.ext; import org.apache.lucene.queryparser.classic.QueryParser; /** - * {@link ExtensionQuery} holds all query components extracted from the original - * query string like the query field and the extension query string. - * + * {@link ExtensionQuery} holds all query components extracted from the original query string like + * the query field and the extension query string. 
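To make the extension scheme described above concrete, here is a minimal, self-contained usage sketch (an editorial aside, not part of the patch). The "lower" extension key, the lowercasing behavior, and the use of StandardAnalyzer are illustrative assumptions; everything else follows the API shown in this diff.

import java.util.Locale;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.ext.ExtendableQueryParser;
import org.apache.lucene.queryparser.ext.ExtensionQuery;
import org.apache.lucene.queryparser.ext.Extensions;
import org.apache.lucene.queryparser.ext.ParserExtension;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

public class ExtensionParserDemo {
  public static void main(String[] args) throws ParseException {
    // Use '_' instead of the default ':' as the extension field delimiter.
    Extensions extensions = new Extensions('_');
    // Register an extension under the (hypothetical) key "lower".
    extensions.add(
        "lower",
        new ParserExtension() {
          @Override
          public Query parse(ExtensionQuery query) {
            // Turn the raw extension query into a lowercased TermQuery.
            String text = query.getRawQueryString().toLowerCase(Locale.ROOT);
            return new TermQuery(new Term(query.getField(), text));
          }
        });

    ExtendableQueryParser parser =
        new ExtendableQueryParser("content", new StandardAnalyzer(), extensions);
    // "title_lower" splits into field "title" and extension key "lower".
    Query extended = parser.parse("title_lower:FooBar");
    // No delimiter present, so this falls back to plain QueryParser behavior.
    Query plain = parser.parse("title:FooBar");
    System.out.println(extended + " vs. " + plain);
  }
}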
+ * * @see Extensions * @see ExtendableQueryParser * @see ParserExtension @@ -34,11 +34,9 @@ public class ExtensionQuery { /** * Creates a new {@link ExtensionQuery} - * - * @param field - * the query field - * @param rawQueryString - * the raw extension query string + * + * @param field the query field + * @param rawQueryString the raw extension query string */ public ExtensionQuery(QueryParser topLevelParser, String field, String rawQueryString) { this.field = field; @@ -48,7 +46,7 @@ public class ExtensionQuery { /** * Returns the query field - * + * * @return the query field */ public String getField() { @@ -57,15 +55,16 @@ public class ExtensionQuery { /** * Returns the raw extension query string - * + * * @return the raw extension query string */ public String getRawQueryString() { return rawQueryString; } - + /** - * Returns the top level parser which created this {@link ExtensionQuery} + * Returns the top level parser which created this {@link ExtensionQuery} + * * @return the top level parser which created this {@link ExtensionQuery} */ public QueryParser getTopLevelParser() { diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/ext/Extensions.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/ext/Extensions.java index f27fbda313f..30234185035 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/ext/Extensions.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/ext/Extensions.java @@ -16,43 +16,35 @@ */ package org.apache.lucene.queryparser.ext; +import java.util.HashMap; +import java.util.Map; import org.apache.lucene.queryparser.classic.QueryParser; import org.apache.lucene.queryparser.classic.QueryParserBase; -import java.util.HashMap; -import java.util.Map; - - /** - * The {@link Extensions} class represents an extension mapping to associate - * {@link ParserExtension} instances with extension keys. An extension key is a - * string encoded into a Lucene standard query parser field symbol recognized by - * {@link ExtendableQueryParser}. The query parser passes each extension field - * token to {@link #splitExtensionField(String, String)} to separate the - * extension key from the field identifier. - *

    - * In addition to the key to extension mapping this class also defines the field - * name overloading scheme. {@link ExtendableQueryParser} uses the given - * extension to split the actual field name and extension key by calling - * {@link #splitExtensionField(String, String)}. To change the order or the key - * / field name encoding scheme users can subclass {@link Extensions} to - * implement their own. - * + * The {@link Extensions} class represents an extension mapping to associate {@link ParserExtension} + * instances with extension keys. An extension key is a string encoded into a Lucene standard query + * parser field symbol recognized by {@link ExtendableQueryParser}. The query parser passes each + * extension field token to {@link #splitExtensionField(String, String)} to separate the extension + * key from the field identifier. + * + *

    In addition to the key to extension mapping this class also defines the field name overloading + * scheme. {@link ExtendableQueryParser} uses the given extension to split the actual field name and + * extension key by calling {@link #splitExtensionField(String, String)}. To change the order or the + * key / field name encoding scheme users can subclass {@link Extensions} to implement their own. + * * @see ExtendableQueryParser * @see ParserExtension */ public class Extensions { - private final Map extensions = new HashMap<>(); + private final Map extensions = new HashMap<>(); private final char extensionFieldDelimiter; - /** - * The default extension field delimiter character. This constant is set to - * ':' - */ + /** The default extension field delimiter character. This constant is set to ':' */ public static final char DEFAULT_EXTENSION_FIELD_DELIMITER = ':'; /** - * Creates a new {@link Extensions} instance with the - * {@link #DEFAULT_EXTENSION_FIELD_DELIMITER} as a delimiter character. + * Creates a new {@link Extensions} instance with the {@link #DEFAULT_EXTENSION_FIELD_DELIMITER} + * as a delimiter character. */ public Extensions() { this(DEFAULT_EXTENSION_FIELD_DELIMITER); @@ -60,9 +52,8 @@ public class Extensions { /** * Creates a new {@link Extensions} instance - * - * @param extensionFieldDelimiter - * the extensions field delimiter character + * + * @param extensionFieldDelimiter the extensions field delimiter character */ public Extensions(char extensionFieldDelimiter) { this.extensionFieldDelimiter = extensionFieldDelimiter; @@ -70,24 +61,21 @@ public class Extensions { /** * Adds a new {@link ParserExtension} instance associated with the given key. - * - * @param key - * the parser extension key - * @param extension - * the parser extension + * + * @param key the parser extension key + * @param extension the parser extension */ public void add(String key, ParserExtension extension) { this.extensions.put(key, extension); } /** - * Returns the {@link ParserExtension} instance for the given key or - * null if no extension can be found for the key. - * - * @param key - * the extension key - * @return the {@link ParserExtension} instance for the given key or - * null if no extension can be found for the key. + * Returns the {@link ParserExtension} instance for the given key or null if no + * extension can be found for the key. + * + * @param key the extension key + * @return the {@link ParserExtension} instance for the given key or null if no + * extension can be found for the key. */ public final ParserExtension getExtension(String key) { return this.extensions.get(key); @@ -95,7 +83,7 @@ public class Extensions { /** * Returns the extension field delimiter - * + * * @return the extension field delimiter */ public char getExtensionFieldDelimiter() { @@ -103,82 +91,65 @@ public class Extensions { } /** - * Splits a extension field and returns the field / extension part as a - * {@link Pair}. This method tries to split on the first occurrence of the - * extension field delimiter, if the delimiter is not present in the string - * the result will contain a null value for the extension key and - * the given field string as the field value. If the given extension field - * string contains no field identifier the result pair will carry the given - * default field as the field value. 
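A small sketch of the split behavior just described, plus the inverse buildExtensionField operation. It assumes the API exactly as in this diff; note that the generic parameters on Pair are <String, String>, which the rendering of this patch has partially eaten, and the field and key names are made up.

import org.apache.lucene.queryparser.ext.Extensions;

public class SplitFieldDemo {
  public static void main(String[] args) {
    Extensions ext = new Extensions('_');

    // "title_key" -> field "title" (Pair.cur), extension key "key" (Pair.cud).
    Extensions.Pair<String, String> p1 = ext.splitExtensionField("content", "title_key");
    // "_key" has no field part -> the default field "content" is used.
    Extensions.Pair<String, String> p2 = ext.splitExtensionField("content", "_key");
    // No delimiter at all -> the extension key is null.
    Extensions.Pair<String, String> p3 = ext.splitExtensionField("content", "title");

    System.out.println(p1.cur + "/" + p1.cud); // title/key
    System.out.println(p2.cur + "/" + p2.cud); // content/key
    System.out.println(p3.cur + "/" + p3.cud); // title/null

    // The inverse operation: key + field -> "title_key" (with escaping applied).
    System.out.println(ext.buildExtensionField("key", "title"));
  }
}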
- * - * @param defaultField - * the default query field - * @param field - * the extension field string - * @return a {@link Pair} with the field name as the {@link Pair#cur} and the - * extension key as the {@link Pair#cud} + * Splits a extension field and returns the field / extension part as a {@link Pair}. This method + * tries to split on the first occurrence of the extension field delimiter, if the delimiter is + * not present in the string the result will contain a null value for the extension + * key and the given field string as the field value. If the given extension field string contains + * no field identifier the result pair will carry the given default field as the field value. + * + * @param defaultField the default query field + * @param field the extension field string + * @return a {@link Pair} with the field name as the {@link Pair#cur} and the extension key as the + * {@link Pair#cud} */ - public Pair splitExtensionField(String defaultField, - String field) { + public Pair splitExtensionField(String defaultField, String field) { int indexOf = field.indexOf(this.extensionFieldDelimiter); - if (indexOf < 0) - return new Pair<>(field, null); - final String indexField = indexOf == 0 ? defaultField : field.substring(0, - indexOf); + if (indexOf < 0) return new Pair<>(field, null); + final String indexField = indexOf == 0 ? defaultField : field.substring(0, indexOf); final String extensionKey = field.substring(indexOf + 1); return new Pair<>(indexField, extensionKey); - } /** - * Escapes an extension field. The default implementation is equivalent to - * {@link QueryParser#escape(String)}. - * - * @param extfield - * the extension field identifier - * @return the extension field identifier with all special chars escaped with - * a backslash character. + * Escapes an extension field. The default implementation is equivalent to {@link + * QueryParser#escape(String)}. + * + * @param extfield the extension field identifier + * @return the extension field identifier with all special chars escaped with a backslash + * character. */ public String escapeExtensionField(String extfield) { return QueryParserBase.escape(extfield); } /** - * Builds an extension field string from a given extension key and the default - * query field. The default field and the key are delimited with the extension - * field delimiter character. This method makes no assumption about the order - * of the extension key and the field. By default the extension key is - * appended to the end of the returned string while the field is added to the + * Builds an extension field string from a given extension key and the default query field. The + * default field and the key are delimited with the extension field delimiter character. This + * method makes no assumption about the order of the extension key and the field. By default the + * extension key is appended to the end of the returned string while the field is added to the * beginning. Special Query characters are escaped in the result. - *

    - * Note: {@link Extensions} subclasses must maintain the contract between - * {@link #buildExtensionField(String)} and - * {@link #splitExtensionField(String, String)} where the latter inverts the - * former. - *

    + * + *

    Note: {@link Extensions} subclasses must maintain the contract between {@link + * #buildExtensionField(String)} and {@link #splitExtensionField(String, String)} where the latter + * inverts the former. */ public String buildExtensionField(String extensionKey) { return buildExtensionField(extensionKey, ""); } /** - * Builds an extension field string from a given extension key and the - * extensions field. The field and the key are delimited with the extension - * field delimiter character. This method makes no assumption about the order - * of the extension key and the field. By default the extension key is - * appended to the end of the returned string while the field is added to the - * beginning. Special Query characters are escaped in the result. - *

    - * Note: {@link Extensions} subclasses must maintain the contract between - * {@link #buildExtensionField(String, String)} and - * {@link #splitExtensionField(String, String)} where the latter inverts the - * former. - *

    - * - * @param extensionKey - * the extension key - * @param field - * the field to apply the extension on. + * Builds an extension field string from a given extension key and the extensions field. The field + * and the key are delimited with the extension field delimiter character. This method makes no + * assumption about the order of the extension key and the field. By default the extension key is + * appended to the end of the returned string while the field is added to the beginning. Special + * Query characters are escaped in the result. + * + *

    Note: {@link Extensions} subclasses must maintain the contract between {@link + * #buildExtensionField(String, String)} and {@link #splitExtensionField(String, String)} where + * the latter inverts the former. + * + * @param extensionKey the extension key + * @param field the field to apply the extension on. * @return escaped extension field identifier * @see #buildExtensionField(String) to use the default query field */ @@ -191,29 +162,24 @@ public class Extensions { /** * This class represents a generic pair. - * - * @param - * the pairs first element - * @param - * the pairs last element of the pair. + * + * @param the pairs first element + * @param the pairs last element of the pair. */ - public static class Pair { + public static class Pair { public final Cur cur; public final Cud cud; /** * Creates a new Pair - * - * @param cur - * the pairs first element - * @param cud - * the pairs last element + * + * @param cur the pairs first element + * @param cud the pairs last element */ public Pair(Cur cur, Cud cud) { this.cur = cur; this.cud = cud; } } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/ext/ParserExtension.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/ext/ParserExtension.java index 17eb31472ba..9828d3b2424 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/ext/ParserExtension.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/ext/ParserExtension.java @@ -20,33 +20,28 @@ import org.apache.lucene.queryparser.classic.ParseException; import org.apache.lucene.search.Query; /** - * This class represents an extension base class to the Lucene standard - * {@link org.apache.lucene.queryparser.classic.QueryParser}. The - * {@link org.apache.lucene.queryparser.classic.QueryParser} is generated by the JavaCC - * parser generator. Changing or adding functionality or syntax in the standard - * query parser requires changes to the JavaCC source file. To enable extending - * the standard query parser without changing the JavaCC sources and re-generate - * the parser the {@link ParserExtension} can be customized and plugged into an - * instance of {@link ExtendableQueryParser}, a direct subclass of - * {@link org.apache.lucene.queryparser.classic.QueryParser}. - * + * This class represents an extension base class to the Lucene standard {@link + * org.apache.lucene.queryparser.classic.QueryParser}. The {@link + * org.apache.lucene.queryparser.classic.QueryParser} is generated by the JavaCC parser generator. + * Changing or adding functionality or syntax in the standard query parser requires changes to the + * JavaCC source file. To enable extending the standard query parser without changing the JavaCC + * sources and re-generate the parser the {@link ParserExtension} can be customized and plugged into + * an instance of {@link ExtendableQueryParser}, a direct subclass of {@link + * org.apache.lucene.queryparser.classic.QueryParser}. + * * @see Extensions * @see ExtendableQueryParser */ public abstract class ParserExtension { /** - * Processes the given {@link ExtensionQuery} and returns a corresponding - * {@link Query} instance. Subclasses must either return a {@link Query} - * instance or raise a {@link ParseException}. This method must not return - * null. - * - * @param query - * the extension query + * Processes the given {@link ExtensionQuery} and returns a corresponding {@link Query} instance. + * Subclasses must either return a {@link Query} instance or raise a {@link ParseException}. 
This + * method must not return null. + * + * @param query the extension query * @return a new query instance - * @throws ParseException - * if the query can not be parsed. + * @throws ParseException if the query can not be parsed. */ public abstract Query parse(final ExtensionQuery query) throws ParseException; - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/ext/package-info.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/ext/package-info.java index a5e9000c77f..663567f7b7f 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/ext/package-info.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/ext/package-info.java @@ -14,9 +14,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -/** - * Extendable QueryParser provides a simple and flexible extension mechanism by overloading query field names. + +/** + * Extendable QueryParser provides a simple and flexible extension mechanism by overloading query + * field names. */ package org.apache.lucene.queryparser.ext; - diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/QueryNodeError.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/QueryNodeError.java index e54017b5fb3..3062b400a79 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/QueryNodeError.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/QueryNodeError.java @@ -21,53 +21,42 @@ import org.apache.lucene.queryparser.flexible.messages.NLSException; /** * Error class with NLS support - * + * * @see org.apache.lucene.queryparser.flexible.messages.NLS * @see org.apache.lucene.queryparser.flexible.messages.Message */ public class QueryNodeError extends Error implements NLSException { private Message message; - /** - * @param message - * - NLS Message Object - */ + /** @param message - NLS Message Object */ public QueryNodeError(Message message) { super(message.getKey()); this.message = message; - } - /** - * @param throwable - * - @see java.lang.Error - */ + /** @param throwable - @see java.lang.Error */ public QueryNodeError(Throwable throwable) { super(throwable); } /** - * @param message - * - NLS Message Object - * @param throwable - * - @see java.lang.Error + * @param message - NLS Message Object + * @param throwable - @see java.lang.Error */ public QueryNodeError(Message message, Throwable throwable) { super(message.getKey(), throwable); this.message = message; - } /* * (non-Javadoc) - * + * * @see org.apache.lucene.messages.NLSException#getMessageObject() */ @Override public Message getMessageObject() { return this.message; } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/QueryNodeException.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/QueryNodeException.java index 5450e8036a9..c6bb52e20d9 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/QueryNodeException.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/QueryNodeException.java @@ -17,23 +17,18 @@ package org.apache.lucene.queryparser.flexible.core; import java.util.Locale; - +import org.apache.lucene.queryparser.flexible.core.messages.QueryParserMessages; +import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; import org.apache.lucene.queryparser.flexible.messages.Message; import 
org.apache.lucene.queryparser.flexible.messages.MessageImpl; import org.apache.lucene.queryparser.flexible.messages.NLS; import org.apache.lucene.queryparser.flexible.messages.NLSException; -import org.apache.lucene.queryparser.flexible.core.messages.QueryParserMessages; -import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; /** - *

    - * This exception should be thrown if something wrong happens when dealing with - * {@link QueryNode}s. - *

    - *

    - * It also supports NLS messages. - *

- *
+ * This exception should be thrown if something goes wrong when dealing with {@link QueryNode}s.
+ *
+ *

    It also supports NLS messages. + * * @see Message * @see NLS * @see NLSException @@ -47,7 +42,6 @@ public class QueryNodeException extends Exception implements NLSException { super(message.getKey()); this.message = message; - } public QueryNodeException(Throwable throwable) { @@ -58,7 +52,6 @@ public class QueryNodeException extends Exception implements NLSException { super(message.getKey(), throwable); this.message = message; - } @Override @@ -84,5 +77,4 @@ public class QueryNodeException extends Exception implements NLSException { public String toString() { return this.message.getKey() + ": " + getLocalizedMessage(); } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/QueryNodeParseException.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/QueryNodeParseException.java index 43f0c38f70e..5f373e86797 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/QueryNodeParseException.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/QueryNodeParseException.java @@ -16,16 +16,16 @@ */ package org.apache.lucene.queryparser.flexible.core; +import org.apache.lucene.queryparser.flexible.core.messages.QueryParserMessages; +import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; +import org.apache.lucene.queryparser.flexible.core.parser.SyntaxParser; import org.apache.lucene.queryparser.flexible.messages.Message; import org.apache.lucene.queryparser.flexible.messages.MessageImpl; -import org.apache.lucene.queryparser.flexible.core.messages.QueryParserMessages; -import org.apache.lucene.queryparser.flexible.core.parser.SyntaxParser; -import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; /** - * This should be thrown when an exception happens during the query parsing from - * string to the query node tree. - * + * This should be thrown when an exception happens during the query parsing from string to the query + * node tree. 
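For illustration, a hedged sketch of raising and inspecting such an NLS-backed exception, mirroring the MessageImpl usage visible elsewhere in this patch; the unparsable query string is invented.

import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
import org.apache.lucene.queryparser.flexible.core.messages.QueryParserMessages;
import org.apache.lucene.queryparser.flexible.messages.MessageImpl;

public class NlsExceptionDemo {
  static void fail(String query) throws QueryNodeException {
    // The Message carries an NLS bundle key plus the arguments for the template.
    throw new QueryNodeException(
        new MessageImpl(QueryParserMessages.INVALID_SYNTAX_CANNOT_PARSE, query, "<EOF>"));
  }

  public static void main(String[] args) {
    try {
      fail("title:(");
    } catch (QueryNodeException e) {
      // getMessageObject() exposes the structured NLS message; the localized
      // text is available through the usual Throwable accessors.
      System.out.println(e.getMessageObject().getKey() + ": " + e.getLocalizedMessage());
    }
  }
}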
+ * * @see QueryNodeException * @see SyntaxParser * @see QueryNode @@ -54,18 +54,14 @@ public class QueryNodeParseException extends QueryNodeException { public void setQuery(CharSequence query) { this.query = query; - this.message = new MessageImpl( - QueryParserMessages.INVALID_SYNTAX_CANNOT_PARSE, query, ""); + this.message = new MessageImpl(QueryParserMessages.INVALID_SYNTAX_CANNOT_PARSE, query, ""); } public CharSequence getQuery() { return this.query; } - /** - * @param errorToken - * the errorToken in the query - */ + /** @param errorToken the errorToken in the query */ protected void setErrorToken(String errorToken) { this.errorToken = errorToken; } @@ -79,10 +75,10 @@ public class QueryNodeParseException extends QueryNodeException { } /** - * For EndOfLine and EndOfFile ("<EOF>") parsing problems the last char in the - * string is returned For the case where the parser is not able to figure out - * the line and column number -1 will be returned - * + * For EndOfLine and EndOfFile ("<EOF>") parsing problems the last char in the string is + * returned For the case where the parser is not able to figure out the line and column number -1 + * will be returned + * * @return line where the problem was found */ public int getBeginLine() { @@ -90,28 +86,22 @@ public class QueryNodeParseException extends QueryNodeException { } /** - * For EndOfLine and EndOfFile ("<EOF>") parsing problems the last char in the - * string is returned For the case where the parser is not able to figure out - * the line and column number -1 will be returned - * + * For EndOfLine and EndOfFile ("<EOF>") parsing problems the last char in the string is + * returned For the case where the parser is not able to figure out the line and column number -1 + * will be returned + * * @return column of the first char where the problem was found */ public int getBeginColumn() { return this.beginColumn; } - /** - * @param beginLine - * the beginLine to set - */ + /** @param beginLine the beginLine to set */ protected void setBeginLine(int beginLine) { this.beginLine = beginLine; } - /** - * @param beginColumn - * the beginColumn to set - */ + /** @param beginColumn the beginColumn to set */ protected void setBeginColumn(int beginColumn) { this.beginColumn = beginColumn; } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/QueryParserHelper.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/QueryParserHelper.java index 968ba179f13..2269a1540f7 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/QueryParserHelper.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/QueryParserHelper.java @@ -23,13 +23,12 @@ import org.apache.lucene.queryparser.flexible.core.parser.SyntaxParser; import org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessor; /** - * This class is a helper for the query parser framework, it does all the three - * query parser phrases at once: text parsing, query processing and query - * building. - *

- * It contains methods that allows the user to change the implementation used on
- * the three phases.
- *
+ * This class is a helper for the query parser framework; it performs all three query parser phases
+ * at once: text parsing, query processing and query building.
+ *
+ *

    It contains methods that allows the user to change the implementation used on the three + * phases. + * * @see QueryNodeProcessor * @see SyntaxParser * @see QueryBuilder @@ -46,25 +45,23 @@ public class QueryParserHelper { private QueryConfigHandler config; /** - * Creates a query parser helper object using the specified configuration, - * text parser, processor and builder. - * - * @param queryConfigHandler - * the query configuration handler that will be initially set to this - * helper - * @param syntaxParser - * the text parser that will be initially set to this helper - * @param processor - * the query processor that will be initially set to this helper - * @param builder - * the query builder that will be initially set to this helper - * + * Creates a query parser helper object using the specified configuration, text parser, processor + * and builder. + * + * @param queryConfigHandler the query configuration handler that will be initially set to this + * helper + * @param syntaxParser the text parser that will be initially set to this helper + * @param processor the query processor that will be initially set to this helper + * @param builder the query builder that will be initially set to this helper * @see QueryNodeProcessor * @see SyntaxParser * @see QueryBuilder * @see QueryConfigHandler */ - public QueryParserHelper(QueryConfigHandler queryConfigHandler, SyntaxParser syntaxParser, QueryNodeProcessor processor, + public QueryParserHelper( + QueryConfigHandler queryConfigHandler, + SyntaxParser syntaxParser, + QueryNodeProcessor processor, QueryBuilder builder) { this.syntaxParser = syntaxParser; this.config = queryConfigHandler; @@ -74,16 +71,14 @@ public class QueryParserHelper { if (processor != null) { processor.setQueryConfigHandler(queryConfigHandler); } - } /** - * Returns the processor object used to process the query node tree, it - * returns null if no processor is used. - * - * @return the actual processor used to process the query node tree, - * null if no processor is used - * + * Returns the processor object used to process the query node tree, it returns null + * if no processor is used. + * + * @return the actual processor used to process the query node tree, null if no + * processor is used * @see QueryNodeProcessor * @see #setQueryNodeProcessor(QueryNodeProcessor) */ @@ -92,32 +87,26 @@ public class QueryParserHelper { } /** - * Sets the processor that will be used to process the query node tree. If - * there is any {@link QueryConfigHandler} returned by - * {@link #getQueryConfigHandler()}, it will be set on the processor. The - * argument can be null, which means that no processor will be - * used to process the query node tree. - * - * @param processor - * the processor that will be used to process the query node tree, - * this argument can be null - * + * Sets the processor that will be used to process the query node tree. If there is any {@link + * QueryConfigHandler} returned by {@link #getQueryConfigHandler()}, it will be set on the + * processor. The argument can be null, which means that no processor will be used to + * process the query node tree. 
+ * + * @param processor the processor that will be used to process the query node tree, this argument + * can be null * @see #getQueryNodeProcessor() * @see QueryNodeProcessor */ public void setQueryNodeProcessor(QueryNodeProcessor processor) { this.processor = processor; this.processor.setQueryConfigHandler(getQueryConfigHandler()); - } /** - * Sets the text parser that will be used to parse the query string, it cannot - * be null. - * - * @param syntaxParser - * the text parser that will be used to parse the query string - * + * Sets the text parser that will be used to parse the query string, it cannot be null + * . + * + * @param syntaxParser the text parser that will be used to parse the query string * @see #getSyntaxParser() * @see SyntaxParser */ @@ -128,16 +117,13 @@ public class QueryParserHelper { } this.syntaxParser = syntaxParser; - } /** - * The query builder that will be used to build an object from the query node - * tree. It cannot be null. - * - * @param queryBuilder - * the query builder used to build something from the query node tree - * + * The query builder that will be used to build an object from the query node tree. It cannot be + * null. + * + * @param queryBuilder the query builder used to build something from the query node tree * @see #getQueryBuilder() * @see QueryBuilder */ @@ -148,16 +134,14 @@ public class QueryParserHelper { } this.builder = queryBuilder; - } /** - * Returns the query configuration handler, which is used during the query - * node tree processing. It can be null. - * - * @return the query configuration handler used on the query processing, - * null if not query configuration handler is defined - * + * Returns the query configuration handler, which is used during the query node tree processing. + * It can be null. + * + * @return the query configuration handler used on the query processing, null if not + * query configuration handler is defined * @see QueryConfigHandler * @see #setQueryConfigHandler(QueryConfigHandler) */ @@ -166,12 +150,10 @@ public class QueryParserHelper { } /** - * Returns the query builder used to build a object from the query node tree. - * The object produced by this builder is returned by - * {@link #parse(String, String)}. - * + * Returns the query builder used to build a object from the query node tree. The object produced + * by this builder is returned by {@link #parse(String, String)}. + * * @return the query builder - * * @see #setQueryBuilder(QueryBuilder) * @see QueryBuilder */ @@ -180,12 +162,10 @@ public class QueryParserHelper { } /** - * Returns the text parser used to build a query node tree from a query - * string. The default text parser instance returned by this method is a - * {@link SyntaxParser}. - * + * Returns the text parser used to build a query node tree from a query string. The default text + * parser instance returned by this method is a {@link SyntaxParser}. + * * @return the text parse used to build query node trees. - * * @see SyntaxParser * @see #setSyntaxParser(SyntaxParser) */ @@ -194,14 +174,11 @@ public class QueryParserHelper { } /** - * Sets the query configuration handler that will be used during query - * processing. It can be null. It's also set to the processor - * returned by {@link #getQueryNodeProcessor()}. - * - * @param config - * the query configuration handler used during query processing, it - * can be null - * + * Sets the query configuration handler that will be used during query processing. It can be + * null. 
It's also set to the processor returned by {@link #getQueryNodeProcessor()}. + * + * @param config the query configuration handler used during query processing, it can be + * null * @see #getQueryConfigHandler() * @see QueryConfigHandler */ @@ -212,7 +189,6 @@ public class QueryParserHelper { if (processor != null) { processor.setQueryConfigHandler(config); } - } /** @@ -220,28 +196,21 @@ public class QueryParserHelper { *
    * In this method the three phases are executed (see the sketch after this list):
    *
    - *      1st - the query string is parsed using the - * text parser returned by {@link #getSyntaxParser()}, the result is a query - * node tree
    + *      1st - the query string is parsed using the text parser returned + * by {@link #getSyntaxParser()}, the result is a query node tree
    *
    - *      2nd - the query node tree is processed by the - * processor returned by {@link #getQueryNodeProcessor()}
    + *      2nd - the query node tree is processed by the processor returned + * by {@link #getQueryNodeProcessor()}
    *
    - *      3th - a object is built from the query node - * tree using the builder returned by {@link #getQueryBuilder()} - * - * @param query - * the query string - * @param defaultField - * the default field used by the text parser - * + *      3th - a object is built from the query node tree using the + * builder returned by {@link #getQueryBuilder()} + * + * @param query the query string + * @param defaultField the default field used by the text parser * @return the object built from the query - * - * @throws QueryNodeException - * if something wrong happens along the three phases + * @throws QueryNodeException if something wrong happens along the three phases */ - public Object parse(String query, String defaultField) - throws QueryNodeException { + public Object parse(String query, String defaultField) throws QueryNodeException { QueryNode queryTree = getSyntaxParser().parse(query, defaultField); QueryNodeProcessor processor = getQueryNodeProcessor(); @@ -251,7 +220,5 @@ public class QueryParserHelper { } return getQueryBuilder().build(queryTree); - } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/builders/QueryBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/builders/QueryBuilder.java index b7e3257fd1c..84d6d0e7f7d 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/builders/QueryBuilder.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/builders/QueryBuilder.java @@ -20,21 +20,17 @@ import org.apache.lucene.queryparser.flexible.core.QueryNodeException; import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; /** - * This interface is used by implementors classes that builds some kind of - * object from a query tree. - * + * This interface is used by implementors classes that builds some kind of object from a query tree. + * * @see QueryTreeBuilder */ public interface QueryBuilder { /** * Builds some kind of object from a query tree. - * - * @param queryNode - * the query tree root node - * + * + * @param queryNode the query tree root node * @return some object generated from the query tree */ Object build(QueryNode queryNode) throws QueryNodeException; - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/builders/QueryTreeBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/builders/QueryTreeBuilder.java index 45f91749f33..d82bf9ec15b 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/builders/QueryTreeBuilder.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/builders/QueryTreeBuilder.java @@ -18,60 +18,54 @@ package org.apache.lucene.queryparser.flexible.core.builders; import java.util.HashMap; import java.util.List; - -import org.apache.lucene.queryparser.flexible.messages.MessageImpl; import org.apache.lucene.queryparser.flexible.core.QueryNodeException; import org.apache.lucene.queryparser.flexible.core.messages.QueryParserMessages; import org.apache.lucene.queryparser.flexible.core.nodes.FieldableNode; import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; +import org.apache.lucene.queryparser.flexible.messages.MessageImpl; import org.apache.lucene.queryparser.flexible.standard.parser.EscapeQuerySyntaxImpl; /** * This class should be used when there is a builder for each type of node. 
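To see the three phases end to end, here is a sketch that wires a QueryParserHelper from the standard flexible-parser components, roughly what StandardQueryParser does internally. The standard components' constructors and the ANALYZER configuration key are assumptions from the wider flexible API, not from this patch.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
import org.apache.lucene.queryparser.flexible.core.QueryParserHelper;
import org.apache.lucene.queryparser.flexible.standard.builders.StandardQueryTreeBuilder;
import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler;
import org.apache.lucene.queryparser.flexible.standard.parser.StandardSyntaxParser;
import org.apache.lucene.queryparser.flexible.standard.processors.StandardQueryNodeProcessorPipeline;

public class HelperDemo {
  public static void main(String[] args) throws QueryNodeException {
    StandardQueryConfigHandler config = new StandardQueryConfigHandler();
    config.set(StandardQueryConfigHandler.ConfigurationKeys.ANALYZER, new StandardAnalyzer());

    QueryParserHelper helper =
        new QueryParserHelper(
            config,
            new StandardSyntaxParser(),                     // 1st: text -> query node tree
            new StandardQueryNodeProcessorPipeline(config), // 2nd: tree -> processed tree
            new StandardQueryTreeBuilder());                // 3rd: tree -> Lucene Query
    Object query = helper.parse("title:lucene AND body:search", "content");
    System.out.println(query);
  }
}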
- * - * The type of node may be defined in 2 different ways: - by the field name, - * when the node implements the {@link FieldableNode} interface - by its class, - * it keeps checking the class and all the interfaces and classes this class - * implements/extends until it finds a builder for that class/interface - * - * This class always check if there is a builder for the field name before it - * checks for the node class. So, field name builders have precedence over class - * builders. - * - * When a builder is found for a node, it's called and the node is passed to the - * builder. If the returned built object is not null, it's tagged - * on the node using the tag {@link QueryTreeBuilder#QUERY_TREE_BUILDER_TAGID}. - * - * The children are usually built before the parent node. However, if a builder - * associated to a node is an instance of {@link QueryTreeBuilder}, the node is - * delegated to this builder and it's responsible to build the node and its - * children. - * + * + *

+ * <p>The type of node may be defined in two different ways: by the field name, when the node
+ * implements the {@link FieldableNode} interface, or by its class, in which case the lookup keeps
+ * checking the class and all the interfaces and classes this class implements/extends until it
+ * finds a builder for that class/interface.
+ *
+ *

+ * <p>This class always checks if there is a builder for the field name before it checks for the
+ * node class. So, field name builders have precedence over class builders.
+ *
+ *

    When a builder is found for a node, it's called and the node is passed to the builder. If the + * returned built object is not null, it's tagged on the node using the tag {@link + * QueryTreeBuilder#QUERY_TREE_BUILDER_TAGID}. + * + *
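A sketch of the registration API described above. FieldQueryNode and its getFieldAsString()/getTextAsString() accessors are assumed from the flexible core; the TermQuery mapping and the "title" override are invented for illustration.

import org.apache.lucene.index.Term;
import org.apache.lucene.queryparser.flexible.core.builders.QueryTreeBuilder;
import org.apache.lucene.queryparser.flexible.core.nodes.FieldQueryNode;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.TermQuery;

public class BuilderRegistrationDemo {
  public static QueryTreeBuilder newTreeBuilder() {
    QueryTreeBuilder tree = new QueryTreeBuilder();
    // Class-based rule: any FieldQueryNode becomes a TermQuery.
    tree.setBuilder(
        FieldQueryNode.class,
        node -> {
          FieldQueryNode fqn = (FieldQueryNode) node;
          return new TermQuery(new Term(fqn.getFieldAsString(), fqn.getTextAsString()));
        });
    // Field-name rule: nodes on field "title" win over the class rule above.
    tree.setBuilder("title", node -> new MatchAllDocsQuery());
    return tree;
  }
}

Because field-name builders take precedence, a FieldQueryNode on "title" would be built by the MatchAllDocsQuery rule, while any other field falls through to the class rule.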

    The children are usually built before the parent node. However, if a builder associated to a + * node is an instance of {@link QueryTreeBuilder}, the node is delegated to this builder and it's + * responsible to build the node and its children. + * * @see QueryBuilder */ public class QueryTreeBuilder implements QueryBuilder { /** - * This tag is used to tag the nodes in a query tree with the built objects - * produced from their own associated builder. + * This tag is used to tag the nodes in a query tree with the built objects produced from their + * own associated builder. */ - public static final String QUERY_TREE_BUILDER_TAGID = QueryTreeBuilder.class - .getName(); + public static final String QUERY_TREE_BUILDER_TAGID = QueryTreeBuilder.class.getName(); private HashMap, QueryBuilder> queryNodeBuilders; private HashMap fieldNameBuilders; - /** - * {@link QueryTreeBuilder} constructor. - */ + /** {@link QueryTreeBuilder} constructor. */ public QueryTreeBuilder() { // empty constructor } /** * Associates a field name with a builder. - * + * * @param fieldName the field name * @param builder the builder to be associated */ @@ -82,25 +76,21 @@ public class QueryTreeBuilder implements QueryBuilder { } this.fieldNameBuilders.put(fieldName.toString(), builder); - - } /** * Associates a class with a builder - * + * * @param queryNodeClass the class * @param builder the builder to be associated */ - public void setBuilder(Class queryNodeClass, - QueryBuilder builder) { + public void setBuilder(Class queryNodeClass, QueryBuilder builder) { if (this.queryNodeBuilders == null) { this.queryNodeBuilders = new HashMap<>(); } this.queryNodeBuilders.put(queryNodeClass, builder); - } private void process(QueryNode node) throws QueryNodeException { @@ -116,15 +106,11 @@ public class QueryTreeBuilder implements QueryBuilder { for (QueryNode child : children) { process(child); } - } - } processNode(node, builder); - } - } private QueryBuilder getBuilder(QueryNode node) { @@ -138,7 +124,6 @@ public class QueryTreeBuilder implements QueryBuilder { } builder = this.fieldNameBuilders.get(field); - } if (builder == null && this.queryNodeBuilders != null) { @@ -157,29 +142,24 @@ public class QueryTreeBuilder implements QueryBuilder { if (builder != null) { break; } - } - } } while (builder == null && (clazz = clazz.getSuperclass()) != null); - } return builder; - } - private void processNode(QueryNode node, QueryBuilder builder) - throws QueryNodeException { + private void processNode(QueryNode node, QueryBuilder builder) throws QueryNodeException { if (builder == null) { - throw new QueryNodeException(new MessageImpl( - QueryParserMessages.LUCENE_QUERY_CONVERSION_ERROR, node - .toQueryString(new EscapeQuerySyntaxImpl()), node.getClass() - .getName())); - + throw new QueryNodeException( + new MessageImpl( + QueryParserMessages.LUCENE_QUERY_CONVERSION_ERROR, + node.toQueryString(new EscapeQuerySyntaxImpl()), + node.getClass().getName())); } Object obj = builder.build(node); @@ -187,7 +167,6 @@ public class QueryTreeBuilder implements QueryBuilder { if (obj != null) { node.setTag(QUERY_TREE_BUILDER_TAGID, obj); } - } private QueryBuilder getQueryBuilder(Class clazz) { @@ -197,27 +176,21 @@ public class QueryTreeBuilder implements QueryBuilder { } return null; - } /** - * Builds some kind of object from a query tree. Each node in the query tree - * is built using an specific builder associated to it. - * + * Builds some kind of object from a query tree. 
Each node in the query tree is built using an + * specific builder associated to it. + * * @param queryNode the query tree root node - * * @return the built object - * - * @throws QueryNodeException if some node builder throws a - * {@link QueryNodeException} or if there is a node which had no - * builder associated to it + * @throws QueryNodeException if some node builder throws a {@link QueryNodeException} or if there + * is a node which had no builder associated to it */ @Override public Object build(QueryNode queryNode) throws QueryNodeException { process(queryNode); return queryNode.getTag(QUERY_TREE_BUILDER_TAGID); - } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/builders/package-info.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/builders/package-info.java index 9649b007c66..0858685a8ee 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/builders/package-info.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/builders/package-info.java @@ -19,14 +19,14 @@ * Necessary classes to implement query builders. * *

 * <h2>Query Parser Builders</h2>

    - *

    - * The package org.apache.lucene.queryParser.builders contains the interface that - * builders must implement, it also contain a utility {@link org.apache.lucene.queryparser.flexible.core.builders.QueryTreeBuilder}, which walks the tree - * and call the Builder for each node in the tree. - * Builder normally convert QueryNode Object into a Lucene Query Object, - * and normally it's a one-to-one mapping class. * - * But other builders implementations can by written to convert QueryNode objects to other non lucene objects. + *

+ * <p>The package org.apache.lucene.queryParser.builders contains the interface that
+ * builders must implement; it also contains a utility, {@link
+ * org.apache.lucene.queryparser.flexible.core.builders.QueryTreeBuilder}, which walks the tree and
+ * calls the builder for each node in the tree. A builder normally converts a QueryNode object into
+ * a Lucene Query object, and is normally a one-to-one mapping class.
+ *
+ *

+ * <p>But other builder implementations can be written to convert QueryNode objects to other,
+ * non-Lucene objects.
 */
package org.apache.lucene.queryparser.flexible.core.builders;
-
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/AbstractQueryConfig.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/AbstractQueryConfig.java
index bfbde4d4fc1..d9d4c989da8 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/AbstractQueryConfig.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/AbstractQueryConfig.java
@@ -19,99 +19,89 @@ package org.apache.lucene.queryparser.flexible.core.config;

import java.util.HashMap;

/**
- *

    - * This class is the base of {@link QueryConfigHandler} and {@link FieldConfig}. - * It has operations to set, unset and get configuration values. - *

    - *

    - * Each configuration is is a key->value pair. The key should be an unique - * {@link ConfigurationKey} instance and it also holds the value's type. - *

    - * + * This class is the base of {@link QueryConfigHandler} and {@link FieldConfig}. It has operations + * to set, unset and get configuration values. + * + *

    Each configuration is is a key->value pair. The key should be an unique {@link + * ConfigurationKey} instance and it also holds the value's type. + * * @see ConfigurationKey */ public abstract class AbstractQueryConfig { - - final private HashMap, Object> configMap = new HashMap<>(); - + + private final HashMap, Object> configMap = new HashMap<>(); + AbstractQueryConfig() { // although this class is public, it can only be constructed from package } - + /** * Returns the value held by the given key. - * + * * @param the value's type - * * @param key the key, cannot be null - * * @return the value held by the given key */ @SuppressWarnings("unchecked") public T get(ConfigurationKey key) { - + if (key == null) { throw new IllegalArgumentException("key must not be null!"); } - + return (T) this.configMap.get(key); - } /** * Returns true if there is a value set with the given key, otherwise false. - * + * * @param the value's type * @param key the key, cannot be null * @return true if there is a value set with the given key, otherwise false */ public boolean has(ConfigurationKey key) { - + if (key == null) { throw new IllegalArgumentException("key must not be null!"); } - + return this.configMap.containsKey(key); - } - + /** * Sets a key and its value. - * + * * @param the value's type * @param key the key, cannot be null * @param value value to set */ public void set(ConfigurationKey key, T value) { - + if (key == null) { throw new IllegalArgumentException("key must not be null!"); } - + if (value == null) { unset(key); - + } else { this.configMap.put(key, value); } - } /** * Unsets the given key and its value. - * + * * @param the value's type * @param key the key * @return true if the key and value was set and removed, otherwise false */ public boolean unset(ConfigurationKey key) { - + if (key == null) { throw new IllegalArgumentException("key must not be null!"); } - - return this.configMap.remove(key) != null; - - } + return this.configMap.remove(key) != null; + } } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/ConfigurationKey.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/ConfigurationKey.java index 7f402e71e23..92ed5f5da19 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/ConfigurationKey.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/ConfigurationKey.java @@ -17,25 +17,22 @@ package org.apache.lucene.queryparser.flexible.core.config; /** - * An instance of this class represents a key that is used to retrieve a value - * from {@link AbstractQueryConfig}. It also holds the value's type, which is - * defined in the generic argument. - * + * An instance of this class represents a key that is used to retrieve a value from {@link + * AbstractQueryConfig}. It also holds the value's type, which is defined in the generic argument. + * * @see AbstractQueryConfig */ -final public class ConfigurationKey { - +public final class ConfigurationKey { + private ConfigurationKey() {} - + /** * Creates a new instance. 
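A minimal sketch of this typed key/value API. MAX_EXPANSIONS is a made-up key, and StandardQueryConfigHandler merely serves as a concrete AbstractQueryConfig subclass; the set/get/has/unset calls are exactly the ones shown in the diff above.

import org.apache.lucene.queryparser.flexible.core.config.ConfigurationKey;
import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler;

public class ConfigKeyDemo {
  // The generic parameter records the value's type; get() needs no cast.
  static final ConfigurationKey<Integer> MAX_EXPANSIONS = ConfigurationKey.newInstance();

  public static void main(String[] args) {
    StandardQueryConfigHandler config = new StandardQueryConfigHandler();
    config.set(MAX_EXPANSIONS, 1024);
    Integer max = config.get(MAX_EXPANSIONS);   // typed read
    boolean had = config.has(MAX_EXPANSIONS);   // true
    boolean removed = config.unset(MAX_EXPANSIONS);
    System.out.println(max + " " + had + " " + removed);
  }
}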
- * + * * @param the value's type - * * @return a new instance */ public static ConfigurationKey newInstance() { return new ConfigurationKey<>(); } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/FieldConfig.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/FieldConfig.java index fae6f9fc2ec..0d7295175c6 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/FieldConfig.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/FieldConfig.java @@ -16,16 +16,14 @@ */ package org.apache.lucene.queryparser.flexible.core.config; -/** - * This class represents a field configuration. - */ +/** This class represents a field configuration. */ public class FieldConfig extends AbstractQueryConfig { private String fieldName; - + /** * Constructs a {@link FieldConfig} - * + * * @param fieldName the field name, it must not be null * @throws IllegalArgumentException if the field name is null */ @@ -36,12 +34,11 @@ public class FieldConfig extends AbstractQueryConfig { } this.fieldName = fieldName; - } /** * Returns the field name this configuration represents. - * + * * @return the field name */ public String getField() { @@ -50,8 +47,10 @@ public class FieldConfig extends AbstractQueryConfig { @Override public String toString() { - return ""; + return ""; } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/FieldConfigListener.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/FieldConfigListener.java index 87bbd1c11f8..2c58d61d450 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/FieldConfigListener.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/FieldConfigListener.java @@ -17,10 +17,10 @@ package org.apache.lucene.queryparser.flexible.core.config; /** - * This interface should be implemented by classes that wants to listen for - * field configuration requests. The implementation receives a - * {@link FieldConfig} object and may add/change its configuration. - * + * This interface should be implemented by classes that wants to listen for field configuration + * requests. The implementation receives a {@link FieldConfig} object and may add/change its + * configuration. + * * @see FieldConfig * @see QueryConfigHandler */ @@ -28,10 +28,8 @@ public interface FieldConfigListener { /** * This method is called ever time a field configuration is requested. 
- * - * @param fieldConfig - * the field configuration requested, should never be null + * + * @param fieldConfig the field configuration requested, should never be null */ void buildFieldConfig(FieldConfig fieldConfig); - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/QueryConfigHandler.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/QueryConfigHandler.java index 41362c363a6..89b17e15b80 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/QueryConfigHandler.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/QueryConfigHandler.java @@ -17,42 +17,36 @@ package org.apache.lucene.queryparser.flexible.core.config; import java.util.LinkedList; - import org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessor; import org.apache.lucene.queryparser.flexible.core.util.StringUtils; /** - * This class can be used to hold any query configuration and no field - * configuration. For field configuration, it creates an empty - * {@link FieldConfig} object and delegate it to field config listeners, - * these are responsible for setting up all the field configuration. - * - * {@link QueryConfigHandler} should be extended by classes that intends to - * provide configuration to {@link QueryNodeProcessor} objects. - * - * The class that extends {@link QueryConfigHandler} should also provide - * {@link FieldConfig} objects for each collection field. - * + * This class can be used to hold any query configuration and no field configuration. For field + * configuration, it creates an empty {@link FieldConfig} object and delegate it to field config + * listeners, these are responsible for setting up all the field configuration. + * + *

+ * <p>{@link QueryConfigHandler} should be extended by classes that intend to provide configuration
+ * to {@link QueryNodeProcessor} objects.
+ *
+ *

    The class that extends {@link QueryConfigHandler} should also provide {@link FieldConfig} + * objects for each collection field. + * * @see FieldConfig * @see FieldConfigListener * @see QueryConfigHandler */ public abstract class QueryConfigHandler extends AbstractQueryConfig { - - final private LinkedList listeners = new LinkedList<>(); + + private final LinkedList listeners = new LinkedList<>(); /** - * Returns an implementation of - * {@link FieldConfig} for a specific field name. If the implemented - * {@link QueryConfigHandler} does not know a specific field name, it may - * return null, indicating there is no configuration for that - * field. - * - * @param fieldName - * the field name - * @return a {@link FieldConfig} object containing the field name - * configuration or null, if the implemented - * {@link QueryConfigHandler} has no configuration for that field + * Returns an implementation of {@link FieldConfig} for a specific field name. If the implemented + * {@link QueryConfigHandler} does not know a specific field name, it may return null + * , indicating there is no configuration for that field. + * + * @param fieldName the field name + * @return a {@link FieldConfig} object containing the field name configuration or null + * , if the implemented {@link QueryConfigHandler} has no configuration for that field */ public FieldConfig getFieldConfig(String fieldName) { FieldConfig fieldConfig = new FieldConfig(StringUtils.toString(fieldName)); @@ -62,18 +56,14 @@ public abstract class QueryConfigHandler extends AbstractQueryConfig { } return fieldConfig; - } /** - * Adds a listener. The added listeners are called in the order they are - * added. - * - * @param listener - * the listener to be added + * Adds a listener. The added listeners are called in the order they are added. + * + * @param listener the listener to be added */ public void addFieldConfigListener(FieldConfigListener listener) { this.listeners.add(listener); } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/package-info.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/package-info.java index a07c95af0fd..2df122cb436 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/package-info.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/package-info.java @@ -14,22 +14,22 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -/** + +/** * Base classes used to configure the query processing. - * + * *
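Before the package summary below, a short sketch of the listener mechanism just described; the handler subclass and the per-field flag are hypothetical.

import org.apache.lucene.queryparser.flexible.core.config.ConfigurationKey;
import org.apache.lucene.queryparser.flexible.core.config.QueryConfigHandler;

public class PerFieldConfigHandler extends QueryConfigHandler {
  // Hypothetical per-field flag, for illustration only.
  public static final ConfigurationKey<Boolean> FUZZY_ENABLED = ConfigurationKey.newInstance();

  public PerFieldConfigHandler() {
    // Listeners run in registration order each time getFieldConfig(...) builds
    // an empty FieldConfig for a requested field name.
    addFieldConfigListener(
        fieldConfig -> {
          if ("title".equals(fieldConfig.getField())) {
            fieldConfig.set(FUZZY_ENABLED, Boolean.TRUE);
          }
        });
  }
}

With this handler, getFieldConfig("title").get(FUZZY_ENABLED) would return TRUE, while every other field receives an empty FieldConfig.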
<h2>Query Configuration Interfaces</h2>
    - *
<p>
    - * The package org.apache.lucene.queryparser.flexible.config contains query configuration handler - * abstract class that all config handlers should extend. - *
<p>
    - * See {@link org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler} for a reference - * implementation. - *
<p>
    - * The {@link org.apache.lucene.queryparser.flexible.core.config.QueryConfigHandler} and {@link org.apache.lucene.queryparser.flexible.core.config.FieldConfig} are used in the processors to access config - * information in a flexible and independent way. - * See {@link org.apache.lucene.queryparser.flexible.standard.processors.TermRangeQueryNodeProcessor} for a + * + *
<p>
    The package org.apache.lucene.queryparser.flexible.config contains query + * configuration handler abstract class that all config handlers should extend. + * + *
<p>
    See {@link org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler} + * for a reference implementation. + * + *
<p>
    The {@link org.apache.lucene.queryparser.flexible.core.config.QueryConfigHandler} and {@link + * org.apache.lucene.queryparser.flexible.core.config.FieldConfig} are used in the processors to + * access config information in a flexible and independent way. See {@link + * org.apache.lucene.queryparser.flexible.standard.processors.TermRangeQueryNodeProcessor} for a * reference implementation. */ package org.apache.lucene.queryparser.flexible.core.config; - diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/messages/QueryParserMessages.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/messages/QueryParserMessages.java index bf790e901b1..e48d7b65bc3 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/messages/QueryParserMessages.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/messages/QueryParserMessages.java @@ -18,9 +18,7 @@ package org.apache.lucene.queryparser.flexible.core.messages; import org.apache.lucene.queryparser.flexible.messages.NLS; -/** - * Flexible Query Parser message bundle class - */ +/** Flexible Query Parser message bundle class */ public class QueryParserMessages extends NLS { private static final String BUNDLE_NAME = QueryParserMessages.class.getName(); @@ -54,5 +52,4 @@ public class QueryParserMessages extends NLS { public static String NUMBER_CLASS_NOT_SUPPORTED_BY_NUMERIC_RANGE_QUERY; public static String UNSUPPORTED_NUMERIC_DATA_TYPE; public static String NUMERIC_CANNOT_BE_EMPTY; - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/messages/package-info.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/messages/package-info.java index 22b8bebf17f..1a5cb7f264c 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/messages/package-info.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/messages/package-info.java @@ -14,13 +14,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -/** + +/** * Messages usually used by query parser implementations. - * + * *
<h2>Query Parser Messages</h2>
    - * + * * Messages for the Flexible Query Parser, they use org.apache.lucene.messages.NLS API. */ package org.apache.lucene.queryparser.flexible.core.messages; - diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/AndQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/AndQueryNode.java index f2e3597338a..b7b6c677110 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/AndQueryNode.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/AndQueryNode.java @@ -16,39 +16,28 @@ */ package org.apache.lucene.queryparser.flexible.core.nodes; +import java.util.List; import org.apache.lucene.queryparser.flexible.core.parser.EscapeQuerySyntax; -import java.util.List; - - -/** - * A {@link AndQueryNode} represents an AND boolean operation performed on a - * list of nodes. - */ +/** A {@link AndQueryNode} represents an AND boolean operation performed on a list of nodes. */ public class AndQueryNode extends BooleanQueryNode { - /** - * @param clauses - * - the query nodes to be and'ed - */ + /** @param clauses - the query nodes to be and'ed */ public AndQueryNode(List clauses) { super(clauses); if ((clauses == null) || (clauses.size() == 0)) { - throw new IllegalArgumentException( - "AND query must have at least one clause"); + throw new IllegalArgumentException("AND query must have at least one clause"); } } @Override public String toString() { - if (getChildren() == null || getChildren().size() == 0) - return ""; + if (getChildren() == null || getChildren().size() == 0) return ""; StringBuilder sb = new StringBuilder(); sb.append(""); for (QueryNode child : getChildren()) { sb.append("\n"); sb.append(child.toString()); - } sb.append("\n"); return sb.toString(); @@ -56,8 +45,7 @@ public class AndQueryNode extends BooleanQueryNode { @Override public CharSequence toQueryString(EscapeQuerySyntax escapeSyntaxParser) { - if (getChildren() == null || getChildren().size() == 0) - return ""; + if (getChildren() == null || getChildren().size() == 0) return ""; StringBuilder sb = new StringBuilder(); String filler = ""; @@ -67,11 +55,8 @@ public class AndQueryNode extends BooleanQueryNode { } // in case is root or the parent is a group node avoid parenthesis - if ((getParent() != null && getParent() instanceof GroupQueryNode) - || isRoot()) + if ((getParent() != null && getParent() instanceof GroupQueryNode) || isRoot()) return sb.toString(); - else - return "( " + sb.toString() + " )"; + else return "( " + sb.toString() + " )"; } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/AnyQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/AnyQueryNode.java index 1e1592eee45..843e328bf20 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/AnyQueryNode.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/AnyQueryNode.java @@ -16,25 +16,16 @@ */ package org.apache.lucene.queryparser.flexible.core.nodes; +import java.util.List; import org.apache.lucene.queryparser.flexible.core.parser.EscapeQuerySyntax; -import java.util.List; - - -/** - * A {@link AnyQueryNode} represents an ANY operator performed on a list of - * nodes. - */ +/** A {@link AnyQueryNode} represents an ANY operator performed on a list of nodes. 
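As an aside to the AndQueryNode hunk above, a minimal sketch of building and rendering such a node. The wrapper class, field names, and character offsets are invented for illustration, and EscapeQuerySyntaxImpl from the standard parser is an assumption about the escaper implementation.

```java
import java.util.Arrays;
import org.apache.lucene.queryparser.flexible.core.nodes.AndQueryNode;
import org.apache.lucene.queryparser.flexible.core.nodes.FieldQueryNode;
import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
import org.apache.lucene.queryparser.flexible.standard.parser.EscapeQuerySyntaxImpl;

public class AndNodeSketch {
  public static void main(String[] args) {
    // Two field/text leaves; begin/end are offsets into the original query string.
    QueryNode title = new FieldQueryNode("title", "lucene", 0, 6);
    QueryNode body = new FieldQueryNode("body", "search", 7, 13);

    // The constructor rejects a null or empty clause list.
    AndQueryNode and = new AndQueryNode(Arrays.asList(title, body));

    // As a root node it is rendered without surrounding parentheses,
    // per the GroupQueryNode/isRoot() check in toQueryString above.
    System.out.println(and.toQueryString(new EscapeQuerySyntaxImpl()));
  }
}
```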
*/ public class AnyQueryNode extends AndQueryNode { private CharSequence field = null; private int minimumMatchingmElements = 0; - /** - * @param clauses - * - the query nodes to be or'ed - */ - public AnyQueryNode(List clauses, CharSequence field, - int minimumMatchingElements) { + /** @param clauses - the query nodes to be or'ed */ + public AnyQueryNode(List clauses, CharSequence field, int minimumMatchingElements) { super(clauses); this.field = field; this.minimumMatchingmElements = minimumMatchingElements; @@ -52,12 +43,9 @@ public class AnyQueryNode extends AndQueryNode { if (clause instanceof FieldableNode) { ((FieldableNode) clause).setField(field); } - } } - } - } public int getMinimumMatchingElements() { @@ -66,7 +54,7 @@ public class AnyQueryNode extends AndQueryNode { /** * returns null if the field was not specified - * + * * @return the field */ public CharSequence getField() { @@ -75,20 +63,15 @@ public class AnyQueryNode extends AndQueryNode { /** * returns - null if the field was not specified - * + * * @return the field as a String */ public String getFieldAsString() { - if (this.field == null) - return null; - else - return this.field.toString(); + if (this.field == null) return null; + else return this.field.toString(); } - /** - * @param field - * - the field to set - */ + /** @param field - the field to set */ public void setField(CharSequence field) { this.field = field; } @@ -106,10 +89,17 @@ public class AnyQueryNode extends AndQueryNode { @Override public String toString() { if (getChildren() == null || getChildren().size() == 0) - return ""; + return ""; StringBuilder sb = new StringBuilder(); - sb.append(" clauses) { setLeaf(false); allocate(); @@ -40,8 +35,7 @@ public class BooleanQueryNode extends QueryNodeImpl { @Override public String toString() { - if (getChildren() == null || getChildren().size() == 0) - return ""; + if (getChildren() == null || getChildren().size() == 0) return ""; StringBuilder sb = new StringBuilder(); sb.append(""); for (QueryNode child : getChildren()) { @@ -54,8 +48,7 @@ public class BooleanQueryNode extends QueryNodeImpl { @Override public CharSequence toQueryString(EscapeQuerySyntax escapeSyntaxParser) { - if (getChildren() == null || getChildren().size() == 0) - return ""; + if (getChildren() == null || getChildren().size() == 0) return ""; StringBuilder sb = new StringBuilder(); String filler = ""; @@ -65,11 +58,9 @@ public class BooleanQueryNode extends QueryNodeImpl { } // in case is root or the parent is a group node avoid parenthesis - if ((getParent() != null && getParent() instanceof GroupQueryNode) - || isRoot()) + if ((getParent() != null && getParent() instanceof GroupQueryNode) || isRoot()) return sb.toString(); - else - return "( " + sb.toString() + " )"; + else return "( " + sb.toString() + " )"; } @Override @@ -80,5 +71,4 @@ public class BooleanQueryNode extends QueryNodeImpl { return clone; } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/BoostQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/BoostQueryNode.java index d7cce31a32d..42bc83cef88 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/BoostQueryNode.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/BoostQueryNode.java @@ -17,18 +17,16 @@ package org.apache.lucene.queryparser.flexible.core.nodes; import java.util.List; - -import org.apache.lucene.queryparser.flexible.messages.MessageImpl; 
import org.apache.lucene.queryparser.flexible.core.QueryNodeError; import org.apache.lucene.queryparser.flexible.core.messages.QueryParserMessages; import org.apache.lucene.queryparser.flexible.core.parser.EscapeQuerySyntax; +import org.apache.lucene.queryparser.flexible.messages.MessageImpl; /** - * A {@link BoostQueryNode} boosts the QueryNode tree which is under this node. - * So, it must only and always have one child. - * - * The boost value may vary from 0.0 to 1.0. - * + * A {@link BoostQueryNode} boosts the QueryNode tree which is under this node. So, it must only and + * always have one child. + * + *
<p>
    The boost value may vary from 0.0 to 1.0. */ public class BoostQueryNode extends QueryNodeImpl { @@ -36,16 +34,14 @@ public class BoostQueryNode extends QueryNodeImpl { /** * Constructs a boost node - * - * @param query - * the query to be boosted - * @param value - * the boost value, it may vary from 0.0 to 1.0 + * + * @param query the query to be boosted + * @param value the boost value, it may vary from 0.0 to 1.0 */ public BoostQueryNode(QueryNode query, float value) { if (query == null) { - throw new QueryNodeError(new MessageImpl( - QueryParserMessages.NODE_ACTION_NOT_SUPPORTED, "query", "null")); + throw new QueryNodeError( + new MessageImpl(QueryParserMessages.NODE_ACTION_NOT_SUPPORTED, "query", "null")); } this.value = value; @@ -56,7 +52,7 @@ public class BoostQueryNode extends QueryNodeImpl { /** * Returns the single child which this node boosts. - * + * * @return the single child which this node boosts */ public QueryNode getChild() { @@ -67,12 +63,11 @@ public class BoostQueryNode extends QueryNodeImpl { } return children.get(0); - } /** * Returns the boost value. It may vary from 0.0 to 1.0. - * + * * @return the boost value */ public float getValue() { @@ -81,30 +76,24 @@ public class BoostQueryNode extends QueryNodeImpl { /** * Returns the boost value parsed to a string. - * + * * @return the parsed value */ private CharSequence getValueString() { Float f = Float.valueOf(this.value); - if (f == f.longValue()) - return "" + f.longValue(); - else - return "" + f; - + if (f == f.longValue()) return "" + f.longValue(); + else return "" + f; } @Override public String toString() { - return "" + "\n" - + getChild().toString() + "\n"; + return "" + "\n" + getChild().toString() + "\n"; } @Override public CharSequence toQueryString(EscapeQuerySyntax escapeSyntaxParser) { - if (getChild() == null) - return ""; - return getChild().toQueryString(escapeSyntaxParser) + "^" - + getValueString(); + if (getChild() == null) return ""; + return getChild().toQueryString(escapeSyntaxParser) + "^" + getValueString(); } @Override @@ -115,5 +104,4 @@ public class BoostQueryNode extends QueryNodeImpl { return clone; } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/DeletedQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/DeletedQueryNode.java index 51f60f076ca..483737a2ff4 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/DeletedQueryNode.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/DeletedQueryNode.java @@ -17,13 +17,11 @@ package org.apache.lucene.queryparser.flexible.core.nodes; import org.apache.lucene.queryparser.flexible.core.parser.EscapeQuerySyntax; - import org.apache.lucene.queryparser.flexible.core.processors.RemoveDeletedQueryNodesProcessor; /** - * A {@link DeletedQueryNode} represents a node that was deleted from the query - * node tree. It can be removed from the tree using the - * {@link RemoveDeletedQueryNodesProcessor} processor. + * A {@link DeletedQueryNode} represents a node that was deleted from the query node tree. It can be + * removed from the tree using the {@link RemoveDeletedQueryNodesProcessor} processor. 
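A quick usage sketch for the BoostQueryNode constructor reformatted above; the term, offsets, and boost value are illustrative only.

```java
import org.apache.lucene.queryparser.flexible.core.nodes.BoostQueryNode;
import org.apache.lucene.queryparser.flexible.core.nodes.FieldQueryNode;
import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;

public class BoostNodeSketch {
  public static void main(String[] args) {
    QueryNode term = new FieldQueryNode("title", "lucene", 0, 6);

    // Exactly one child is wrapped; a null query is rejected with a QueryNodeError.
    BoostQueryNode boosted = new BoostQueryNode(term, 0.5f);

    System.out.println(boosted.getChild()); // the wrapped field node
    System.out.println(boosted.getValue()); // 0.5
  }
}
```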
*/ public class DeletedQueryNode extends QueryNodeImpl { @@ -46,7 +44,5 @@ public class DeletedQueryNode extends QueryNodeImpl { DeletedQueryNode clone = (DeletedQueryNode) super.cloneTree(); return clone; - } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/FieldQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/FieldQueryNode.java index e9813c4cc13..92026d8299c 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/FieldQueryNode.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/FieldQueryNode.java @@ -16,59 +16,40 @@ */ package org.apache.lucene.queryparser.flexible.core.nodes; +import java.util.Locale; import org.apache.lucene.queryparser.flexible.core.parser.EscapeQuerySyntax; -import java.util.Locale; +/** A {@link FieldQueryNode} represents a element that contains field/text tuple */ +public class FieldQueryNode extends QueryNodeImpl + implements FieldValuePairQueryNode, TextableQueryNode { - -/** - * A {@link FieldQueryNode} represents a element that contains field/text tuple - */ -public class FieldQueryNode extends QueryNodeImpl implements FieldValuePairQueryNode, TextableQueryNode { - - /** - * The term's field - */ + /** The term's field */ protected CharSequence field; - /** - * The term's text. - */ + /** The term's text. */ protected CharSequence text; - /** - * The term's begin position. - */ + /** The term's begin position. */ protected int begin; - /** - * The term's end position. - */ + /** The term's end position. */ protected int end; - /** - * The term's position increment. - */ + /** The term's position increment. */ protected int positionIncrement; /** - * @param field - * - field name - * @param text - * - value - * @param begin - * - position in the query string - * @param end - * - position in the query string + * @param field - field name + * @param text - value + * @param begin - position in the query string + * @param end - position in the query string */ - public FieldQueryNode(CharSequence field, CharSequence text, int begin, - int end) { + public FieldQueryNode(CharSequence field, CharSequence text, int begin, int end) { this.field = field; this.text = text; this.begin = begin; this.end = end; this.setLeaf(true); - } protected CharSequence getTermEscaped(EscapeQuerySyntax escaper) { @@ -90,30 +71,31 @@ public class FieldQueryNode extends QueryNodeImpl implements FieldValuePairQuery @Override public String toString() { - return ""; + return ""; } - /** - * @return the term - */ + /** @return the term */ public String getTextAsString() { - if (this.text == null) - return null; - else - return this.text.toString(); + if (this.text == null) return null; + else return this.text.toString(); } /** * returns null if the field was not specified in the query string - * + * * @return the field */ public String getFieldAsString() { - if (this.field == null) - return null; - else - return this.field.toString(); + if (this.field == null) return null; + else return this.field.toString(); } public int getBegin() { @@ -152,7 +134,7 @@ public class FieldQueryNode extends QueryNodeImpl implements FieldValuePairQuery /** * Returns the term. - * + * * @return The "original" form of the term. 
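A small sketch of the FieldQueryNode accessors touched above, in particular the null-field behavior; the names and offsets are invented.

```java
import org.apache.lucene.queryparser.flexible.core.nodes.FieldQueryNode;

public class FieldNodeSketch {
  public static void main(String[] args) {
    // A leaf without a field: the *AsString accessors return null instead of throwing.
    FieldQueryNode node = new FieldQueryNode(null, "lucene", 0, 6);

    System.out.println(node.getTextAsString());  // "lucene"
    System.out.println(node.getFieldAsString()); // null
    System.out.println(node.getBegin() + ".." + node.getEnd()); // 0..6
  }
}
```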
*/ @Override @@ -160,10 +142,7 @@ public class FieldQueryNode extends QueryNodeImpl implements FieldValuePairQuery return this.text; } - /** - * @param text - * the text to set - */ + /** @param text the text to set */ @Override public void setText(CharSequence text) { this.text = text; @@ -180,7 +159,6 @@ public class FieldQueryNode extends QueryNodeImpl implements FieldValuePairQuery fqn.toQueryStringIgnoreFields = this.toQueryStringIgnoreFields; return fqn; - } @Override @@ -192,5 +170,4 @@ public class FieldQueryNode extends QueryNodeImpl implements FieldValuePairQuery public void setValue(CharSequence value) { setText(value); } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/FieldValuePairQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/FieldValuePairQueryNode.java index 85f0bfb87a0..b334a30254d 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/FieldValuePairQueryNode.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/FieldValuePairQueryNode.java @@ -17,13 +17,11 @@ package org.apache.lucene.queryparser.flexible.core.nodes; /** - * This interface should be implemented by {@link QueryNode} that holds a field - * and an arbitrary value. - * + * This interface should be implemented by {@link QueryNode} that holds a field and an arbitrary + * value. + * * @see FieldableNode * @see ValueQueryNode */ -public interface FieldValuePairQueryNode extends - FieldableNode, ValueQueryNode { - -} +public interface FieldValuePairQueryNode + extends FieldableNode, ValueQueryNode {} diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/FieldableNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/FieldableNode.java index 3ca9b5faf66..203c47f3396 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/FieldableNode.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/FieldableNode.java @@ -17,28 +17,25 @@ package org.apache.lucene.queryparser.flexible.core.nodes; /** - * A query node implements {@link FieldableNode} interface to indicate that its - * children and itself are associated to a specific field. - * - * If it has any children which also implements this interface, it must ensure - * the children are associated to the same field. - * + * A query node implements {@link FieldableNode} interface to indicate that its children and itself + * are associated to a specific field. + * + *
<p>
    If it has any children which also implements this interface, it must ensure the children are + * associated to the same field. */ public interface FieldableNode extends QueryNode { /** * Returns the field associated to the node and every node under it. - * + * * @return the field name */ CharSequence getField(); /** * Associates the node to a field. - * - * @param fieldName - * the field name + * + * @param fieldName the field name */ void setField(CharSequence fieldName); - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/FuzzyQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/FuzzyQueryNode.java index 8dc3cf07ae4..c69831f9925 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/FuzzyQueryNode.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/FuzzyQueryNode.java @@ -18,10 +18,7 @@ package org.apache.lucene.queryparser.flexible.core.nodes; import org.apache.lucene.queryparser.flexible.core.parser.EscapeQuerySyntax; -/** - * A {@link FuzzyQueryNode} represents a element that contains - * field/text/similarity tuple - */ +/** A {@link FuzzyQueryNode} represents a element that contains field/text/similarity tuple */ public class FuzzyQueryNode extends FieldQueryNode { private float similarity; @@ -29,25 +26,18 @@ public class FuzzyQueryNode extends FieldQueryNode { private int prefixLength; /** - * @param field - * Name of the field query will use. - * @param termStr - * Term token to use for building term for the query + * @param field Name of the field query will use. + * @param termStr Term token to use for building term for the query */ /** - * @param field - * - Field name - * @param term - * - Value - * @param minSimilarity - * - similarity value - * @param begin - * - position in the query string - * @param end - * - position in the query string + * @param field - Field name + * @param term - Value + * @param minSimilarity - similarity value + * @param begin - position in the query string + * @param end - position in the query string */ - public FuzzyQueryNode(CharSequence field, CharSequence term, - float minSimilarity, int begin, int end) { + public FuzzyQueryNode( + CharSequence field, CharSequence term, float minSimilarity, int begin, int end) { super(field, term, begin, end); this.similarity = minSimilarity; setLeaf(true); @@ -72,8 +62,13 @@ public class FuzzyQueryNode extends FieldQueryNode { @Override public String toString() { - return ""; + return ""; } public void setSimilarity(float similarity) { @@ -89,9 +84,7 @@ public class FuzzyQueryNode extends FieldQueryNode { return clone; } - /** - * @return the similarity - */ + /** @return the similarity */ public float getSimilarity() { return this.similarity; } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/GroupQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/GroupQueryNode.java index e28ac029a69..22449e74cdb 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/GroupQueryNode.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/GroupQueryNode.java @@ -18,28 +18,24 @@ package org.apache.lucene.queryparser.flexible.core.nodes; import java.util.ArrayList; import java.util.List; - -import org.apache.lucene.queryparser.flexible.messages.MessageImpl; import 
org.apache.lucene.queryparser.flexible.core.QueryNodeError; import org.apache.lucene.queryparser.flexible.core.messages.QueryParserMessages; import org.apache.lucene.queryparser.flexible.core.parser.EscapeQuerySyntax; +import org.apache.lucene.queryparser.flexible.messages.MessageImpl; /** - * A {@link GroupQueryNode} represents a location where the original user typed - * real parenthesis on the query string. This class is useful for queries like: - * a) a AND b OR c b) ( a AND b) OR c - * - * Parenthesis might be used to define the boolean operation precedence. + * A {@link GroupQueryNode} represents a location where the original user typed real parenthesis on + * the query string. This class is useful for queries like: a) a AND b OR c b) ( a AND b) OR c + * + *
<p>
    Parenthesis might be used to define the boolean operation precedence. */ public class GroupQueryNode extends QueryNodeImpl { - /** - * This QueryNode is used to identify parenthesis on the original query string - */ + /** This QueryNode is used to identify parenthesis on the original query string */ public GroupQueryNode(QueryNode query) { if (query == null) { - throw new QueryNodeError(new MessageImpl( - QueryParserMessages.PARAMETER_VALUE_NOT_SUPPORTED, "query", "null")); + throw new QueryNodeError( + new MessageImpl(QueryParserMessages.PARAMETER_VALUE_NOT_SUPPORTED, "query", "null")); } allocate(); @@ -58,8 +54,7 @@ public class GroupQueryNode extends QueryNodeImpl { @Override public CharSequence toQueryString(EscapeQuerySyntax escapeSyntaxParser) { - if (getChild() == null) - return ""; + if (getChild() == null) return ""; return "( " + getChild().toQueryString(escapeSyntaxParser) + " )"; } @@ -76,5 +71,4 @@ public class GroupQueryNode extends QueryNodeImpl { list.add(child); this.set(list); } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/MatchAllDocsQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/MatchAllDocsQueryNode.java index 39d06ce9dbe..9668ad16c33 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/MatchAllDocsQueryNode.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/MatchAllDocsQueryNode.java @@ -19,8 +19,8 @@ package org.apache.lucene.queryparser.flexible.core.nodes; import org.apache.lucene.queryparser.flexible.core.parser.EscapeQuerySyntax; /** - * A {@link MatchAllDocsQueryNode} indicates that a query node tree or subtree - * will match all documents if executed in the index. + * A {@link MatchAllDocsQueryNode} indicates that a query node tree or subtree will match all + * documents if executed in the index. */ public class MatchAllDocsQueryNode extends QueryNodeImpl { diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/MatchNoDocsQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/MatchNoDocsQueryNode.java index d676c4d3972..97df7d082d5 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/MatchNoDocsQueryNode.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/MatchNoDocsQueryNode.java @@ -17,9 +17,8 @@ package org.apache.lucene.queryparser.flexible.core.nodes; /** - * A {@link MatchNoDocsQueryNode} indicates that a query node tree or subtree - * will not match any documents if executed in the index. - * + * A {@link MatchNoDocsQueryNode} indicates that a query node tree or subtree will not match any + * documents if executed in the index. 
*/ public class MatchNoDocsQueryNode extends DeletedQueryNode { @@ -31,5 +30,4 @@ public class MatchNoDocsQueryNode extends DeletedQueryNode { public String toString() { return ""; } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/ModifierQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/ModifierQueryNode.java index 5e9855af458..ed0fbdeb514 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/ModifierQueryNode.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/ModifierQueryNode.java @@ -18,38 +18,40 @@ package org.apache.lucene.queryparser.flexible.core.nodes; import java.util.ArrayList; import java.util.List; - -import org.apache.lucene.queryparser.flexible.messages.MessageImpl; -import org.apache.lucene.queryparser.flexible.core.parser.EscapeQuerySyntax; import org.apache.lucene.queryparser.flexible.core.QueryNodeError; import org.apache.lucene.queryparser.flexible.core.messages.QueryParserMessages; +import org.apache.lucene.queryparser.flexible.core.parser.EscapeQuerySyntax; +import org.apache.lucene.queryparser.flexible.messages.MessageImpl; /** - * A {@link ModifierQueryNode} indicates the modifier value (+,-,?,NONE) for - * each term on the query string. For example "+t1 -t2 t3" will have a tree of: + * A {@link ModifierQueryNode} indicates the modifier value (+,-,?,NONE) for each term on the query + * string. For example "+t1 -t2 t3" will have a tree of: + * *
<pre>
+ * * <BooleanQueryNode> <ModifierQueryNode modifier="MOD_REQ"> <t1/> * </ModifierQueryNode> <ModifierQueryNode modifier="MOD_NOT"> <t2/> * </ModifierQueryNode> <t3/> </BooleanQueryNode> + * * </pre>
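A compilable sketch of building the "+t1 -t2 t3" tree from the javadoc above; the offsets and the null default field are illustrative assumptions.

```java
import java.util.Arrays;
import org.apache.lucene.queryparser.flexible.core.nodes.BooleanQueryNode;
import org.apache.lucene.queryparser.flexible.core.nodes.FieldQueryNode;
import org.apache.lucene.queryparser.flexible.core.nodes.ModifierQueryNode;
import org.apache.lucene.queryparser.flexible.core.nodes.ModifierQueryNode.Modifier;
import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;

public class ModifierTreeSketch {
  public static void main(String[] args) {
    QueryNode t1 = new ModifierQueryNode(new FieldQueryNode(null, "t1", 1, 3), Modifier.MOD_REQ);
    QueryNode t2 = new ModifierQueryNode(new FieldQueryNode(null, "t2", 5, 7), Modifier.MOD_NOT);
    QueryNode t3 = new FieldQueryNode(null, "t3", 8, 10);

    // Equivalent of the "+t1 -t2 t3" tree sketched in the javadoc.
    QueryNode root = new BooleanQueryNode(Arrays.asList(t1, t2, t3));
    System.out.println(root); // pseudo-XML dump of the tree
  }
}
```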
    */ public class ModifierQueryNode extends QueryNodeImpl { - /** - * Modifier type: such as required (REQ), prohibited (NOT) - */ + /** Modifier type: such as required (REQ), prohibited (NOT) */ public enum Modifier { - MOD_NONE, MOD_NOT, MOD_REQ; + MOD_NONE, + MOD_NOT, + MOD_REQ; @Override public String toString() { switch (this) { - case MOD_NONE: - return "MOD_NONE"; - case MOD_NOT: - return "MOD_NOT"; - case MOD_REQ: - return "MOD_REQ"; + case MOD_NONE: + return "MOD_NONE"; + case MOD_NOT: + return "MOD_NOT"; + case MOD_REQ: + return "MOD_REQ"; } // this code is never executed return "MOD_DEFAULT"; @@ -57,12 +59,12 @@ public class ModifierQueryNode extends QueryNodeImpl { public String toDigitString() { switch (this) { - case MOD_NONE: - return ""; - case MOD_NOT: - return "-"; - case MOD_REQ: - return "+"; + case MOD_NONE: + return ""; + case MOD_NOT: + return "-"; + case MOD_REQ: + return "+"; } // this code is never executed return ""; @@ -70,12 +72,12 @@ public class ModifierQueryNode extends QueryNodeImpl { public String toLargeString() { switch (this) { - case MOD_NONE: - return ""; - case MOD_NOT: - return "NOT "; - case MOD_REQ: - return "+"; + case MOD_NONE: + return ""; + case MOD_NOT: + return "NOT "; + case MOD_REQ: + return "+"; } // this code is never executed return ""; @@ -86,16 +88,14 @@ public class ModifierQueryNode extends QueryNodeImpl { /** * Used to store the modifier value on the original query string - * - * @param query - * - QueryNode subtree - * @param mod - * - Modifier Value + * + * @param query - QueryNode subtree + * @param mod - Modifier Value */ public ModifierQueryNode(QueryNode query, Modifier mod) { if (query == null) { - throw new QueryNodeError(new MessageImpl( - QueryParserMessages.PARAMETER_VALUE_NOT_SUPPORTED, "query", "null")); + throw new QueryNodeError( + new MessageImpl(QueryParserMessages.PARAMETER_VALUE_NOT_SUPPORTED, "query", "null")); } allocate(); @@ -114,14 +114,17 @@ public class ModifierQueryNode extends QueryNodeImpl { @Override public String toString() { - return "" + "\n" - + getChild().toString() + "\n"; + return "" + + "\n" + + getChild().toString() + + "\n"; } @Override public CharSequence toQueryString(EscapeQuerySyntax escapeSyntaxParser) { - if (getChild() == null) - return ""; + if (getChild() == null) return ""; String leftParenthensis = ""; String rightParenthensis = ""; @@ -132,11 +135,15 @@ public class ModifierQueryNode extends QueryNodeImpl { } if (getChild() instanceof BooleanQueryNode) { - return this.modifier.toLargeString() + leftParenthensis - + getChild().toQueryString(escapeSyntaxParser) + rightParenthensis; + return this.modifier.toLargeString() + + leftParenthensis + + getChild().toQueryString(escapeSyntaxParser) + + rightParenthensis; } else { - return this.modifier.toDigitString() + leftParenthensis - + getChild().toQueryString(escapeSyntaxParser) + rightParenthensis; + return this.modifier.toDigitString() + + leftParenthensis + + getChild().toQueryString(escapeSyntaxParser) + + rightParenthensis; } } @@ -154,5 +161,4 @@ public class ModifierQueryNode extends QueryNodeImpl { list.add(child); this.set(list); } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/NoTokenFoundQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/NoTokenFoundQueryNode.java index e0e73b484ee..695f77eb4d3 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/NoTokenFoundQueryNode.java +++ 
b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/NoTokenFoundQueryNode.java @@ -19,8 +19,8 @@ package org.apache.lucene.queryparser.flexible.core.nodes; import org.apache.lucene.queryparser.flexible.core.parser.EscapeQuerySyntax; /** - * A {@link NoTokenFoundQueryNode} is used if a term is convert into no tokens - * by the tokenizer/lemmatizer/analyzer (null). + * A {@link NoTokenFoundQueryNode} is used if a term is convert into no tokens by the + * tokenizer/lemmatizer/analyzer (null). */ public class NoTokenFoundQueryNode extends DeletedQueryNode { diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/OpaqueQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/OpaqueQueryNode.java index 1c349d7c9cc..0c89b8e282e 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/OpaqueQueryNode.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/OpaqueQueryNode.java @@ -19,9 +19,9 @@ package org.apache.lucene.queryparser.flexible.core.nodes; import org.apache.lucene.queryparser.flexible.core.parser.EscapeQuerySyntax; /** - * A {@link OpaqueQueryNode} is used for specify values that are not supposed to - * be parsed by the parser. For example: and XPATH query in the middle of a - * query string a b @xpath:'/bookstore/book[1]/title' c d + * A {@link OpaqueQueryNode} is used for specify values that are not supposed to be parsed by the + * parser. For example: and XPATH query in the middle of a query string a + * b @xpath:'/bookstore/book[1]/title' c d */ public class OpaqueQueryNode extends QueryNodeImpl { @@ -30,17 +30,14 @@ public class OpaqueQueryNode extends QueryNodeImpl { private CharSequence value = null; /** - * @param schema - * - schema identifier - * @param value - * - value that was not parsed + * @param schema - schema identifier + * @param value - value that was not parsed */ public OpaqueQueryNode(CharSequence schema, CharSequence value) { this.setLeaf(true); this.schema = schema; this.value = value; - } @Override @@ -63,18 +60,13 @@ public class OpaqueQueryNode extends QueryNodeImpl { return clone; } - /** - * @return the schema - */ + /** @return the schema */ public CharSequence getSchema() { return this.schema; } - /** - * @return the value - */ + /** @return the value */ public CharSequence getValue() { return this.value; } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/OrQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/OrQueryNode.java index 93525689fb2..5aada599976 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/OrQueryNode.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/OrQueryNode.java @@ -18,38 +18,27 @@ package org.apache.lucene.queryparser.flexible.core.nodes; import java.util.Iterator; import java.util.List; - import org.apache.lucene.queryparser.flexible.core.parser.EscapeQuerySyntax; -/** - * A {@link OrQueryNode} represents an OR boolean operation performed on a list - * of nodes. - * - */ +/** A {@link OrQueryNode} represents an OR boolean operation performed on a list of nodes. 
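A sketch of the parenthesis rule visible in the OrQueryNode and GroupQueryNode hunks: wrapping an OR in a GroupQueryNode records explicit user parentheses, and the inner node then skips its own "( ... )" wrapper. The field offsets are invented, and EscapeQuerySyntaxImpl from the standard parser is again assumed.

```java
import java.util.Arrays;
import org.apache.lucene.queryparser.flexible.core.nodes.FieldQueryNode;
import org.apache.lucene.queryparser.flexible.core.nodes.GroupQueryNode;
import org.apache.lucene.queryparser.flexible.core.nodes.OrQueryNode;
import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
import org.apache.lucene.queryparser.flexible.standard.parser.EscapeQuerySyntaxImpl;

public class GroupNodeSketch {
  public static void main(String[] args) {
    QueryNode a = new FieldQueryNode(null, "a", 0, 1);
    QueryNode b = new FieldQueryNode(null, "b", 5, 6);

    // The OrQueryNode sees a GroupQueryNode parent and omits its own parentheses;
    // GroupQueryNode supplies them instead.
    GroupQueryNode group = new GroupQueryNode(new OrQueryNode(Arrays.asList(a, b)));
    System.out.println(group.toQueryString(new EscapeQuerySyntaxImpl())); // ( a OR b )
  }
}
```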
*/ public class OrQueryNode extends BooleanQueryNode { - /** - * @param clauses - * - the query nodes to be or'ed - */ + /** @param clauses - the query nodes to be or'ed */ public OrQueryNode(List clauses) { super(clauses); if ((clauses == null) || (clauses.size() == 0)) { - throw new IllegalArgumentException( - "OR query must have at least one clause"); + throw new IllegalArgumentException("OR query must have at least one clause"); } } @Override public String toString() { - if (getChildren() == null || getChildren().size() == 0) - return ""; + if (getChildren() == null || getChildren().size() == 0) return ""; StringBuilder sb = new StringBuilder(); sb.append(""); for (QueryNode child : getChildren()) { sb.append("\n"); sb.append(child.toString()); - } sb.append("\n"); return sb.toString(); @@ -57,21 +46,18 @@ public class OrQueryNode extends BooleanQueryNode { @Override public CharSequence toQueryString(EscapeQuerySyntax escapeSyntaxParser) { - if (getChildren() == null || getChildren().size() == 0) - return ""; + if (getChildren() == null || getChildren().size() == 0) return ""; StringBuilder sb = new StringBuilder(); String filler = ""; - for (Iterator it = getChildren().iterator(); it.hasNext();) { + for (Iterator it = getChildren().iterator(); it.hasNext(); ) { sb.append(filler).append(it.next().toQueryString(escapeSyntaxParser)); filler = " OR "; } // in case is root or the parent is a group node avoid parenthesis - if ((getParent() != null && getParent() instanceof GroupQueryNode) - || isRoot()) + if ((getParent() != null && getParent() instanceof GroupQueryNode) || isRoot()) return sb.toString(); - else - return "( " + sb.toString() + " )"; + else return "( " + sb.toString() + " )"; } } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/PathQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/PathQueryNode.java index 586044c94d9..5eeda6038f4 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/PathQueryNode.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/PathQueryNode.java @@ -19,49 +19,39 @@ package org.apache.lucene.queryparser.flexible.core.nodes; import java.util.ArrayList; import java.util.List; import java.util.Locale; - import org.apache.lucene.queryparser.flexible.core.parser.EscapeQuerySyntax; import org.apache.lucene.queryparser.flexible.core.parser.EscapeQuerySyntax.Type; /** - * A {@link PathQueryNode} is used to store queries like - * /company/USA/California /product/shoes/brown. QueryText are objects that - * contain the text, begin position and end position in the query. - *
<p>
    - * Example how the text parser creates these objects: - *
<p>
    + * A {@link PathQueryNode} is used to store queries like /company/USA/California + * /product/shoes/brown. QueryText are objects that contain the text, begin position and end + * position in the query. + * + *
<p>
    Example how the text parser creates these objects: + * *
<pre>
    - * List values = ArrayList(); 
    - * values.add(new PathQueryNode.QueryText("company", 1, 7)); 
    - * values.add(new PathQueryNode.QueryText("USA", 9, 12)); 
    - * values.add(new PathQueryNode.QueryText("California", 14, 23)); 
    + * List values = ArrayList();
    + * values.add(new PathQueryNode.QueryText("company", 1, 7));
    + * values.add(new PathQueryNode.QueryText("USA", 9, 12));
    + * values.add(new PathQueryNode.QueryText("California", 14, 23));
      * QueryNode q = new PathQueryNode(values);
* </pre>
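The javadoc snippet above (unchanged by this patch apart from trailing whitespace) is shorthand rather than valid Java; a version that compiles could look like the following, where the generic type and the missing new are the only additions.

```java
import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.queryparser.flexible.core.nodes.PathQueryNode;
import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;

public class PathNodeSketch {
  public static void main(String[] args) {
    // Offsets are the positions of each element in "/company/USA/California".
    List<PathQueryNode.QueryText> values = new ArrayList<>();
    values.add(new PathQueryNode.QueryText("company", 1, 7));
    values.add(new PathQueryNode.QueryText("USA", 9, 12));
    values.add(new PathQueryNode.QueryText("California", 14, 23));

    QueryNode q = new PathQueryNode(values); // requires 2 or more path elements
    System.out.println(q);
  }
}
```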
    */ public class PathQueryNode extends QueryNodeImpl { - /** - * Term text with a beginning and end position - */ + /** Term text with a beginning and end position */ public static class QueryText implements Cloneable { CharSequence value = null; - /** - * != null The term's begin position. - */ + /** != null The term's begin position. */ int begin; - /** - * The term's end position. - */ + /** The term's end position. */ int end; /** - * @param value - * - text value - * @param begin - * - position in the query string - * @param end - * - position in the query string + * @param value - text value + * @param begin - position in the query string + * @param end - position in the query string */ public QueryText(CharSequence value, int begin, int end) { super(); @@ -79,23 +69,17 @@ public class PathQueryNode extends QueryNodeImpl { return clone; } - /** - * @return the value - */ + /** @return the value */ public CharSequence getValue() { return value; } - /** - * @return the begin - */ + /** @return the begin */ public int getBegin() { return begin; } - /** - * @return the end - */ + /** @return the end */ public int getEnd() { return end; } @@ -108,38 +92,32 @@ public class PathQueryNode extends QueryNodeImpl { private List values = null; - /** - * @param pathElements - * - List of QueryText objects - */ + /** @param pathElements - List of QueryText objects */ public PathQueryNode(List pathElements) { this.values = pathElements; if (pathElements.size() <= 1) { // this should not happen - throw new RuntimeException( - "PathQuerynode requires more 2 or more path elements."); + throw new RuntimeException("PathQuerynode requires more 2 or more path elements."); } } /** * Returns the a List with all QueryText elements - * + * * @return QueryText List size */ public List getPathElements() { return values; } - /** - * Returns the a List with all QueryText elements - */ + /** Returns the a List with all QueryText elements */ public void setPathElements(List elements) { this.values = elements; } /** * Returns the a specific QueryText element - * + * * @return QueryText List size */ public QueryText getPathElement(int index) { @@ -148,7 +126,7 @@ public class PathQueryNode extends QueryNodeImpl { /** * Returns the CharSequence value of a specific QueryText element - * + * * @return the CharSequence for a specific QueryText element */ public CharSequence getFirstPathElement() { @@ -157,7 +135,7 @@ public class PathQueryNode extends QueryNodeImpl { /** * Returns a List QueryText element from position startIndex - * + * * @return a List QueryText element from position startIndex */ public List getPathElements(int startIndex) { @@ -187,8 +165,7 @@ public class PathQueryNode extends QueryNodeImpl { path.append("/").append(getFirstPathElement()); for (QueryText pathelement : getPathElements(1)) { - CharSequence value = escaper.escape(pathelement.value, Locale - .getDefault(), Type.STRING); + CharSequence value = escaper.escape(pathelement.value, Locale.getDefault(), Type.STRING); path.append("/\"").append(value).append("\""); } return path.toString(); @@ -198,8 +175,13 @@ public class PathQueryNode extends QueryNodeImpl { public String toString() { QueryText text = this.values.get(0); - return ""; + return ""; } @Override @@ -217,5 +199,4 @@ public class PathQueryNode extends QueryNodeImpl { return clone; } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/PhraseSlopQueryNode.java 
b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/PhraseSlopQueryNode.java index ec669449006..70288effc57 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/PhraseSlopQueryNode.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/PhraseSlopQueryNode.java @@ -16,26 +16,22 @@ */ package org.apache.lucene.queryparser.flexible.core.nodes; -import org.apache.lucene.search.PhraseQuery; // javadocs -import org.apache.lucene.queryparser.flexible.messages.MessageImpl; import org.apache.lucene.queryparser.flexible.core.QueryNodeError; import org.apache.lucene.queryparser.flexible.core.messages.QueryParserMessages; import org.apache.lucene.queryparser.flexible.core.parser.EscapeQuerySyntax; +import org.apache.lucene.queryparser.flexible.messages.MessageImpl; +import org.apache.lucene.search.PhraseQuery; // javadocs -/** - * Query node for {@link PhraseQuery}'s slop factor. - */ +/** Query node for {@link PhraseQuery}'s slop factor. */ public class PhraseSlopQueryNode extends QueryNodeImpl implements FieldableNode { private int value = 0; - /** - * @exception QueryNodeError throw in overridden method to disallow - */ + /** @exception QueryNodeError throw in overridden method to disallow */ public PhraseSlopQueryNode(QueryNode query, int value) { if (query == null) { - throw new QueryNodeError(new MessageImpl( - QueryParserMessages.NODE_ACTION_NOT_SUPPORTED, "query", "null")); + throw new QueryNodeError( + new MessageImpl(QueryParserMessages.NODE_ACTION_NOT_SUPPORTED, "query", "null")); } this.value = value; @@ -54,25 +50,24 @@ public class PhraseSlopQueryNode extends QueryNodeImpl implements FieldableNode private CharSequence getValueString() { Float f = Float.valueOf(this.value); - if (f == f.longValue()) - return "" + f.longValue(); - else - return "" + f; - + if (f == f.longValue()) return "" + f.longValue(); + else return "" + f; } @Override public String toString() { - return "" + "\n" - + getChild().toString() + "\n"; + return "" + + "\n" + + getChild().toString() + + "\n"; } @Override public CharSequence toQueryString(EscapeQuerySyntax escapeSyntaxParser) { - if (getChild() == null) - return ""; - return getChild().toQueryString(escapeSyntaxParser) + "~" - + getValueString(); + if (getChild() == null) return ""; + return getChild().toQueryString(escapeSyntaxParser) + "~" + getValueString(); } @Override @@ -93,7 +88,6 @@ public class PhraseSlopQueryNode extends QueryNodeImpl implements FieldableNode } return null; - } @Override @@ -103,7 +97,5 @@ public class PhraseSlopQueryNode extends QueryNodeImpl implements FieldableNode if (child instanceof FieldableNode) { ((FieldableNode) child).setField(fieldName); } - } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/ProximityQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/ProximityQueryNode.java index 9dfd65b4daf..a3596c06ff7 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/ProximityQueryNode.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/ProximityQueryNode.java @@ -17,43 +17,46 @@ package org.apache.lucene.queryparser.flexible.core.nodes; import java.util.List; - -import org.apache.lucene.queryparser.flexible.messages.MessageImpl; -import org.apache.lucene.queryparser.flexible.core.parser.EscapeQuerySyntax; import org.apache.lucene.queryparser.flexible.core.QueryNodeError; 
import org.apache.lucene.queryparser.flexible.core.messages.QueryParserMessages; +import org.apache.lucene.queryparser.flexible.core.parser.EscapeQuerySyntax; +import org.apache.lucene.queryparser.flexible.messages.MessageImpl; /** - * A {@link ProximityQueryNode} represents a query where the terms should meet - * specific distance conditions. (a b c) WITHIN [SENTENCE|PARAGRAPH|NUMBER] - * [INORDER] ("a" "b" "c") WITHIN [SENTENCE|PARAGRAPH|NUMBER] [INORDER] - * - * TODO: Add this to the future standard Lucene parser/processor/builder + * A {@link ProximityQueryNode} represents a query where the terms should meet specific distance + * conditions. (a b c) WITHIN [SENTENCE|PARAGRAPH|NUMBER] [INORDER] ("a" "b" "c") WITHIN + * [SENTENCE|PARAGRAPH|NUMBER] [INORDER] + * + *
<p>
    TODO: Add this to the future standard Lucene parser/processor/builder */ public class ProximityQueryNode extends BooleanQueryNode { - /** - * Distance condition: PARAGRAPH, SENTENCE, or NUMBER - */ + /** Distance condition: PARAGRAPH, SENTENCE, or NUMBER */ public enum Type { PARAGRAPH { @Override - CharSequence toQueryString() { return "WITHIN PARAGRAPH"; } + CharSequence toQueryString() { + return "WITHIN PARAGRAPH"; + } }, - SENTENCE { + SENTENCE { @Override - CharSequence toQueryString() { return "WITHIN SENTENCE"; } + CharSequence toQueryString() { + return "WITHIN SENTENCE"; + } }, - NUMBER { + NUMBER { @Override - CharSequence toQueryString() { return "WITHIN"; } + CharSequence toQueryString() { + return "WITHIN"; + } }; abstract CharSequence toQueryString(); } /** utility class containing the distance condition and number */ - static public class ProximityType { + public static class ProximityType { int pDistance = 0; Type pType = null; @@ -74,20 +77,14 @@ public class ProximityQueryNode extends BooleanQueryNode { private CharSequence field = null; /** - * @param clauses - * - QueryNode children - * @param field - * - field name - * @param type - * - type of proximity query - * @param distance - * - positive integer that specifies the distance - * @param inorder - * - true, if the tokens should be matched in the order of the - * clauses + * @param clauses - QueryNode children + * @param field - field name + * @param type - type of proximity query + * @param distance - positive integer that specifies the distance + * @param inorder - true, if the tokens should be matched in the order of the clauses */ - public ProximityQueryNode(List clauses, CharSequence field, - Type type, int distance, boolean inorder) { + public ProximityQueryNode( + List clauses, CharSequence field, Type type, int distance, boolean inorder) { super(clauses); setLeaf(false); this.proximityType = type; @@ -95,37 +92,30 @@ public class ProximityQueryNode extends BooleanQueryNode { this.field = field; if (type == Type.NUMBER) { if (distance <= 0) { - throw new QueryNodeError(new MessageImpl( - QueryParserMessages.PARAMETER_VALUE_NOT_SUPPORTED, "distance", - distance)); + throw new QueryNodeError( + new MessageImpl( + QueryParserMessages.PARAMETER_VALUE_NOT_SUPPORTED, "distance", distance)); } else { this.distance = distance; } - } clearFields(clauses, field); } /** - * @param clauses - * - QueryNode children - * @param field - * - field name - * @param type - * - type of proximity query - * @param inorder - * - true, if the tokens should be matched in the order of the - * clauses + * @param clauses - QueryNode children + * @param field - field name + * @param type - type of proximity query + * @param inorder - true, if the tokens should be matched in the order of the clauses */ - public ProximityQueryNode(List clauses, CharSequence field, - Type type, boolean inorder) { + public ProximityQueryNode( + List clauses, CharSequence field, Type type, boolean inorder) { this(clauses, field, type, -1, inorder); } - static private void clearFields(List nodes, CharSequence field) { - if (nodes == null || nodes.size() == 0) - return; + private static void clearFields(List nodes, CharSequence field) { + if (nodes == null || nodes.size() == 0) return; for (QueryNode clause : nodes) { @@ -142,15 +132,28 @@ public class ProximityQueryNode extends BooleanQueryNode { @Override public String toString() { - String distanceSTR = ((this.distance == -1) ? 
("") - : (" distance='" + this.distance) + "'"); + String distanceSTR = ((this.distance == -1) ? ("") : (" distance='" + this.distance) + "'"); if (getChildren() == null || getChildren().size() == 0) - return ""; StringBuilder sb = new StringBuilder(); - sb.append(""); + sb.append(""); for (QueryNode child : getChildren()) { sb.append("\n"); sb.append(child.toString()); @@ -161,9 +164,10 @@ public class ProximityQueryNode extends BooleanQueryNode { @Override public CharSequence toQueryString(EscapeQuerySyntax escapeSyntaxParser) { - String withinSTR = this.proximityType.toQueryString() - + ((this.distance == -1) ? ("") : (" " + this.distance)) - + ((this.inorder) ? (" INORDER") : ("")); + String withinSTR = + this.proximityType.toQueryString() + + ((this.distance == -1) ? ("") : (" " + this.distance)) + + ((this.inorder) ? (" INORDER") : ("")); StringBuilder sb = new StringBuilder(); if (getChildren() == null || getChildren().size() == 0) { @@ -194,16 +198,14 @@ public class ProximityQueryNode extends BooleanQueryNode { return clone; } - /** - * @return the distance - */ + /** @return the distance */ public int getDistance() { return this.distance; } /** * returns null if the field was not specified in the query string - * + * * @return the field */ public CharSequence getField() { @@ -212,29 +214,21 @@ public class ProximityQueryNode extends BooleanQueryNode { /** * returns null if the field was not specified in the query string - * + * * @return the field */ public String getFieldAsString() { - if (this.field == null) - return null; - else - return this.field.toString(); + if (this.field == null) return null; + else return this.field.toString(); } - /** - * @param field - * the field to set - */ + /** @param field the field to set */ public void setField(CharSequence field) { this.field = field; } - /** - * @return terms must be matched in the specified order - */ + /** @return terms must be matched in the specified order */ public boolean isInOrder() { return this.inorder; } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/QueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/QueryNode.java index 4f15f7c7c91..859503bedb4 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/QueryNode.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/QueryNode.java @@ -16,16 +16,11 @@ */ package org.apache.lucene.queryparser.flexible.core.nodes; -import org.apache.lucene.queryparser.flexible.core.parser.EscapeQuerySyntax; - import java.util.List; import java.util.Map; +import org.apache.lucene.queryparser.flexible.core.parser.EscapeQuerySyntax; - -/** - * A {@link QueryNode} is a interface implemented by all nodes on a QueryNode - * tree. - */ +/** A {@link QueryNode} is a interface implemented by all nodes on a QueryNode tree. 
*/ public interface QueryNode { /** convert to a query string understood by the query parser */ @@ -44,18 +39,16 @@ public interface QueryNode { /** verify if a node contains a tag */ public boolean containsTag(String tagName); - - /** - * Returns object stored under that tag name - */ + + /** Returns object stored under that tag name */ public Object getTag(String tagName); - + public QueryNode getParent(); /** - * Recursive clone the QueryNode tree The tags are not copied to the new tree - * when you call the cloneTree() method - * + * Recursive clone the QueryNode tree The tags are not copied to the new tree when you call the + * cloneTree() method + * * @return the cloned tree */ public QueryNode cloneTree() throws CloneNotSupportedException; @@ -72,32 +65,28 @@ public interface QueryNode { public void set(List children); /** - * Associate the specified value with the specified tagName. If the tagName - * already exists, the old value is replaced. The tagName and value cannot be - * null. tagName will be converted to lowercase. + * Associate the specified value with the specified tagName. If the tagName already exists, the + * old value is replaced. The tagName and value cannot be null. tagName will be converted to + * lowercase. */ public void setTag(String tagName, Object value); - - /** - * Unset a tag. tagName will be converted to lowercase. - */ + + /** Unset a tag. tagName will be converted to lowercase. */ public void unsetTag(String tagName); - + /** - * Returns a map containing all tags attached to this query node. - * + * Returns a map containing all tags attached to this query node. + * * @return a map containing all tags attached to this query node */ public Map getTagMap(); - /** - * Removes this query node from its parent. - */ + /** Removes this query node from its parent. 
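A sketch of the tag methods declared on this interface; the key lowercasing follows what the setTag and unsetTag javadoc above promises. The tag name and value are invented for illustration.

```java
import org.apache.lucene.queryparser.flexible.core.nodes.FieldQueryNode;
import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;

public class TagSketch {
  public static void main(String[] args) {
    QueryNode node = new FieldQueryNode("title", "lucene", 0, 6);

    // setTag lowercases the key, so lookups should use the lowercased name.
    node.setTag("Analyzed", Boolean.TRUE);
    System.out.println(node.containsTag("analyzed")); // true
    System.out.println(node.getTag("analyzed"));      // true

    node.unsetTag("analyzed");
    System.out.println(node.containsTag("analyzed")); // false
  }
}
```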
*/ public void removeFromParent(); - /** * Remove a child node + * * @param childNode Which child to remove */ public void removeChildren(QueryNode childNode); diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/QueryNodeImpl.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/QueryNodeImpl.java index 6f70ec09575..59b3d74d37c 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/QueryNodeImpl.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/QueryNodeImpl.java @@ -23,15 +23,11 @@ import java.util.List; import java.util.Locale; import java.util.Map; import java.util.ResourceBundle; - -import org.apache.lucene.queryparser.flexible.messages.NLS; import org.apache.lucene.queryparser.flexible.core.messages.QueryParserMessages; import org.apache.lucene.queryparser.flexible.core.util.StringUtils; +import org.apache.lucene.queryparser.flexible.messages.NLS; -/** - * A {@link QueryNodeImpl} is the default implementation of the interface - * {@link QueryNode} - */ +/** A {@link QueryNodeImpl} is the default implementation of the interface {@link QueryNode} */ public abstract class QueryNodeImpl implements QueryNode, Cloneable { /* index default field */ @@ -52,34 +48,31 @@ public abstract class QueryNodeImpl implements QueryNode, Cloneable { } else { this.clauses.clear(); } - } @Override public final void add(QueryNode child) { if (isLeaf() || this.clauses == null || child == null) { - throw new IllegalArgumentException(NLS - .getLocalizedMessage(QueryParserMessages.NODE_ACTION_NOT_SUPPORTED)); + throw new IllegalArgumentException( + NLS.getLocalizedMessage(QueryParserMessages.NODE_ACTION_NOT_SUPPORTED)); } this.clauses.add(child); ((QueryNodeImpl) child).setParent(this); - } @Override public final void add(List children) { if (isLeaf() || this.clauses == null) { - throw new IllegalArgumentException(NLS - .getLocalizedMessage(QueryParserMessages.NODE_ACTION_NOT_SUPPORTED)); + throw new IllegalArgumentException( + NLS.getLocalizedMessage(QueryParserMessages.NODE_ACTION_NOT_SUPPORTED)); } for (QueryNode child : children) { add(child); } - } @Override @@ -91,28 +84,27 @@ public abstract class QueryNodeImpl implements QueryNode, Cloneable { public final void set(List children) { if (isLeaf() || this.clauses == null) { - ResourceBundle bundle = ResourceBundle - .getBundle("org.apache.lucene.queryParser.messages.QueryParserMessages", Locale.getDefault()); - String message = bundle.getObject("Q0008E.NODE_ACTION_NOT_SUPPORTED") - .toString(); + ResourceBundle bundle = + ResourceBundle.getBundle( + "org.apache.lucene.queryParser.messages.QueryParserMessages", Locale.getDefault()); + String message = bundle.getObject("Q0008E.NODE_ACTION_NOT_SUPPORTED").toString(); throw new IllegalArgumentException(message); - } // reset parent value for (QueryNode child : children) { child.removeFromParent(); } - + ArrayList existingChildren = new ArrayList<>(getChildren()); for (QueryNode existingChild : existingChildren) { existingChild.removeFromParent(); } - + // allocate new children list allocate(); - + // add new children and set parent add(children); } @@ -147,8 +139,8 @@ public abstract class QueryNodeImpl implements QueryNode, Cloneable { } /** - * @return a List for QueryNode object. Returns null, for nodes that do not - * contain children. All leaf Nodes return null. + * @return a List for QueryNode object. Returns null, for nodes that do not contain children. 
All + * leaf Nodes return null. */ @Override public final List getChildren() { @@ -197,14 +189,12 @@ public abstract class QueryNodeImpl implements QueryNode, Cloneable { return getParent() == null; } - /** - * If set to true the method toQueryString will not write field names - */ + /** If set to true the method toQueryString will not write field names */ protected boolean toQueryStringIgnoreFields = false; /** * This method is use toQueryString to detect if fld is the default field - * + * * @param fld - field name * @return true if fld is the default field */ @@ -214,20 +204,17 @@ public abstract class QueryNodeImpl implements QueryNode, Cloneable { // #toQueryString(org.apache.lucene.queryParser.core.parser.EscapeQuerySyntax)} // should receive the default field value directly by parameter protected boolean isDefaultField(CharSequence fld) { - if (this.toQueryStringIgnoreFields) - return true; - if (fld == null) - return true; - if (QueryNodeImpl.PLAINTEXT_FIELD_NAME.equals(StringUtils.toString(fld))) - return true; + if (this.toQueryStringIgnoreFields) return true; + if (fld == null) return true; + if (QueryNodeImpl.PLAINTEXT_FIELD_NAME.equals(StringUtils.toString(fld))) return true; return false; } /** * Every implementation of this class should return pseudo xml like this: - * - * For FieldQueryNode: <field start='1' end='2' field='subject' text='foo'/> - * + * + *

    For FieldQueryNode: <field start='1' end='2' field='subject' text='foo'/> + * * @see org.apache.lucene.queryparser.flexible.core.nodes.QueryNode#toString() */ @Override @@ -237,7 +224,7 @@ public abstract class QueryNodeImpl implements QueryNode, Cloneable { /** * Returns a map containing all tags attached to this query node. - * + * * @return a map containing all tags attached to this query node */ @Override @@ -247,10 +234,10 @@ public abstract class QueryNodeImpl implements QueryNode, Cloneable { } @Override - public void removeChildren(QueryNode childNode){ + public void removeChildren(QueryNode childNode) { Iterator it = this.clauses.iterator(); - while(it.hasNext()){ - if(it.next() == childNode){ + while (it.hasNext()) { + if (it.next() == childNode) { it.remove(); } } @@ -265,5 +252,4 @@ public abstract class QueryNodeImpl implements QueryNode, Cloneable { parent.removeChildren(this); } } - } // end class QueryNodeImpl diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/QuotedFieldQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/QuotedFieldQueryNode.java index d2f9a2bb62d..5828cfa8e71 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/QuotedFieldQueryNode.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/QuotedFieldQueryNode.java @@ -18,24 +18,16 @@ package org.apache.lucene.queryparser.flexible.core.nodes; import org.apache.lucene.queryparser.flexible.core.parser.EscapeQuerySyntax; -/** - * A {@link QuotedFieldQueryNode} represents phrase query. Example: - * "life is great" - */ +/** A {@link QuotedFieldQueryNode} represents phrase query. Example: "life is great" */ public class QuotedFieldQueryNode extends FieldQueryNode { /** - * @param field - * - field name - * @param text - * - value - * @param begin - * - position in the query string - * @param end - * - position in the query string + * @param field - field name + * @param text - value + * @param begin - position in the query string + * @param end - position in the query string */ - public QuotedFieldQueryNode(CharSequence field, CharSequence text, int begin, - int end) { + public QuotedFieldQueryNode(CharSequence field, CharSequence text, int begin, int end) { super(field, text, begin, end); } @@ -50,8 +42,15 @@ public class QuotedFieldQueryNode extends FieldQueryNode { @Override public String toString() { - return ""; + return ""; } @Override @@ -60,5 +59,4 @@ public class QuotedFieldQueryNode extends FieldQueryNode { // nothing to do here return clone; } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/RangeQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/RangeQueryNode.java index c9f804e77b3..15eeb4bef36 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/RangeQueryNode.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/RangeQueryNode.java @@ -17,19 +17,16 @@ package org.apache.lucene.queryparser.flexible.core.nodes; /** - * This interface should be implemented by a {@link QueryNode} that represents - * some kind of range query. - * + * This interface should be implemented by a {@link QueryNode} that represents some kind of range + * query. 
*/ -public interface RangeQueryNode> extends - FieldableNode { +public interface RangeQueryNode> extends FieldableNode { T getLowerBound(); - + T getUpperBound(); - + boolean isLowerInclusive(); - + boolean isUpperInclusive(); - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/SlopQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/SlopQueryNode.java index 16e19e395ac..0ad088dba7f 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/SlopQueryNode.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/SlopQueryNode.java @@ -16,35 +16,32 @@ */ package org.apache.lucene.queryparser.flexible.core.nodes; -import org.apache.lucene.queryparser.flexible.messages.MessageImpl; -import org.apache.lucene.queryparser.flexible.core.parser.EscapeQuerySyntax; import org.apache.lucene.queryparser.flexible.core.QueryNodeError; import org.apache.lucene.queryparser.flexible.core.messages.QueryParserMessages; +import org.apache.lucene.queryparser.flexible.core.parser.EscapeQuerySyntax; +import org.apache.lucene.queryparser.flexible.messages.MessageImpl; /** * A {@link SlopQueryNode} represents phrase query with a slop. - * - * From Lucene FAQ: Is there a way to use a proximity operator (like near or - * within) with Lucene? There is a variable called slop that allows you to - * perform NEAR/WITHIN-like queries. By default, slop is set to 0 so that only - * exact phrases will match. When using TextParser you can use this syntax to - * specify the slop: "doug cutting"~2 will find documents that contain - * "doug cutting" as well as ones that contain "cutting doug". + * + *

    From Lucene FAQ: Is there a way to use a proximity operator (like near or within) with Lucene? + * There is a variable called slop that allows you to perform NEAR/WITHIN-like queries. By default, + * slop is set to 0 so that only exact phrases will match. When using TextParser you can use this + * syntax to specify the slop: "doug cutting"~2 will find documents that contain "doug cutting" as + * well as ones that contain "cutting doug". */ public class SlopQueryNode extends QueryNodeImpl implements FieldableNode { private int value = 0; /** - * @param query - * - QueryNode Tree with the phrase - * @param value - * - slop value + * @param query - QueryNode Tree with the phrase + * @param value - slop value */ public SlopQueryNode(QueryNode query, int value) { if (query == null) { - throw new QueryNodeError(new MessageImpl( - QueryParserMessages.NODE_ACTION_NOT_SUPPORTED, "query", "null")); + throw new QueryNodeError( + new MessageImpl(QueryParserMessages.NODE_ACTION_NOT_SUPPORTED, "query", "null")); } this.value = value; @@ -63,25 +60,19 @@ public class SlopQueryNode extends QueryNodeImpl implements FieldableNode { private CharSequence getValueString() { Float f = Float.valueOf(this.value); - if (f == f.longValue()) - return "" + f.longValue(); - else - return "" + f; - + if (f == f.longValue()) return "" + f.longValue(); + else return "" + f; } @Override public String toString() { - return "" + "\n" - + getChild().toString() + "\n"; + return "" + "\n" + getChild().toString() + "\n"; } @Override public CharSequence toQueryString(EscapeQuerySyntax escapeSyntaxParser) { - if (getChild() == null) - return ""; - return getChild().toQueryString(escapeSyntaxParser) + "~" - + getValueString(); + if (getChild() == null) return ""; + return getChild().toQueryString(escapeSyntaxParser) + "~" + getValueString(); } @Override @@ -102,7 +93,6 @@ public class SlopQueryNode extends QueryNodeImpl implements FieldableNode { } return null; - } @Override @@ -112,7 +102,5 @@ public class SlopQueryNode extends QueryNodeImpl implements FieldableNode { if (child instanceof FieldableNode) { ((FieldableNode) child).setField(fieldName); } - } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/TextableQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/TextableQueryNode.java index 57a3c173197..642e5fcdd8f 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/TextableQueryNode.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/TextableQueryNode.java @@ -16,13 +16,10 @@ */ package org.apache.lucene.queryparser.flexible.core.nodes; -/** - * Interface for a node that has text as a {@link CharSequence} - */ +/** Interface for a node that has text as a {@link CharSequence} */ public interface TextableQueryNode { CharSequence getText(); void setText(CharSequence text); - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/TokenizedPhraseQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/TokenizedPhraseQueryNode.java index b48ab54ca98..f5a7b0acab0 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/TokenizedPhraseQueryNode.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/TokenizedPhraseQueryNode.java @@ -17,15 +17,13 @@ package org.apache.lucene.queryparser.flexible.core.nodes; import java.util.List; - 
import org.apache.lucene.queryparser.flexible.core.parser.EscapeQuerySyntax; /** * A {@link TokenizedPhraseQueryNode} represents a node created by a code that * tokenizes/lemmatizes/analyzes. */ -public class TokenizedPhraseQueryNode extends QueryNodeImpl implements - FieldableNode { +public class TokenizedPhraseQueryNode extends QueryNodeImpl implements FieldableNode { public TokenizedPhraseQueryNode() { setLeaf(false); @@ -34,8 +32,7 @@ public class TokenizedPhraseQueryNode extends QueryNodeImpl implements @Override public String toString() { - if (getChildren() == null || getChildren().size() == 0) - return ""; + if (getChildren() == null || getChildren().size() == 0) return ""; StringBuilder sb = new StringBuilder(); sb.append(""); for (QueryNode child : getChildren()) { @@ -49,8 +46,7 @@ public class TokenizedPhraseQueryNode extends QueryNodeImpl implements // This text representation is not re-parseable @Override public CharSequence toQueryString(EscapeQuerySyntax escapeSyntaxParser) { - if (getChildren() == null || getChildren().size() == 0) - return ""; + if (getChildren() == null || getChildren().size() == 0) return ""; StringBuilder sb = new StringBuilder(); String filler = ""; @@ -64,8 +60,7 @@ public class TokenizedPhraseQueryNode extends QueryNodeImpl implements @Override public QueryNode cloneTree() throws CloneNotSupportedException { - TokenizedPhraseQueryNode clone = (TokenizedPhraseQueryNode) super - .cloneTree(); + TokenizedPhraseQueryNode clone = (TokenizedPhraseQueryNode) super.cloneTree(); // nothing to do @@ -82,7 +77,6 @@ public class TokenizedPhraseQueryNode extends QueryNodeImpl implements } else { return ((FieldableNode) children.get(0)).getField(); } - } @Override @@ -96,11 +90,7 @@ public class TokenizedPhraseQueryNode extends QueryNodeImpl implements if (child instanceof FieldableNode) { ((FieldableNode) child).setField(fieldName); } - } - } - } - } // end class MultitermQueryNode diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/ValueQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/ValueQueryNode.java index 38100300242..b9eda590c0f 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/ValueQueryNode.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/ValueQueryNode.java @@ -16,14 +16,10 @@ */ package org.apache.lucene.queryparser.flexible.core.nodes; -/** - * This interface should be implemented by {@link QueryNode} that holds an - * arbitrary value. - */ +/** This interface should be implemented by {@link QueryNode} that holds an arbitrary value. */ public interface ValueQueryNode extends QueryNode { - + public void setValue(T value); - + public T getValue(); - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/package-info.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/package-info.java index ee3d67f924a..b9e185e2882 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/package-info.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/package-info.java @@ -14,65 +14,73 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -/** + +/** * Query nodes commonly used by query parser implementations. * *

 * Query Nodes
 *
- *
- * The package org.apache.lucene.queryParser.nodes contains all the basic query nodes. The interface
- * that represents a query node is {@link org.apache.lucene.queryparser.flexible.core.nodes.QueryNode}.
- *
- * {@link org.apache.lucene.queryparser.flexible.core.nodes.QueryNode}s are used by the text parser to create a syntax tree.
- * These nodes are designed to be used by UI or other text parsers.
- * The default Lucene text parser is {@link org.apache.lucene.queryparser.flexible.standard.parser.StandardSyntaxParser},
- * it implements Lucene's standard syntax.
- *
- * {@link org.apache.lucene.queryparser.flexible.core.nodes.QueryNode} interface should be implemented by all query nodes,
- * the class {@link org.apache.lucene.queryparser.flexible.core.nodes.QueryNodeImpl} implements {@link org.apache.lucene.queryparser.flexible.core.nodes.QueryNode} and is extended
- * by all current query node implementations.
- *
- * A query node tree can be printed to the a stream, and it generates a pseudo XML representation
+ *
+ * The package org.apache.lucene.queryParser.nodes contains all the basic query
+ * nodes. The interface that represents a query node is {@link
+ * org.apache.lucene.queryparser.flexible.core.nodes.QueryNode}.
+ *
+ * {@link org.apache.lucene.queryparser.flexible.core.nodes.QueryNode}s are used by the text
+ * parser to create a syntax tree. These nodes are designed to be used by UI or other text parsers.
+ * The default Lucene text parser is {@link
+ * org.apache.lucene.queryparser.flexible.standard.parser.StandardSyntaxParser}, it implements
+ * Lucene's standard syntax.
+ *
+ * The {@link org.apache.lucene.queryparser.flexible.core.nodes.QueryNode} interface should be
+ * implemented by all query nodes; the class {@link
+ * org.apache.lucene.queryparser.flexible.core.nodes.QueryNodeImpl} implements {@link
+ * org.apache.lucene.queryparser.flexible.core.nodes.QueryNode} and is extended by all current query
+ * node implementations.
+ *
+ * A query node tree can be printed to a stream, and it generates a pseudo XML representation
 * with all the nodes.
- *
- *
- * A query node tree can also generate a query string that can be parsed back by the original text parser,
- * at this point only the standard lucene syntax is supported.
- *
- * Grouping nodes:
+ *
+ * A query node tree can also generate a query string that can be parsed back by the original
+ * text parser; at this point only the standard Lucene syntax is supported.
+ *
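
To make the description above concrete, here is a minimal, self-contained sketch of building a query node tree programmatically and then asking it for its pseudo XML form and for a query string. It uses only classes named in this patch, plus EscapeQuerySyntaxImpl from the standard parser as an assumed escape implementation:

    import java.util.Arrays;
    import org.apache.lucene.queryparser.flexible.core.nodes.AndQueryNode;
    import org.apache.lucene.queryparser.flexible.core.nodes.FieldQueryNode;
    import org.apache.lucene.queryparser.flexible.core.nodes.GroupQueryNode;
    import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
    import org.apache.lucene.queryparser.flexible.standard.parser.EscapeQuerySyntaxImpl;

    public class QueryNodeTreeDemo {
      public static void main(String[] args) {
        // Leaf nodes; begin/end are offsets into the original query string (0 for a synthetic tree).
        QueryNode doug = new FieldQueryNode("body", "doug", 0, 0);
        QueryNode cutting = new FieldQueryNode("body", "cutting", 0, 0);

        // Compose them into (body:doug AND body:cutting).
        QueryNode tree = new GroupQueryNode(new AndQueryNode(Arrays.asList(doug, cutting)));

        // Pseudo XML representation of every node in the tree.
        System.out.println(tree.toString());

        // Regenerate a query string in the standard Lucene syntax.
        System.out.println(tree.toQueryString(new EscapeQuerySyntaxImpl()));
      }
    }
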

+ * Grouping nodes:
+ *
- *
- * • AndQueryNode - used for AND operator
- * • AnyQueryNode - used for ANY operator
- * • OrQueryNode - used for OR operator
- * • BooleanQueryNode - used when no operator is specified
- * • ModifierQueryNode - used for modifier operator
- * • GroupQueryNode - used for parenthesis
- * • BoostQueryNode - used for boost operator
- * • SlopQueryNode - phrase slop
- * • FuzzyQueryNode - fuzzy node
- * • TermRangeQueryNode - used for parametric field:[low_value TO high_value]
- * • ProximityQueryNode - used for proximity search
- * • LegacyNumericRangeQueryNode - used for numeric range search
- * • TokenizedPhraseQueryNode - used by tokenizers/lemmatizers/analyzers for phrases/autophrases
+ *
+ * • AndQueryNode - used for AND operator
+ * • AnyQueryNode - used for ANY operator
+ * • OrQueryNode - used for OR operator
+ * • BooleanQueryNode - used when no operator is specified
+ * • ModifierQueryNode - used for modifier operator
+ * • GroupQueryNode - used for parenthesis
+ * • BoostQueryNode - used for boost operator
+ * • SlopQueryNode - phrase slop
+ * • FuzzyQueryNode - fuzzy node
+ * • TermRangeQueryNode - used for parametric field:[low_value TO high_value]
+ * • ProximityQueryNode - used for proximity search
+ * • LegacyNumericRangeQueryNode - used for numeric range search
+ * • TokenizedPhraseQueryNode - used by tokenizers/lemmatizers/analyzers for phrases/autophrases
+ *
- *

- * Leaf Nodes:
+ *
+ * Leaf Nodes:
+ *
- *
- * • FieldQueryNode - field/value node
- * • LegacyNumericQueryNode - used for numeric search
- * • PathQueryNode - {@link org.apache.lucene.queryparser.flexible.core.nodes.QueryNode} object used with path-like queries
- * • OpaqueQueryNode - Used as for part of the query that can be parsed by other parsers. schema/value
- * • PrefixWildcardQueryNode - non-phrase wildcard query
- * • QuotedFieldQUeryNode - regular phrase node
- * • WildcardQueryNode - non-phrase wildcard query
+ *
+ * • FieldQueryNode - field/value node
+ * • LegacyNumericQueryNode - used for numeric search
+ * • PathQueryNode - {@link org.apache.lucene.queryparser.flexible.core.nodes.QueryNode} object
+ *   used with path-like queries
+ * • OpaqueQueryNode - used for a part of the query that can be parsed by other parsers.
+ *   schema/value
+ * • PrefixWildcardQueryNode - non-phrase wildcard query
+ * • QuotedFieldQueryNode - regular phrase node
+ * • WildcardQueryNode - non-phrase wildcard query
+ *
- *
- * Utility Nodes:
+ *
+ * Utility Nodes:
+ *
- *
- * • DeletedQueryNode - used by processors on optimizations
- * • MatchAllDocsQueryNode - used by processors on optimizations
- * • MatchNoDocsQueryNode - used by processors on optimizations
- * • NoTokenFoundQueryNode - used by tokenizers/lemmatizers/analyzers
+ *
+ * • DeletedQueryNode - used by processors on optimizations
+ * • MatchAllDocsQueryNode - used by processors on optimizations
+ * • MatchNoDocsQueryNode - used by processors on optimizations
+ * • NoTokenFoundQueryNode - used by tokenizers/lemmatizers/analyzers
    */ package org.apache.lucene.queryparser.flexible.core.nodes; diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/package-info.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/package-info.java index dd8c4cd63ca..b5e1ffe35ec 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/package-info.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/package-info.java @@ -14,35 +14,41 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -/** + +/** * Core classes of the flexible query parser framework. * *

 * Flexible Query Parser
 *
- *
- * This package contains the necessary classes to implement a query parser.
- *
- * A query parser is divided in at least 2 phases, text parsing and query building, and one optional phase called query processing.
- *
+ *
+ * This package contains the necessary classes to implement a query parser.
+ *
+ * A query parser is divided into at least 2 phases, text parsing and query building, and one
+ * optional phase called query processing.
+ *
 *
 * First Phase: Text Parsing
 *
- *
- * The text parsing phase is performed by a text parser, which implements {@link org.apache.lucene.queryparser.flexible.core.parser.SyntaxParser} interface.
- * A text parser is responsible to get a query string and convert it to a {@link org.apache.lucene.queryparser.flexible.core.nodes.QueryNode} tree,
- * which is an object structure that represents the elements defined in the query string.
- *
+ *
+ * The text parsing phase is performed by a text parser, which implements the {@link
+ * org.apache.lucene.queryparser.flexible.core.parser.SyntaxParser} interface. A text parser is
+ * responsible for getting a query string and converting it to a {@link
+ * org.apache.lucene.queryparser.flexible.core.nodes.QueryNode} tree, which is an object structure
+ * that represents the elements defined in the query string.
+ *
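
As a small sketch of this first phase, the standard syntax parser shipped with the flexible framework can be driven directly; the query string and default field below are arbitrary examples:

    import org.apache.lucene.queryparser.flexible.core.QueryNodeParseException;
    import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
    import org.apache.lucene.queryparser.flexible.standard.parser.StandardSyntaxParser;

    public class TextParsingDemo {
      public static void main(String[] args) throws QueryNodeParseException {
        StandardSyntaxParser parser = new StandardSyntaxParser();
        // "title" is the default field applied to terms that carry no explicit field.
        QueryNode tree = parser.parse("\"doug cutting\"~2 AND lucene", "title");
        // The root node prints the whole tree as pseudo XML.
        System.out.println(tree);
      }
    }
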

 * Second (optional) Phase: Query Processing
 *
- *
- * The query processing phase is performed by a query processor, which implements {@link org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessor}.
- * A query processor is responsible to perform any processing on a {@link org.apache.lucene.queryparser.flexible.core.nodes.QueryNode} tree. This phase
- * is optional and is used only if an extra processing, validation, query expansion, etc needs to be performed in a {@link org.apache.lucene.queryparser.flexible.core.nodes.QueryNode} tree.
- * The {@link org.apache.lucene.queryparser.flexible.core.nodes.QueryNode} tree can be either be generated by a text parser or programmatically created.
- *
+ *
+ * The query processing phase is performed by a query processor, which implements {@link
+ * org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessor}. A query processor is
+ * responsible for performing any processing on a {@link
+ * org.apache.lucene.queryparser.flexible.core.nodes.QueryNode} tree. This phase is optional and is
+ * used only if extra processing, validation, query expansion, etc. needs to be performed on a
+ * {@link org.apache.lucene.queryparser.flexible.core.nodes.QueryNode} tree. The {@link
+ * org.apache.lucene.queryparser.flexible.core.nodes.QueryNode} tree can either be generated by a
+ * text parser or programmatically created.
+ *
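
A small sketch of this phase, using RemoveDeletedQueryNodesProcessor (one of the processors reformatted later in this patch); the tree below is built by hand purely for illustration:

    import java.util.Arrays;
    import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
    import org.apache.lucene.queryparser.flexible.core.nodes.DeletedQueryNode;
    import org.apache.lucene.queryparser.flexible.core.nodes.FieldQueryNode;
    import org.apache.lucene.queryparser.flexible.core.nodes.OrQueryNode;
    import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
    import org.apache.lucene.queryparser.flexible.core.processors.RemoveDeletedQueryNodesProcessor;

    public class QueryProcessingDemo {
      public static void main(String[] args) throws QueryNodeException {
        // A tree in which an earlier processor has replaced one child with DeletedQueryNode.
        QueryNode tree =
            new OrQueryNode(
                Arrays.asList(new FieldQueryNode("f", "kept", 0, 0), new DeletedQueryNode()));

        // The processor returns the tree with every DeletedQueryNode stripped out.
        QueryNode processed = new RemoveDeletedQueryNodesProcessor().process(tree);
        System.out.println(processed);
      }
    }
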

 * Third Phase: Query Building
 *
- *
- * The query building phase is performed by a query builder, which implements {@link org.apache.lucene.queryparser.flexible.core.builders.QueryBuilder}.
- * A query builder is responsible to convert a {@link org.apache.lucene.queryparser.flexible.core.nodes.QueryNode} tree into an arbitrary object, which
- * is usually used to be executed against a search index.
+ *

    The query building phase is performed by a query builder, which implements {@link + * org.apache.lucene.queryparser.flexible.core.builders.QueryBuilder}. A query builder is + * responsible to convert a {@link org.apache.lucene.queryparser.flexible.core.nodes.QueryNode} tree + * into an arbitrary object, which is usually used to be executed against a search index. */ package org.apache.lucene.queryparser.flexible.core; diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/parser/EscapeQuerySyntax.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/parser/EscapeQuerySyntax.java index 0cf6b8246f5..155b84b09c7 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/parser/EscapeQuerySyntax.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/parser/EscapeQuerySyntax.java @@ -19,25 +19,23 @@ package org.apache.lucene.queryparser.flexible.core.parser; import java.util.Locale; /** - * A parser needs to implement {@link EscapeQuerySyntax} to allow the QueryNode - * to escape the queries, when the toQueryString method is called. + * A parser needs to implement {@link EscapeQuerySyntax} to allow the QueryNode to escape the + * queries, when the toQueryString method is called. */ public interface EscapeQuerySyntax { /** - * Type of escaping: String for escaping syntax, - * NORMAL for escaping reserved words (like AND) in terms + * Type of escaping: String for escaping syntax, NORMAL for escaping reserved words (like AND) in + * terms */ public enum Type { - STRING, NORMAL; + STRING, + NORMAL; } /** - * @param text - * - text to be escaped - * @param locale - * - locale for the current query - * @param type - * - select the type of escape operation to use + * @param text - text to be escaped + * @param locale - locale for the current query + * @param type - select the type of escape operation to use * @return escaped text */ CharSequence escape(CharSequence text, Locale locale, Type type); diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/parser/SyntaxParser.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/parser/SyntaxParser.java index d08d13c78ea..c2b4f5899f1 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/parser/SyntaxParser.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/parser/SyntaxParser.java @@ -19,17 +19,12 @@ package org.apache.lucene.queryparser.flexible.core.parser; import org.apache.lucene.queryparser.flexible.core.QueryNodeParseException; import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; -/** - * A parser needs to implement {@link SyntaxParser} interface - */ +/** A parser needs to implement {@link SyntaxParser} interface */ public interface SyntaxParser { /** - * @param query - * - query data to be parsed - * @param field - * - default field name + * @param query - query data to be parsed + * @param field - default field name * @return QueryNode tree */ - public QueryNode parse(CharSequence query, CharSequence field) - throws QueryNodeParseException; + public QueryNode parse(CharSequence query, CharSequence field) throws QueryNodeParseException; } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/parser/package-info.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/parser/package-info.java index 1533cd4169b..dd887e94659 100644 --- 
a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/parser/package-info.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/parser/package-info.java @@ -14,21 +14,24 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -/** + +/** * Necessary interfaces to implement text parsers. - * + * *

 * Parser
 *
- *
- * The package org.apache.lucene.queryparser.flexible.parser contains interfaces
 * that should be implemented by the parsers.
- *
- * Parsers produce QueryNode Trees from a string object.
- * These package still needs some work to add support to for multiple parsers.
- *
- * Features that should be supported for the future, related with the parser:
- * - QueryNode tree should be able convertible to any parser syntax.
- * - The query syntax should support calling other parsers.
- * - QueryNode tree created by multiple parsers.
+ *
+ * The package org.apache.lucene.queryparser.flexible.parser contains interfaces
 * that should be implemented by the parsers.
+ *
+ * Parsers produce QueryNode trees from a string object. This package still needs some work to
+ * add support for multiple parsers.
+ *
+ * Features that should be supported in the future, related to the parser:
+ *
+ * • QueryNode tree should be convertible to any parser syntax.
+ * • The query syntax should support calling other parsers.
+ * • QueryNode tree created by multiple parsers.
+ *
    */ package org.apache.lucene.queryparser.flexible.core.parser; diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/processors/NoChildOptimizationQueryNodeProcessor.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/processors/NoChildOptimizationQueryNodeProcessor.java index 873c425940d..9f3649ad11d 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/processors/NoChildOptimizationQueryNodeProcessor.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/processors/NoChildOptimizationQueryNodeProcessor.java @@ -17,23 +17,17 @@ package org.apache.lucene.queryparser.flexible.core.processors; import java.util.List; - import org.apache.lucene.queryparser.flexible.core.QueryNodeException; import org.apache.lucene.queryparser.flexible.core.nodes.*; /** - *

- * A {@link NoChildOptimizationQueryNodeProcessor} removes every
- * BooleanQueryNode, BoostQueryNode, TokenizedPhraseQueryNode or
- * ModifierQueryNode that do not have a valid children.
- *
- *
- * Example: When the children of these nodes are removed for any reason then the
- * nodes may become invalid.
- *
+ * A {@link NoChildOptimizationQueryNodeProcessor} removes every BooleanQueryNode, BoostQueryNode,
+ * TokenizedPhraseQueryNode or ModifierQueryNode that does not have valid children.
+ *
+ *

    Example: When the children of these nodes are removed for any reason then the nodes may become + * invalid. */ -public class NoChildOptimizationQueryNodeProcessor extends - QueryNodeProcessorImpl { +public class NoChildOptimizationQueryNodeProcessor extends QueryNodeProcessorImpl { public NoChildOptimizationQueryNodeProcessor() { // empty constructor @@ -42,7 +36,8 @@ public class NoChildOptimizationQueryNodeProcessor extends @Override protected QueryNode postProcessNode(QueryNode node) throws QueryNodeException { - if (node instanceof BooleanQueryNode || node instanceof BoostQueryNode + if (node instanceof BooleanQueryNode + || node instanceof BoostQueryNode || node instanceof TokenizedPhraseQueryNode || node instanceof ModifierQueryNode) { @@ -55,32 +50,24 @@ public class NoChildOptimizationQueryNodeProcessor extends if (!(child instanceof DeletedQueryNode)) { return node; } - } - } return new MatchNoDocsQueryNode(); - } return node; - } @Override protected QueryNode preProcessNode(QueryNode node) throws QueryNodeException { return node; - } @Override - protected List setChildrenOrder(List children) - throws QueryNodeException { + protected List setChildrenOrder(List children) throws QueryNodeException { return children; - } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/processors/QueryNodeProcessor.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/processors/QueryNodeProcessor.java index 7f735fa2b1c..8e46ffc9bd3 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/processors/QueryNodeProcessor.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/processors/QueryNodeProcessor.java @@ -21,21 +21,18 @@ import org.apache.lucene.queryparser.flexible.core.config.QueryConfigHandler; import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; /** - * A {@link QueryNodeProcessor} is an interface for classes that process a - * {@link QueryNode} tree. - *

- * The implementor of this class should perform some operation on a query node
- * tree and return the same or another query node tree.
- *
- * It also may carry a {@link QueryConfigHandler} object that contains
- * configuration about the query represented by the query tree or the
- * collection/index where it's intended to be executed.
- *
- * In case there is any {@link QueryConfigHandler} associated to the query tree
- * to be processed, it should be set using
- * {@link QueryNodeProcessor#setQueryConfigHandler(QueryConfigHandler)} before
- * {@link QueryNodeProcessor#process(QueryNode)} is invoked.
- *
+ * A {@link QueryNodeProcessor} is an interface for classes that process a {@link QueryNode} tree.
+ *
+ * The implementor of this class should perform some operation on a query node tree and return
+ * the same or another query node tree.
+ *
+ * It may also carry a {@link QueryConfigHandler} object that contains configuration about the
+ * query represented by the query tree or the collection/index where it's intended to be executed.
+ *

    In case there is any {@link QueryConfigHandler} associated to the query tree to be processed, + * it should be set using {@link QueryNodeProcessor#setQueryConfigHandler(QueryConfigHandler)} + * before {@link QueryNodeProcessor#process(QueryNode)} is invoked. + * * @see QueryNode * @see QueryNodeProcessor * @see QueryConfigHandler @@ -43,28 +40,23 @@ import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; public interface QueryNodeProcessor { /** - * Processes a query node tree. It may return the same or another query tree. - * I should never return null. - * - * @param queryTree - * tree root node - * + * Processes a query node tree. It may return the same or another query tree. I should never + * return null. + * + * @param queryTree tree root node * @return the processed query tree */ public QueryNode process(QueryNode queryTree) throws QueryNodeException; - /** - * Sets the {@link QueryConfigHandler} associated to the query tree. - */ + /** Sets the {@link QueryConfigHandler} associated to the query tree. */ public void setQueryConfigHandler(QueryConfigHandler queryConfigHandler); /** - * Returns the {@link QueryConfigHandler} associated to the query tree if any, - * otherwise it returns null - * - * @return the {@link QueryConfigHandler} associated to the query tree if any, - * otherwise it returns null + * Returns the {@link QueryConfigHandler} associated to the query tree if any, otherwise it + * returns null + * + * @return the {@link QueryConfigHandler} associated to the query tree if any, otherwise it + * returns null */ public QueryConfigHandler getQueryConfigHandler(); - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/processors/QueryNodeProcessorImpl.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/processors/QueryNodeProcessorImpl.java index 58a467bc72b..bd937cb0757 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/processors/QueryNodeProcessorImpl.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/processors/QueryNodeProcessorImpl.java @@ -18,31 +18,23 @@ package org.apache.lucene.queryparser.flexible.core.processors; import java.util.ArrayList; import java.util.List; - import org.apache.lucene.queryparser.flexible.core.QueryNodeException; import org.apache.lucene.queryparser.flexible.core.config.QueryConfigHandler; import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; /** - *

- * This is a default implementation for the {@link QueryNodeProcessor}
- * interface, it's an abstract class, so it should be extended by classes that
- * want to process a {@link QueryNode} tree.
- *
- *
- * This class process {@link QueryNode}s from left to right in the tree. While
- * it's walking down the tree, for every node,
- * {@link #preProcessNode(QueryNode)} is invoked. After a node's children are
- * processed, {@link #postProcessNode(QueryNode)} is invoked for that node.
- * {@link #setChildrenOrder(List)} is invoked before
- * {@link #postProcessNode(QueryNode)} only if the node has at least one child,
- * in {@link #setChildrenOrder(List)} the implementor might redefine the
+ * This is a default implementation for the {@link QueryNodeProcessor} interface; it's an abstract
+ * class, so it should be extended by classes that want to process a {@link QueryNode} tree.
+ *
+ * This class processes {@link QueryNode}s from left to right in the tree. While it's walking down
+ * the tree, for every node, {@link #preProcessNode(QueryNode)} is invoked. After a node's children
+ * are processed, {@link #postProcessNode(QueryNode)} is invoked for that node. {@link
+ * #setChildrenOrder(List)} is invoked before {@link #postProcessNode(QueryNode)} only if the node
+ * has at least one child; in {@link #setChildrenOrder(List)} the implementor might redefine the
 * children order or remove any children from the children list.
- *
- *
- * Here is an example about how it process the nodes:
- *
- *
+ *
+ * Here is an example of how it processes the nodes:
+ *
 *

      *      a
      *     / \
    @@ -50,9 +42,9 @@ import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
      *   / \
      *  c   d
      * 
    - * + * * Here is the order the methods would be invoked for the tree described above: - * + * *
      *      preProcessNode( a );
      *      preProcessNode( b );
    @@ -67,7 +59,7 @@ import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
      *      setChildrenOrder( aChildrenList );
      *      postProcessNode( a )
      * 
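
A minimal concrete subclass only needs the three hook methods; this illustrative TracingProcessor logs the invocation order shown above and leaves the tree untouched:

    import java.util.List;
    import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
    import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
    import org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessorImpl;

    public class TracingProcessor extends QueryNodeProcessorImpl {
      @Override
      protected QueryNode preProcessNode(QueryNode node) throws QueryNodeException {
        System.out.println("pre:  " + node.getClass().getSimpleName()); // walking down
        return node;
      }

      @Override
      protected QueryNode postProcessNode(QueryNode node) throws QueryNodeException {
        System.out.println("post: " + node.getClass().getSimpleName()); // walking up
        return node;
      }

      @Override
      protected List<QueryNode> setChildrenOrder(List<QueryNode> children)
          throws QueryNodeException {
        // Invoked just before postProcessNode for any node that has children.
        return children;
      }
    }
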
    - * + * * @see org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessor */ public abstract class QueryNodeProcessorImpl implements QueryNodeProcessor { @@ -89,8 +81,7 @@ public abstract class QueryNodeProcessorImpl implements QueryNodeProcessor { return processIteration(queryTree); } - private QueryNode processIteration(QueryNode queryTree) - throws QueryNodeException { + private QueryNode processIteration(QueryNode queryTree) throws QueryNodeException { queryTree = preProcessNode(queryTree); processChildren(queryTree); @@ -98,16 +89,13 @@ public abstract class QueryNodeProcessorImpl implements QueryNodeProcessor { queryTree = postProcessNode(queryTree); return queryTree; - } /** * This method is called every time a child is processed. - * - * @param queryTree - * the query node child to be processed - * @throws QueryNodeException - * if something goes wrong during the query node processing + * + * @param queryTree the query node child to be processed + * @throws QueryNodeException if something goes wrong during the query node processing */ protected void processChildren(QueryNode queryTree) throws QueryNodeException { @@ -125,11 +113,9 @@ public abstract class QueryNodeProcessorImpl implements QueryNodeProcessor { if (child == null) { throw new NullPointerException(); - } newChildren.add(child); - } List orderedChildrenList = setChildrenOrder(newChildren); @@ -139,9 +125,7 @@ public abstract class QueryNodeProcessorImpl implements QueryNodeProcessor { } finally { newChildren.beingUsed = false; } - } - } private ChildrenList allocateChildrenList() { @@ -154,30 +138,24 @@ public abstract class QueryNodeProcessorImpl implements QueryNodeProcessor { list.clear(); break; - } - } if (list == null) { list = new ChildrenList(); this.childrenListPool.add(list); - } list.beingUsed = true; return list; - } /** - * For reference about this method check: - * {@link QueryNodeProcessor#setQueryConfigHandler(QueryConfigHandler)}. - * - * @param queryConfigHandler - * the query configuration handler to be set. - * + * For reference about this method check: {@link + * QueryNodeProcessor#setQueryConfigHandler(QueryConfigHandler)}. + * + * @param queryConfigHandler the query configuration handler to be set. * @see QueryNodeProcessor#getQueryConfigHandler() * @see QueryConfigHandler */ @@ -187,11 +165,9 @@ public abstract class QueryNodeProcessorImpl implements QueryNodeProcessor { } /** - * For reference about this method check: - * {@link QueryNodeProcessor#getQueryConfigHandler()}. - * + * For reference about this method check: {@link QueryNodeProcessor#getQueryConfigHandler()}. + * * @return QueryConfigHandler the query configuration handler to be set. - * * @see QueryNodeProcessor#setQueryConfigHandler(QueryConfigHandler) * @see QueryConfigHandler */ @@ -202,52 +178,35 @@ public abstract class QueryNodeProcessorImpl implements QueryNodeProcessor { /** * This method is invoked for every node when walking down the tree. 
- * - * @param node - * the query node to be pre-processed - * + * + * @param node the query node to be pre-processed * @return a query node - * - * @throws QueryNodeException - * if something goes wrong during the query node processing + * @throws QueryNodeException if something goes wrong during the query node processing */ - abstract protected QueryNode preProcessNode(QueryNode node) - throws QueryNodeException; + protected abstract QueryNode preProcessNode(QueryNode node) throws QueryNodeException; /** * This method is invoked for every node when walking up the tree. - * - * @param node - * node the query node to be post-processed - * + * + * @param node node the query node to be post-processed * @return a query node - * - * @throws QueryNodeException - * if something goes wrong during the query node processing + * @throws QueryNodeException if something goes wrong during the query node processing */ - abstract protected QueryNode postProcessNode(QueryNode node) - throws QueryNodeException; + protected abstract QueryNode postProcessNode(QueryNode node) throws QueryNodeException; /** - * This method is invoked for every node that has at least on child. It's - * invoked right before {@link #postProcessNode(QueryNode)} is invoked. - * - * @param children - * the list containing all current node's children - * - * @return a new list containing all children that should be set to the - * current node - * - * @throws QueryNodeException - * if something goes wrong during the query node processing + * This method is invoked for every node that has at least on child. It's invoked right before + * {@link #postProcessNode(QueryNode)} is invoked. + * + * @param children the list containing all current node's children + * @return a new list containing all children that should be set to the current node + * @throws QueryNodeException if something goes wrong during the query node processing */ - abstract protected List setChildrenOrder(List children) + protected abstract List setChildrenOrder(List children) throws QueryNodeException; private static class ChildrenList extends ArrayList { boolean beingUsed; - } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/processors/QueryNodeProcessorPipeline.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/processors/QueryNodeProcessorPipeline.java index f6e9d0085c8..41f8ecf7690 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/processors/QueryNodeProcessorPipeline.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/processors/QueryNodeProcessorPipeline.java @@ -17,52 +17,41 @@ package org.apache.lucene.queryparser.flexible.core.processors; import java.util.*; - import org.apache.lucene.queryparser.flexible.core.QueryNodeException; import org.apache.lucene.queryparser.flexible.core.config.QueryConfigHandler; import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; /** - * A {@link QueryNodeProcessorPipeline} class should be used to build a query - * node processor pipeline. - * - * When a query node tree is processed using this class, it passes the query - * node tree to each processor on the pipeline and the result from each - * processor is passed to the next one, always following the order the - * processors were on the pipeline. 
- * - * When a {@link QueryConfigHandler} object is set on a - * {@link QueryNodeProcessorPipeline}, it also takes care of setting this - * {@link QueryConfigHandler} on all processor on pipeline. - * + * A {@link QueryNodeProcessorPipeline} class should be used to build a query node processor + * pipeline. + * + *

    When a query node tree is processed using this class, it passes the query node tree to each + * processor on the pipeline and the result from each processor is passed to the next one, always + * following the order the processors were on the pipeline. + * + *

    When a {@link QueryConfigHandler} object is set on a {@link QueryNodeProcessorPipeline}, it + * also takes care of setting this {@link QueryConfigHandler} on all processor on pipeline. */ -public class QueryNodeProcessorPipeline implements QueryNodeProcessor, - List { +public class QueryNodeProcessorPipeline implements QueryNodeProcessor, List { private LinkedList processors = new LinkedList<>(); private QueryConfigHandler queryConfig; - /** - * Constructs an empty query node processor pipeline. - */ + /** Constructs an empty query node processor pipeline. */ public QueryNodeProcessorPipeline() { // empty constructor } - /** - * Constructs with a {@link QueryConfigHandler} object. - */ + /** Constructs with a {@link QueryConfigHandler} object. */ public QueryNodeProcessorPipeline(QueryConfigHandler queryConfigHandler) { this.queryConfig = queryConfigHandler; } /** - * For reference about this method check: - * {@link QueryNodeProcessor#getQueryConfigHandler()}. - * + * For reference about this method check: {@link QueryNodeProcessor#getQueryConfigHandler()}. + * * @return QueryConfigHandler the query configuration handler to be set. - * * @see QueryNodeProcessor#setQueryConfigHandler(QueryConfigHandler) * @see QueryConfigHandler */ @@ -72,14 +61,10 @@ public class QueryNodeProcessorPipeline implements QueryNodeProcessor, } /** - * For reference about this method check: - * {@link QueryNodeProcessor#process(QueryNode)}. - * + * For reference about this method check: {@link QueryNodeProcessor#process(QueryNode)}. + * * @param queryTree the query node tree to be processed - * - * @throws QueryNodeException if something goes wrong during the query node - * processing - * + * @throws QueryNodeException if something goes wrong during the query node processing * @see QueryNode */ @Override @@ -90,15 +75,13 @@ public class QueryNodeProcessorPipeline implements QueryNodeProcessor, } return queryTree; - } /** - * For reference about this method check: - * {@link QueryNodeProcessor#setQueryConfigHandler(QueryConfigHandler)}. - * + * For reference about this method check: {@link + * QueryNodeProcessor#setQueryConfigHandler(QueryConfigHandler)}. + * * @param queryConfigHandler the query configuration handler to be set. 
- * * @see QueryNodeProcessor#getQueryConfigHandler() * @see QueryConfigHandler */ @@ -109,12 +92,9 @@ public class QueryNodeProcessorPipeline implements QueryNodeProcessor, for (QueryNodeProcessor processor : this.processors) { processor.setQueryConfigHandler(this.queryConfig); } - } - /** - * @see List#add(Object) - */ + /** @see List#add(Object) */ @Override public boolean add(QueryNodeProcessor processor) { boolean added = this.processors.add(processor); @@ -124,22 +104,16 @@ public class QueryNodeProcessorPipeline implements QueryNodeProcessor, } return added; - } - /** - * @see List#add(int, Object) - */ + /** @see List#add(int, Object) */ @Override public void add(int index, QueryNodeProcessor processor) { this.processors.add(index, processor); processor.setQueryConfigHandler(this.queryConfig); - } - /** - * @see List#addAll(Collection) - */ + /** @see List#addAll(Collection) */ @Override public boolean addAll(Collection c) { boolean anyAdded = this.processors.addAll(c); @@ -149,12 +123,9 @@ public class QueryNodeProcessorPipeline implements QueryNodeProcessor, } return anyAdded; - } - /** - * @see List#addAll(int, Collection) - */ + /** @see List#addAll(int, Collection) */ @Override public boolean addAll(int index, Collection c) { boolean anyAdded = this.processors.addAll(index, c); @@ -164,166 +135,125 @@ public class QueryNodeProcessorPipeline implements QueryNodeProcessor, } return anyAdded; - } - /** - * @see List#clear() - */ + /** @see List#clear() */ @Override public void clear() { this.processors.clear(); } - /** - * @see List#contains(Object) - */ + /** @see List#contains(Object) */ @Override public boolean contains(Object o) { return this.processors.contains(o); } - /** - * @see List#containsAll(Collection) - */ + /** @see List#containsAll(Collection) */ @Override public boolean containsAll(Collection c) { return this.processors.containsAll(c); } - /** - * @see List#get(int) - */ + /** @see List#get(int) */ @Override public QueryNodeProcessor get(int index) { return this.processors.get(index); } - /** - * @see List#indexOf(Object) - */ + /** @see List#indexOf(Object) */ @Override public int indexOf(Object o) { return this.processors.indexOf(o); } - /** - * @see List#isEmpty() - */ + /** @see List#isEmpty() */ @Override public boolean isEmpty() { return this.processors.isEmpty(); } - /** - * @see List#iterator() - */ + /** @see List#iterator() */ @Override public Iterator iterator() { return this.processors.iterator(); } - /** - * @see List#lastIndexOf(Object) - */ + /** @see List#lastIndexOf(Object) */ @Override public int lastIndexOf(Object o) { return this.processors.lastIndexOf(o); } - /** - * @see List#listIterator() - */ + /** @see List#listIterator() */ @Override public ListIterator listIterator() { return this.processors.listIterator(); } - /** - * @see List#listIterator(int) - */ + /** @see List#listIterator(int) */ @Override public ListIterator listIterator(int index) { return this.processors.listIterator(index); } - /** - * @see List#remove(Object) - */ + /** @see List#remove(Object) */ @Override public boolean remove(Object o) { return this.processors.remove(o); } - /** - * @see List#remove(int) - */ + /** @see List#remove(int) */ @Override public QueryNodeProcessor remove(int index) { return this.processors.remove(index); } - /** - * @see List#removeAll(Collection) - */ + /** @see List#removeAll(Collection) */ @Override public boolean removeAll(Collection c) { return this.processors.removeAll(c); } - /** - * @see List#retainAll(Collection) - */ + /** @see 
List#retainAll(Collection) */ @Override public boolean retainAll(Collection c) { return this.processors.retainAll(c); } - /** - * @see List#set(int, Object) - */ + /** @see List#set(int, Object) */ @Override public QueryNodeProcessor set(int index, QueryNodeProcessor processor) { QueryNodeProcessor oldProcessor = this.processors.set(index, processor); - + if (oldProcessor != processor) { processor.setQueryConfigHandler(this.queryConfig); } - + return oldProcessor; - } - /** - * @see List#size() - */ + /** @see List#size() */ @Override public int size() { return this.processors.size(); } - /** - * @see List#subList(int, int) - */ + /** @see List#subList(int, int) */ @Override public List subList(int fromIndex, int toIndex) { return this.processors.subList(fromIndex, toIndex); } - /** - * @see List#toArray(Object[]) - */ + /** @see List#toArray(Object[]) */ @Override public T[] toArray(T[] array) { return this.processors.toArray(array); } - /** - * @see List#toArray() - */ + /** @see List#toArray() */ @Override public Object[] toArray() { return this.processors.toArray(); } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/processors/RemoveDeletedQueryNodesProcessor.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/processors/RemoveDeletedQueryNodesProcessor.java index d25148574f3..7329fe8326f 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/processors/RemoveDeletedQueryNodesProcessor.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/processors/RemoveDeletedQueryNodesProcessor.java @@ -18,17 +18,15 @@ package org.apache.lucene.queryparser.flexible.core.processors; import java.util.Iterator; import java.util.List; - import org.apache.lucene.queryparser.flexible.core.QueryNodeException; import org.apache.lucene.queryparser.flexible.core.nodes.DeletedQueryNode; import org.apache.lucene.queryparser.flexible.core.nodes.MatchNoDocsQueryNode; import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; /** - * A {@link QueryNodeProcessorPipeline} class removes every instance of - * {@link DeletedQueryNode} from a query node tree. If the resulting root node - * is a {@link DeletedQueryNode}, {@link MatchNoDocsQueryNode} is returned. - * + * A {@link QueryNodeProcessorPipeline} class removes every instance of {@link DeletedQueryNode} + * from a query node tree. If the resulting root node is a {@link DeletedQueryNode}, {@link + * MatchNoDocsQueryNode} is returned. 
*/ public class RemoveDeletedQueryNodesProcessor extends QueryNodeProcessorImpl { @@ -40,15 +38,12 @@ public class RemoveDeletedQueryNodesProcessor extends QueryNodeProcessorImpl { public QueryNode process(QueryNode queryTree) throws QueryNodeException { queryTree = super.process(queryTree); - if (queryTree instanceof DeletedQueryNode - && !(queryTree instanceof MatchNoDocsQueryNode)) { + if (queryTree instanceof DeletedQueryNode && !(queryTree instanceof MatchNoDocsQueryNode)) { return new MatchNoDocsQueryNode(); - } return queryTree; - } @Override @@ -64,49 +59,39 @@ public class RemoveDeletedQueryNodesProcessor extends QueryNodeProcessorImpl { } else { removeBoolean = true; - for (Iterator it = children.iterator(); it.hasNext();) { + for (Iterator it = children.iterator(); it.hasNext(); ) { if (!(it.next() instanceof DeletedQueryNode)) { removeBoolean = false; break; - } - } - } if (removeBoolean) { return new DeletedQueryNode(); } - } return node; - } @Override - protected List setChildrenOrder(List children) - throws QueryNodeException { + protected List setChildrenOrder(List children) throws QueryNodeException { for (int i = 0; i < children.size(); i++) { if (children.get(i) instanceof DeletedQueryNode) { children.remove(i--); } - } return children; - } @Override protected QueryNode preProcessNode(QueryNode node) throws QueryNodeException { return node; - } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/processors/package-info.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/processors/package-info.java index 8edfdb11639..11728da87a9 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/processors/package-info.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/processors/package-info.java @@ -19,27 +19,31 @@ * Interfaces and implementations used by query node processors * *

 * Query Node Processors
 *
- *
- * The package org.apache.lucene.queryParser.processors contains interfaces
- * that should be implemented by every query node processor.
- *
- * The interface that every query node processor should implement is {@link org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessor}.
- * A query node processor should be used to process a {@link org.apache.lucene.queryparser.flexible.core.nodes.QueryNode} tree.
- * {@link org.apache.lucene.queryparser.flexible.core.nodes.QueryNode} trees can be programmatically created or generated by a
- * text parser. See {@link org.apache.lucene.queryparser.flexible.core.parser} for more details about text parsers.
- *
- * A query node processor should be used to process a {@link org.apache.lucene.queryparser.flexible.core.nodes.QueryNode} tree.
- * {@link org.apache.lucene.queryparser.flexible.core.nodes.QueryNode} trees can be programmatically created or generated by a
- * text parser. See {@link org.apache.lucene.queryparser.flexible.core.parser} for more details about text parsers.
- *
- * A pipeline of processors can be assembled using {@link org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessorPipeline}.
- *
- * Implementors may want to extend {@link org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessorImpl}, which simplifies
- * the implementation, because it walks automatically the {@link org.apache.lucene.queryparser.flexible.core.nodes.QueryNode}. See
- * {@link org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessorImpl} for more details.
+ *
+ * The package org.apache.lucene.queryParser.processors contains interfaces that
+ * should be implemented by every query node processor.
+ *
+ * The interface that every query node processor should implement is {@link
+ * org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessor}. A query node
+ * processor should be used to process a {@link
+ * org.apache.lucene.queryparser.flexible.core.nodes.QueryNode} tree. {@link
+ * org.apache.lucene.queryparser.flexible.core.nodes.QueryNode} trees can be programmatically
+ * created or generated by a text parser. See {@link
+ * org.apache.lucene.queryparser.flexible.core.parser} for more details about text parsers.
+ *
+ * A query node processor should be used to process a {@link
+ * org.apache.lucene.queryparser.flexible.core.nodes.QueryNode} tree. {@link
+ * org.apache.lucene.queryparser.flexible.core.nodes.QueryNode} trees can be programmatically
+ * created or generated by a text parser. See {@link
+ * org.apache.lucene.queryparser.flexible.core.parser} for more details about text parsers.
+ *
+ * A pipeline of processors can be assembled using {@link
+ * org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessorPipeline}.
+ *

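To make the processor contract above concrete, here is a minimal sketch of a custom processor, not part of this patch: the class name is invented, and it assumes the FieldQueryNode text accessors used elsewhere in these sources.

import java.util.List;
import java.util.Locale;
import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
import org.apache.lucene.queryparser.flexible.core.nodes.FieldQueryNode;
import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
import org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessorImpl;

/** Hypothetical processor that lower-cases every term node in the query tree. */
public class LowercaseTermsProcessor extends QueryNodeProcessorImpl {

  @Override
  protected QueryNode preProcessNode(QueryNode node) throws QueryNodeException {
    return node; // nothing to do on the way down the tree
  }

  @Override
  protected QueryNode postProcessNode(QueryNode node) throws QueryNodeException {
    // QueryNodeProcessorImpl walks the tree for us; we only rewrite term nodes.
    if (node instanceof FieldQueryNode) {
      FieldQueryNode fieldNode = (FieldQueryNode) node;
      fieldNode.setText(fieldNode.getTextAsString().toLowerCase(Locale.ROOT));
    }
    return node;
  }

  @Override
  protected List<QueryNode> setChildrenOrder(List<QueryNode> children) throws QueryNodeException {
    return children; // keep the original child order
  }
}

Such a processor could then be appended to a QueryNodeProcessorPipeline, as the paragraph above describes.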
    Implementors may want to extend {@link + * org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessorImpl}, which simplifies + * the implementation, because it walks automatically the {@link + * org.apache.lucene.queryparser.flexible.core.nodes.QueryNode}. See {@link + * org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessorImpl} for more details. */ package org.apache.lucene.queryparser.flexible.core.processors; - diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/util/QueryNodeOperation.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/util/QueryNodeOperation.java index 7d82510625b..96307bcf4c1 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/util/QueryNodeOperation.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/util/QueryNodeOperation.java @@ -16,79 +16,69 @@ */ package org.apache.lucene.queryparser.flexible.core.util; +import java.util.ArrayList; +import java.util.List; import org.apache.lucene.queryparser.flexible.core.QueryNodeError; import org.apache.lucene.queryparser.flexible.core.nodes.AndQueryNode; import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; -import java.util.ArrayList; -import java.util.List; - - -/** - * Allow joining 2 QueryNode Trees, into one. - */ +/** Allow joining 2 QueryNode Trees, into one. */ public final class QueryNodeOperation { private QueryNodeOperation() { // Exists only to defeat instantiation. } private enum ANDOperation { - BOTH, Q1, Q2, NONE + BOTH, + Q1, + Q2, + NONE } /** - * perform a logical and of 2 QueryNode trees. if q1 and q2 are ANDQueryNode - * nodes it uses head Node from q1 and adds the children of q2 to q1 if q1 is - * a AND node and q2 is not, add q2 as a child of the head node of q1 if q2 is - * a AND node and q1 is not, add q1 as a child of the head node of q2 if q1 - * and q2 are not ANDQueryNode nodes, create a AND node and make q1 and q2 - * children of that node if q1 or q2 is null it returns the not null node if - * q1 = q2 = null it returns null + * perform a logical and of 2 QueryNode trees. 
if q1 and q2 are ANDQueryNode nodes it uses head + * Node from q1 and adds the children of q2 to q1 if q1 is a AND node and q2 is not, add q2 as a + * child of the head node of q1 if q2 is a AND node and q1 is not, add q1 as a child of the head + * node of q2 if q1 and q2 are not ANDQueryNode nodes, create a AND node and make q1 and q2 + * children of that node if q1 or q2 is null it returns the not null node if q1 = q2 = null it + * returns null */ - public final static QueryNode logicalAnd(QueryNode q1, QueryNode q2) { - if (q1 == null) - return q2; - if (q2 == null) - return q1; + public static final QueryNode logicalAnd(QueryNode q1, QueryNode q2) { + if (q1 == null) return q2; + if (q2 == null) return q1; ANDOperation op = null; - if (q1 instanceof AndQueryNode && q2 instanceof AndQueryNode) - op = ANDOperation.BOTH; - else if (q1 instanceof AndQueryNode) - op = ANDOperation.Q1; - else if (q2 instanceof AndQueryNode) - op = ANDOperation.Q2; - else - op = ANDOperation.NONE; + if (q1 instanceof AndQueryNode && q2 instanceof AndQueryNode) op = ANDOperation.BOTH; + else if (q1 instanceof AndQueryNode) op = ANDOperation.Q1; + else if (q2 instanceof AndQueryNode) op = ANDOperation.Q2; + else op = ANDOperation.NONE; try { QueryNode result = null; switch (op) { - case NONE: - List children = new ArrayList<>(); - children.add(q1.cloneTree()); - children.add(q2.cloneTree()); - result = new AndQueryNode(children); - return result; - case Q1: - result = q1.cloneTree(); - result.add(q2.cloneTree()); - return result; - case Q2: - result = q2.cloneTree(); - result.add(q1.cloneTree()); - return result; - case BOTH: - result = q1.cloneTree(); - result.add(q2.cloneTree().getChildren()); - return result; + case NONE: + List children = new ArrayList<>(); + children.add(q1.cloneTree()); + children.add(q2.cloneTree()); + result = new AndQueryNode(children); + return result; + case Q1: + result = q1.cloneTree(); + result.add(q2.cloneTree()); + return result; + case Q2: + result = q2.cloneTree(); + result.add(q1.cloneTree()); + return result; + case BOTH: + result = q1.cloneTree(); + result.add(q2.cloneTree().getChildren()); + return result; } } catch (CloneNotSupportedException e) { throw new QueryNodeError(e); } return null; - } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/util/StringUtils.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/util/StringUtils.java index 94f47bea9c4..a801a3a2d68 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/util/StringUtils.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/util/StringUtils.java @@ -16,20 +16,16 @@ */ package org.apache.lucene.queryparser.flexible.core.util; -/** - * String manipulation routines - */ -final public class StringUtils { +/** String manipulation routines */ +public final class StringUtils { public static String toString(Object obj) { - + if (obj != null) { return obj.toString(); - + } else { return null; } - } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/util/UnescapedCharSequence.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/util/UnescapedCharSequence.java index 5e74cac5861..815714fd084 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/util/UnescapedCharSequence.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/util/UnescapedCharSequence.java @@ -18,28 +18,21 @@ 
package org.apache.lucene.queryparser.flexible.core.util; import java.util.Locale; -/** - * CharsSequence with escaped chars information. - */ +/** CharsSequence with escaped chars information. */ public final class UnescapedCharSequence implements CharSequence { private char[] chars; private boolean[] wasEscaped; - /** - * Create a escaped CharSequence - */ - public UnescapedCharSequence(char[] chars, boolean[] wasEscaped, int offset, - int length) { + /** Create a escaped CharSequence */ + public UnescapedCharSequence(char[] chars, boolean[] wasEscaped, int offset, int length) { this.chars = new char[length]; this.wasEscaped = new boolean[length]; System.arraycopy(chars, offset, this.chars, 0, length); System.arraycopy(wasEscaped, offset, this.wasEscaped, 0, length); } - /** - * Create a non-escaped CharSequence - */ + /** Create a non-escaped CharSequence */ public UnescapedCharSequence(CharSequence text) { this.chars = new char[text.length()]; this.wasEscaped = new boolean[text.length()]; @@ -49,9 +42,7 @@ public final class UnescapedCharSequence implements CharSequence { } } - /** - * Create a copy of an existent UnescapedCharSequence - */ + /** Create a copy of an existent UnescapedCharSequence */ @SuppressWarnings("unused") private UnescapedCharSequence(UnescapedCharSequence text) { this.chars = new char[text.length()]; @@ -76,8 +67,7 @@ public final class UnescapedCharSequence implements CharSequence { public CharSequence subSequence(int start, int end) { int newLength = end - start; - return new UnescapedCharSequence(this.chars, this.wasEscaped, start, - newLength); + return new UnescapedCharSequence(this.chars, this.wasEscaped, start, newLength); } @Override @@ -87,7 +77,7 @@ public final class UnescapedCharSequence implements CharSequence { /** * Return a escaped String - * + * * @return a escaped String */ public String toStringEscaped() { @@ -96,8 +86,7 @@ public final class UnescapedCharSequence implements CharSequence { for (int i = 0; i >= this.length(); i++) { if (this.chars[i] == '\\') { result.append('\\'); - } else if (this.wasEscaped[i]) - result.append('\\'); + } else if (this.wasEscaped[i]) result.append('\\'); result.append(this.chars[i]); } @@ -106,9 +95,8 @@ public final class UnescapedCharSequence implements CharSequence { /** * Return a escaped String - * - * @param enabledChars - * - array of chars to be escaped + * + * @param enabledChars - array of chars to be escaped * @return a escaped String */ public String toStringEscaped(char[] enabledChars) { @@ -134,19 +122,18 @@ public final class UnescapedCharSequence implements CharSequence { public boolean wasEscaped(int index) { return this.wasEscaped[index]; } - - static final public boolean wasEscaped(CharSequence text, int index) { + + public static final boolean wasEscaped(CharSequence text, int index) { if (text instanceof UnescapedCharSequence) - return ((UnescapedCharSequence)text).wasEscaped[index]; + return ((UnescapedCharSequence) text).wasEscaped[index]; else return false; } - + public static CharSequence toLowerCase(CharSequence text, Locale locale) { if (text instanceof UnescapedCharSequence) { char[] chars = text.toString().toLowerCase(locale).toCharArray(); - boolean[] wasEscaped = ((UnescapedCharSequence)text).wasEscaped; + boolean[] wasEscaped = ((UnescapedCharSequence) text).wasEscaped; return new UnescapedCharSequence(chars, wasEscaped, 0, chars.length); - } else - return new UnescapedCharSequence(text.toString().toLowerCase(locale)); + } else return new 
UnescapedCharSequence(text.toString().toLowerCase(locale)); } } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/util/package-info.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/util/package-info.java index d5f8e5b16e1..d6c8ddd87d2 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/util/package-info.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/util/package-info.java @@ -14,12 +14,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -/** + +/** * Utility classes to used with the Query Parser. + * *

 * <h2>Utility classes to be used with the Query Parser</h2>

- *
- * This package contains utility classes used with the query parsers.
+ *
+ * <p>

    This package contains utility classes used with the query parsers. */ package org.apache.lucene.queryparser.flexible.core.util; - diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/messages/Message.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/messages/Message.java index e677ac70b5d..9a22fb3a60d 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/messages/Message.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/messages/Message.java @@ -19,8 +19,8 @@ package org.apache.lucene.queryparser.flexible.messages; import java.util.Locale; /** - * Message Interface for a lazy loading. - * For Native Language Support (NLS), system of software internationalization. + * Message Interface for a lazy loading. For Native Language Support (NLS), system of software + * internationalization. */ public interface Message { @@ -31,5 +31,4 @@ public interface Message { public String getLocalizedMessage(); public String getLocalizedMessage(Locale locale); - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/messages/MessageImpl.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/messages/MessageImpl.java index de911518522..45669a83b50 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/messages/MessageImpl.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/messages/MessageImpl.java @@ -19,8 +19,8 @@ package org.apache.lucene.queryparser.flexible.messages; import java.util.Locale; /** - * Default implementation of Message interface. - * For Native Language Support (NLS), system of software internationalization. + * Default implementation of Message interface. For Native Language Support (NLS), system of + * software internationalization. */ public class MessageImpl implements Message { @@ -30,7 +30,6 @@ public class MessageImpl implements Message { public MessageImpl(String key) { this.key = key; - } public MessageImpl(String key, Object... args) { @@ -69,5 +68,4 @@ public class MessageImpl implements Message { } return sb.toString(); } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/messages/NLS.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/messages/NLS.java index b32ba2fc723..3462a066e23 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/messages/NLS.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/messages/NLS.java @@ -28,22 +28,20 @@ import java.util.ResourceBundle; /** * MessageBundles classes extend this class, to implement a bundle. - * - * For Native Language Support (NLS), system of software internationalization. - * - * This interface is similar to the NLS class in eclipse.osgi.util.NLS class - - * initializeMessages() method resets the values of all static strings, should - * only be called by classes that extend from NLS (see TestMessages.java for - * reference) - performs validation of all message in a bundle, at class load - * time - performs per message validation at runtime - see NLSTest.java for - * usage reference - * - * MessageBundle classes may subclass this type. + * + *

 For Native Language Support (NLS), system of software internationalization.
+ *
+ * <p>This interface is similar to the NLS class in eclipse.osgi.util.NLS:
+ * initializeMessages() resets the values of all static strings and should only be called by
+ * classes that extend NLS (see TestMessages.java for reference); it performs validation of all
+ * messages in a bundle at class load time, and per-message validation at runtime (see
+ * NLSTest.java for usage reference).
+ *

    MessageBundle classes may subclass this type. */ public class NLS { - private static Map> bundles = - new HashMap<>(0); + private static Map> bundles = new HashMap<>(0); protected NLS() { // Do not instantiate @@ -56,14 +54,12 @@ public class NLS { public static String getLocalizedMessage(String key, Locale locale) { Object message = getResourceBundleObject(key, locale); if (message == null) { - return "Message with key:" + key + " and locale: " + locale - + " not found."; + return "Message with key:" + key + " and locale: " + locale + " not found."; } return message.toString(); } - public static String getLocalizedMessage(String key, Locale locale, - Object... args) { + public static String getLocalizedMessage(String key, Locale locale, Object... args) { String str = getLocalizedMessage(key, locale); if (args.length > 0) { @@ -78,19 +74,16 @@ public class NLS { } /** - * Initialize a given class with the message bundle Keys Should be called from - * a class that extends NLS in a static block at class load time. - * - * @param bundleName - * Property file with that contains the message bundle - * @param clazz - * where constants will reside + * Initialize a given class with the message bundle Keys Should be called from a class that + * extends NLS in a static block at class load time. + * + * @param bundleName Property file with that contains the message bundle + * @param clazz where constants will reside */ protected static void initializeMessages(String bundleName, Class clazz) { try { load(clazz); - if (!bundles.containsKey(bundleName)) - bundles.put(bundleName, clazz); + if (!bundles.containsKey(bundleName)) bundles.put(bundleName, clazz); } catch (Throwable e) { // ignore all errors and exceptions // because this function is supposed to be called at class load time. @@ -101,15 +94,13 @@ public class NLS { // slow resource checking // need to loop thru all registered resource bundles - for (Iterator it = bundles.keySet().iterator(); it.hasNext();) { + for (Iterator it = bundles.keySet().iterator(); it.hasNext(); ) { Class clazz = bundles.get(it.next()); - ResourceBundle resourceBundle = ResourceBundle.getBundle(clazz.getName(), - locale); + ResourceBundle resourceBundle = ResourceBundle.getBundle(clazz.getName(), locale); if (resourceBundle != null) { try { Object obj = resourceBundle.getObject(messageKey); - if (obj != null) - return obj; + if (obj != null) return obj; } catch (MissingResourceException e) { // just continue it might be on the next resource bundle } @@ -134,8 +125,7 @@ public class NLS { private static void loadfieldValue(Field field, Class clazz) { int MOD_EXPECTED = Modifier.PUBLIC | Modifier.STATIC; int MOD_MASK = MOD_EXPECTED | Modifier.FINAL; - if ((field.getModifiers() & MOD_MASK) != MOD_EXPECTED) - return; + if ((field.getModifiers() & MOD_MASK) != MOD_EXPECTED) return; // Set a value for this empty field. 
try { @@ -146,23 +136,20 @@ public class NLS { } } - /** - * @param key - * - Message Key - */ + /** @param key - Message Key */ private static void validateMessage(String key, Class clazz) { // Test if the message is present in the resource bundle try { - ResourceBundle resourceBundle = ResourceBundle.getBundle(clazz.getName(), - Locale.getDefault()); + ResourceBundle resourceBundle = + ResourceBundle.getBundle(clazz.getName(), Locale.getDefault()); if (resourceBundle != null) { Object obj = resourceBundle.getObject(key); - //if (obj == null) + // if (obj == null) // System.err.println("WARN: Message with key:" + key + " and locale: " // + Locale.getDefault() + " not found."); } } catch (MissingResourceException e) { - //System.err.println("WARN: Message with key:" + key + " and locale: " + // System.err.println("WARN: Message with key:" + key + " and locale: " // + Locale.getDefault() + " not found."); } catch (Throwable e) { // ignore all other errors and exceptions diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/messages/NLSException.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/messages/NLSException.java index 6e7f3b18eb4..ce52d86d632 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/messages/NLSException.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/messages/NLSException.java @@ -18,16 +18,12 @@ package org.apache.lucene.queryparser.flexible.messages; /** * Interface that exceptions should implement to support lazy loading of messages. - * - * For Native Language Support (NLS), system of software internationalization. - * - * This Interface should be implemented by all exceptions that require - * translation - * + * + *

 For Native Language Support (NLS), system of software internationalization.
+ *
+ * <p>This interface should be implemented by all exceptions that require translation
 */
public interface NLSException {
-  /**
-   * @return a instance of a class that implements the Message interface
-   */
+  /** @return an instance of a class that implements the Message interface */
  public Message getMessageObject();
}
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/messages/package-info.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/messages/package-info.java
index 07cef021112..066e55c1a6a 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/messages/package-info.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/messages/package-info.java
@@ -14,77 +14,76 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
-
-/**
+
+/**
 * For Native Language Support (NLS), system of software internationalization.
- *
+ *
 *

 * <h2>NLS message API</h2>

- *
- * This utility API, adds support for NLS messages in the apache code.
- * It is currently used by the lucene "New Flexible Query PArser".
- *
- * Features:
- * <ol>
- * <li>Message reference in the code, using static Strings
- * <li>Message resource validation at class load time, for easier debugging
- * <li>Allows for message IDs to be re-factored using eclipse or other code re-factor tools
- * <li>Allows for reference count on messages, just like code
- * <li>Lazy loading of Message Strings
- * <li>Normal loading Message Strings
- * </ol>
- *
+ *

+ * <p>This utility API adds support for NLS messages in the Apache code. It is currently used by
+ * the Lucene "New Flexible Query Parser".
+ *

+ * <p>Features:
+ *

      + *
+ * <ol>
+ *   <li>Message reference in the code, using static Strings
+ *   <li>Message resource validation at class load time, for easier debugging
+ *   <li>Allows for message IDs to be re-factored using eclipse or other code re-factor tools
+ *   <li>Allows for reference count on messages, just like code
+ *   <li>Lazy loading of Message Strings
+ *   <li>Normal loading Message Strings
+ * </ol>
    + * *
    *
    - *

    - * Lazy loading of Message Strings - * + * + *

    Lazy loading of Message Strings + * *

      *   public class MessagesTestBundle extends NLS {
    - *   
    + *
      *     private static final String BUNDLE_NAME = MessagesTestBundle.class.getName();
    - *   
    + *
      *     private MessagesTestBundle() {
      *       // should never be instantiated
      *     }
    - *   
    + *
      *     static {
      *       // register all string ids with NLS class and initialize static string
      *       // values
      *       NLS.initializeMessages(BUNDLE_NAME, MessagesTestBundle.class);
      *     }
    - *   
    + *
      *     // static string must match the strings in the property files.
      *     public static String Q0001E_INVALID_SYNTAX;
      *     public static String Q0004E_INVALID_SYNTAX_ESCAPE_UNICODE_TRUNCATION;
    - *   
    + *
      *     // this message is missing from the properties file
      *     public static String Q0005E_MESSAGE_NOT_IN_BUNDLE;
      *   }
    - * 
    + *
      *     // Create a message reference
      *     Message invalidSyntax = new MessageImpl(MessagesTestBundle.Q0001E_INVALID_SYNTAX, "XXX");
    - *     
    + *
      *     // Do other stuff in the code...
      *     // when is time to display the message to the user or log the message on a file
      *     // the message is loaded from the correct bundle
    - *     
    + *
      *     String message1 = invalidSyntax.getLocalizedMessage();
      *     String message2 = invalidSyntax.getLocalizedMessage(Locale.JAPANESE);
      * 
    - * + * *
    *
    - *

    - * Normal loading of Message Strings - * + * + *

    Normal loading of Message Strings + * *

      *   String message1 = NLS.getLocalizedMessage(MessagesTestBundle.Q0004E_INVALID_SYNTAX_ESCAPE_UNICODE_TRUNCATION);
      *   String message2 = NLS.getLocalizedMessage(MessagesTestBundle.Q0004E_INVALID_SYNTAX_ESCAPE_UNICODE_TRUNCATION, Locale.JAPANESE);
      * 
    - * - *
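The bundle example above declares static Strings that must match keys in a property file placed next to the class. For completeness, a hypothetical MessagesTestBundle.properties is sketched below; the message texts are invented placeholders, and the file is resolved through ResourceBundle.getBundle(clazz.getName(), locale) as shown in the NLS diff above.

# MessagesTestBundle.properties -- keys must match the public static String fields
Q0001E_INVALID_SYNTAX = Invalid syntax: {0}
Q0004E_INVALID_SYNTAX_ESCAPE_UNICODE_TRUNCATION = Truncated unicode escape sequence.
# Q0005E_MESSAGE_NOT_IN_BUNDLE is deliberately absent, as the example notes.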

    - * The org.apache.lucene.messages.TestNLS junit contains several other examples. - * The TestNLS java code is available from the Apache Lucene code repository. + * + *

    The org.apache.lucene.messages.TestNLS junit contains several other examples. The TestNLS java + * code is available from the Apache Lucene code repository. */ package org.apache.lucene.queryparser.flexible.messages; - diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/precedence/PrecedenceQueryParser.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/precedence/PrecedenceQueryParser.java index 880e795e89c..9efa6bf1e6f 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/precedence/PrecedenceQueryParser.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/precedence/PrecedenceQueryParser.java @@ -17,40 +17,32 @@ package org.apache.lucene.queryparser.flexible.precedence; import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.queryparser.flexible.precedence.processors.PrecedenceQueryNodeProcessorPipeline; import org.apache.lucene.queryparser.flexible.standard.StandardQueryParser; import org.apache.lucene.queryparser.flexible.standard.processors.StandardQueryNodeProcessorPipeline; -import org.apache.lucene.queryparser.flexible.precedence.processors.PrecedenceQueryNodeProcessorPipeline; /** - *

    - * This query parser works exactly as the standard query parser ( {@link StandardQueryParser} ), - * except that it respect the boolean precedence, so <a AND b OR c AND d> is parsed to <(+a +b) (+c +d)> - * instead of <+a +b +c +d>. - *

    - *

    - * EXPERT: This class extends {@link StandardQueryParser}, but uses {@link PrecedenceQueryNodeProcessorPipeline} - * instead of {@link StandardQueryNodeProcessorPipeline} to process the query tree. - *

- *
+ * This query parser works exactly as the standard query parser ({@link StandardQueryParser}),
+ * except that it respects the boolean precedence, so <a AND b OR c AND d> is parsed to
+ * <(+a +b) (+c +d)> instead of <+a +b +c +d>.
+ *
+ *

    EXPERT: This class extends {@link StandardQueryParser}, but uses {@link + * PrecedenceQueryNodeProcessorPipeline} instead of {@link StandardQueryNodeProcessorPipeline} to + * process the query tree. + * * @see StandardQueryParser */ public class PrecedenceQueryParser extends StandardQueryParser { - - /** - * @see StandardQueryParser#StandardQueryParser() - */ + + /** @see StandardQueryParser#StandardQueryParser() */ public PrecedenceQueryParser() { setQueryNodeProcessor(new PrecedenceQueryNodeProcessorPipeline(getQueryConfigHandler())); } - - /** - * @see StandardQueryParser#StandardQueryParser(Analyzer) - */ + + /** @see StandardQueryParser#StandardQueryParser(Analyzer) */ public PrecedenceQueryParser(Analyzer analyer) { super(analyer); - - setQueryNodeProcessor(new PrecedenceQueryNodeProcessorPipeline(getQueryConfigHandler())); - - } + setQueryNodeProcessor(new PrecedenceQueryNodeProcessorPipeline(getQueryConfigHandler())); + } } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/precedence/package-info.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/precedence/package-info.java index 2d46676ec12..c2cad38cf9f 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/precedence/package-info.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/precedence/package-info.java @@ -14,19 +14,17 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -/** + +/** * Precedence Query Parser Implementation - * + * *

    Lucene Precedence Query Parser

    - * - *

    - * The Precedence Query Parser extends the Standard Query Parser and enables - * the boolean precedence. So, the query <a AND b OR c AND d> is parsed to - * <(+a +b) (+c +d)> instead of <+a +b +c +d>. - *

    - * Check {@link org.apache.lucene.queryparser.flexible.standard.StandardQueryParser} for more details about the - * supported syntax and query parser functionalities. + * + *

    The Precedence Query Parser extends the Standard Query Parser and enables the boolean + * precedence. So, the query <a AND b OR c AND d> is parsed to <(+a +b) (+c +d)> instead + * of <+a +b +c +d>. + * + *
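A runnable sketch of the contrast described above; the class name, field name, and printed output are illustrative, and StandardAnalyzer is assumed to be on the classpath.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
import org.apache.lucene.queryparser.flexible.precedence.PrecedenceQueryParser;
import org.apache.lucene.queryparser.flexible.standard.StandardQueryParser;
import org.apache.lucene.search.Query;

public class PrecedenceDemo {
  public static void main(String[] args) throws QueryNodeException {
    String input = "a AND b OR c AND d";

    // Standard parser: no boolean precedence, all terms end up required together.
    Query flat = new StandardQueryParser(new StandardAnalyzer()).parse(input, "f");

    // Precedence parser: AND groups first, yielding (+a +b) (+c +d).
    Query grouped = new PrecedenceQueryParser(new StandardAnalyzer()).parse(input, "f");

    System.out.println(flat);    // e.g. +f:a +f:b +f:c +f:d
    System.out.println(grouped); // e.g. (+f:a +f:b) (+f:c +f:d)
  }
}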

    Check {@link org.apache.lucene.queryparser.flexible.standard.StandardQueryParser} for more + * details about the supported syntax and query parser functionalities. */ package org.apache.lucene.queryparser.flexible.precedence; - diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/precedence/processors/BooleanModifiersQueryNodeProcessor.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/precedence/processors/BooleanModifiersQueryNodeProcessor.java index b23f30e0dd9..084ca146fc8 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/precedence/processors/BooleanModifiersQueryNodeProcessor.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/precedence/processors/BooleanModifiersQueryNodeProcessor.java @@ -18,28 +18,26 @@ package org.apache.lucene.queryparser.flexible.precedence.processors; import java.util.ArrayList; import java.util.List; - +import org.apache.lucene.queryparser.flexible.core.QueryNodeException; +import org.apache.lucene.queryparser.flexible.core.nodes.*; +import org.apache.lucene.queryparser.flexible.core.nodes.ModifierQueryNode.Modifier; +import org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessorImpl; import org.apache.lucene.queryparser.flexible.precedence.PrecedenceQueryParser; import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler; import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler.ConfigurationKeys; import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler.Operator; -import org.apache.lucene.queryparser.flexible.core.QueryNodeException; -import org.apache.lucene.queryparser.flexible.core.nodes.*; -import org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessorImpl; -import org.apache.lucene.queryparser.flexible.core.nodes.ModifierQueryNode.Modifier; /** - *

    - * This processor is used to apply the correct {@link ModifierQueryNode} to {@link BooleanQueryNode}s children. - *

    - *

    - * It walks through the query node tree looking for {@link BooleanQueryNode}s. If an {@link AndQueryNode} is found, - * every child, which is not a {@link ModifierQueryNode} or the {@link ModifierQueryNode} - * is {@link Modifier#MOD_NONE}, becomes a {@link Modifier#MOD_REQ}. For any other - * {@link BooleanQueryNode} which is not an {@link OrQueryNode}, it checks the default operator is {@link Operator#AND}, - * if it is, the same operation when an {@link AndQueryNode} is found is applied to it. - *

    - * + * This processor is used to apply the correct {@link ModifierQueryNode} to {@link + * BooleanQueryNode}s children. + * + *

    It walks through the query node tree looking for {@link BooleanQueryNode}s. If an {@link + * AndQueryNode} is found, every child, which is not a {@link ModifierQueryNode} or the {@link + * ModifierQueryNode} is {@link Modifier#MOD_NONE}, becomes a {@link Modifier#MOD_REQ}. For any + * other {@link BooleanQueryNode} which is not an {@link OrQueryNode}, it checks the default + * operator is {@link Operator#AND}, if it is, the same operation when an {@link AndQueryNode} is + * found is applied to it. + * * @see ConfigurationKeys#DEFAULT_OPERATOR * @see PrecedenceQueryParser#setDefaultOperator */ @@ -56,7 +54,7 @@ public class BooleanModifiersQueryNodeProcessor extends QueryNodeProcessorImpl { @Override public QueryNode process(QueryNode queryTree) throws QueryNodeException { Operator op = getQueryConfigHandler().get(ConfigurationKeys.DEFAULT_OPERATOR); - + if (op == null) { throw new IllegalArgumentException( "StandardQueryConfigHandler.ConfigurationKeys.DEFAULT_OPERATOR should be set on the QueryConfigHandler"); @@ -65,7 +63,6 @@ public class BooleanModifiersQueryNodeProcessor extends QueryNodeProcessorImpl { this.usingAnd = StandardQueryConfigHandler.Operator.AND == op; return super.process(queryTree); - } @Override @@ -81,7 +78,8 @@ public class BooleanModifiersQueryNodeProcessor extends QueryNodeProcessorImpl { node.set(this.childrenBuffer); - } else if (this.usingAnd && node instanceof BooleanQueryNode + } else if (this.usingAnd + && node instanceof BooleanQueryNode && !(node instanceof OrQueryNode)) { this.childrenBuffer.clear(); @@ -92,11 +90,9 @@ public class BooleanModifiersQueryNodeProcessor extends QueryNodeProcessorImpl { } node.set(this.childrenBuffer); - } return node; - } private QueryNode applyModifier(QueryNode node, ModifierQueryNode.Modifier mod) { @@ -111,11 +107,9 @@ public class BooleanModifiersQueryNodeProcessor extends QueryNodeProcessorImpl { if (modNode.getModifier() == ModifierQueryNode.Modifier.MOD_NONE) { return new ModifierQueryNode(modNode.getChild(), mod); } - } return node; - } @Override @@ -124,11 +118,8 @@ public class BooleanModifiersQueryNodeProcessor extends QueryNodeProcessorImpl { } @Override - protected List setChildrenOrder(List children) - throws QueryNodeException { + protected List setChildrenOrder(List children) throws QueryNodeException { return children; - } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/precedence/processors/PrecedenceQueryNodeProcessorPipeline.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/precedence/processors/PrecedenceQueryNodeProcessorPipeline.java index 6234f3e85db..0dc401b5764 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/precedence/processors/PrecedenceQueryNodeProcessorPipeline.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/precedence/processors/PrecedenceQueryNodeProcessorPipeline.java @@ -16,24 +16,21 @@ */ package org.apache.lucene.queryparser.flexible.precedence.processors; +import org.apache.lucene.queryparser.flexible.core.config.QueryConfigHandler; import org.apache.lucene.queryparser.flexible.precedence.PrecedenceQueryParser; import org.apache.lucene.queryparser.flexible.standard.processors.BooleanQuery2ModifierNodeProcessor; import org.apache.lucene.queryparser.flexible.standard.processors.StandardQueryNodeProcessorPipeline; -import org.apache.lucene.queryparser.flexible.core.config.QueryConfigHandler; /** - *

    - * This processor pipeline extends {@link StandardQueryNodeProcessorPipeline} and enables - * boolean precedence on it. - *

    - *

    - * EXPERT: the precedence is enabled by removing {@link BooleanQuery2ModifierNodeProcessor} from the - * {@link StandardQueryNodeProcessorPipeline} and appending {@link BooleanModifiersQueryNodeProcessor} - * to the pipeline. - *

    - * + * This processor pipeline extends {@link StandardQueryNodeProcessorPipeline} and enables boolean + * precedence on it. + * + *

    EXPERT: the precedence is enabled by removing {@link BooleanQuery2ModifierNodeProcessor} from + * the {@link StandardQueryNodeProcessorPipeline} and appending {@link + * BooleanModifiersQueryNodeProcessor} to the pipeline. + * * @see PrecedenceQueryParser - * @see StandardQueryNodeProcessorPipeline + * @see StandardQueryNodeProcessorPipeline */ public class PrecedenceQueryNodeProcessorPipeline extends StandardQueryNodeProcessorPipeline { @@ -42,17 +39,14 @@ public class PrecedenceQueryNodeProcessorPipeline extends StandardQueryNodeProce */ public PrecedenceQueryNodeProcessorPipeline(QueryConfigHandler queryConfig) { super(queryConfig); - - for (int i = 0 ; i < size() ; i++) { - + + for (int i = 0; i < size(); i++) { + if (get(i).getClass().equals(BooleanQuery2ModifierNodeProcessor.class)) { remove(i--); } - } - - add(new BooleanModifiersQueryNodeProcessor()); - - } + add(new BooleanModifiersQueryNodeProcessor()); + } } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/precedence/processors/package-info.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/precedence/processors/package-info.java index 5d0cf3c2f0a..2bb0fa21c54 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/precedence/processors/package-info.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/precedence/processors/package-info.java @@ -18,22 +18,24 @@ /** * Lucene Precedence Query Parser Processors * - *

    - * This package contains the 2 {@link org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessor}s used by - * {@link org.apache.lucene.queryparser.flexible.precedence.PrecedenceQueryParser}. - *

    - *

    - * {@link org.apache.lucene.queryparser.flexible.precedence.processors.BooleanModifiersQueryNodeProcessor}: this processor - * is used to apply {@link org.apache.lucene.queryparser.flexible.core.nodes.ModifierQueryNode}s on - * {@link org.apache.lucene.queryparser.flexible.core.nodes.BooleanQueryNode} children according to the boolean type - * or the default operator. - *

    - *

    - * {@link org.apache.lucene.queryparser.flexible.precedence.processors.PrecedenceQueryNodeProcessorPipeline}: this - * processor pipeline is used by {@link org.apache.lucene.queryparser.flexible.precedence.PrecedenceQueryParser}. It extends - * {@link org.apache.lucene.queryparser.flexible.standard.processors.StandardQueryNodeProcessorPipeline} and rearrange - * the pipeline so the boolean precedence is processed correctly. Check {@link org.apache.lucene.queryparser.flexible.precedence.processors.PrecedenceQueryNodeProcessorPipeline} - * for more details. - *

    + *

    This package contains the 2 {@link + * org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessor}s used by {@link + * org.apache.lucene.queryparser.flexible.precedence.PrecedenceQueryParser}. + * + *

    {@link + * org.apache.lucene.queryparser.flexible.precedence.processors.BooleanModifiersQueryNodeProcessor}: + * this processor is used to apply {@link + * org.apache.lucene.queryparser.flexible.core.nodes.ModifierQueryNode}s on {@link + * org.apache.lucene.queryparser.flexible.core.nodes.BooleanQueryNode} children according to the + * boolean type or the default operator. + * + *

    {@link + * org.apache.lucene.queryparser.flexible.precedence.processors.PrecedenceQueryNodeProcessorPipeline}: + * this processor pipeline is used by {@link + * org.apache.lucene.queryparser.flexible.precedence.PrecedenceQueryParser}. It extends {@link + * org.apache.lucene.queryparser.flexible.standard.processors.StandardQueryNodeProcessorPipeline} + * and rearrange the pipeline so the boolean precedence is processed correctly. Check {@link + * org.apache.lucene.queryparser.flexible.precedence.processors.PrecedenceQueryNodeProcessorPipeline} + * for more details. */ -package org.apache.lucene.queryparser.flexible.precedence.processors; \ No newline at end of file +package org.apache.lucene.queryparser.flexible.precedence.processors; diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/CommonQueryParserConfiguration.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/CommonQueryParserConfiguration.java index 43ff0a9540e..2aecec32897 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/CommonQueryParserConfiguration.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/CommonQueryParserConfiguration.java @@ -19,129 +19,104 @@ package org.apache.lucene.queryparser.flexible.standard; import java.util.Locale; import java.util.TimeZone; import java.util.TooManyListenersException; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.document.DateTools; import org.apache.lucene.document.DateTools.Resolution; import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.MultiTermQuery; -/** - * Configuration options common across queryparser implementations. - */ +/** Configuration options common across queryparser implementations. */ public interface CommonQueryParserConfiguration { - + /** * Set to true to allow leading wildcard characters. - *

    - * When set, * or ? are allowed as the first - * character of a PrefixQuery and WildcardQuery. Note that this can produce - * very slow queries on big indexes. - *

    - * Default: false. + * + *

    When set, * or ? are allowed as the first character of a + * PrefixQuery and WildcardQuery. Note that this can produce very slow queries on big indexes. + * + *

    Default: false. */ public void setAllowLeadingWildcard(boolean allowLeadingWildcard); - + /** * Set to true to enable position increments in result query. - *

    - * When set, result phrase and multi-phrase queries will be aware of position - * increments. Useful when e.g. a StopFilter increases the position increment - * of the token that follows an omitted token. - *

    - * Default: false. + * + *

    When set, result phrase and multi-phrase queries will be aware of position increments. + * Useful when e.g. a StopFilter increases the position increment of the token that follows an + * omitted token. + * + *

    Default: false. */ public void setEnablePositionIncrements(boolean enabled); - - /** - * @see #setEnablePositionIncrements(boolean) - */ + + /** @see #setEnablePositionIncrements(boolean) */ public boolean getEnablePositionIncrements(); - + /** - * By default, it uses - * {@link MultiTermQuery#CONSTANT_SCORE_REWRITE} when creating a - * prefix, wildcard and range queries. This implementation is generally - * preferable because it a) Runs faster b) Does not have the scarcity of terms - * unduly influence score c) avoids any {@link TooManyListenersException} - * exception. However, if your application really needs to use the - * old-fashioned boolean queries expansion rewriting and the above points are - * not relevant then use this change the rewrite method. + * By default, it uses {@link MultiTermQuery#CONSTANT_SCORE_REWRITE} when creating a prefix, + * wildcard and range queries. This implementation is generally preferable because it a) Runs + * faster b) Does not have the scarcity of terms unduly influence score c) avoids any {@link + * TooManyListenersException} exception. However, if your application really needs to use the + * old-fashioned boolean queries expansion rewriting and the above points are not relevant then + * use this change the rewrite method. */ public void setMultiTermRewriteMethod(MultiTermQuery.RewriteMethod method); - - /** - * @see #setMultiTermRewriteMethod(org.apache.lucene.search.MultiTermQuery.RewriteMethod) - */ + + /** @see #setMultiTermRewriteMethod(org.apache.lucene.search.MultiTermQuery.RewriteMethod) */ public MultiTermQuery.RewriteMethod getMultiTermRewriteMethod(); - - + /** * Set the prefix length for fuzzy queries. Default is 0. - * - * @param fuzzyPrefixLength - * The fuzzyPrefixLength to set. + * + * @param fuzzyPrefixLength The fuzzyPrefixLength to set. */ public void setFuzzyPrefixLength(int fuzzyPrefixLength); - - /** - * Set locale used by date range parsing. - */ + + /** Set locale used by date range parsing. */ public void setLocale(Locale locale); - - /** - * Returns current locale, allowing access by subclasses. - */ + + /** Returns current locale, allowing access by subclasses. */ public Locale getLocale(); - + public void setTimeZone(TimeZone timeZone); - + public TimeZone getTimeZone(); - + /** - * Sets the default slop for phrases. If zero, then exact phrase matches are - * required. Default value is zero. + * Sets the default slop for phrases. If zero, then exact phrase matches are required. Default + * value is zero. */ public void setPhraseSlop(int defaultPhraseSlop); - + public Analyzer getAnalyzer(); - - /** - * @see #setAllowLeadingWildcard(boolean) - */ + + /** @see #setAllowLeadingWildcard(boolean) */ public boolean getAllowLeadingWildcard(); - - /** - * Get the minimal similarity for fuzzy queries. - */ + + /** Get the minimal similarity for fuzzy queries. */ public float getFuzzyMinSim(); - + /** * Get the prefix length for fuzzy queries. - * + * * @return Returns the fuzzyPrefixLength. */ public int getFuzzyPrefixLength(); - - /** - * Gets the default slop for phrases. - */ + + /** Gets the default slop for phrases. */ public int getPhraseSlop(); - + /** - * Set the minimum similarity for fuzzy queries. Default is defined on - * {@link FuzzyQuery#defaultMaxEdits}. + * Set the minimum similarity for fuzzy queries. Default is defined on {@link + * FuzzyQuery#defaultMaxEdits}. 
*/ public void setFuzzyMinSim(float fuzzyMinSim); - + /** - * Sets the default {@link Resolution} used for certain field when - * no {@link Resolution} is defined for this field. - * + * Sets the default {@link Resolution} used for certain field when no {@link Resolution} is + * defined for this field. + * * @param dateResolution the default {@link Resolution} */ public void setDateResolution(DateTools.Resolution dateResolution); - - - -} \ No newline at end of file +} diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/QueryParserUtil.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/QueryParserUtil.java index 87b0a40b426..de1c814924a 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/QueryParserUtil.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/QueryParserUtil.java @@ -22,32 +22,25 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Query; -/** - * This class defines utility methods to (help) parse query strings into - * {@link Query} objects. - */ -final public class QueryParserUtil { +/** This class defines utility methods to (help) parse query strings into {@link Query} objects. */ +public final class QueryParserUtil { /** * Parses a query which searches on the fields specified. - *

    - * If x fields are specified, this effectively constructs: - * + * + *

    If x fields are specified, this effectively constructs: + * *

        * 
        * (field1:query1) (field2:query2) (field3:query3)...(fieldx:queryx)
        * 
        * 
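A hypothetical invocation of this method; field names and query strings are invented, and StandardAnalyzer is assumed to be available.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
import org.apache.lucene.queryparser.flexible.standard.QueryParserUtil;
import org.apache.lucene.search.Query;

public class MultiFieldParseDemo {
  public static void main(String[] args) throws QueryNodeException {
    // One query string per field; the arrays must have the same length,
    // otherwise parse() throws IllegalArgumentException.
    String[] queries = {"lucene", "search", "parser"};
    String[] fields = {"title", "body", "keywords"};

    Query q = QueryParserUtil.parse(queries, fields, new StandardAnalyzer());
    System.out.println(q); // e.g. (title:lucene) (body:search) (keywords:parser)
  }
}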
    - * - * @param queries - * Queries strings to parse - * @param fields - * Fields to search on - * @param analyzer - * Analyzer to use - * @throws IllegalArgumentException - * if the length of the queries array differs from the length of the - * fields array + * + * @param queries Queries strings to parse + * @param fields Fields to search on + * @param analyzer Analyzer to use + * @throws IllegalArgumentException if the length of the queries array differs from the length of + * the fields array */ public static Query parse(String[] queries, String[] fields, Analyzer analyzer) throws QueryNodeException { @@ -69,11 +62,11 @@ final public class QueryParserUtil { } /** - * Parses a query, searching on the fields specified. Use this if you need to - * specify certain fields as required, and others as prohibited. - *

    - * - * Usage: + * Parses a query, searching on the fields specified. Use this if you need to specify certain + * fields as required, and others as prohibited. + * + *

    Usage: + * *

        * 
        * String[] fields = {"filename", "contents", "description"};
    @@ -83,29 +76,25 @@ final public class QueryParserUtil {
        * MultiFieldQueryParser.parse("query", fields, flags, analyzer);
        * 
        * 
    - *

    - * The code above would construct a query: - * + * + *

    The code above would construct a query: + * *

        * 
        * (filename:query) +(contents:query) -(description:query)
        * 
        * 
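The same call expressed as a self-contained sketch; field names are invented and StandardAnalyzer is assumed.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
import org.apache.lucene.queryparser.flexible.standard.QueryParserUtil;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.Query;

public class FlaggedParseDemo {
  public static void main(String[] args) throws QueryNodeException {
    String[] fields = {"filename", "contents", "description"};
    BooleanClause.Occur[] flags = {
      BooleanClause.Occur.SHOULD,   // filename may match
      BooleanClause.Occur.MUST,     // contents must match
      BooleanClause.Occur.MUST_NOT  // description must not match
    };

    // fields.length must equal flags.length, or IllegalArgumentException is thrown.
    Query q = QueryParserUtil.parse("query", fields, flags, new StandardAnalyzer());
    System.out.println(q); // e.g. (filename:query) +(contents:query) -(description:query)
  }
}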
    - * - * @param query - * Query string to parse - * @param fields - * Fields to search on - * @param flags - * Flags describing the fields - * @param analyzer - * Analyzer to use - * @throws IllegalArgumentException - * if the length of the fields array differs from the length of the - * flags array + * + * @param query Query string to parse + * @param fields Fields to search on + * @param flags Flags describing the fields + * @param analyzer Analyzer to use + * @throws IllegalArgumentException if the length of the fields array differs from the length of + * the flags array */ - public static Query parse(String query, String[] fields, - BooleanClause.Occur[] flags, Analyzer analyzer) throws QueryNodeException { + public static Query parse( + String query, String[] fields, BooleanClause.Occur[] flags, Analyzer analyzer) + throws QueryNodeException { if (fields.length != flags.length) throw new IllegalArgumentException("fields.length != flags.length"); BooleanQuery.Builder bQuery = new BooleanQuery.Builder(); @@ -124,11 +113,11 @@ final public class QueryParserUtil { } /** - * Parses a query, searching on the fields specified. Use this if you need to - * specify certain fields as required, and others as prohibited. - *

    - * - * Usage: + * Parses a query, searching on the fields specified. Use this if you need to specify certain + * fields as required, and others as prohibited. + * + *

    Usage: + * *

        * 
        * String[] query = {"query1", "query2", "query3"};
    @@ -139,28 +128,24 @@ final public class QueryParserUtil {
        * MultiFieldQueryParser.parse(query, fields, flags, analyzer);
        * 
        * 
    - *

    - * The code above would construct a query: - * + * + *

    The code above would construct a query: + * *

        * 
        * (filename:query1) +(contents:query2) -(description:query3)
        * 
        * 
    - * - * @param queries - * Queries string to parse - * @param fields - * Fields to search on - * @param flags - * Flags describing the fields - * @param analyzer - * Analyzer to use - * @throws IllegalArgumentException - * if the length of the queries, fields, and flags array differ + * + * @param queries Queries string to parse + * @param fields Fields to search on + * @param flags Flags describing the fields + * @param analyzer Analyzer to use + * @throws IllegalArgumentException if the length of the queries, fields, and flags array differ */ - public static Query parse(String[] queries, String[] fields, - BooleanClause.Occur[] flags, Analyzer analyzer) throws QueryNodeException { + public static Query parse( + String[] queries, String[] fields, BooleanClause.Occur[] flags, Analyzer analyzer) + throws QueryNodeException { if (!(queries.length == fields.length && queries.length == flags.length)) throw new IllegalArgumentException( "queries, fields, and flags array have have different length"); @@ -180,23 +165,21 @@ final public class QueryParserUtil { } /** - * Returns a String where those characters that TextParser expects to be - * escaped are escaped by a preceding \. + * Returns a String where those characters that TextParser expects to be escaped are escaped by a + * preceding \. */ public static String escape(String s) { StringBuilder sb = new StringBuilder(); for (int i = 0; i < s.length(); i++) { char c = s.charAt(i); // These characters are part of the query syntax and must be escaped - if (c == '\\' || c == '+' || c == '-' || c == '!' || c == '(' || c == ')' - || c == ':' || c == '^' || c == '[' || c == ']' || c == '\"' - || c == '{' || c == '}' || c == '~' || c == '*' || c == '?' - || c == '|' || c == '&' || c == '/') { + if (c == '\\' || c == '+' || c == '-' || c == '!' || c == '(' || c == ')' || c == ':' + || c == '^' || c == '[' || c == ']' || c == '\"' || c == '{' || c == '}' || c == '~' + || c == '*' || c == '?' || c == '|' || c == '&' || c == '/') { sb.append('\\'); } sb.append(c); } return sb.toString(); } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/StandardQueryParser.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/StandardQueryParser.java index 04529c467bd..c84ea55f2b0 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/StandardQueryParser.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/StandardQueryParser.java @@ -20,7 +20,6 @@ import java.util.Locale; import java.util.Map; import java.util.TimeZone; import java.util.TooManyListenersException; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.document.DateTools; import org.apache.lucene.document.DateTools.Resolution; @@ -40,231 +39,211 @@ import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.Query; /** - * This class is a helper that enables users to easily use the Lucene query - * parser. - *

    - * To construct a Query object from a query string, use the - * {@link #parse(String, String)} method: + * This class is a helper that enables users to easily use the Lucene query parser. + * + *

    To construct a Query object from a query string, use the {@link #parse(String, String)} + * method: + * *

      * StandardQueryParser queryParserHelper = new StandardQueryParser();
      * Query query = queryParserHelper.parse("a AND b", "defaultField");
      * 
    - *

    - * To change any configuration before parsing the query string do, for example: - *
    + * + *

    To change any configuration before parsing the query string do, for example:
    + * *

      * // the query config handler returned by {@link StandardQueryParser} is a {@link StandardQueryConfigHandler}
      * queryParserHelper.getQueryConfigHandler().setAnalyzer(new WhitespaceAnalyzer());
      * 
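Putting the construction, configuration, and parse steps above together, a minimal sketch follows; the field name and terms are invented, and StandardAnalyzer is assumed to be on the classpath.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
import org.apache.lucene.queryparser.flexible.standard.StandardQueryParser;
import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler.Operator;
import org.apache.lucene.search.Query;

public class StandardQueryParserDemo {
  public static void main(String[] args) throws QueryNodeException {
    StandardQueryParser parser = new StandardQueryParser(new StandardAnalyzer());

    // Configuration is applied before parsing, as described above.
    parser.setAllowLeadingWildcard(false);
    parser.setDefaultOperator(Operator.AND);

    Query query = parser.parse("fast fuzzy search", "body");
    System.out.println(query); // with AND: e.g. +body:fast +body:fuzzy +body:search
  }
}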
    - *

    - * The syntax for query strings is as follows (copied from the old QueryParser - * javadoc): - * A Query is a series of clauses. A clause may be prefixed by: + * + *

    The syntax for query strings is as follows (copied from the old QueryParser javadoc): A Query + * is a series of clauses. A clause may be prefixed by: + * *

      - *
- * <li>a plus (+) or a minus (-) sign, indicating that
- * the clause is required or prohibited respectively; or
- * <li>a term followed by a colon, indicating the field to be searched. This
- * enables one to construct queries which search multiple fields.
+ * <li>a plus (+) or a minus (-) sign, indicating that the clause is
+ * required or prohibited respectively; or
+ * <li>a term followed by a colon, indicating the field to be searched. This enables one to
+ * construct queries which search multiple fields.
 *
    - * + * * A clause may be either: + * *
      - *
- * <li>a term, indicating all the documents that contain this term; or
- * <li>a nested query, enclosed in parentheses. Note that this may be used with
- * a +/- prefix to require any of a set of terms.
+ * <li>a term, indicating all the documents that contain this term; or
+ * <li>a nested query, enclosed in parentheses. Note that this may be used with a +/
+ * - prefix to require any of a set of terms.
 *
    - * + * * Thus, in BNF, the query grammar is: - * + * *
      *   Query  ::= ( Clause )*
      *   Clause ::= ["+", "-"] [<TERM> ":"] ( <TERM> | "(" Query ")" )
      * 
    - * - *

    - * Examples of appropriately formatted queries can be found in the Examples of appropriately formatted queries can be found in the * query syntax documentation. - *

    - *

    - * The text parser used by this helper is a {@link StandardSyntaxParser}. - *

    - * The query node processor used by this helper is a - * {@link StandardQueryNodeProcessorPipeline}. - *

    - * The builder used by this helper is a {@link StandardQueryTreeBuilder}. - * + * + *

    The text parser used by this helper is a {@link StandardSyntaxParser}. + * + *

    The query node processor used by this helper is a {@link StandardQueryNodeProcessorPipeline}. + * + *

    The builder used by this helper is a {@link StandardQueryTreeBuilder}. + * * @see StandardQueryParser * @see StandardQueryConfigHandler * @see StandardSyntaxParser * @see StandardQueryNodeProcessorPipeline * @see StandardQueryTreeBuilder */ -public class StandardQueryParser extends QueryParserHelper implements CommonQueryParserConfiguration { - - /** - * Constructs a {@link StandardQueryParser} object. - */ +public class StandardQueryParser extends QueryParserHelper + implements CommonQueryParserConfiguration { + + /** Constructs a {@link StandardQueryParser} object. */ public StandardQueryParser() { - super(new StandardQueryConfigHandler(), new StandardSyntaxParser(), + super( + new StandardQueryConfigHandler(), + new StandardSyntaxParser(), new StandardQueryNodeProcessorPipeline(null), new StandardQueryTreeBuilder()); setEnablePositionIncrements(true); } - + /** - * Constructs a {@link StandardQueryParser} object and sets an - * {@link Analyzer} to it. The same as: - * + * Constructs a {@link StandardQueryParser} object and sets an {@link Analyzer} to it. The same + * as: + * *

        * StandardQueryParser qp = new StandardQueryParser();
        * qp.getQueryConfigHandler().setAnalyzer(analyzer);
        * 
-   *
-   * @param analyzer
-   *          the analyzer to be used by this query parser helper
+   *
+   * @param analyzer the analyzer to be used by this query parser helper
    */
   public StandardQueryParser(Analyzer analyzer) {
     this();
-
+
     this.setAnalyzer(analyzer);
   }
-
+
   @Override
   public String toString() {
-    return "<StandardQueryParser/>";
+    return "<StandardQueryParser/>";
   }
-
+
   /**
-   * Overrides {@link QueryParserHelper#parse(String, String)} so it casts the
-   * return object to {@link Query}. For more reference about this method, check
-   * {@link QueryParserHelper#parse(String, String)}.
-   *
-   * @param query
-   *          the query string
-   * @param defaultField
-   *          the default field used by the text parser
-   *
+   * Overrides {@link QueryParserHelper#parse(String, String)} so it casts the return object to
+   * {@link Query}. For more reference about this method, check {@link
+   * QueryParserHelper#parse(String, String)}.
+   *
+   * @param query the query string
+   * @param defaultField the default field used by the text parser
    * @return the object built from the query
-   *
-   * @throws QueryNodeException
-   *           if something wrong happens along the three phases
+   * @throws QueryNodeException if something wrong happens along the three phases
    */
   @Override
-  public Query parse(String query, String defaultField)
-      throws QueryNodeException {
-
+  public Query parse(String query, String defaultField) throws QueryNodeException {
+
     return (Query) super.parse(query, defaultField);
-
   }
-
+
   /**
-   * Gets implicit operator setting, which will be either {@link Operator#AND}
-   * or {@link Operator#OR}.
+   * Gets implicit operator setting, which will be either {@link Operator#AND} or {@link
+   * Operator#OR}.
    */
   public StandardQueryConfigHandler.Operator getDefaultOperator() {
     return getQueryConfigHandler().get(ConfigurationKeys.DEFAULT_OPERATOR);
   }
-
+
   /**
-   * Sets the boolean operator of the QueryParser. In default mode (
-   * {@link Operator#OR}) terms without any modifiers are considered optional:
-   * for example <code>capital of Hungary</code> is equal to
-   * <code>capital OR of OR Hungary</code>.
-   * In {@link Operator#AND} mode terms are considered to be in conjunction: the
-   * above mentioned query is parsed as <code>capital AND of AND Hungary</code>
+   * Sets the boolean operator of the QueryParser. In default mode ( {@link Operator#OR}) terms
+   * without any modifiers are considered optional: for example <code>capital of Hungary</code> is
+   * equal to <code>capital OR of OR Hungary</code>.
+   * In {@link Operator#AND} mode terms are considered to be in conjunction: the above mentioned
+   * query is parsed as <code>capital AND of AND Hungary</code>
    */
   public void setDefaultOperator(StandardQueryConfigHandler.Operator operator) {
     getQueryConfigHandler().set(ConfigurationKeys.DEFAULT_OPERATOR, operator);
   }
-
+
   /**
    * Set to true to allow leading wildcard characters.
-   * <p>
-   * When set, <code>*</code> or <code>?</code> are allowed as the first
-   * character of a PrefixQuery and WildcardQuery. Note that this can produce
-   * very slow queries on big indexes.
-   * <p>
-   * Default: false.
+   *
+   * <p>When set, <code>*</code> or <code>?</code> are allowed as the first character of a
+   * PrefixQuery and WildcardQuery. Note that this can produce very slow queries on big indexes.
+   *
+   * <p>Default: false.
    */
   @Override
   public void setAllowLeadingWildcard(boolean allowLeadingWildcard) {
     getQueryConfigHandler().set(ConfigurationKeys.ALLOW_LEADING_WILDCARD, allowLeadingWildcard);
   }
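A minimal sketch of this flag's effect (the parser construction, query string, and field name are illustrative assumptions, not part of this patch):

    StandardQueryParser qp = new StandardQueryParser();
    qp.setAllowLeadingWildcard(true);
    // Without the setter above, a leading wildcard such as "*ware" is rejected,
    // since such queries can be very slow on big indexes.
    Query q = qp.parse("*ware", "name");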

   /**
    * Set to true to enable position increments in result query.
-   * <p>
-   * When set, result phrase and multi-phrase queries will be aware of position
-   * increments. Useful when e.g. a StopFilter increases the position increment
-   * of the token that follows an omitted token.
-   * <p>
-   * Default: false.
+   *
+   * <p>When set, result phrase and multi-phrase queries will be aware of position increments.
+   * Useful when e.g. a StopFilter increases the position increment of the token that follows an
+   * omitted token.
+   *
+   * <p>Default: false.
    */
   @Override
   public void setEnablePositionIncrements(boolean enabled) {
     getQueryConfigHandler().set(ConfigurationKeys.ENABLE_POSITION_INCREMENTS, enabled);
   }
-
-  /**
-   * @see #setEnablePositionIncrements(boolean)
-   */
+
+  /** @see #setEnablePositionIncrements(boolean) */
   @Override
   public boolean getEnablePositionIncrements() {
-    Boolean enablePositionsIncrements = getQueryConfigHandler().get(ConfigurationKeys.ENABLE_POSITION_INCREMENTS);
-
+    Boolean enablePositionsIncrements =
+        getQueryConfigHandler().get(ConfigurationKeys.ENABLE_POSITION_INCREMENTS);
+
     if (enablePositionsIncrements == null) {
-      return false;
-
+      return false;
+
     } else {
       return enablePositionsIncrements;
     }
-
   }
-
+
   /**
-   * By default, it uses
-   * {@link MultiTermQuery#CONSTANT_SCORE_REWRITE} when creating a
-   * prefix, wildcard and range queries. This implementation is generally
-   * preferable because it a) Runs faster b) Does not have the scarcity of terms
-   * unduly influence score c) avoids any {@link TooManyListenersException}
-   * exception. However, if your application really needs to use the
-   * old-fashioned boolean queries expansion rewriting and the above points are
-   * not relevant then use this change the rewrite method.
+   * By default, it uses {@link MultiTermQuery#CONSTANT_SCORE_REWRITE} when creating a prefix,
+   * wildcard and range queries. This implementation is generally preferable because it a) Runs
+   * faster b) Does not have the scarcity of terms unduly influence score c) avoids any {@link
+   * TooManyListenersException} exception. However, if your application really needs to use the
+   * old-fashioned boolean queries expansion rewriting and the above points are not relevant then
+   * use this change the rewrite method.
    */
   @Override
   public void setMultiTermRewriteMethod(MultiTermQuery.RewriteMethod method) {
     getQueryConfigHandler().set(ConfigurationKeys.MULTI_TERM_REWRITE_METHOD, method);
   }
-
-  /**
-   * @see #setMultiTermRewriteMethod(org.apache.lucene.search.MultiTermQuery.RewriteMethod)
-   */
+
+  /** @see #setMultiTermRewriteMethod(org.apache.lucene.search.MultiTermQuery.RewriteMethod) */
   @Override
   public MultiTermQuery.RewriteMethod getMultiTermRewriteMethod() {
     return getQueryConfigHandler().get(ConfigurationKeys.MULTI_TERM_REWRITE_METHOD);
   }
-
+
   /**
-   * Set the fields a query should be expanded to when the field is
-   * <code>null</code>
-   *
+   * Set the fields a query should be expanded to when the field is <code>null</code>
+   *
    * @param fields the fields used to expand the query
    */
   public void setMultiFields(CharSequence[] fields) {
-
+
     if (fields == null) {
       fields = new CharSequence[0];
     }
 
     getQueryConfigHandler().set(ConfigurationKeys.MULTI_FIELDS, fields);
-
   }
 
   /**
-   * Returns the fields used to expand the query when the field for a
-   * certain query is <code>null</code>
+   * Returns the fields used to expand the query when the field for a certain query is <code>null
+   * </code>
    *
    * @return the fields used to expand the query
    */
@@ -274,61 +253,55 @@ public class StandardQueryParser extends QueryParserHelper implements CommonQuer
   /**
    * Set the prefix length for fuzzy queries. Default is 0.
-   *
-   * @param fuzzyPrefixLength
-   *          The fuzzyPrefixLength to set.
+   *
+   * @param fuzzyPrefixLength The fuzzyPrefixLength to set.
    */
   @Override
   public void setFuzzyPrefixLength(int fuzzyPrefixLength) {
     QueryConfigHandler config = getQueryConfigHandler();
     FuzzyConfig fuzzyConfig = config.get(ConfigurationKeys.FUZZY_CONFIG);
-
+
     if (fuzzyConfig == null) {
       fuzzyConfig = new FuzzyConfig();
       config.set(ConfigurationKeys.FUZZY_CONFIG, fuzzyConfig);
     }
 
     fuzzyConfig.setPrefixLength(fuzzyPrefixLength);
-
   }
-
-  public void setPointsConfigMap(Map<String, PointsConfig> pointsConfigMap) {
+
+  public void setPointsConfigMap(Map<String, PointsConfig> pointsConfigMap) {
     getQueryConfigHandler().set(ConfigurationKeys.POINTS_CONFIG_MAP, pointsConfigMap);
   }
-
-  public Map<String, PointsConfig> getPointsConfigMap() {
+
+  public Map<String, PointsConfig> getPointsConfigMap() {
     return getQueryConfigHandler().get(ConfigurationKeys.POINTS_CONFIG_MAP);
   }
-
-  /**
-   * Set locale used by date range parsing.
-   */
+
+  /** Set locale used by date range parsing. */
   @Override
   public void setLocale(Locale locale) {
     getQueryConfigHandler().set(ConfigurationKeys.LOCALE, locale);
   }
-
-  /**
-   * Returns current locale, allowing access by subclasses.
-   */
+
+  /** Returns current locale, allowing access by subclasses. */
   @Override
   public Locale getLocale() {
     return getQueryConfigHandler().get(ConfigurationKeys.LOCALE);
   }
-
+
   @Override
   public void setTimeZone(TimeZone timeZone) {
     getQueryConfigHandler().set(ConfigurationKeys.TIMEZONE, timeZone);
   }
-
+
   @Override
   public TimeZone getTimeZone() {
     return getQueryConfigHandler().get(ConfigurationKeys.TIMEZONE);
   }
-
+
   /**
-   * Sets the default slop for phrases. If zero, then exact phrase matches are
-   * required. Default value is zero.
+   * Sets the default slop for phrases. If zero, then exact phrase matches are required. Default
+   * value is zero.
    */
   @Override
   public void setPhraseSlop(int defaultPhraseSlop) {
@@ -338,81 +311,76 @@ public class StandardQueryParser extends QueryParserHelper implements CommonQuer
   public void setAnalyzer(Analyzer analyzer) {
     getQueryConfigHandler().set(ConfigurationKeys.ANALYZER, analyzer);
   }
-
+
   @Override
   public Analyzer getAnalyzer() {
-    return getQueryConfigHandler().get(ConfigurationKeys.ANALYZER);
+    return getQueryConfigHandler().get(ConfigurationKeys.ANALYZER);
   }
-
-  /**
-   * @see #setAllowLeadingWildcard(boolean)
-   */
+
+  /** @see #setAllowLeadingWildcard(boolean) */
   @Override
   public boolean getAllowLeadingWildcard() {
-    Boolean allowLeadingWildcard = getQueryConfigHandler().get(ConfigurationKeys.ALLOW_LEADING_WILDCARD);
-
+    Boolean allowLeadingWildcard =
+        getQueryConfigHandler().get(ConfigurationKeys.ALLOW_LEADING_WILDCARD);
+
     if (allowLeadingWildcard == null) {
       return false;
-
+
     } else {
       return allowLeadingWildcard;
     }
   }
-
-  /**
-   * Get the minimal similarity for fuzzy queries.
-   */
+
+  /** Get the minimal similarity for fuzzy queries. */
   @Override
   public float getFuzzyMinSim() {
     FuzzyConfig fuzzyConfig = getQueryConfigHandler().get(ConfigurationKeys.FUZZY_CONFIG);
-
+
     if (fuzzyConfig == null) {
       return FuzzyQuery.defaultMaxEdits;
     } else {
       return fuzzyConfig.getMinSimilarity();
     }
   }
-
+
   /**
    * Get the prefix length for fuzzy queries.
-   *
+   *
    * @return Returns the fuzzyPrefixLength.
    */
   @Override
   public int getFuzzyPrefixLength() {
     FuzzyConfig fuzzyConfig = getQueryConfigHandler().get(ConfigurationKeys.FUZZY_CONFIG);
-
+
     if (fuzzyConfig == null) {
       return FuzzyQuery.defaultPrefixLength;
     } else {
       return fuzzyConfig.getPrefixLength();
     }
   }
-
-  /**
-   * Gets the default slop for phrases.
-   */
+
+  /** Gets the default slop for phrases. */
   @Override
   public int getPhraseSlop() {
     Integer phraseSlop = getQueryConfigHandler().get(ConfigurationKeys.PHRASE_SLOP);
-
+
     if (phraseSlop == null) {
       return 0;
-
+
     } else {
       return phraseSlop;
     }
   }
-
+
   /**
-   * Set the minimum similarity for fuzzy queries. Default is defined on
-   * {@link FuzzyQuery#defaultMaxEdits}.
+   * Set the minimum similarity for fuzzy queries. Default is defined on {@link
+   * FuzzyQuery#defaultMaxEdits}.
    */
   @Override
   public void setFuzzyMinSim(float fuzzyMinSim) {
     QueryConfigHandler config = getQueryConfigHandler();
     FuzzyConfig fuzzyConfig = config.get(ConfigurationKeys.FUZZY_CONFIG);
-
+
     if (fuzzyConfig == null) {
       fuzzyConfig = new FuzzyConfig();
       config.set(ConfigurationKeys.FUZZY_CONFIG, fuzzyConfig);
@@ -420,62 +388,61 @@ public class StandardQueryParser extends QueryParserHelper implements CommonQuer
 
     fuzzyConfig.setMinSimilarity(fuzzyMinSim);
   }
-
+
   /**
    * Sets the boost used for each field.
-   *
-   * @param boosts a collection that maps a field to its boost
+   *
+   * @param boosts a collection that maps a field to its boost
    */
   public void setFieldsBoost(Map<String, Float> boosts) {
     getQueryConfigHandler().set(ConfigurationKeys.FIELD_BOOST_MAP, boosts);
   }
-
+
   /**
    * Returns the field to boost map used to set boost for each field.
-   *
-   * @return the field to boost map
+   *
+   * @return the field to boost map
    */
   public Map<String, Float> getFieldsBoost() {
     return getQueryConfigHandler().get(ConfigurationKeys.FIELD_BOOST_MAP);
   }
 
   /**
-   * Sets the default {@link Resolution} used for certain field when
-   * no {@link Resolution} is defined for this field.
-   *
+   * Sets the default {@link Resolution} used for certain field when no {@link Resolution} is
+   * defined for this field.
+   *
    * @param dateResolution the default {@link Resolution}
    */
   @Override
   public void setDateResolution(DateTools.Resolution dateResolution) {
     getQueryConfigHandler().set(ConfigurationKeys.DATE_RESOLUTION, dateResolution);
   }
-
+
   /**
-   * Returns the default {@link Resolution} used for certain field when
-   * no {@link Resolution} is defined for this field.
-   *
+   * Returns the default {@link Resolution} used for certain field when no {@link Resolution} is
+   * defined for this field.
+   *
    * @return the default {@link Resolution}
    */
   public DateTools.Resolution getDateResolution() {
     return getQueryConfigHandler().get(ConfigurationKeys.DATE_RESOLUTION);
   }
-
+
   /**
    * Returns the field to {@link Resolution} map used to normalize each date field.
-   *
+   *
    * @return the field to {@link Resolution} map
    */
   public Map<CharSequence, DateTools.Resolution> getDateResolutionMap() {
     return getQueryConfigHandler().get(ConfigurationKeys.FIELD_DATE_RESOLUTION_MAP);
   }
-
+
   /**
    * Sets the {@link Resolution} used for each field
-   *
+   *
    * @param dateRes a collection that maps a field to its {@link Resolution}
    */
   public void setDateResolutionMap(Map<CharSequence, DateTools.Resolution> dateRes) {
     getQueryConfigHandler().set(ConfigurationKeys.FIELD_DATE_RESOLUTION_MAP, dateRes);
   }
-
 }
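To round off this file, a short usage sketch of the class reformatted above; the StandardAnalyzer and the "body" field are assumptions for illustration, not part of this patch:

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.queryparser.flexible.standard.StandardQueryParser;
    import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler;
    import org.apache.lucene.search.Query;

    StandardQueryParser qp = new StandardQueryParser(new StandardAnalyzer());
    qp.setDefaultOperator(StandardQueryConfigHandler.Operator.AND);
    // With AND as the default operator this parses as: capital AND of AND Hungary
    Query q = qp.parse("capital of Hungary", "body"); // may throw QueryNodeException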
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/AnyQueryNodeBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/AnyQueryNodeBuilder.java
index ee943b5dc01..0643e8ebc71 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/AnyQueryNodeBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/AnyQueryNodeBuilder.java
@@ -17,22 +17,18 @@ package org.apache.lucene.queryparser.flexible.standard.builders;
 
 import java.util.List;
-
-import org.apache.lucene.queryparser.flexible.messages.MessageImpl;
 import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
 import org.apache.lucene.queryparser.flexible.core.builders.QueryTreeBuilder;
 import org.apache.lucene.queryparser.flexible.core.messages.QueryParserMessages;
 import org.apache.lucene.queryparser.flexible.core.nodes.AnyQueryNode;
 import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
+import org.apache.lucene.queryparser.flexible.messages.MessageImpl;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.Query;
 import org.apache.lucene.search.IndexSearcher.TooManyClauses;
+import org.apache.lucene.search.Query;
 
-/**
- * Builds a BooleanQuery of SHOULD clauses, possibly with
- * some minimum number to match.
- */
+/** Builds a BooleanQuery of SHOULD clauses, possibly with some minimum number to match. */
 public class AnyQueryNodeBuilder implements StandardQueryBuilder {
 
   public AnyQueryNodeBuilder() {
@@ -58,24 +54,18 @@ public class AnyQueryNodeBuilder implements StandardQueryBuilder {
           bQuery.add(query, BooleanClause.Occur.SHOULD);
 
         } catch (TooManyClauses ex) {
-          throw new QueryNodeException(new MessageImpl(
-          /*
+          throw new QueryNodeException(
+              new MessageImpl(/*
            * IQQQ.Q0028E_TOO_MANY_BOOLEAN_CLAUSES,
            * BooleanQuery.getMaxClauseCount()
-           */QueryParserMessages.EMPTY_MESSAGE), ex);
-
+           */ QueryParserMessages.EMPTY_MESSAGE), ex);
         }
-
       }
-
    }
 
     bQuery.setMinimumNumberShouldMatch(andNode.getMinimumMatchingElements());
 
     return bQuery.build();
-
   }
-
 }
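A hand-built equivalent of what this builder assembles, under assumed field and terms (sketch only, not part of this patch):

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.BooleanClause;
    import org.apache.lucene.search.BooleanQuery;
    import org.apache.lucene.search.TermQuery;

    // SHOULD clauses plus a minimum-match constraint: at least 2 of 3 terms must match.
    BooleanQuery.Builder b = new BooleanQuery.Builder();
    b.add(new TermQuery(new Term("body", "capital")), BooleanClause.Occur.SHOULD);
    b.add(new TermQuery(new Term("body", "of")), BooleanClause.Occur.SHOULD);
    b.add(new TermQuery(new Term("body", "hungary")), BooleanClause.Occur.SHOULD);
    b.setMinimumNumberShouldMatch(2);
    BooleanQuery anyQuery = b.build();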
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/BooleanQueryNodeBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/BooleanQueryNodeBuilder.java
index 9f9d229e683..cd185ac0522 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/BooleanQueryNodeBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/BooleanQueryNodeBuilder.java
@@ -17,29 +17,27 @@ package org.apache.lucene.queryparser.flexible.standard.builders;
 
 import java.util.List;
-
-import org.apache.lucene.queryparser.flexible.messages.MessageImpl;
 import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
 import org.apache.lucene.queryparser.flexible.core.builders.QueryTreeBuilder;
 import org.apache.lucene.queryparser.flexible.core.messages.QueryParserMessages;
 import org.apache.lucene.queryparser.flexible.core.nodes.BooleanQueryNode;
 import org.apache.lucene.queryparser.flexible.core.nodes.ModifierQueryNode;
 import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
+import org.apache.lucene.queryparser.flexible.messages.MessageImpl;
 import org.apache.lucene.queryparser.flexible.standard.parser.EscapeQuerySyntaxImpl;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.Query;
 import org.apache.lucene.search.IndexSearcher.TooManyClauses;
+import org.apache.lucene.search.Query;
 
 /**
- * Builds a {@link BooleanQuery} object from a {@link BooleanQueryNode} object.
- * Every children in the {@link BooleanQueryNode} object must be already tagged
- * using {@link QueryTreeBuilder#QUERY_TREE_BUILDER_TAGID} with a {@link Query}
- * object.
+ * Builds a {@link BooleanQuery} object from a {@link BooleanQueryNode} object. Every children in
+ * the {@link BooleanQueryNode} object must be already tagged using {@link
+ * QueryTreeBuilder#QUERY_TREE_BUILDER_TAGID} with a {@link Query} object.
  *
- * It takes in consideration if the children is a {@link ModifierQueryNode} to
- * define the {@link BooleanClause}.
+ * It takes in consideration if the children is a {@link ModifierQueryNode} to define the {@link
+ * BooleanClause}.
  */
 public class BooleanQueryNodeBuilder implements StandardQueryBuilder {
@@ -67,21 +65,18 @@ public class BooleanQueryNodeBuilder implements StandardQueryBuilder {
         } catch (TooManyClauses ex) {
-          throw new QueryNodeException(new MessageImpl(
-              QueryParserMessages.TOO_MANY_BOOLEAN_CLAUSES, IndexSearcher
-                  .getMaxClauseCount(), queryNode
-                  .toQueryString(new EscapeQuerySyntaxImpl())), ex);
-
+          throw new QueryNodeException(
+              new MessageImpl(
+                  QueryParserMessages.TOO_MANY_BOOLEAN_CLAUSES,
+                  IndexSearcher.getMaxClauseCount(),
+                  queryNode.toQueryString(new EscapeQuerySyntaxImpl())),
+              ex);
         }
-
       }
-
     }
 
     return bQuery.build();
-
   }
 
   private static BooleanClause.Occur getModifierValue(QueryNode node) {
@@ -89,22 +84,17 @@ public class BooleanQueryNodeBuilder implements StandardQueryBuilder {
     if (node instanceof ModifierQueryNode) {
       ModifierQueryNode mNode = ((ModifierQueryNode) node);
       switch (mNode.getModifier()) {
+        case MOD_REQ:
+          return BooleanClause.Occur.MUST;
-      case MOD_REQ:
-        return BooleanClause.Occur.MUST;
-
-      case MOD_NOT:
-        return BooleanClause.Occur.MUST_NOT;
-
-      case MOD_NONE:
-        return BooleanClause.Occur.SHOULD;
+        case MOD_NOT:
+          return BooleanClause.Occur.MUST_NOT;
+        case MOD_NONE:
+          return BooleanClause.Occur.SHOULD;
       }
-
     }
 
     return BooleanClause.Occur.SHOULD;
-
   }
-
 }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/BoostQueryNodeBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/BoostQueryNodeBuilder.java
index b9c67a0ddba..b3be122e481 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/BoostQueryNodeBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/BoostQueryNodeBuilder.java
@@ -24,10 +24,9 @@ import org.apache.lucene.search.BoostQuery;
 import org.apache.lucene.search.Query;
 
 /**
- * This builder basically reads the {@link Query} object set on the
- * {@link BoostQueryNode} child using
- * {@link QueryTreeBuilder#QUERY_TREE_BUILDER_TAGID} and applies the boost value
- * defined in the {@link BoostQueryNode}.
+ * This builder basically reads the {@link Query} object set on the {@link BoostQueryNode} child
+ * using {@link QueryTreeBuilder#QUERY_TREE_BUILDER_TAGID} and applies the boost value defined in
+ * the {@link BoostQueryNode}.
  */
 public class BoostQueryNodeBuilder implements StandardQueryBuilder {
 
@@ -44,11 +43,8 @@ public class BoostQueryNodeBuilder implements StandardQueryBuilder {
       return null;
     }
 
-    Query query = (Query) child
-        .getTag(QueryTreeBuilder.QUERY_TREE_BUILDER_TAGID);
+    Query query = (Query) child.getTag(QueryTreeBuilder.QUERY_TREE_BUILDER_TAGID);
 
     return new BoostQuery(query, boostNode.getValue());
-
   }
-
 }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/DummyQueryNodeBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/DummyQueryNodeBuilder.java
index 52001e57362..706dfba45f4 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/DummyQueryNodeBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/DummyQueryNodeBuilder.java
@@ -22,29 +22,26 @@ import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
 import org.apache.lucene.search.TermQuery;
 
 /**
- * This builder does nothing. Commonly used for {@link QueryNode} objects that
- * are built by its parent's builder.
- *
+ * This builder does nothing. Commonly used for {@link QueryNode} objects that are built by its
+ * parent's builder.
+ *
  * @see StandardQueryBuilder
 * @see QueryTreeBuilder
 */
 public class DummyQueryNodeBuilder implements StandardQueryBuilder {
 
-  /**
-   * Constructs a {@link DummyQueryNodeBuilder} object.
-   */
+  /** Constructs a {@link DummyQueryNodeBuilder} object. */
   public DummyQueryNodeBuilder() {
     // empty constructor
   }
   /**
    * Always return null.
-   *
-   * return null
+   *
+   * <p>return null
    */
   @Override
   public TermQuery build(QueryNode queryNode) throws QueryNodeException {
     return null;
   }
-
 }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/FieldQueryNodeBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/FieldQueryNodeBuilder.java
index cc1f4714ba2..1c207fad365 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/FieldQueryNodeBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/FieldQueryNodeBuilder.java
@@ -22,9 +22,7 @@ import org.apache.lucene.queryparser.flexible.core.nodes.FieldQueryNode;
 import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
 import org.apache.lucene.search.TermQuery;
 
-/**
- * Builds a {@link TermQuery} object from a {@link FieldQueryNode} object.
- */
+/** Builds a {@link TermQuery} object from a {@link FieldQueryNode} object. */
 public class FieldQueryNodeBuilder implements StandardQueryBuilder {
 
   public FieldQueryNodeBuilder() {
@@ -35,9 +33,6 @@ public class FieldQueryNodeBuilder implements StandardQueryBuilder {
   public TermQuery build(QueryNode queryNode) throws QueryNodeException {
     FieldQueryNode fieldNode = (FieldQueryNode) queryNode;
 
-    return new TermQuery(new Term(fieldNode.getFieldAsString(), fieldNode
-        .getTextAsString()));
-
+    return new TermQuery(new Term(fieldNode.getFieldAsString(), fieldNode.getTextAsString()));
   }
-
 }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/FuzzyQueryNodeBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/FuzzyQueryNodeBuilder.java
index 2575e802053..a905bfc0abc 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/FuzzyQueryNodeBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/FuzzyQueryNodeBuilder.java
@@ -22,9 +22,7 @@ import org.apache.lucene.queryparser.flexible.core.nodes.FuzzyQueryNode;
 import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
 import org.apache.lucene.search.FuzzyQuery;
 
-/**
- * Builds a {@link FuzzyQuery} object from a {@link FuzzyQueryNode} object.
- */
+/** Builds a {@link FuzzyQuery} object from a {@link FuzzyQueryNode} object. */
 public class FuzzyQueryNodeBuilder implements StandardQueryBuilder {
 
   public FuzzyQueryNodeBuilder() {
@@ -35,14 +33,13 @@ public class FuzzyQueryNodeBuilder implements StandardQueryBuilder {
   public FuzzyQuery build(QueryNode queryNode) throws QueryNodeException {
     FuzzyQueryNode fuzzyNode = (FuzzyQueryNode) queryNode;
     String text = fuzzyNode.getTextAsString();
-
-    int numEdits = FuzzyQuery.floatToEdits(fuzzyNode.getSimilarity(),
-        text.codePointCount(0, text.length()));
-
-    return new FuzzyQuery(new Term(fuzzyNode.getFieldAsString(), fuzzyNode
-        .getTextAsString()), numEdits, fuzzyNode
-        .getPrefixLength());
+    int numEdits =
+        FuzzyQuery.floatToEdits(fuzzyNode.getSimilarity(), text.codePointCount(0, text.length()));
+
+    return new FuzzyQuery(
+        new Term(fuzzyNode.getFieldAsString(), fuzzyNode.getTextAsString()),
+        numEdits,
+        fuzzyNode.getPrefixLength());
   }
-
 }
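The similarity-to-edit-distance conversion above can be tried in isolation; the field name and term are hypothetical:

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.FuzzyQuery;

    String text = "lucene";
    // A 0.8 similarity over the 6 code points of "lucene" maps to a small edit budget.
    int numEdits = FuzzyQuery.floatToEdits(0.8f, text.codePointCount(0, text.length()));
    FuzzyQuery fq = new FuzzyQuery(new Term("body", text), numEdits, 0);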
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/GroupQueryNodeBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/GroupQueryNodeBuilder.java
index 007bc49b965..e07824c7533 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/GroupQueryNodeBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/GroupQueryNodeBuilder.java
@@ -23,9 +23,8 @@ import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
 import org.apache.lucene.search.Query;
 
 /**
- * Builds no object, it only returns the {@link Query} object set on the
- * {@link GroupQueryNode} object using a
- * {@link QueryTreeBuilder#QUERY_TREE_BUILDER_TAGID} tag.
+ * Builds no object, it only returns the {@link Query} object set on the {@link GroupQueryNode}
+ * object using a {@link QueryTreeBuilder#QUERY_TREE_BUILDER_TAGID} tag.
  */
 public class GroupQueryNodeBuilder implements StandardQueryBuilder {
 
@@ -37,9 +36,6 @@ public class GroupQueryNodeBuilder implements StandardQueryBuilder {
   public Query build(QueryNode queryNode) throws QueryNodeException {
     GroupQueryNode groupNode = (GroupQueryNode) queryNode;
 
-    return (Query) (groupNode).getChild().getTag(
-        QueryTreeBuilder.QUERY_TREE_BUILDER_TAGID);
-
+    return (Query) (groupNode).getChild().getTag(QueryTreeBuilder.QUERY_TREE_BUILDER_TAGID);
   }
-
 }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/MatchAllDocsQueryNodeBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/MatchAllDocsQueryNodeBuilder.java
index d55bfd67dde..cda433ebe8c 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/MatchAllDocsQueryNodeBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/MatchAllDocsQueryNodeBuilder.java
@@ -16,18 +16,15 @@
  */
 package org.apache.lucene.queryparser.flexible.standard.builders;
 
-import org.apache.lucene.queryparser.flexible.messages.MessageImpl;
 import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
 import org.apache.lucene.queryparser.flexible.core.messages.QueryParserMessages;
 import org.apache.lucene.queryparser.flexible.core.nodes.MatchAllDocsQueryNode;
 import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
+import org.apache.lucene.queryparser.flexible.messages.MessageImpl;
 import org.apache.lucene.queryparser.flexible.standard.parser.EscapeQuerySyntaxImpl;
 import org.apache.lucene.search.MatchAllDocsQuery;
 
-/**
- * Builds a {@link MatchAllDocsQuery} object from a
- * {@link MatchAllDocsQueryNode} object.
- */
+/** Builds a {@link MatchAllDocsQuery} object from a {@link MatchAllDocsQueryNode} object. */
 public class MatchAllDocsQueryNodeBuilder implements StandardQueryBuilder {
 
   public MatchAllDocsQueryNodeBuilder() {
@@ -39,14 +36,13 @@ public class MatchAllDocsQueryNodeBuilder implements StandardQueryBuilder {
     // validates node
     if (!(queryNode instanceof MatchAllDocsQueryNode)) {
-      throw new QueryNodeException(new MessageImpl(
-          QueryParserMessages.LUCENE_QUERY_CONVERSION_ERROR, queryNode
-              .toQueryString(new EscapeQuerySyntaxImpl()), queryNode.getClass()
-              .getName()));
+      throw new QueryNodeException(
+          new MessageImpl(
+              QueryParserMessages.LUCENE_QUERY_CONVERSION_ERROR,
+              queryNode.toQueryString(new EscapeQuerySyntaxImpl()),
+              queryNode.getClass().getName()));
     }
 
     return new MatchAllDocsQuery();
-
   }
-
 }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/MatchNoDocsQueryNodeBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/MatchNoDocsQueryNodeBuilder.java
index 40584727f81..57ff2fda108 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/MatchNoDocsQueryNodeBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/MatchNoDocsQueryNodeBuilder.java
@@ -24,10 +24,7 @@ import org.apache.lucene.queryparser.flexible.messages.MessageImpl;
 import org.apache.lucene.queryparser.flexible.standard.parser.EscapeQuerySyntaxImpl;
 import org.apache.lucene.search.MatchNoDocsQuery;
 
-/**
- * Builds a {@link MatchNoDocsQuery} object from a
- * {@link MatchNoDocsQueryNode} object.
- */
+/** Builds a {@link MatchNoDocsQuery} object from a {@link MatchNoDocsQueryNode} object. */
 public class MatchNoDocsQueryNodeBuilder implements StandardQueryBuilder {
 
   public MatchNoDocsQueryNodeBuilder() {
@@ -39,14 +36,13 @@ public class MatchNoDocsQueryNodeBuilder implements StandardQueryBuilder {
     // validates node
     if (!(queryNode instanceof MatchNoDocsQueryNode)) {
-      throw new QueryNodeException(new MessageImpl(
-          QueryParserMessages.LUCENE_QUERY_CONVERSION_ERROR, queryNode
-              .toQueryString(new EscapeQuerySyntaxImpl()), queryNode.getClass()
-              .getName()));
+      throw new QueryNodeException(
+          new MessageImpl(
+              QueryParserMessages.LUCENE_QUERY_CONVERSION_ERROR,
+              queryNode.toQueryString(new EscapeQuerySyntaxImpl()),
+              queryNode.getClass().getName()));
     }
 
     return new MatchNoDocsQuery();
-
   }
-
 }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/ModifierQueryNodeBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/ModifierQueryNodeBuilder.java
index 9e81f04a0d5..299e76fe310 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/ModifierQueryNodeBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/ModifierQueryNodeBuilder.java
@@ -23,9 +23,8 @@ import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
 import org.apache.lucene.search.Query;
 
 /**
- * Builds no object, it only returns the {@link Query} object set on the
- * {@link ModifierQueryNode} object using a
- * {@link QueryTreeBuilder#QUERY_TREE_BUILDER_TAGID} tag.
+ * Builds no object, it only returns the {@link Query} object set on the {@link ModifierQueryNode}
+ * object using a {@link QueryTreeBuilder#QUERY_TREE_BUILDER_TAGID} tag.
 */
 public class ModifierQueryNodeBuilder implements StandardQueryBuilder {
 
@@ -37,9 +36,6 @@ public class ModifierQueryNodeBuilder implements StandardQueryBuilder {
   public Query build(QueryNode queryNode) throws QueryNodeException {
     ModifierQueryNode modifierNode = (ModifierQueryNode) queryNode;
 
-    return (Query) (modifierNode).getChild().getTag(
-        QueryTreeBuilder.QUERY_TREE_BUILDER_TAGID);
-
+    return (Query) (modifierNode).getChild().getTag(QueryTreeBuilder.QUERY_TREE_BUILDER_TAGID);
   }
-
 }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/MultiPhraseQueryNodeBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/MultiPhraseQueryNodeBuilder.java
index 6de66593330..06e7c8ad9b2 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/MultiPhraseQueryNodeBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/MultiPhraseQueryNodeBuilder.java
@@ -20,7 +20,6 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
-
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
 import org.apache.lucene.queryparser.flexible.core.builders.QueryTreeBuilder;
@@ -30,10 +29,7 @@ import org.apache.lucene.queryparser.flexible.standard.nodes.MultiPhraseQueryNod
 import org.apache.lucene.search.MultiPhraseQuery;
 import org.apache.lucene.search.TermQuery;
 
-/**
- * Builds a {@link MultiPhraseQuery} object from a {@link MultiPhraseQueryNode}
- * object.
- */
+/** Builds a {@link MultiPhraseQuery} object from a {@link MultiPhraseQueryNode} object. */
 public class MultiPhraseQueryNodeBuilder implements StandardQueryBuilder {
 
   public MultiPhraseQueryNodeBuilder() {
@@ -53,30 +49,24 @@ public class MultiPhraseQueryNodeBuilder implements StandardQueryBuilder {
       for (QueryNode child : children) {
         FieldQueryNode termNode = (FieldQueryNode) child;
-        TermQuery termQuery = (TermQuery) termNode
-            .getTag(QueryTreeBuilder.QUERY_TREE_BUILDER_TAGID);
-        List<Term> termList = positionTermMap.get(termNode
-            .getPositionIncrement());
+        TermQuery termQuery =
+            (TermQuery) termNode.getTag(QueryTreeBuilder.QUERY_TREE_BUILDER_TAGID);
+        List<Term> termList = positionTermMap.get(termNode.getPositionIncrement());
 
         if (termList == null) {
           termList = new LinkedList<>();
           positionTermMap.put(termNode.getPositionIncrement(), termList);
-
         }
 
         termList.add(termQuery.getTerm());
-
       }
 
       for (Map.Entry<Integer, List<Term>> entry : positionTermMap.entrySet()) {
         List<Term> termList = entry.getValue();
         phraseQueryBuilder.add(termList.toArray(new Term[termList.size()]), entry.getKey());
       }
-
     }
 
     return phraseQueryBuilder.build();
-
   }
-
 }
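A hand-written sketch of the position-to-terms map this builder produces; field and terms are assumptions:

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.MultiPhraseQuery;

    // Terms added at the same position are alternatives:
    // this matches "big apple" as well as "large apple".
    MultiPhraseQuery.Builder b = new MultiPhraseQuery.Builder();
    b.add(new Term[] {new Term("body", "big"), new Term("body", "large")}, 0);
    b.add(new Term[] {new Term("body", "apple")}, 1);
    MultiPhraseQuery mpq = b.build();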
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/PhraseQueryNodeBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/PhraseQueryNodeBuilder.java
index 60260d89d9d..c1fabd8c1c9 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/PhraseQueryNodeBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/PhraseQueryNodeBuilder.java
@@ -17,7 +17,6 @@
 package org.apache.lucene.queryparser.flexible.standard.builders;
 
 import java.util.List;
-
 import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
 import org.apache.lucene.queryparser.flexible.core.builders.QueryTreeBuilder;
 import org.apache.lucene.queryparser.flexible.core.nodes.FieldQueryNode;
@@ -27,10 +26,7 @@ import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
 
-/**
- * Builds a {@link PhraseQuery} object from a {@link TokenizedPhraseQueryNode}
- * object.
- */
+/** Builds a {@link PhraseQuery} object from a {@link TokenizedPhraseQueryNode} object. */
 public class PhraseQueryNodeBuilder implements StandardQueryBuilder {
 
   public PhraseQueryNodeBuilder() {
@@ -48,17 +44,13 @@ public class PhraseQueryNodeBuilder implements StandardQueryBuilder {
     if (children != null) {
 
       for (QueryNode child : children) {
-        TermQuery termQuery = (TermQuery) child
-            .getTag(QueryTreeBuilder.QUERY_TREE_BUILDER_TAGID);
+        TermQuery termQuery = (TermQuery) child.getTag(QueryTreeBuilder.QUERY_TREE_BUILDER_TAGID);
         FieldQueryNode termNode = (FieldQueryNode) child;
 
         builder.add(termQuery.getTerm(), termNode.getPositionIncrement());
       }
-
     }
 
     return builder.build();
-
   }
-
 }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/PointRangeQueryNodeBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/PointRangeQueryNodeBuilder.java
index 0cce4bf94f2..49e4899a595 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/PointRangeQueryNodeBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/PointRangeQueryNodeBuilder.java
@@ -37,30 +37,28 @@ import org.apache.lucene.search.Query;
  * @see PointRangeQueryNode
  */
 public class PointRangeQueryNodeBuilder implements StandardQueryBuilder {
-
-  /**
-   * Constructs a {@link PointRangeQueryNodeBuilder} object.
-   */
+
+  /** Constructs a {@link PointRangeQueryNodeBuilder} object. */
   public PointRangeQueryNodeBuilder() {
-    // empty constructor
+    // empty constructor
   }
-
+
   @Override
   public Query build(QueryNode queryNode) throws QueryNodeException {
     PointRangeQueryNode numericRangeNode = (PointRangeQueryNode) queryNode;
-
+
     PointQueryNode lowerNumericNode = numericRangeNode.getLowerBound();
     PointQueryNode upperNumericNode = numericRangeNode.getUpperBound();
-
+
     Number lowerNumber = lowerNumericNode.getValue();
     Number upperNumber = upperNumericNode.getValue();
-
+
     PointsConfig pointsConfig = numericRangeNode.getPointsConfig();
     Class<? extends Number> numberType = pointsConfig.getType();
     String field = StringUtils.toString(numericRangeNode.getField());
     boolean minInclusive = numericRangeNode.isLowerInclusive();
     boolean maxInclusive = numericRangeNode.isUpperInclusive();
-
+
     // TODO: push down cleaning up of crazy nulls and inclusive/exclusive elsewhere
     if (Integer.class.equals(numberType)) {
       Integer lower = (Integer) lowerNumber;
@@ -70,7 +68,7 @@ public class PointRangeQueryNodeBuilder implements StandardQueryBuilder {
       if (minInclusive == false) {
         lower = lower + 1;
       }
-
+
       Integer upper = (Integer) upperNumber;
       if (upper == null) {
         upper = Integer.MAX_VALUE;
@@ -87,7 +85,7 @@ public class PointRangeQueryNodeBuilder implements StandardQueryBuilder {
       if (minInclusive == false) {
         lower = lower + 1;
       }
-
+
       Long upper = (Long) upperNumber;
       if (upper == null) {
         upper = Long.MAX_VALUE;
@@ -104,7 +102,7 @@ public class PointRangeQueryNodeBuilder implements StandardQueryBuilder {
       if (minInclusive == false) {
         lower = Math.nextUp(lower);
       }
-
+
       Float upper = (Float) upperNumber;
       if (upper == null) {
         upper = Float.POSITIVE_INFINITY;
@@ -121,7 +119,7 @@ public class PointRangeQueryNodeBuilder implements StandardQueryBuilder {
       if (minInclusive == false) {
         lower = Math.nextUp(lower);
       }
-
+
       Double upper = (Double) upperNumber;
       if (upper == null) {
         upper = Double.POSITIVE_INFINITY;
@@ -131,7 +129,8 @@ public class PointRangeQueryNodeBuilder implements StandardQueryBuilder {
       }
       return DoublePoint.newRangeQuery(field, lower, upper);
     } else {
-      throw new QueryNodeException(new MessageImpl(QueryParserMessages.UNSUPPORTED_NUMERIC_DATA_TYPE, numberType));
+      throw new QueryNodeException(
+          new MessageImpl(QueryParserMessages.UNSUPPORTED_NUMERIC_DATA_TYPE, numberType));
     }
   }
 }
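The exclusive-to-inclusive widening above, shown concretely for the int case; the field name and bounds are assumptions:

    import org.apache.lucene.document.IntPoint;
    import org.apache.lucene.search.Query;

    // An exclusive lower bound is widened by one step: (5, 10] becomes [6, 10].
    Query priceRange = IntPoint.newRangeQuery("price", 5 + 1, 10);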
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/PrefixWildcardQueryNodeBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/PrefixWildcardQueryNodeBuilder.java
index b4274575c07..3f0927930cc 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/PrefixWildcardQueryNodeBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/PrefixWildcardQueryNodeBuilder.java
@@ -24,10 +24,7 @@ import org.apache.lucene.queryparser.flexible.standard.processors.MultiTermRewri
 import org.apache.lucene.search.MultiTermQuery;
 import org.apache.lucene.search.PrefixQuery;
 
-/**
- * Builds a {@link PrefixQuery} object from a {@link PrefixWildcardQueryNode}
- * object.
- */
+/** Builds a {@link PrefixQuery} object from a {@link PrefixWildcardQueryNode} object. */
 public class PrefixWildcardQueryNodeBuilder implements StandardQueryBuilder {
 
   public PrefixWildcardQueryNodeBuilder() {
@@ -35,19 +32,20 @@ public class PrefixWildcardQueryNodeBuilder implements StandardQueryBuilder {
   }
 
   @Override
-  public PrefixQuery build(QueryNode queryNode) throws QueryNodeException {
+  public PrefixQuery build(QueryNode queryNode) throws QueryNodeException {
     PrefixWildcardQueryNode wildcardNode = (PrefixWildcardQueryNode) queryNode;
 
-    String text = wildcardNode.getText().subSequence(0, wildcardNode.getText().length() - 1).toString();
+    String text =
+        wildcardNode.getText().subSequence(0, wildcardNode.getText().length() - 1).toString();
 
     PrefixQuery q = new PrefixQuery(new Term(wildcardNode.getFieldAsString(), text));
-
-    MultiTermQuery.RewriteMethod method = (MultiTermQuery.RewriteMethod)queryNode.getTag(MultiTermRewriteMethodProcessor.TAG_ID);
+
+    MultiTermQuery.RewriteMethod method =
+        (MultiTermQuery.RewriteMethod) queryNode.getTag(MultiTermRewriteMethodProcessor.TAG_ID);
     if (method != null) {
       q.setRewriteMethod(method);
     }
-
+
     return q;
   }
-
 }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/RegexpQueryNodeBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/RegexpQueryNodeBuilder.java
index b2198b41fda..ac64f6bad8a 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/RegexpQueryNodeBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/RegexpQueryNodeBuilder.java
@@ -24,9 +24,7 @@ import org.apache.lucene.queryparser.flexible.standard.processors.MultiTermRewri
 import org.apache.lucene.search.MultiTermQuery;
 import org.apache.lucene.search.RegexpQuery;
 
-/**
- * Builds a {@link RegexpQuery} object from a {@link RegexpQueryNode} object.
- */
+/** Builds a {@link RegexpQuery} object from a {@link RegexpQueryNode} object. */
 public class RegexpQueryNodeBuilder implements StandardQueryBuilder {
 
   public RegexpQueryNodeBuilder() {
@@ -38,16 +36,15 @@ public class RegexpQueryNodeBuilder implements StandardQueryBuilder {
     RegexpQueryNode regexpNode = (RegexpQueryNode) queryNode;
 
     // TODO: make the maxStates configurable w/ a reasonable default (QueryParserBase uses 10000)
-    RegexpQuery q = new RegexpQuery(new Term(regexpNode.getFieldAsString(),
-        regexpNode.textToBytesRef()));
+    RegexpQuery q =
+        new RegexpQuery(new Term(regexpNode.getFieldAsString(), regexpNode.textToBytesRef()));
 
-    MultiTermQuery.RewriteMethod method = (MultiTermQuery.RewriteMethod) queryNode
-        .getTag(MultiTermRewriteMethodProcessor.TAG_ID);
+    MultiTermQuery.RewriteMethod method =
+        (MultiTermQuery.RewriteMethod) queryNode.getTag(MultiTermRewriteMethodProcessor.TAG_ID);
     if (method != null) {
       q.setRewriteMethod(method);
     }
 
     return q;
   }
-
 }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/SlopQueryNodeBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/SlopQueryNodeBuilder.java
index 77667d9e17d..ca0a7c9f514 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/SlopQueryNodeBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/SlopQueryNodeBuilder.java
@@ -25,10 +25,9 @@ import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.search.Query;
 
 /**
- * This builder basically reads the {@link Query} object set on the
- * {@link SlopQueryNode} child using
- * {@link QueryTreeBuilder#QUERY_TREE_BUILDER_TAGID} and applies the slop value
- * defined in the {@link SlopQueryNode}.
+ * This builder basically reads the {@link Query} object set on the {@link SlopQueryNode} child
+ * using {@link QueryTreeBuilder#QUERY_TREE_BUILDER_TAGID} and applies the slop value defined in the
+ * {@link SlopQueryNode}.
 */
 public class SlopQueryNodeBuilder implements StandardQueryBuilder {
 
@@ -40,8 +39,8 @@ public class SlopQueryNodeBuilder implements StandardQueryBuilder {
   public Query build(QueryNode queryNode) throws QueryNodeException {
     SlopQueryNode phraseSlopNode = (SlopQueryNode) queryNode;
 
-    Query query = (Query) phraseSlopNode.getChild().getTag(
-        QueryTreeBuilder.QUERY_TREE_BUILDER_TAGID);
+    Query query =
+        (Query) phraseSlopNode.getChild().getTag(QueryTreeBuilder.QUERY_TREE_BUILDER_TAGID);
 
     if (query instanceof PhraseQuery) {
       PhraseQuery.Builder builder = new PhraseQuery.Builder();
@@ -55,17 +54,15 @@ public class SlopQueryNodeBuilder implements StandardQueryBuilder {
       query = builder.build();
 
     } else {
-      MultiPhraseQuery mpq = (MultiPhraseQuery)query;
-
+      MultiPhraseQuery mpq = (MultiPhraseQuery) query;
+
       int slop = phraseSlopNode.getValue();
-
+
       if (slop != mpq.getSlop()) {
         query = new MultiPhraseQuery.Builder(mpq).setSlop(slop).build();
       }
     }
 
     return query;
-
   }
-
 }
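What the slop value applied above means for a plain PhraseQuery, as a small sketch (field and terms assumed):

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.PhraseQuery;

    // "exact phrase"~2 : the two terms may be up to two position moves apart.
    PhraseQuery.Builder builder = new PhraseQuery.Builder();
    builder.add(new Term("body", "exact"));
    builder.add(new Term("body", "phrase"));
    builder.setSlop(2);
    PhraseQuery pq = builder.build();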
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/StandardQueryBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/StandardQueryBuilder.java
index c9fb3c14b8a..816912b1656 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/StandardQueryBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/StandardQueryBuilder.java
@@ -23,9 +23,9 @@ import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
 import org.apache.lucene.search.Query;
 
 /**
- * This interface should be implemented by every class that wants to build
- * {@link Query} objects from {@link QueryNode} objects.
- *
+ * This interface should be implemented by every class that wants to build {@link Query} objects
+ * from {@link QueryNode} objects.
+ *
  * @see QueryBuilder
 * @see QueryTreeBuilder
 */
@@ -33,5 +33,4 @@ public interface StandardQueryBuilder extends QueryBuilder {
 
   @Override
   public Query build(QueryNode queryNode) throws QueryNodeException;
-
 }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/StandardQueryTreeBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/StandardQueryTreeBuilder.java
index 7db4205f53c..6e9d1e2953d 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/StandardQueryTreeBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/StandardQueryTreeBuilder.java
@@ -33,25 +33,23 @@ import org.apache.lucene.queryparser.flexible.standard.nodes.MultiPhraseQueryNod
 import org.apache.lucene.queryparser.flexible.standard.nodes.PointQueryNode;
 import org.apache.lucene.queryparser.flexible.standard.nodes.PointRangeQueryNode;
 import org.apache.lucene.queryparser.flexible.standard.nodes.PrefixWildcardQueryNode;
-import org.apache.lucene.queryparser.flexible.standard.nodes.TermRangeQueryNode;
 import org.apache.lucene.queryparser.flexible.standard.nodes.RegexpQueryNode;
 import org.apache.lucene.queryparser.flexible.standard.nodes.SynonymQueryNode;
+import org.apache.lucene.queryparser.flexible.standard.nodes.TermRangeQueryNode;
 import org.apache.lucene.queryparser.flexible.standard.nodes.WildcardQueryNode;
 import org.apache.lucene.queryparser.flexible.standard.processors.StandardQueryNodeProcessorPipeline;
 import org.apache.lucene.search.Query;
 
 /**
- * This query tree builder only defines the necessary map to build a
- * {@link Query} tree object. It should be used to generate a {@link Query} tree
- * object from a query node tree processed by a
+ * This query tree builder only defines the necessary map to build a {@link Query} tree object. It
+ * should be used to generate a {@link Query} tree object from a query node tree processed by a
  * {@link StandardQueryNodeProcessorPipeline}.
- *
+ *
  * @see QueryTreeBuilder
  * @see StandardQueryNodeProcessorPipeline
  */
-public class StandardQueryTreeBuilder extends QueryTreeBuilder implements
-    StandardQueryBuilder {
-
+public class StandardQueryTreeBuilder extends QueryTreeBuilder implements StandardQueryBuilder {
+
   public StandardQueryTreeBuilder() {
     setBuilder(GroupQueryNode.class, new GroupQueryNodeBuilder());
     setBuilder(FieldQueryNode.class, new FieldQueryNodeBuilder());
@@ -64,21 +62,17 @@ public class StandardQueryTreeBuilder extends QueryTreeBuilder implements
     setBuilder(WildcardQueryNode.class, new WildcardQueryNodeBuilder());
     setBuilder(TokenizedPhraseQueryNode.class, new PhraseQueryNodeBuilder());
     setBuilder(MatchNoDocsQueryNode.class, new MatchNoDocsQueryNodeBuilder());
-    setBuilder(PrefixWildcardQueryNode.class,
-        new PrefixWildcardQueryNodeBuilder());
+    setBuilder(PrefixWildcardQueryNode.class, new PrefixWildcardQueryNodeBuilder());
     setBuilder(TermRangeQueryNode.class, new TermRangeQueryNodeBuilder());
     setBuilder(RegexpQueryNode.class, new RegexpQueryNodeBuilder());
     setBuilder(SlopQueryNode.class, new SlopQueryNodeBuilder());
-    setBuilder(SynonymQueryNode.class,
-        new SynonymQueryNodeBuilder());
+    setBuilder(SynonymQueryNode.class, new SynonymQueryNodeBuilder());
     setBuilder(MultiPhraseQueryNode.class, new MultiPhraseQueryNodeBuilder());
     setBuilder(MatchAllDocsQueryNode.class, new MatchAllDocsQueryNodeBuilder());
-
   }
-
+
   @Override
   public Query build(QueryNode queryNode) throws QueryNodeException {
     return (Query) super.build(queryNode);
   }
-
 }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/TermRangeQueryNodeBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/TermRangeQueryNodeBuilder.java
index 90abdf7c21b..929a952bf87 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/TermRangeQueryNodeBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/TermRangeQueryNodeBuilder.java
@@ -25,45 +25,45 @@ import org.apache.lucene.queryparser.flexible.standard.processors.MultiTermRewri
 import org.apache.lucene.search.MultiTermQuery;
 import org.apache.lucene.search.TermRangeQuery;
 
-/**
- * Builds a {@link TermRangeQuery} object from a {@link TermRangeQueryNode}
- * object.
- */
+/** Builds a {@link TermRangeQuery} object from a {@link TermRangeQueryNode} object. */
 public class TermRangeQueryNodeBuilder implements StandardQueryBuilder {
-
+
   public TermRangeQueryNodeBuilder() {
-    // empty constructor
+    // empty constructor
   }
-
+
   @Override
   public TermRangeQuery build(QueryNode queryNode) throws QueryNodeException {
     TermRangeQueryNode rangeNode = (TermRangeQueryNode) queryNode;
     FieldQueryNode upper = rangeNode.getUpperBound();
     FieldQueryNode lower = rangeNode.getLowerBound();
-
+
     String field = StringUtils.toString(rangeNode.getField());
     String lowerText = lower.getTextAsString();
     String upperText = upper.getTextAsString();
-
+
     if (lowerText.length() == 0) {
       lowerText = null;
     }
-
+
     if (upperText.length() == 0) {
       upperText = null;
     }
-
-    TermRangeQuery rangeQuery = TermRangeQuery.newStringRange(field, lowerText, upperText, rangeNode
-        .isLowerInclusive(), rangeNode.isUpperInclusive());
-
-    MultiTermQuery.RewriteMethod method = (MultiTermQuery.RewriteMethod) queryNode
-        .getTag(MultiTermRewriteMethodProcessor.TAG_ID);
+
+    TermRangeQuery rangeQuery =
+        TermRangeQuery.newStringRange(
+            field,
+            lowerText,
+            upperText,
+            rangeNode.isLowerInclusive(),
+            rangeNode.isUpperInclusive());
+
+    MultiTermQuery.RewriteMethod method =
+        (MultiTermQuery.RewriteMethod) queryNode.getTag(MultiTermRewriteMethodProcessor.TAG_ID);
     if (method != null) {
       rangeQuery.setRewriteMethod(method);
     }
-
+
     return rangeQuery;
-
   }
-
 }
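The null handling above makes empty bounds open-ended; a direct call looks like this (field and terms assumed):

    import org.apache.lucene.search.TermRangeQuery;

    // [apple TO banana} : inclusive lower bound, exclusive upper bound;
    // passing null for either bound leaves that end of the range open.
    TermRangeQuery range = TermRangeQuery.newStringRange("title", "apple", "banana", true, false);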
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/WildcardQueryNodeBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/WildcardQueryNodeBuilder.java
index d01d601fb7b..d2b1a731352 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/WildcardQueryNodeBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/WildcardQueryNodeBuilder.java
@@ -24,10 +24,7 @@ import org.apache.lucene.queryparser.flexible.standard.processors.MultiTermRewri
 import org.apache.lucene.search.MultiTermQuery;
 import org.apache.lucene.search.WildcardQuery;
 
-/**
- * Builds a {@link WildcardQuery} object from a {@link WildcardQueryNode}
- * object.
- */
+/** Builds a {@link WildcardQuery} object from a {@link WildcardQueryNode} object. */
 public class WildcardQueryNodeBuilder implements StandardQueryBuilder {
 
   public WildcardQueryNodeBuilder() {
@@ -38,15 +35,16 @@ public class WildcardQueryNodeBuilder implements StandardQueryBuilder {
   public WildcardQuery build(QueryNode queryNode) throws QueryNodeException {
     WildcardQueryNode wildcardNode = (WildcardQueryNode) queryNode;
 
-    WildcardQuery q = new WildcardQuery(new Term(wildcardNode.getFieldAsString(),
-        wildcardNode.getTextAsString()));
-
-    MultiTermQuery.RewriteMethod method = (MultiTermQuery.RewriteMethod)queryNode.getTag(MultiTermRewriteMethodProcessor.TAG_ID);
+    WildcardQuery q =
+        new WildcardQuery(
+            new Term(wildcardNode.getFieldAsString(), wildcardNode.getTextAsString()));
+
+    MultiTermQuery.RewriteMethod method =
+        (MultiTermQuery.RewriteMethod) queryNode.getTag(MultiTermRewriteMethodProcessor.TAG_ID);
     if (method != null) {
       q.setRewriteMethod(method);
     }
-
+
     return q;
   }
-
 }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/package-info.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/package-info.java
index 2d950107da5..a924538a12b 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/package-info.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/package-info.java
@@ -14,18 +14,19 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
-/** 
+
+/**
  * Standard Lucene Query Node Builders.
- * 
+ *
  * <h2>Standard Lucene Query Node Builders</h2>
- * <p>
- * The package org.apache.lucene.queryparser.flexible.standard.builders contains all the builders needed
- * to build a Lucene Query object from a query node tree. These builders expect the query node tree was
- * already processed by the {@link org.apache.lucene.queryparser.flexible.standard.processors.StandardQueryNodeProcessorPipeline}.
- * <p>
- * {@link org.apache.lucene.queryparser.flexible.standard.builders.StandardQueryTreeBuilder} is a builder that already contains a defined map that maps each QueryNode object
- * with its respective builder.
+ *
+ * <p>The package org.apache.lucene.queryparser.flexible.standard.builders contains all the builders
+ * needed to build a Lucene Query object from a query node tree. These builders expect the query
+ * node tree was already processed by the {@link
+ * org.apache.lucene.queryparser.flexible.standard.processors.StandardQueryNodeProcessorPipeline}.
+ *
+ * <p>{@link org.apache.lucene.queryparser.flexible.standard.builders.StandardQueryTreeBuilder} is a
+ * builder that already contains a defined map that maps each QueryNode object with its respective
+ * builder.
  */
 package org.apache.lucene.queryparser.flexible.standard.builders;
-
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/FieldBoostMapFCListener.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/FieldBoostMapFCListener.java
index 16db310c7ac..3f65b3409b1 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/FieldBoostMapFCListener.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/FieldBoostMapFCListener.java
@@ -17,18 +17,16 @@ package org.apache.lucene.queryparser.flexible.standard.config;
 
 import java.util.Map;
-
-import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler.ConfigurationKeys;
 import org.apache.lucene.queryparser.flexible.core.config.FieldConfig;
 import org.apache.lucene.queryparser.flexible.core.config.FieldConfigListener;
 import org.apache.lucene.queryparser.flexible.core.config.QueryConfigHandler;
+import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler.ConfigurationKeys;
 
 /**
- * This listener listens for every field configuration request and assign a
- * {@link ConfigurationKeys#BOOST} to the
- * equivalent {@link FieldConfig} based on a defined map: fieldName -> boostValue stored in
- * {@link ConfigurationKeys#FIELD_BOOST_MAP}.
- *
+ * This listener listens for every field configuration request and assign a {@link
+ * ConfigurationKeys#BOOST} to the equivalent {@link FieldConfig} based on a defined map: fieldName
+ * -> boostValue stored in {@link ConfigurationKeys#FIELD_BOOST_MAP}.
+ *
  * @see ConfigurationKeys#FIELD_BOOST_MAP
  * @see ConfigurationKeys#BOOST
  * @see FieldConfig
@@ -37,7 +35,7 @@ import org.apache.lucene.queryparser.flexible.core.config.QueryConfigHandler;
 public class FieldBoostMapFCListener implements FieldConfigListener {
 
   private QueryConfigHandler config = null;
-
+
   public FieldBoostMapFCListener(QueryConfigHandler config) {
     this.config = config;
   }
@@ -45,15 +43,13 @@ public class FieldBoostMapFCListener implements FieldConfigListener {
   @Override
   public void buildFieldConfig(FieldConfig fieldConfig) {
     Map<String, Float> fieldBoostMap = this.config.get(ConfigurationKeys.FIELD_BOOST_MAP);
-
+
     if (fieldBoostMap != null) {
       Float boost = fieldBoostMap.get(fieldConfig.getField());
 
       if (boost != null) {
         fieldConfig.set(ConfigurationKeys.BOOST, boost);
       }
-
     }
   }
-
 }
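The map this listener consumes is the one installed through StandardQueryParser.setFieldsBoost, reformatted earlier in this patch; the field names and boost values below are assumptions:

    import java.util.HashMap;
    import java.util.Map;

    Map<String, Float> boosts = new HashMap<>();
    boosts.put("title", 2.0f); // title matches count double
    boosts.put("body", 1.0f);
    qp.setFieldsBoost(boosts); // qp: a StandardQueryParser instance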
+ * * @see ConfigurationKeys#DATE_RESOLUTION * @see ConfigurationKeys#FIELD_DATE_RESOLUTION_MAP * @see FieldConfig @@ -47,11 +45,11 @@ public class FieldDateResolutionFCListener implements FieldConfigListener { @Override public void buildFieldConfig(FieldConfig fieldConfig) { DateTools.Resolution dateRes = null; - Map dateResMap = this.config.get(ConfigurationKeys.FIELD_DATE_RESOLUTION_MAP); + Map dateResMap = + this.config.get(ConfigurationKeys.FIELD_DATE_RESOLUTION_MAP); if (dateResMap != null) { - dateRes = dateResMap.get( - fieldConfig.getField()); + dateRes = dateResMap.get(fieldConfig.getField()); } if (dateRes == null) { @@ -61,7 +59,5 @@ public class FieldDateResolutionFCListener implements FieldConfigListener { if (dateRes != null) { fieldConfig.set(ConfigurationKeys.DATE_RESOLUTION, dateRes); } - } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/FuzzyConfig.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/FuzzyConfig.java index 3b53a0b1311..155b44d71b1 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/FuzzyConfig.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/FuzzyConfig.java @@ -18,15 +18,13 @@ package org.apache.lucene.queryparser.flexible.standard.config; import org.apache.lucene.search.FuzzyQuery; -/** - * Configuration parameters for {@link FuzzyQuery}s - */ +/** Configuration parameters for {@link FuzzyQuery}s */ public class FuzzyConfig { - + private int prefixLength = FuzzyQuery.defaultPrefixLength; private float minSimilarity = FuzzyQuery.defaultMaxEdits; - + public FuzzyConfig() {} public int getPrefixLength() { @@ -44,5 +42,4 @@ public class FuzzyConfig { public void setMinSimilarity(float minSimilarity) { this.minSimilarity = minSimilarity; } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/NumberDateFormat.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/NumberDateFormat.java index e3a4ffa29fd..ba0bed2f124 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/NumberDateFormat.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/NumberDateFormat.java @@ -24,47 +24,43 @@ import java.text.ParsePosition; import java.util.Date; /** - * This {@link Format} parses {@link Long} into date strings and vice-versa. It - * uses the given {@link DateFormat} to parse and format dates, but before, it - * converts {@link Long} to {@link Date} objects or vice-versa. + * This {@link Format} parses {@link Long} into date strings and vice-versa. It uses the given + * {@link DateFormat} to parse and format dates, but before, it converts {@link Long} to {@link + * Date} objects or vice-versa. */ public class NumberDateFormat extends NumberFormat { - + private static final long serialVersionUID = 964823936071308283L; - - final private DateFormat dateFormat; - + + private final DateFormat dateFormat; + /** * Constructs a {@link NumberDateFormat} object using the given {@link DateFormat}. 
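To make the Long <-> date-string round trip concrete, a small self-contained example of NumberDateFormat wrapping a JDK SimpleDateFormat (class name invented; the UTC pin only makes the printed value deterministic):

import java.text.ParsePosition;
import java.text.SimpleDateFormat;
import java.util.Locale;
import java.util.TimeZone;
import org.apache.lucene.queryparser.flexible.standard.config.NumberDateFormat;

public class NumberDateFormatDemo {
  public static void main(String[] args) {
    SimpleDateFormat inner = new SimpleDateFormat("yyyy-MM-dd", Locale.ROOT);
    inner.setTimeZone(TimeZone.getTimeZone("UTC"));
    NumberDateFormat format = new NumberDateFormat(inner);
    String text = format.format(0L); // epoch millis -> "1970-01-01"
    Number millis = format.parse(text, new ParsePosition(0)); // date string -> epoch millis (Long)
    System.out.println(text + " <-> " + millis);
  }
}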
- * + * * @param dateFormat {@link DateFormat} used to parse and format dates */ public NumberDateFormat(DateFormat dateFormat) { this.dateFormat = dateFormat; } - + @Override - public StringBuffer format(double number, StringBuffer toAppendTo, - FieldPosition pos) { + public StringBuffer format(double number, StringBuffer toAppendTo, FieldPosition pos) { return dateFormat.format(new Date((long) number), toAppendTo, pos); } - + @Override - public StringBuffer format(long number, StringBuffer toAppendTo, - FieldPosition pos) { + public StringBuffer format(long number, StringBuffer toAppendTo, FieldPosition pos) { return dateFormat.format(new Date(number), toAppendTo, pos); } - + @Override public Number parse(String source, ParsePosition parsePosition) { final Date date = dateFormat.parse(source, parsePosition); return (date == null) ? null : date.getTime(); } - + @Override - public StringBuffer format(Object number, StringBuffer toAppendTo, - FieldPosition pos) { + public StringBuffer format(Object number, StringBuffer toAppendTo, FieldPosition pos) { return dateFormat.format(number, toAppendTo, pos); } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/PointsConfig.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/PointsConfig.java index 41239826da2..8f2db329fb1 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/PointsConfig.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/PointsConfig.java @@ -17,88 +17,79 @@ package org.apache.lucene.queryparser.flexible.standard.config; import java.text.NumberFormat; - import org.apache.lucene.index.PointValues; /** - * This class holds the configuration used to parse numeric queries and create - * {@link PointValues} queries. - * + * This class holds the configuration used to parse numeric queries and create {@link PointValues} + * queries. + * * @see PointValues * @see NumberFormat */ public class PointsConfig { - + private NumberFormat format; - + private Class type; - + /** * Constructs a {@link PointsConfig} object. 
- * - * @param format - * the {@link NumberFormat} used to parse a {@link String} to - * {@link Number} - * @param type - * the numeric type used to index the numeric values - * + * + * @param format the {@link NumberFormat} used to parse a {@link String} to {@link Number} + * @param type the numeric type used to index the numeric values * @see PointsConfig#setNumberFormat(NumberFormat) */ public PointsConfig(NumberFormat format, Class type) { setNumberFormat(format); - setType(type); + setType(type); } - + /** - * Returns the {@link NumberFormat} used to parse a {@link String} to - * {@link Number} - * - * @return the {@link NumberFormat} used to parse a {@link String} to - * {@link Number} + * Returns the {@link NumberFormat} used to parse a {@link String} to {@link Number} + * + * @return the {@link NumberFormat} used to parse a {@link String} to {@link Number} */ public NumberFormat getNumberFormat() { return format; } - + /** * Returns the numeric type used to index the numeric values - * + * * @return the numeric type used to index the numeric values */ public Class getType() { return type; } - + /** * Sets the numeric type used to index the numeric values - * + * * @param type the numeric type used to index the numeric values */ public void setType(Class type) { if (type == null) { throw new IllegalArgumentException("type must not be null!"); } - if (Integer.class.equals(type) == false && - Long.class.equals(type) == false && - Float.class.equals(type) == false && - Double.class.equals(type) == false) { + if (Integer.class.equals(type) == false + && Long.class.equals(type) == false + && Float.class.equals(type) == false + && Double.class.equals(type) == false) { throw new IllegalArgumentException("unsupported numeric type: " + type); } this.type = type; } - + /** - * Sets the {@link NumberFormat} used to parse a {@link String} to - * {@link Number} - * - * @param format - * the {@link NumberFormat} used to parse a {@link String} to - * {@link Number}, must not be null + * Sets the {@link NumberFormat} used to parse a {@link String} to {@link Number} + * + * @param format the {@link NumberFormat} used to parse a {@link String} to {@link Number}, must + * not be null */ - public void setNumberFormat(NumberFormat format) { + public void setNumberFormat(NumberFormat format) { if (format == null) { throw new IllegalArgumentException("format must not be null!"); - } + } this.format = format; } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/PointsConfigListener.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/PointsConfigListener.java index e7b5185229f..36b85e9bcc5 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/PointsConfigListener.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/PointsConfigListener.java @@ -17,46 +17,44 @@ package org.apache.lucene.queryparser.flexible.standard.config; import java.util.Map; - import org.apache.lucene.queryparser.flexible.core.config.FieldConfig; import org.apache.lucene.queryparser.flexible.core.config.FieldConfigListener; import org.apache.lucene.queryparser.flexible.core.config.QueryConfigHandler; import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler.ConfigurationKeys; /** - * This listener is used to listen to {@link FieldConfig} requests in - * {@link QueryConfigHandler} and add {@link ConfigurationKeys#POINTS_CONFIG} - * based on the 
{@link ConfigurationKeys#POINTS_CONFIG_MAP} set in the - * {@link QueryConfigHandler}. - * + * This listener is used to listen to {@link FieldConfig} requests in {@link QueryConfigHandler} and + * add {@link ConfigurationKeys#POINTS_CONFIG} based on the {@link + * ConfigurationKeys#POINTS_CONFIG_MAP} set in the {@link QueryConfigHandler}. + * * @see PointsConfig * @see QueryConfigHandler * @see ConfigurationKeys#POINTS_CONFIG * @see ConfigurationKeys#POINTS_CONFIG_MAP */ public class PointsConfigListener implements FieldConfigListener { - - final private QueryConfigHandler config; - + + private final QueryConfigHandler config; + /** * Constructs a {@link PointsConfigListener} object using the given {@link QueryConfigHandler}. - * + * * @param config the {@link QueryConfigHandler} it will listen too */ - public PointsConfigListener(QueryConfigHandler config) { + public PointsConfigListener(QueryConfigHandler config) { if (config == null) { throw new IllegalArgumentException("config must not be null!"); } this.config = config; } - + @Override public void buildFieldConfig(FieldConfig fieldConfig) { - Map pointsConfigMap = config.get(ConfigurationKeys.POINTS_CONFIG_MAP); - + Map pointsConfigMap = config.get(ConfigurationKeys.POINTS_CONFIG_MAP); + if (pointsConfigMap != null) { PointsConfig pointsConfig = pointsConfigMap.get(fieldConfig.getField()); - + if (pointsConfig != null) { fieldConfig.set(ConfigurationKeys.POINTS_CONFIG, pointsConfig); } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/StandardQueryConfigHandler.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/StandardQueryConfigHandler.java index f401226f8ba..12aa3349c40 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/StandardQueryConfigHandler.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/StandardQueryConfigHandler.java @@ -21,7 +21,6 @@ import java.util.LinkedHashMap; import java.util.Locale; import java.util.Map; import java.util.TimeZone; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.document.DateTools; import org.apache.lucene.document.DateTools.Resolution; @@ -34,153 +33,159 @@ import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.MultiTermQuery.RewriteMethod; /** - * This query configuration handler is used for almost every processor defined - * in the {@link StandardQueryNodeProcessorPipeline} processor pipeline. It holds - * configuration methods that reproduce the configuration methods that could be set on the old - * lucene 2.4 QueryParser class. - * + * This query configuration handler is used for almost every processor defined in the {@link + * StandardQueryNodeProcessorPipeline} processor pipeline. It holds configuration methods that + * reproduce the configuration methods that could be set on the old lucene 2.4 QueryParser class. + * * @see StandardQueryNodeProcessorPipeline */ public class StandardQueryConfigHandler extends QueryConfigHandler { - /** - * Class holding keys for StandardQueryNodeProcessorPipeline options. - */ - final public static class ConfigurationKeys { - + /** Class holding keys for StandardQueryNodeProcessorPipeline options. 
*/ + public static final class ConfigurationKeys { + /** * Key used to set whether position increments is enabled - * + * * @see StandardQueryParser#setEnablePositionIncrements(boolean) * @see StandardQueryParser#getEnablePositionIncrements() */ - final public static ConfigurationKey ENABLE_POSITION_INCREMENTS = ConfigurationKey.newInstance(); + public static final ConfigurationKey ENABLE_POSITION_INCREMENTS = + ConfigurationKey.newInstance(); /** * Key used to set whether leading wildcards are supported - * + * * @see StandardQueryParser#setAllowLeadingWildcard(boolean) * @see StandardQueryParser#getAllowLeadingWildcard() */ - final public static ConfigurationKey ALLOW_LEADING_WILDCARD = ConfigurationKey.newInstance(); - + public static final ConfigurationKey ALLOW_LEADING_WILDCARD = + ConfigurationKey.newInstance(); + /** * Key used to set the {@link Analyzer} used for terms found in the query - * + * * @see StandardQueryParser#setAnalyzer(Analyzer) * @see StandardQueryParser#getAnalyzer() */ - final public static ConfigurationKey ANALYZER = ConfigurationKey.newInstance(); - + public static final ConfigurationKey ANALYZER = ConfigurationKey.newInstance(); + /** * Key used to set the default boolean operator - * - * @see StandardQueryParser#setDefaultOperator(org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler.Operator) + * + * @see + * StandardQueryParser#setDefaultOperator(org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler.Operator) * @see StandardQueryParser#getDefaultOperator() */ - final public static ConfigurationKey DEFAULT_OPERATOR = ConfigurationKey.newInstance(); - + public static final ConfigurationKey DEFAULT_OPERATOR = + ConfigurationKey.newInstance(); + /** * Key used to set the default phrase slop - * + * * @see StandardQueryParser#setPhraseSlop(int) * @see StandardQueryParser#getPhraseSlop() */ - final public static ConfigurationKey PHRASE_SLOP = ConfigurationKey.newInstance(); - + public static final ConfigurationKey PHRASE_SLOP = ConfigurationKey.newInstance(); + /** * Key used to set the {@link Locale} used when parsing the query - * + * * @see StandardQueryParser#setLocale(Locale) * @see StandardQueryParser#getLocale() */ - final public static ConfigurationKey LOCALE = ConfigurationKey.newInstance(); - - final public static ConfigurationKey TIMEZONE = ConfigurationKey.newInstance(); - - /** - * Key used to set the {@link RewriteMethod} used when creating queries - * - * @see StandardQueryParser#setMultiTermRewriteMethod(org.apache.lucene.search.MultiTermQuery.RewriteMethod) - * @see StandardQueryParser#getMultiTermRewriteMethod() - */ - final public static ConfigurationKey MULTI_TERM_REWRITE_METHOD = ConfigurationKey.newInstance(); + public static final ConfigurationKey LOCALE = ConfigurationKey.newInstance(); + + public static final ConfigurationKey TIMEZONE = ConfigurationKey.newInstance(); /** - * Key used to set the fields a query should be expanded to when the field - * is null - * + * Key used to set the {@link RewriteMethod} used when creating queries + * + * @see + * StandardQueryParser#setMultiTermRewriteMethod(org.apache.lucene.search.MultiTermQuery.RewriteMethod) + * @see StandardQueryParser#getMultiTermRewriteMethod() + */ + public static final ConfigurationKey MULTI_TERM_REWRITE_METHOD = + ConfigurationKey.newInstance(); + + /** + * Key used to set the fields a query should be expanded to when the field is null + * * @see StandardQueryParser#setMultiFields(CharSequence[]) * @see 
StandardQueryParser#getMultiFields() */ - final public static ConfigurationKey MULTI_FIELDS = ConfigurationKey.newInstance(); - + public static final ConfigurationKey MULTI_FIELDS = + ConfigurationKey.newInstance(); + /** * Key used to set a field to boost map that is used to set the boost for each field - * + * * @see StandardQueryParser#setFieldsBoost(Map) * @see StandardQueryParser#getFieldsBoost() */ - final public static ConfigurationKey> FIELD_BOOST_MAP = ConfigurationKey.newInstance(); + public static final ConfigurationKey> FIELD_BOOST_MAP = + ConfigurationKey.newInstance(); /** - * Key used to set a field to {@link Resolution} map that is used - * to normalize each date field value. - * + * Key used to set a field to {@link Resolution} map that is used to normalize each date field + * value. + * * @see StandardQueryParser#setDateResolutionMap(Map) * @see StandardQueryParser#getDateResolutionMap() */ - final public static ConfigurationKey> FIELD_DATE_RESOLUTION_MAP = ConfigurationKey.newInstance(); - + public static final ConfigurationKey> + FIELD_DATE_RESOLUTION_MAP = ConfigurationKey.newInstance(); + /** * Key used to set the {@link FuzzyConfig} used to create fuzzy queries. - * + * * @see StandardQueryParser#setFuzzyMinSim(float) * @see StandardQueryParser#setFuzzyPrefixLength(int) * @see StandardQueryParser#getFuzzyMinSim() * @see StandardQueryParser#getFuzzyPrefixLength() */ - final public static ConfigurationKey FUZZY_CONFIG = ConfigurationKey.newInstance(); - + public static final ConfigurationKey FUZZY_CONFIG = ConfigurationKey.newInstance(); + /** * Key used to set default {@link Resolution}. - * + * * @see StandardQueryParser#setDateResolution(org.apache.lucene.document.DateTools.Resolution) * @see StandardQueryParser#getDateResolution() */ - final public static ConfigurationKey DATE_RESOLUTION = ConfigurationKey.newInstance(); - + public static final ConfigurationKey DATE_RESOLUTION = + ConfigurationKey.newInstance(); + /** * Key used to set the boost value in {@link FieldConfig} objects. - * + * * @see StandardQueryParser#setFieldsBoost(Map) * @see StandardQueryParser#getFieldsBoost() */ - final public static ConfigurationKey BOOST = ConfigurationKey.newInstance(); - + public static final ConfigurationKey BOOST = ConfigurationKey.newInstance(); + /** * Key used to set a field to its {@link PointsConfig}. - * + * * @see StandardQueryParser#setPointsConfigMap(Map) * @see StandardQueryParser#getPointsConfigMap() */ - final public static ConfigurationKey POINTS_CONFIG = ConfigurationKey.newInstance(); + public static final ConfigurationKey POINTS_CONFIG = + ConfigurationKey.newInstance(); /** * Key used to set the {@link PointsConfig} in {@link FieldConfig} for point fields. 
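Since the parser's convenience setters are essentially thin wrappers that write these keys, the configuration can also be read or set directly on the underlying handler; a minimal sketch (class name invented):

import org.apache.lucene.queryparser.flexible.standard.StandardQueryParser;
import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler.ConfigurationKeys;
import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler.Operator;

public class ConfigKeysDemo {
  public static void main(String[] args) {
    StandardQueryParser parser = new StandardQueryParser();
    // Equivalent to parser.setDefaultOperator(...) / parser.setAllowLeadingWildcard(...).
    parser.getQueryConfigHandler().set(ConfigurationKeys.DEFAULT_OPERATOR, Operator.AND);
    parser.getQueryConfigHandler().set(ConfigurationKeys.ALLOW_LEADING_WILDCARD, true);
    System.out.println(parser.getQueryConfigHandler().get(ConfigurationKeys.ALLOW_LEADING_WILDCARD));
  }
}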
- * + * * @see StandardQueryParser#setPointsConfigMap(Map) * @see StandardQueryParser#getPointsConfigMap() */ - final public static ConfigurationKey> POINTS_CONFIG_MAP = ConfigurationKey.newInstance(); - + public static final ConfigurationKey> POINTS_CONFIG_MAP = + ConfigurationKey.newInstance(); } - - /** - * Boolean Operator: AND or OR - */ + + /** Boolean Operator: AND or OR */ public static enum Operator { - AND, OR; + AND, + OR; } public StandardQueryConfigHandler() { @@ -188,19 +193,19 @@ public class StandardQueryConfigHandler extends QueryConfigHandler { addFieldConfigListener(new FieldBoostMapFCListener(this)); addFieldConfigListener(new FieldDateResolutionFCListener(this)); addFieldConfigListener(new PointsConfigListener(this)); - + // Default Values set(ConfigurationKeys.ALLOW_LEADING_WILDCARD, false); // default in 2.9 - set(ConfigurationKeys.ANALYZER, null); //default value 2.4 + set(ConfigurationKeys.ANALYZER, null); // default value 2.4 set(ConfigurationKeys.DEFAULT_OPERATOR, Operator.OR); - set(ConfigurationKeys.PHRASE_SLOP, 0); //default value 2.4 - set(ConfigurationKeys.ENABLE_POSITION_INCREMENTS, false); //default value 2.4 + set(ConfigurationKeys.PHRASE_SLOP, 0); // default value 2.4 + set(ConfigurationKeys.ENABLE_POSITION_INCREMENTS, false); // default value 2.4 set(ConfigurationKeys.FIELD_BOOST_MAP, new LinkedHashMap()); set(ConfigurationKeys.FUZZY_CONFIG, new FuzzyConfig()); set(ConfigurationKeys.LOCALE, Locale.getDefault()); set(ConfigurationKeys.MULTI_TERM_REWRITE_METHOD, MultiTermQuery.CONSTANT_SCORE_REWRITE); - set(ConfigurationKeys.FIELD_DATE_RESOLUTION_MAP, new HashMap()); - + set( + ConfigurationKeys.FIELD_DATE_RESOLUTION_MAP, + new HashMap()); } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/package-info.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/package-info.java index 6f06e796936..124fcd3673e 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/package-info.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/package-info.java @@ -14,17 +14,17 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -/** + +/** * Standard Lucene Query Configuration. - * + * *

 * <h2>Standard Lucene Query Configuration</h2> - * <p> - * The package org.apache.lucene.queryparser.flexible.standard.config contains the Lucene - * query configuration handler (StandardQueryConfigHandler). This configuration - * handler reproduces almost everything that could be set on the old query parser. - * <p> - * StandardQueryConfigHandler is the class that should be used to configure the StandardQueryNodeProcessorPipeline. + * + * <p>The package org.apache.lucene.queryparser.flexible.standard.config contains the Lucene query + * configuration handler (StandardQueryConfigHandler). This configuration handler reproduces almost + * everything that could be set on the old query parser. + * + * <p>

    StandardQueryConfigHandler is the class that should be used to configure the + * StandardQueryNodeProcessorPipeline. */ package org.apache.lucene.queryparser.flexible.standard.config; - diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/AbstractRangeQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/AbstractRangeQueryNode.java index c2f0a804534..4de8fe0d575 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/AbstractRangeQueryNode.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/AbstractRangeQueryNode.java @@ -17,7 +17,6 @@ package org.apache.lucene.queryparser.flexible.standard.nodes; import java.util.ArrayList; - import org.apache.lucene.queryparser.flexible.core.nodes.FieldValuePairQueryNode; import org.apache.lucene.queryparser.flexible.core.nodes.FieldableNode; import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; @@ -28,29 +27,24 @@ import org.apache.lucene.queryparser.flexible.core.util.StringUtils; /** * This class should be extended by nodes intending to represent range queries. - * - * @param - * the type of the range query bounds (lower and upper) + * + * @param the type of the range query bounds (lower and upper) */ -public class AbstractRangeQueryNode> - extends QueryNodeImpl implements RangeQueryNode> { - +public class AbstractRangeQueryNode> extends QueryNodeImpl + implements RangeQueryNode> { + private boolean lowerInclusive, upperInclusive; - - /** - * Constructs an {@link AbstractRangeQueryNode}, it should be invoked only by - * its extenders. - */ + + /** Constructs an {@link AbstractRangeQueryNode}, it should be invoked only by its extenders. */ protected AbstractRangeQueryNode() { setLeaf(false); allocate(); } - + /** * Returns the field associated with this node. - * + * * @return the field associated with this node - * * @see FieldableNode */ @Override @@ -58,41 +52,39 @@ public class AbstractRangeQueryNode> CharSequence field = null; T lower = getLowerBound(); T upper = getUpperBound(); - + if (lower != null) { field = lower.getField(); - + } else if (upper != null) { field = upper.getField(); } - + return field; - } - + /** * Sets the field associated with this node. - * + * * @param fieldName the field associated with this node */ @Override public void setField(CharSequence fieldName) { T lower = getLowerBound(); T upper = getUpperBound(); - + if (lower != null) { lower.setField(fieldName); } - + if (upper != null) { upper.setField(fieldName); } - } - + /** * Returns the lower bound node. - * + * * @return the lower bound node. */ @Override @@ -100,10 +92,10 @@ public class AbstractRangeQueryNode> public T getLowerBound() { return (T) getChildren().get(0); } - + /** * Returns the upper bound node. - * + * * @return the upper bound node. */ @Override @@ -111,108 +103,105 @@ public class AbstractRangeQueryNode> public T getUpperBound() { return (T) getChildren().get(1); } - + /** * Returns whether the lower bound is inclusive or exclusive. - * + * * @return true if the lower bound is inclusive, otherwise, false */ @Override public boolean isLowerInclusive() { return lowerInclusive; } - + /** * Returns whether the upper bound is inclusive or exclusive. - * + * * @return true if the upper bound is inclusive, otherwise, false */ @Override public boolean isUpperInclusive() { return upperInclusive; } - + /** * Sets the lower and upper bounds. 
- * + * * @param lower the lower bound, null if lower bound is open * @param upper the upper bound, null if upper bound is open - * @param lowerInclusive true if the lower bound is inclusive, otherwise, false - * @param upperInclusive true if the upper bound is inclusive, otherwise, false - * + * @param lowerInclusive true if the lower bound is inclusive, otherwise, false + * + * @param upperInclusive true if the upper bound is inclusive, otherwise, false + * * @see #getLowerBound() * @see #getUpperBound() * @see #isLowerInclusive() * @see #isUpperInclusive() */ - public void setBounds(T lower, T upper, boolean lowerInclusive, - boolean upperInclusive) { - + public void setBounds(T lower, T upper, boolean lowerInclusive, boolean upperInclusive) { + if (lower != null && upper != null) { String lowerField = StringUtils.toString(lower.getField()); String upperField = StringUtils.toString(upper.getField()); - + if ((upperField != null || lowerField != null) - && ((upperField != null && !upperField.equals(lowerField)) || !lowerField - .equals(upperField))) { + && ((upperField != null && !upperField.equals(lowerField)) + || !lowerField.equals(upperField))) { throw new IllegalArgumentException( "lower and upper bounds should have the same field name!"); } - + this.lowerInclusive = lowerInclusive; this.upperInclusive = upperInclusive; - + ArrayList children = new ArrayList<>(2); children.add(lower); children.add(upper); - + set(children); - } - } - + @Override public CharSequence toQueryString(EscapeQuerySyntax escapeSyntaxParser) { StringBuilder sb = new StringBuilder(); - + T lower = getLowerBound(); T upper = getUpperBound(); - + if (lowerInclusive) { sb.append('['); - + } else { sb.append('{'); } - + if (lower != null) { sb.append(lower.toQueryString(escapeSyntaxParser)); - + } else { sb.append("..."); } - + sb.append(' '); - + if (upper != null) { sb.append(upper.toQueryString(escapeSyntaxParser)); - + } else { sb.append("..."); } - + if (upperInclusive) { sb.append(']'); - + } else { sb.append('}'); } - + return sb.toString(); - } - + @Override public String toString() { StringBuilder sb = new StringBuilder("<").append(getClass().getCanonicalName()); @@ -224,7 +213,5 @@ public class AbstractRangeQueryNode> sb.append("\n"); return sb.toString(); - } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/BooleanModifierNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/BooleanModifierNode.java index dd81d09568d..e7ff6e7552e 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/BooleanModifierNode.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/BooleanModifierNode.java @@ -21,10 +21,10 @@ import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; import org.apache.lucene.queryparser.flexible.standard.processors.BooleanQuery2ModifierNodeProcessor; /** - * A {@link BooleanModifierNode} has the same behaviour as - * {@link ModifierQueryNode}, it only indicates that this modifier was added by - * {@link BooleanQuery2ModifierNodeProcessor} and not by the user. - * + * A {@link BooleanModifierNode} has the same behaviour as {@link ModifierQueryNode}, it only + * indicates that this modifier was added by {@link BooleanQuery2ModifierNodeProcessor} and not by + * the user. 
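The bracket characters emitted by the range-node plumbing reformatted above can be seen with a tiny standalone snippet; a hedged sketch (class name invented):

import org.apache.lucene.queryparser.flexible.core.nodes.FieldQueryNode;
import org.apache.lucene.queryparser.flexible.standard.nodes.TermRangeQueryNode;
import org.apache.lucene.queryparser.flexible.standard.parser.EscapeQuerySyntaxImpl;

public class RangeNodeDemo {
  public static void main(String[] args) {
    // Bounds must share a field name, or setBounds throws IllegalArgumentException.
    FieldQueryNode lower = new FieldQueryNode("price", "10", 0, 2);
    FieldQueryNode upper = new FieldQueryNode("price", "20", 0, 2);
    TermRangeQueryNode range = new TermRangeQueryNode(lower, upper, true, false);
    // '[' marks the inclusive lower bound, '}' the exclusive upper bound.
    System.out.println(range.toQueryString(new EscapeQuerySyntaxImpl()));
  }
}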
+ * * @see ModifierQueryNode */ public class BooleanModifierNode extends ModifierQueryNode { @@ -32,5 +32,4 @@ public class BooleanModifierNode extends ModifierQueryNode { public BooleanModifierNode(QueryNode node, Modifier mod) { super(node, mod); } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/MultiPhraseQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/MultiPhraseQueryNode.java index 997c2d12d28..b51000fedf7 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/MultiPhraseQueryNode.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/MultiPhraseQueryNode.java @@ -17,7 +17,6 @@ package org.apache.lucene.queryparser.flexible.standard.nodes; import java.util.List; - import org.apache.lucene.queryparser.flexible.core.nodes.FieldableNode; import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; import org.apache.lucene.queryparser.flexible.core.nodes.QueryNodeImpl; @@ -26,22 +25,19 @@ import org.apache.lucene.search.MultiPhraseQuery; import org.apache.lucene.search.PhraseQuery; /** - * A {@link MultiPhraseQueryNode} indicates that its children should be used to - * build a {@link MultiPhraseQuery} instead of {@link PhraseQuery}. + * A {@link MultiPhraseQueryNode} indicates that its children should be used to build a {@link + * MultiPhraseQuery} instead of {@link PhraseQuery}. */ -public class MultiPhraseQueryNode extends QueryNodeImpl implements - FieldableNode { +public class MultiPhraseQueryNode extends QueryNodeImpl implements FieldableNode { public MultiPhraseQueryNode() { setLeaf(false); allocate(); - } @Override public String toString() { - if (getChildren() == null || getChildren().size() == 0) - return ""; + if (getChildren() == null || getChildren().size() == 0) return ""; StringBuilder sb = new StringBuilder(); sb.append(""); for (QueryNode child : getChildren()) { @@ -54,8 +50,7 @@ public class MultiPhraseQueryNode extends QueryNodeImpl implements @Override public CharSequence toQueryString(EscapeQuerySyntax escapeSyntaxParser) { - if (getChildren() == null || getChildren().size() == 0) - return ""; + if (getChildren() == null || getChildren().size() == 0) return ""; StringBuilder sb = new StringBuilder(); String filler = ""; @@ -86,7 +81,6 @@ public class MultiPhraseQueryNode extends QueryNodeImpl implements } else { return ((FieldableNode) children.get(0)).getField(); } - } @Override @@ -100,11 +94,7 @@ public class MultiPhraseQueryNode extends QueryNodeImpl implements if (child instanceof FieldableNode) { ((FieldableNode) child).setField(fieldName); } - } - } - } - } // end class MultitermQueryNode diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/PointQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/PointQueryNode.java index 69a7d095ebb..51ba96d8522 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/PointQueryNode.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/PointQueryNode.java @@ -18,7 +18,6 @@ package org.apache.lucene.queryparser.flexible.standard.nodes; import java.text.NumberFormat; import java.util.Locale; - import org.apache.lucene.queryparser.flexible.core.nodes.FieldQueryNode; import org.apache.lucene.queryparser.flexible.core.nodes.FieldValuePairQueryNode; import 
org.apache.lucene.queryparser.flexible.core.nodes.QueryNodeImpl; @@ -27,74 +26,67 @@ import org.apache.lucene.queryparser.flexible.core.parser.EscapeQuerySyntax.Type import org.apache.lucene.queryparser.flexible.standard.config.PointsConfig; /** - * This query node represents a field query that holds a point value. It is - * similar to {@link FieldQueryNode}, however the {@link #getValue()} returns a - * {@link Number}. - * + * This query node represents a field query that holds a point value. It is similar to {@link + * FieldQueryNode}, however the {@link #getValue()} returns a {@link Number}. + * * @see PointsConfig */ -public class PointQueryNode extends QueryNodeImpl implements - FieldValuePairQueryNode { - +public class PointQueryNode extends QueryNodeImpl implements FieldValuePairQueryNode { + private NumberFormat numberFormat; - + private CharSequence field; - + private Number value; - + /** - * Creates a {@link PointQueryNode} object using the given field, - * {@link Number} value and {@link NumberFormat} used to convert the value to - * {@link String}. - * + * Creates a {@link PointQueryNode} object using the given field, {@link Number} value and {@link + * NumberFormat} used to convert the value to {@link String}. + * * @param field the field associated with this query node * @param value the value hold by this node * @param numberFormat the {@link NumberFormat} used to convert the value to {@link String} */ - public PointQueryNode(CharSequence field, Number value, - NumberFormat numberFormat) { - + public PointQueryNode(CharSequence field, Number value, NumberFormat numberFormat) { + super(); - + setNumberFormat(numberFormat); setField(field); setValue(value); - } - + /** * Returns the field associated with this node. - * + * * @return the field associated with this node */ @Override public CharSequence getField() { return this.field; } - + /** * Sets the field associated with this node. - * + * * @param fieldName the field associated with this node */ @Override public void setField(CharSequence fieldName) { this.field = fieldName; } - + /** - * This method is used to get the value converted to {@link String} and - * escaped using the given {@link EscapeQuerySyntax}. - * + * This method is used to get the value converted to {@link String} and escaped using the given + * {@link EscapeQuerySyntax}. + * * @param escaper the {@link EscapeQuerySyntax} used to escape the value {@link String} - * * @return the value converted to {@link String} and escaped */ protected CharSequence getTermEscaped(EscapeQuerySyntax escaper) { - return escaper.escape(numberFormat.format(this.value), - Locale.ROOT, Type.NORMAL); + return escaper.escape(numberFormat.format(this.value), Locale.ROOT, Type.NORMAL); } - + @Override public CharSequence toQueryString(EscapeQuerySyntax escapeSyntaxParser) { if (isDefaultField(this.field)) { @@ -103,49 +95,47 @@ public class PointQueryNode extends QueryNodeImpl implements return this.field + ":" + getTermEscaped(escapeSyntaxParser); } } - + /** * Sets the {@link NumberFormat} used to convert the value to {@link String}. - * + * * @param format the {@link NumberFormat} used to convert the value to {@link String} */ public void setNumberFormat(NumberFormat format) { this.numberFormat = format; } - + /** * Returns the {@link NumberFormat} used to convert the value to {@link String}. 
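A PointQueryNode can also be exercised in isolation to see how the configured NumberFormat drives its query-syntax rendering; a minimal sketch (class name invented):

import java.text.NumberFormat;
import java.util.Locale;
import org.apache.lucene.queryparser.flexible.standard.nodes.PointQueryNode;
import org.apache.lucene.queryparser.flexible.standard.parser.EscapeQuerySyntaxImpl;

public class PointNodeDemo {
  public static void main(String[] args) {
    NumberFormat format = NumberFormat.getNumberInstance(Locale.ROOT);
    PointQueryNode node = new PointQueryNode("price", 10.5, format);
    // getTermEscaped formats the Number and escapes it; should print "price:10.5".
    System.out.println(node.toQueryString(new EscapeQuerySyntaxImpl()));
  }
}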
- * + * * @return the {@link NumberFormat} used to convert the value to {@link String} */ public NumberFormat getNumberFormat() { return this.numberFormat; } - + /** * Returns the numeric value as {@link Number}. - * + * * @return the numeric value */ @Override public Number getValue() { return value; } - + /** * Sets the numeric value. - * + * * @param value the numeric value */ @Override public void setValue(Number value) { this.value = value; } - + @Override public String toString() { - return ""; + return ""; } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/PointRangeQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/PointRangeQueryNode.java index 3611ebfe979..bc18bd49346 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/PointRangeQueryNode.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/PointRangeQueryNode.java @@ -20,92 +20,108 @@ import org.apache.lucene.queryparser.flexible.core.QueryNodeException; import org.apache.lucene.queryparser.flexible.standard.config.PointsConfig; /** - * This query node represents a range query composed by {@link PointQueryNode} - * bounds, which means the bound values are {@link Number}s. - * + * This query node represents a range query composed by {@link PointQueryNode} bounds, which means + * the bound values are {@link Number}s. + * * @see PointQueryNode * @see AbstractRangeQueryNode */ public class PointRangeQueryNode extends AbstractRangeQueryNode { - - public PointsConfig numericConfig; - + + public PointsConfig numericConfig; + /** - * Constructs a {@link PointRangeQueryNode} object using the given - * {@link PointQueryNode} as its bounds and {@link PointsConfig}. - * + * Constructs a {@link PointRangeQueryNode} object using the given {@link PointQueryNode} as its + * bounds and {@link PointsConfig}. + * * @param lower the lower bound * @param upper the upper bound - * @param lowerInclusive true if the lower bound is inclusive, otherwise, false - * @param upperInclusive true if the upper bound is inclusive, otherwise, false - * @param numericConfig the {@link PointsConfig} that represents associated with the upper and lower bounds - * + * @param lowerInclusive true if the lower bound is inclusive, otherwise, false + * + * @param upperInclusive true if the upper bound is inclusive, otherwise, false + * + * @param numericConfig the {@link PointsConfig} that represents associated with the upper and + * lower bounds * @see #setBounds(PointQueryNode, PointQueryNode, boolean, boolean, PointsConfig) */ - public PointRangeQueryNode(PointQueryNode lower, PointQueryNode upper, - boolean lowerInclusive, boolean upperInclusive, PointsConfig numericConfig) throws QueryNodeException { + public PointRangeQueryNode( + PointQueryNode lower, + PointQueryNode upper, + boolean lowerInclusive, + boolean upperInclusive, + PointsConfig numericConfig) + throws QueryNodeException { setBounds(lower, upper, lowerInclusive, upperInclusive, numericConfig); } - + /** - * Sets the upper and lower bounds of this range query node and the - * {@link PointsConfig} associated with these bounds. - * + * Sets the upper and lower bounds of this range query node and the {@link PointsConfig} + * associated with these bounds. 
+ * * @param lower the lower bound * @param upper the upper bound - * @param lowerInclusive true if the lower bound is inclusive, otherwise, false - * @param upperInclusive true if the upper bound is inclusive, otherwise, false - * @param pointsConfig the {@link PointsConfig} that represents associated with the upper and lower bounds - * + * @param lowerInclusive true if the lower bound is inclusive, otherwise, false + * + * @param upperInclusive true if the upper bound is inclusive, otherwise, false + * + * @param pointsConfig the {@link PointsConfig} that represents associated with the upper and + * lower bounds */ - public void setBounds(PointQueryNode lower, PointQueryNode upper, - boolean lowerInclusive, boolean upperInclusive, PointsConfig pointsConfig) throws QueryNodeException { - + public void setBounds( + PointQueryNode lower, + PointQueryNode upper, + boolean lowerInclusive, + boolean upperInclusive, + PointsConfig pointsConfig) + throws QueryNodeException { + if (pointsConfig == null) { throw new IllegalArgumentException("pointsConfig must not be null!"); } - + Class lowerNumberType, upperNumberType; - + if (lower != null && lower.getValue() != null) { lowerNumberType = lower.getValue().getClass(); } else { lowerNumberType = null; } - + if (upper != null && upper.getValue() != null) { upperNumberType = upper.getValue().getClass(); } else { upperNumberType = null; } - - if (lowerNumberType != null - && !lowerNumberType.equals(pointsConfig.getType())) { + + if (lowerNumberType != null && !lowerNumberType.equals(pointsConfig.getType())) { throw new IllegalArgumentException( "lower value's type should be the same as numericConfig type: " - + lowerNumberType + " != " + pointsConfig.getType()); + + lowerNumberType + + " != " + + pointsConfig.getType()); } - - if (upperNumberType != null - && !upperNumberType.equals(pointsConfig.getType())) { + + if (upperNumberType != null && !upperNumberType.equals(pointsConfig.getType())) { throw new IllegalArgumentException( "upper value's type should be the same as numericConfig type: " - + upperNumberType + " != " + pointsConfig.getType()); + + upperNumberType + + " != " + + pointsConfig.getType()); } - + super.setBounds(lower, upper, lowerInclusive, upperInclusive); this.numericConfig = pointsConfig; } - + /** * Returns the {@link PointsConfig} associated with the lower and upper bounds. - * + * * @return the {@link PointsConfig} associated with the lower and upper bounds */ public PointsConfig getPointsConfig() { return this.numericConfig; } - + @Override public String toString() { StringBuilder sb = new StringBuilder(); diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/PrefixWildcardQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/PrefixWildcardQueryNode.java index 52a84807134..b7dcd1c4944 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/PrefixWildcardQueryNode.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/PrefixWildcardQueryNode.java @@ -19,25 +19,19 @@ package org.apache.lucene.queryparser.flexible.standard.nodes; import org.apache.lucene.queryparser.flexible.core.nodes.FieldQueryNode; /** - * A {@link PrefixWildcardQueryNode} represents wildcardquery that matches abc* - * or *. This does not apply to phrases, this is a special case on the original - * lucene parser. TODO: refactor the code to remove this special case from the - * parser. 
and probably do it on a Processor + * A {@link PrefixWildcardQueryNode} represents wildcardquery that matches abc* or *. This does not + * apply to phrases, this is a special case on the original lucene parser. TODO: refactor the code + * to remove this special case from the parser. and probably do it on a Processor */ public class PrefixWildcardQueryNode extends WildcardQueryNode { /** - * @param field - * - field name - * @param text - * - value including the wildcard - * @param begin - * - position in the query string - * @param end - * - position in the query string + * @param field - field name + * @param text - value including the wildcard + * @param begin - position in the query string + * @param end - position in the query string */ - public PrefixWildcardQueryNode(CharSequence field, CharSequence text, - int begin, int end) { + public PrefixWildcardQueryNode(CharSequence field, CharSequence text, int begin, int end) { super(field, text, begin, end); } @@ -47,8 +41,7 @@ public class PrefixWildcardQueryNode extends WildcardQueryNode { @Override public String toString() { - return ""; + return ""; } @Override diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/RegexpQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/RegexpQueryNode.java index 6ecbacce0f8..f94cfbe56dc 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/RegexpQueryNode.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/RegexpQueryNode.java @@ -23,34 +23,25 @@ import org.apache.lucene.queryparser.flexible.core.parser.EscapeQuerySyntax; import org.apache.lucene.search.RegexpQuery; import org.apache.lucene.util.BytesRef; -/** - * A {@link RegexpQueryNode} represents {@link RegexpQuery} query Examples: /[a-z]|[0-9]/ - */ -public class RegexpQueryNode extends QueryNodeImpl implements TextableQueryNode, FieldableNode { +/** A {@link RegexpQueryNode} represents {@link RegexpQuery} query Examples: /[a-z]|[0-9]/ */ +public class RegexpQueryNode extends QueryNodeImpl implements TextableQueryNode, FieldableNode { private CharSequence text; private CharSequence field; /** - * @param field - * - field name - * @param text - * - value that contains a regular expression - * @param begin - * - position in the query string - * @param end - * - position in the query string + * @param field - field name + * @param text - value that contains a regular expression + * @param begin - position in the query string + * @param end - position in the query string */ - public RegexpQueryNode(CharSequence field, CharSequence text, int begin, - int end) { + public RegexpQueryNode(CharSequence field, CharSequence text, int begin, int end) { this.field = field; this.text = text.subSequence(begin, end); } /** - * @param field - * - field name - * @param text - * - value that contains a regular expression + * @param field - field name + * @param text - value that contains a regular expression */ public RegexpQueryNode(CharSequence field, CharSequence text) { this(field, text, 0, text.length()); @@ -87,7 +78,7 @@ public class RegexpQueryNode extends QueryNodeImpl implements TextableQueryNode public CharSequence getField() { return field; } - + public String getFieldAsString() { return field.toString(); } @@ -99,7 +90,6 @@ public class RegexpQueryNode extends QueryNodeImpl implements TextableQueryNode @Override public CharSequence toQueryString(EscapeQuerySyntax escapeSyntaxParser) { 
- return isDefaultField(field)? "/"+text+"/": field + ":/" + text + "/"; + return isDefaultField(field) ? "/" + text + "/" : field + ":/" + text + "/"; } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/SynonymQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/SynonymQueryNode.java index a8c44bc61bc..067459aa7fa 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/SynonymQueryNode.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/SynonymQueryNode.java @@ -17,7 +17,6 @@ package org.apache.lucene.queryparser.flexible.standard.nodes; import java.util.List; - import org.apache.lucene.queryparser.flexible.core.nodes.BooleanQueryNode; import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/TermRangeQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/TermRangeQueryNode.java index e2766a098be..873923a261d 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/TermRangeQueryNode.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/TermRangeQueryNode.java @@ -19,26 +19,27 @@ package org.apache.lucene.queryparser.flexible.standard.nodes; import org.apache.lucene.queryparser.flexible.core.nodes.FieldQueryNode; /** - * This query node represents a range query composed by {@link FieldQueryNode} - * bounds, which means the bound values are strings. - * + * This query node represents a range query composed by {@link FieldQueryNode} bounds, which means + * the bound values are strings. + * * @see FieldQueryNode * @see AbstractRangeQueryNode */ public class TermRangeQueryNode extends AbstractRangeQueryNode { - + /** - * Constructs a {@link TermRangeQueryNode} object using the given - * {@link FieldQueryNode} as its bounds. - * + * Constructs a {@link TermRangeQueryNode} object using the given {@link FieldQueryNode} as its + * bounds. 
+ * * @param lower the lower bound * @param upper the upper bound - * @param lowerInclusive true if the lower bound is inclusive, otherwise, false - * @param upperInclusive true if the upper bound is inclusive, otherwise, false + * @param lowerInclusive true if the lower bound is inclusive, otherwise, false + * + * @param upperInclusive true if the upper bound is inclusive, otherwise, false + * */ - public TermRangeQueryNode(FieldQueryNode lower, FieldQueryNode upper, - boolean lowerInclusive, boolean upperInclusive) { + public TermRangeQueryNode( + FieldQueryNode lower, FieldQueryNode upper, boolean lowerInclusive, boolean upperInclusive) { setBounds(lower, upper, lowerInclusive, upperInclusive); } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/WildcardQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/WildcardQueryNode.java index afd4f9fe4e5..b49cafc4311 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/WildcardQueryNode.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/WildcardQueryNode.java @@ -20,23 +20,18 @@ import org.apache.lucene.queryparser.flexible.core.nodes.FieldQueryNode; import org.apache.lucene.queryparser.flexible.core.parser.EscapeQuerySyntax; /** - * A {@link WildcardQueryNode} represents wildcard query This does not apply to - * phrases. Examples: a*b*c Fl?w? m?ke*g + * A {@link WildcardQueryNode} represents wildcard query This does not apply to phrases. Examples: + * a*b*c Fl?w? m?ke*g */ public class WildcardQueryNode extends FieldQueryNode { /** - * @param field - * - field name - * @param text - * - value that contains one or more wild card characters (? or *) - * @param begin - * - position in the query string - * @param end - * - position in the query string + * @param field - field name + * @param text - value that contains one or more wild card characters (? or *) + * @param begin - position in the query string + * @param end - position in the query string */ - public WildcardQueryNode(CharSequence field, CharSequence text, int begin, - int end) { + public WildcardQueryNode(CharSequence field, CharSequence text, int begin, int end) { super(field, text, begin, end); } @@ -66,5 +61,4 @@ public class WildcardQueryNode extends FieldQueryNode { return clone; } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/package-info.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/package-info.java index 0a9b75a0177..fe63b62eb03 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/package-info.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/package-info.java @@ -14,15 +14,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -/** + +/** * Standard Lucene Query Nodes. * *

 * <h2>Standard Lucene Query Nodes</h2> - * <p> - * The package org.apache.lucene.queryparser.flexible.standard.nodes contains QueryNode classes - * that are used specifically for Lucene query node tree. Any other generic QueryNode is - * defined under org.apache.lucene.queryParser.nodes. + * + * <p>

    The package org.apache.lucene.queryparser.flexible.standard.nodes contains QueryNode classes + * that are used specifically for Lucene query node tree. Any other generic QueryNode is defined + * under org.apache.lucene.queryParser.nodes. */ package org.apache.lucene.queryparser.flexible.standard.nodes; - diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/package-info.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/package-info.java index 740ca4c2cee..ae123419cb9 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/package-info.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/package-info.java @@ -14,22 +14,21 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -/** - * Implementation of the {@linkplain org.apache.lucene.queryparser.classic Lucene classic query parser} using the flexible query parser frameworks - * + +/** + * Implementation of the {@linkplain org.apache.lucene.queryparser.classic Lucene classic query + * parser} using the flexible query parser frameworks + * *

 * <h2>Lucene Flexible Query Parser Implementation</h2> - * <p> - * The old Lucene query parser used to have only one class that performed - * all the parsing operations. In the new query parser structure, the - * parsing was divided in 3 steps: parsing (syntax), processing (semantic) - * and building. - * <p> - * The classes contained in the package org.apache.lucene.queryParser.standard - * are used to reproduce the same behavior as the old query parser. - * - * <p> - * Check {@link org.apache.lucene.queryparser.flexible.standard.StandardQueryParser} to quick start using the Lucene query parser. + * + * <p>

The old Lucene query parser used to have only one class that performed all the parsing + * operations. In the new query parser structure, the parsing was divided in 3 steps: parsing + * (syntax), processing (semantic) and building. + * + * <p>

The classes contained in the package org.apache.lucene.queryParser.standard are used to + * reproduce the same behavior as the old query parser. + * + * <p>

    Check {@link org.apache.lucene.queryparser.flexible.standard.StandardQueryParser} to quick + * start using the Lucene query parser. */ package org.apache.lucene.queryparser.flexible.standard; - diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/EscapeQuerySyntaxImpl.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/EscapeQuerySyntaxImpl.java index 679d913d724..ea534875528 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/EscapeQuerySyntaxImpl.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/EscapeQuerySyntaxImpl.java @@ -17,49 +17,43 @@ package org.apache.lucene.queryparser.flexible.standard.parser; import java.util.Locale; - -import org.apache.lucene.queryparser.flexible.messages.MessageImpl; import org.apache.lucene.queryparser.flexible.core.messages.QueryParserMessages; import org.apache.lucene.queryparser.flexible.core.parser.EscapeQuerySyntax; import org.apache.lucene.queryparser.flexible.core.util.UnescapedCharSequence; +import org.apache.lucene.queryparser.flexible.messages.MessageImpl; -/** - * Implementation of {@link EscapeQuerySyntax} for the standard lucene - * syntax. - */ +/** Implementation of {@link EscapeQuerySyntax} for the standard lucene syntax. */ public class EscapeQuerySyntaxImpl implements EscapeQuerySyntax { - private static final char[] wildcardChars = { '*', '?' }; + private static final char[] wildcardChars = {'*', '?'}; - private static final String[] escapableTermExtraFirstChars = { "+", "-", "@" }; + private static final String[] escapableTermExtraFirstChars = {"+", "-", "@"}; - private static final String[] escapableTermChars = { "\"", "<", ">", "=", - "!", "(", ")", "^", "[", "{", ":", "]", "}", "~", "/" }; + private static final String[] escapableTermChars = { + "\"", "<", ">", "=", "!", "(", ")", "^", "[", "{", ":", "]", "}", "~", "/" + }; // TODO: check what to do with these "*", "?", "\\" - private static final String[] escapableQuotedChars = { "\"" }; - private static final String[] escapableWhiteChars = { " ", "\t", "\n", "\r", - "\f", "\b", "\u3000" }; - private static final String[] escapableWordTokens = { "AND", "OR", "NOT", - "TO", "WITHIN", "SENTENCE", "PARAGRAPH", "INORDER" }; + private static final String[] escapableQuotedChars = {"\""}; + private static final String[] escapableWhiteChars = {" ", "\t", "\n", "\r", "\f", "\b", "\u3000"}; + private static final String[] escapableWordTokens = { + "AND", "OR", "NOT", "TO", "WITHIN", "SENTENCE", "PARAGRAPH", "INORDER" + }; private static final CharSequence escapeChar(CharSequence str, Locale locale) { - if (str == null || str.length() == 0) - return str; + if (str == null || str.length() == 0) return str; CharSequence buffer = str; // regular escapable Char for terms for (int i = 0; i < escapableTermChars.length; i++) { - buffer = replaceIgnoreCase(buffer, escapableTermChars[i].toLowerCase(locale), - "\\", locale); + buffer = replaceIgnoreCase(buffer, escapableTermChars[i].toLowerCase(locale), "\\", locale); } // First Character of a term as more escaping chars for (int i = 0; i < escapableTermExtraFirstChars.length; i++) { if (buffer.charAt(0) == escapableTermExtraFirstChars[i].charAt(0)) { - buffer = "\\" + buffer.charAt(0) - + buffer.subSequence(1, buffer.length()); + buffer = "\\" + buffer.charAt(0) + buffer.subSequence(1, buffer.length()); break; } } @@ -68,21 +62,18 @@ public class EscapeQuerySyntaxImpl implements 
EscapeQuerySyntax { } private final CharSequence escapeQuoted(CharSequence str, Locale locale) { - if (str == null || str.length() == 0) - return str; + if (str == null || str.length() == 0) return str; CharSequence buffer = str; for (int i = 0; i < escapableQuotedChars.length; i++) { - buffer = replaceIgnoreCase(buffer, escapableTermChars[i].toLowerCase(locale), - "\\", locale); + buffer = replaceIgnoreCase(buffer, escapableQuotedChars[i].toLowerCase(locale), "\\", locale); } return buffer; } private static final CharSequence escapeTerm(CharSequence term, Locale locale) { - if (term == null) - return term; + if (term == null) return term; // Escape single Chars term = escapeChar(term, locale); @@ -90,34 +81,28 @@ public class EscapeQuerySyntaxImpl implements EscapeQuerySyntax { // Escape Parser Words for (int i = 0; i < escapableWordTokens.length; i++) { - if (escapableWordTokens[i].equalsIgnoreCase(term.toString())) - return "\\" + term; + if (escapableWordTokens[i].equalsIgnoreCase(term.toString())) return "\\" + term; } return term; } /** * Replace occurrences of a sequence, ignoring case * - * @param string - * string to get replaced - * @param sequence1 - * the old character sequence in lowercase - * @param escapeChar - * the new character to prefix sequence1 in return string. + * + * @param string string to get replaced + * @param sequence1 the old character sequence in lowercase + * @param escapeChar the escape sequence used to prefix sequence1 in the returned string + * @return the new String */ - private static CharSequence replaceIgnoreCase(CharSequence string, - CharSequence sequence1, CharSequence escapeChar, Locale locale) { - if (escapeChar == null || sequence1 == null || string == null) - throw new NullPointerException(); + private static CharSequence replaceIgnoreCase( CharSequence string, CharSequence sequence1, CharSequence escapeChar, Locale locale) { + if (escapeChar == null || sequence1 == null || string == null) throw new NullPointerException(); // empty string case int count = string.length(); int sequence1Length = sequence1.length(); if (sequence1Length == 0) { - StringBuilder result = new StringBuilder((count + 1) - * escapeChar.length()); + StringBuilder result = new StringBuilder((count + 1) * escapeChar.length()); result.append(escapeChar); for (int i = 0; i < count; i++) { result.append(string.charAt(i)); @@ -131,16 +116,12 @@ public class EscapeQuerySyntaxImpl implements EscapeQuerySyntax { char first = sequence1.charAt(0); int start = 0, copyStart = 0, firstIndex; while (start < count) { - if ((firstIndex = string.toString().toLowerCase(locale).indexOf(first, - start)) == -1) - break; + if ((firstIndex = string.toString().toLowerCase(locale).indexOf(first, start)) == -1) break; boolean found = true; if (sequence1.length() > 1) { - if (firstIndex + sequence1Length > count) - break; + if (firstIndex + sequence1Length > count) break; for (int i = 1; i < sequence1Length; i++) { - if (string.toString().toLowerCase(locale).charAt(firstIndex + i) != sequence1 - .charAt(i)) { + if (string.toString().toLowerCase(locale).charAt(firstIndex + i) != sequence1.charAt(i)) { found = false; break; } @@ -149,46 +130,38 @@ public class EscapeQuerySyntaxImpl implements EscapeQuerySyntax { if (found) { result.append(string.toString().substring(copyStart, firstIndex)); result.append(escapeChar); - result.append(string.toString().substring(firstIndex, - firstIndex + sequence1Length)); + result.append(string.toString().substring(firstIndex, firstIndex + sequence1Length)); copyStart = start = firstIndex +
sequence1Length; } else { start = firstIndex + 1; } } - if (result.length() == 0 && copyStart == 0) - return string; + if (result.length() == 0 && copyStart == 0) return string; result.append(string.toString().substring(copyStart)); return result.toString(); } /** * escape all white-space characters in a given string - * - * @param str - * string to get replaced - * @param locale - * locale to be used when performing string compares + * + * @param str string to get replaced + * @param locale locale to be used when performing string compares * @return the new String */ - private static final CharSequence escapeWhiteChar(CharSequence str, - Locale locale) { - if (str == null || str.length() == 0) - return str; + private static final CharSequence escapeWhiteChar(CharSequence str, Locale locale) { + if (str == null || str.length() == 0) return str; CharSequence buffer = str; for (int i = 0; i < escapableWhiteChars.length; i++) { - buffer = replaceIgnoreCase(buffer, escapableWhiteChars[i].toLowerCase(locale), - "\\", locale); + buffer = replaceIgnoreCase(buffer, escapableWhiteChars[i].toLowerCase(locale), "\\", locale); } return buffer; } @Override public CharSequence escape(CharSequence text, Locale locale, Type type) { - if (text == null || text.length() == 0) - return text; + if (text == null || text.length() == 0) return text; // escape wildcards and the escape char (this has to be performed before // anything else) @@ -208,15 +181,12 @@ public class EscapeQuerySyntaxImpl implements EscapeQuerySyntax { } /** - * Returns a String where the escape char has been removed, or kept only once - * if there was a double escape. - * - * Supports escaped unicode characters, e. g. translates \\u0041 to - * A. - * + * Returns a String where the escape char has been removed, or kept only once if there was a + * double escape. + * + *

Supports escaped unicode characters, e.g. translates \\u0041 to A. */ - public static UnescapedCharSequence discardEscapeChar(CharSequence input) - throws ParseException { + public static UnescapedCharSequence discardEscapeChar(CharSequence input) throws ParseException { // Create char array to hold unescaped char sequence char[] output = new char[input.length()]; boolean[] wasEscaped = new boolean[input.length()]; @@ -269,13 +239,13 @@ public class EscapeQuerySyntaxImpl implements EscapeQuerySyntax { } if (codePointMultiplier > 0) { - throw new ParseException(new MessageImpl( - QueryParserMessages.INVALID_SYNTAX_ESCAPE_UNICODE_TRUNCATION)); + throw new ParseException( + new MessageImpl(QueryParserMessages.INVALID_SYNTAX_ESCAPE_UNICODE_TRUNCATION)); } if (lastCharWasEscapeChar) { - throw new ParseException(new MessageImpl( - QueryParserMessages.INVALID_SYNTAX_ESCAPE_CHARACTER)); + throw new ParseException( + new MessageImpl(QueryParserMessages.INVALID_SYNTAX_ESCAPE_CHARACTER)); } return new UnescapedCharSequence(output, wasEscaped, 0, length); @@ -290,9 +260,8 @@ public class EscapeQuerySyntaxImpl implements EscapeQuerySyntax { } else if ('A' <= c && c <= 'F') { return c - 'A' + 10; } else { - throw new ParseException(new MessageImpl( - QueryParserMessages.INVALID_SYNTAX_ESCAPE_NONE_HEX_UNICODE, c)); + throw new ParseException( + new MessageImpl(QueryParserMessages.INVALID_SYNTAX_ESCAPE_NONE_HEX_UNICODE, c)); } } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/package-info.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/package-info.java index be72e7f57a3..dba0de2e406 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/package-info.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/package-info.java @@ -14,14 +14,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -/** + +/** + * + *

    Lucene Query Parser

    - *

    - * The package org.apache.lucene.queryparser.flexible.standard.parser contains the query parser. - *

    - * This text parser only performs the syntax validation and creates an QueryNode tree - * from a query string. + * + *

    The package org.apache.lucene.queryparser.flexible.standard.parser contains the query parser. + * + *

    This text parser only performs the syntax validation and creates an QueryNode tree from a + * query string. */ package org.apache.lucene.queryparser.flexible.standard.parser; - diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/AllowLeadingWildcardProcessor.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/AllowLeadingWildcardProcessor.java index 0ff55185aef..43aaddb74a5 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/AllowLeadingWildcardProcessor.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/AllowLeadingWildcardProcessor.java @@ -17,25 +17,23 @@ package org.apache.lucene.queryparser.flexible.standard.processors; import java.util.List; - -import org.apache.lucene.queryparser.flexible.messages.MessageImpl; import org.apache.lucene.queryparser.flexible.core.QueryNodeException; import org.apache.lucene.queryparser.flexible.core.config.QueryConfigHandler; import org.apache.lucene.queryparser.flexible.core.messages.QueryParserMessages; import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; import org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessorImpl; import org.apache.lucene.queryparser.flexible.core.util.UnescapedCharSequence; +import org.apache.lucene.queryparser.flexible.messages.MessageImpl; import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler.ConfigurationKeys; import org.apache.lucene.queryparser.flexible.standard.nodes.WildcardQueryNode; import org.apache.lucene.queryparser.flexible.standard.parser.EscapeQuerySyntaxImpl; /** - * This processor verifies if - * {@link ConfigurationKeys#ALLOW_LEADING_WILDCARD} is defined in the - * {@link QueryConfigHandler}. If it is and leading wildcard is not allowed, it - * looks for every {@link WildcardQueryNode} contained in the query node tree - * and throws an exception if any of them has a leading wildcard ('*' or '?'). - * + * This processor verifies if {@link ConfigurationKeys#ALLOW_LEADING_WILDCARD} is defined in the + * {@link QueryConfigHandler}. If it is and leading wildcard is not allowed, it looks for every + * {@link WildcardQueryNode} contained in the query node tree and throws an exception if any of them + * has a leading wildcard ('*' or '?'). 
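For illustration, a minimal usage sketch of the leading-wildcard check described above (field and query strings are hypothetical; the StandardQueryParser facade installs this processor by default):

    // assumes imports: org.apache.lucene.queryparser.flexible.standard.StandardQueryParser,
    // org.apache.lucene.analysis.standard.StandardAnalyzer, org.apache.lucene.search.Query
    StandardQueryParser parser = new StandardQueryParser(new StandardAnalyzer());
    parser.setAllowLeadingWildcard(false);
    // parser.parse("*term", "title") is expected to throw a QueryNodeException here,
    // because the wildcard term starts with '*'
    parser.setAllowLeadingWildcard(true);
    Query accepted = parser.parse("*term", "title"); // leading wildcard now permitted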
+ * * @see ConfigurationKeys#ALLOW_LEADING_WILDCARD */ public class AllowLeadingWildcardProcessor extends QueryNodeProcessorImpl { @@ -46,14 +44,14 @@ public class AllowLeadingWildcardProcessor extends QueryNodeProcessorImpl { @Override public QueryNode process(QueryNode queryTree) throws QueryNodeException { - Boolean allowsLeadingWildcard = getQueryConfigHandler().get(ConfigurationKeys.ALLOW_LEADING_WILDCARD); + Boolean allowsLeadingWildcard = + getQueryConfigHandler().get(ConfigurationKeys.ALLOW_LEADING_WILDCARD); if (allowsLeadingWildcard != null) { if (!allowsLeadingWildcard) { return super.process(queryTree); } - } return queryTree; @@ -66,39 +64,33 @@ public class AllowLeadingWildcardProcessor extends QueryNodeProcessorImpl { WildcardQueryNode wildcardNode = (WildcardQueryNode) node; if (wildcardNode.getText().length() > 0) { - + // Validate if the wildcard was escaped - if (UnescapedCharSequence.wasEscaped(wildcardNode.getText(), 0)) - return node; - - switch (wildcardNode.getText().charAt(0)) { + if (UnescapedCharSequence.wasEscaped(wildcardNode.getText(), 0)) return node; + + switch (wildcardNode.getText().charAt(0)) { case '*': case '?': - throw new QueryNodeException(new MessageImpl( - QueryParserMessages.LEADING_WILDCARD_NOT_ALLOWED, node - .toQueryString(new EscapeQuerySyntaxImpl()))); + throw new QueryNodeException( + new MessageImpl( + QueryParserMessages.LEADING_WILDCARD_NOT_ALLOWED, + node.toQueryString(new EscapeQuerySyntaxImpl()))); } } - } return node; - } @Override protected QueryNode preProcessNode(QueryNode node) throws QueryNodeException { return node; - } @Override - protected List setChildrenOrder(List children) - throws QueryNodeException { + protected List setChildrenOrder(List children) throws QueryNodeException { return children; - } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/AnalyzerQueryNodeProcessor.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/AnalyzerQueryNodeProcessor.java index 56e99560722..31226dd958a 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/AnalyzerQueryNodeProcessor.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/AnalyzerQueryNodeProcessor.java @@ -17,12 +17,10 @@ package org.apache.lucene.queryparser.flexible.standard.processors; import java.io.IOException; - import java.util.ArrayList; import java.util.Collections; import java.util.LinkedList; import java.util.List; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.CachingTokenFilter; import org.apache.lucene.analysis.TokenStream; @@ -51,23 +49,21 @@ import org.apache.lucene.queryparser.flexible.standard.nodes.SynonymQueryNode; import org.apache.lucene.queryparser.flexible.standard.nodes.WildcardQueryNode; /** - * This processor verifies if {@link ConfigurationKeys#ANALYZER} - * is defined in the {@link QueryConfigHandler}. If it is and the analyzer is - * not null, it looks for every {@link FieldQueryNode} that is not - * {@link WildcardQueryNode}, {@link FuzzyQueryNode} or - * {@link RangeQueryNode} contained in the query node tree, then it applies - * the analyzer to that {@link FieldQueryNode} object.
    + * This processor verifies if {@link ConfigurationKeys#ANALYZER} is defined in the {@link + * QueryConfigHandler}. If it is and the analyzer is not null, it looks for every + * {@link FieldQueryNode} that is not {@link WildcardQueryNode}, {@link FuzzyQueryNode} or {@link + * RangeQueryNode} contained in the query node tree, then it applies the analyzer to that {@link + * FieldQueryNode} object.
    *
    - * If the analyzer return only one term, the returned term is set to the - * {@link FieldQueryNode} and it's returned.
+ * If the analyzer returns only one term, the returned term is set on the {@link FieldQueryNode} and + * it's returned.
    *
    - * If the analyzer return more than one term, a {@link TokenizedPhraseQueryNode} - * or {@link MultiPhraseQueryNode} is created, whether there is one or more - * terms at the same position, and it's returned.
+ * If the analyzer returns more than one term, a {@link TokenizedPhraseQueryNode} or {@link + * MultiPhraseQueryNode} is created, depending on whether there is more than one term at the same + * position, and it's returned.
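A rough sketch of the single-token and multi-token cases (hypothetical field name; a standard analyzer is assumed):

    // assumes imports: StandardQueryParser, StandardAnalyzer, Query
    StandardQueryParser parser = new StandardQueryParser(new StandardAnalyzer());
    Query single = parser.parse("lucene", "body");            // one token: the field node is kept as-is
    Query phrase = parser.parse("\"apache lucene\"", "body"); // two tokens in a quoted node: a phrase is built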
    *
    - * If no term is returned by the analyzer a {@link NoTokenFoundQueryNode} object - * is returned. - * + * If no term is returned by the analyzer a {@link NoTokenFoundQueryNode} object is returned. + * * @see ConfigurationKeys#ANALYZER * @see Analyzer * @see TokenStream @@ -77,7 +73,7 @@ public class AnalyzerQueryNodeProcessor extends QueryNodeProcessorImpl { private Analyzer analyzer; private boolean positionIncrementsEnabled; - + private Operator defaultOperator; public AnalyzerQueryNodeProcessor() { @@ -87,16 +83,17 @@ public class AnalyzerQueryNodeProcessor extends QueryNodeProcessorImpl { @Override public QueryNode process(QueryNode queryTree) throws QueryNodeException { Analyzer analyzer = getQueryConfigHandler().get(ConfigurationKeys.ANALYZER); - + if (analyzer != null) { this.analyzer = analyzer; this.positionIncrementsEnabled = false; - Boolean positionIncrementsEnabled = getQueryConfigHandler().get(ConfigurationKeys.ENABLE_POSITION_INCREMENTS); + Boolean positionIncrementsEnabled = + getQueryConfigHandler().get(ConfigurationKeys.ENABLE_POSITION_INCREMENTS); Operator defaultOperator = getQueryConfigHandler().get(ConfigurationKeys.DEFAULT_OPERATOR); this.defaultOperator = defaultOperator != null ? defaultOperator : Operator.OR; - + if (positionIncrementsEnabled != null) { - this.positionIncrementsEnabled = positionIncrementsEnabled; + this.positionIncrementsEnabled = positionIncrementsEnabled; } if (this.analyzer != null) { @@ -105,7 +102,6 @@ public class AnalyzerQueryNodeProcessor extends QueryNodeProcessorImpl { } return queryTree; - } @Override @@ -126,7 +122,7 @@ public class AnalyzerQueryNodeProcessor extends QueryNodeProcessorImpl { int numTokens = 0; int positionCount = 0; boolean severalTokensAtSamePosition = false; - + try { try (TokenStream source = this.analyzer.tokenStream(field, text)) { buffer = new CachingTokenFilter(source); @@ -135,28 +131,26 @@ public class AnalyzerQueryNodeProcessor extends QueryNodeProcessorImpl { if (buffer.hasAttribute(PositionIncrementAttribute.class)) { posIncrAtt = buffer.getAttribute(PositionIncrementAttribute.class); } - + try { - + while (buffer.incrementToken()) { numTokens++; - int positionIncrement = (posIncrAtt != null) ? posIncrAtt - .getPositionIncrement() : 1; + int positionIncrement = (posIncrAtt != null) ? 
posIncrAtt.getPositionIncrement() : 1; if (positionIncrement != 0) { positionCount += positionIncrement; - + } else { severalTokensAtSamePosition = true; } - } - + } catch (IOException e) { // ignore } // rewind the buffer stream - buffer.reset();//will never through on subsequent reset calls + buffer.reset(); // will never throw on subsequent reset calls } catch (IOException e) { throw new RuntimeException(e); } @@ -164,12 +158,12 @@ public class AnalyzerQueryNodeProcessor extends QueryNodeProcessorImpl { if (!buffer.hasAttribute(CharTermAttribute.class)) { return new NoTokenFoundQueryNode(); } - + CharTermAttribute termAtt = buffer.getAttribute(CharTermAttribute.class); - + if (numTokens == 0) { return new NoTokenFoundQueryNode(); - + } else if (numTokens == 1) { String term = null; try { @@ -177,39 +171,37 @@ public class AnalyzerQueryNodeProcessor extends QueryNodeProcessorImpl { hasNext = buffer.incrementToken(); assert hasNext == true; term = termAtt.toString(); - + } catch (IOException e) { // safe to ignore, because we know the number of tokens } - + fieldNode.setText(term); - + return fieldNode; - + } else if (severalTokensAtSamePosition || !(node instanceof QuotedFieldQueryNode)) { if (positionCount == 1 || !(node instanceof QuotedFieldQueryNode)) { // no phrase query: - + if (positionCount == 1) { // simple case: only one position, with synonyms LinkedList children = new LinkedList<>(); - + for (int i = 0; i < numTokens; i++) { String term = null; try { boolean hasNext = buffer.incrementToken(); assert hasNext == true; term = termAtt.toString(); - + } catch (IOException e) { // safe to ignore, because we know the number of tokens } - + children.add(new FieldQueryNode(field, term, -1, -1)); - } - return new GroupQueryNode( - new SynonymQueryNode(children)); + return new GroupQueryNode(new SynonymQueryNode(children)); } else { // multiple positions QueryNode q = new BooleanQueryNode(Collections.emptyList()); @@ -227,9 +219,9 @@ public class AnalyzerQueryNodeProcessor extends QueryNodeProcessorImpl { if (!(currentQuery instanceof BooleanQueryNode)) { QueryNode t = currentQuery; currentQuery = new SynonymQueryNode(Collections.emptyList()); - ((BooleanQueryNode)currentQuery).add(t); + ((BooleanQueryNode) currentQuery).add(t); } - ((BooleanQueryNode)currentQuery).add(new FieldQueryNode(field, term, -1, -1)); + ((BooleanQueryNode) currentQuery).add(new FieldQueryNode(field, term, -1, -1)); } else { if (currentQuery != null) { if (this.defaultOperator == Operator.OR) { @@ -246,7 +238,7 @@ public class AnalyzerQueryNodeProcessor extends QueryNodeProcessorImpl { } else { q.add(new ModifierQueryNode(currentQuery, Modifier.MOD_REQ)); } - + if (q instanceof BooleanQueryNode) { q = new GroupQueryNode(q); } @@ -255,7 +247,7 @@ public class AnalyzerQueryNodeProcessor extends QueryNodeProcessorImpl { } else { // phrase query: MultiPhraseQueryNode mpq = new MultiPhraseQueryNode(); - + List multiTerms = new ArrayList<>(); int position = -1; int i = 0; @@ -270,94 +262,87 @@ public class AnalyzerQueryNodeProcessor extends QueryNodeProcessorImpl { if (posIncrAtt != null) { positionIncrement = posIncrAtt.getPositionIncrement(); } - + } catch (IOException e) { // safe to ignore, because we know the number of tokens } - + if (positionIncrement > 0 && multiTerms.size() > 0) { - + for (FieldQueryNode termNode : multiTerms) { - + if (this.positionIncrementsEnabled) { termNode.setPositionIncrement(position); } else { termNode.setPositionIncrement(termGroupCount); } - + mpq.add(termNode); - } - + // Only
increment once for each "group" of // terms that were in the same position: termGroupCount++; - + multiTerms.clear(); - } - + position += positionIncrement; multiTerms.add(new FieldQueryNode(field, term, -1, -1)); - } - + for (FieldQueryNode termNode : multiTerms) { - + if (this.positionIncrementsEnabled) { termNode.setPositionIncrement(position); - + } else { termNode.setPositionIncrement(termGroupCount); } - + mpq.add(termNode); - } - + return mpq; - } - + } else { - + TokenizedPhraseQueryNode pq = new TokenizedPhraseQueryNode(); - + int position = -1; - + for (int i = 0; i < numTokens; i++) { String term = null; int positionIncrement = 1; - + try { boolean hasNext = buffer.incrementToken(); assert hasNext == true; term = termAtt.toString(); - + if (posIncrAtt != null) { positionIncrement = posIncrAtt.getPositionIncrement(); } - + } catch (IOException e) { // safe to ignore, because we know the number of tokens } - + FieldQueryNode newFieldNode = new FieldQueryNode(field, term, -1, -1); - + if (this.positionIncrementsEnabled) { position += positionIncrement; newFieldNode.setPositionIncrement(position); - + } else { newFieldNode.setPositionIncrement(i); } - + pq.add(newFieldNode); - } - + return pq; - } } finally { if (buffer != null) { @@ -377,15 +362,11 @@ public class AnalyzerQueryNodeProcessor extends QueryNodeProcessorImpl { protected QueryNode preProcessNode(QueryNode node) throws QueryNodeException { return node; - } @Override - protected List setChildrenOrder(List children) - throws QueryNodeException { + protected List setChildrenOrder(List children) throws QueryNodeException { return children; - } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/BooleanQuery2ModifierNodeProcessor.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/BooleanQuery2ModifierNodeProcessor.java index afa01331821..f7186c1ab62 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/BooleanQuery2ModifierNodeProcessor.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/BooleanQuery2ModifierNodeProcessor.java @@ -18,7 +18,6 @@ package org.apache.lucene.queryparser.flexible.standard.processors; import java.util.ArrayList; import java.util.List; - import org.apache.lucene.queryparser.flexible.core.QueryNodeException; import org.apache.lucene.queryparser.flexible.core.config.QueryConfigHandler; import org.apache.lucene.queryparser.flexible.core.nodes.AndQueryNode; @@ -35,62 +34,53 @@ import org.apache.lucene.queryparser.flexible.standard.nodes.BooleanModifierNode import org.apache.lucene.queryparser.flexible.standard.parser.StandardSyntaxParser; /** - *

    - * This processor is used to apply the correct {@link ModifierQueryNode} to - * {@link BooleanQueryNode}s children. This is a variant of - * {@link BooleanModifiersQueryNodeProcessor} which ignores precedence. - *

    - *

    - * The {@link StandardSyntaxParser} knows the rules of precedence, but lucene - * does not. e.g. (A AND B OR C AND D) ist treated like - * (+A +B +C +D). - *

    - *

    - * This processor walks through the query node tree looking for - * {@link BooleanQueryNode}s. If an {@link AndQueryNode} is found, every child, - * which is not a {@link ModifierQueryNode} or the {@link ModifierQueryNode} is - * {@link Modifier#MOD_NONE}, becomes a {@link Modifier#MOD_REQ}. For default - * {@link BooleanQueryNode}, it checks the default operator is - * {@link Operator#AND}, if it is, the same operation when an - * {@link AndQueryNode} is found is applied to it. Each {@link BooleanQueryNode} - * which direct parent is also a {@link BooleanQueryNode} is removed (to ignore + * This processor is used to apply the correct {@link ModifierQueryNode} to {@link + * BooleanQueryNode}s children. This is a variant of {@link BooleanModifiersQueryNodeProcessor} + * which ignores precedence. + * + *

The {@link StandardSyntaxParser} knows the rules of precedence, but Lucene does not. e.g. + * (A AND B OR C AND D) is treated like (+A +B +C +D). + * + *

This processor walks through the query node tree looking for {@link BooleanQueryNode}s. If an + * {@link AndQueryNode} is found, every child that is not a {@link ModifierQueryNode}, or whose + * {@link ModifierQueryNode} is {@link Modifier#MOD_NONE}, becomes a {@link Modifier#MOD_REQ}. For + * a default {@link BooleanQueryNode}, it checks whether the default operator is {@link Operator#AND}; if it + * is, the same operation applied when an {@link AndQueryNode} is found is applied to it. Each {@link + * BooleanQueryNode} whose direct parent is also a {@link BooleanQueryNode} is removed (to ignore * the rules of precedence). - *
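A sketch of the precedence-free behavior described above (hypothetical field name; the parsed result is expected, not guaranteed, to match the comment):

    // assumes imports: StandardQueryParser, StandardQueryConfigHandler, Query
    StandardQueryParser parser = new StandardQueryParser();
    parser.setDefaultOperator(StandardQueryConfigHandler.Operator.AND);
    // precedence is ignored, so this is expected to behave like +A +B +C +D:
    Query q = parser.parse("A AND B OR C AND D", "field");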

    - * + * * @see ConfigurationKeys#DEFAULT_OPERATOR * @see BooleanModifiersQueryNodeProcessor */ public class BooleanQuery2ModifierNodeProcessor implements QueryNodeProcessor { - final static String TAG_REMOVE = "remove"; - final static String TAG_MODIFIER = "wrapWithModifier"; - final static String TAG_BOOLEAN_ROOT = "booleanRoot"; - + static final String TAG_REMOVE = "remove"; + static final String TAG_MODIFIER = "wrapWithModifier"; + static final String TAG_BOOLEAN_ROOT = "booleanRoot"; + QueryConfigHandler queryConfigHandler; - + private final ArrayList childrenBuffer = new ArrayList<>(); - + private Boolean usingAnd = false; - + public BooleanQuery2ModifierNodeProcessor() { // empty constructor } - + @Override public QueryNode process(QueryNode queryTree) throws QueryNodeException { - Operator op = getQueryConfigHandler().get( - ConfigurationKeys.DEFAULT_OPERATOR); - + Operator op = getQueryConfigHandler().get(ConfigurationKeys.DEFAULT_OPERATOR); + if (op == null) { throw new IllegalArgumentException( "StandardQueryConfigHandler.ConfigurationKeys.DEFAULT_OPERATOR should be set on the QueryConfigHandler"); } - + this.usingAnd = StandardQueryConfigHandler.Operator.AND == op; - + return processIteration(queryTree); - } - + protected void processChildren(QueryNode queryTree) throws QueryNodeException { List children = queryTree.getChildren(); if (children != null && children.size() > 0) { @@ -99,32 +89,29 @@ public class BooleanQuery2ModifierNodeProcessor implements QueryNodeProcessor { } } } - - private QueryNode processIteration(QueryNode queryTree) - throws QueryNodeException { + + private QueryNode processIteration(QueryNode queryTree) throws QueryNodeException { queryTree = preProcessNode(queryTree); - + processChildren(queryTree); - + queryTree = postProcessNode(queryTree); - + return queryTree; - } - + protected void fillChildrenBufferAndApplyModifiery(QueryNode parent) { for (QueryNode node : parent.getChildren()) { if (node.containsTag(TAG_REMOVE)) { fillChildrenBufferAndApplyModifiery(node); } else if (node.containsTag(TAG_MODIFIER)) { - childrenBuffer.add(applyModifier(node, - (Modifier) node.getTag(TAG_MODIFIER))); + childrenBuffer.add(applyModifier(node, (Modifier) node.getTag(TAG_MODIFIER))); } else { childrenBuffer.add(node); } } } - + protected QueryNode postProcessNode(QueryNode node) throws QueryNodeException { if (node.containsTag(TAG_BOOLEAN_ROOT)) { this.childrenBuffer.clear(); @@ -132,9 +119,8 @@ public class BooleanQuery2ModifierNodeProcessor implements QueryNodeProcessor { node.set(childrenBuffer); } return node; - } - + protected QueryNode preProcessNode(QueryNode node) throws QueryNodeException { QueryNode parent = node.getParent(); if (node instanceof BooleanQueryNode) { @@ -144,37 +130,34 @@ public class BooleanQuery2ModifierNodeProcessor implements QueryNodeProcessor { node.setTag(TAG_BOOLEAN_ROOT, Boolean.TRUE); } } else if (parent instanceof BooleanQueryNode) { - if ((parent instanceof AndQueryNode) - || (usingAnd && isDefaultBooleanQueryNode(parent))) { + if ((parent instanceof AndQueryNode) || (usingAnd && isDefaultBooleanQueryNode(parent))) { tagModifierButDoNotOverride(node, ModifierQueryNode.Modifier.MOD_REQ); } } return node; } - + protected boolean isDefaultBooleanQueryNode(QueryNode toTest) { return toTest != null && BooleanQueryNode.class.equals(toTest.getClass()); } - + private QueryNode applyModifier(QueryNode node, Modifier mod) { - + // check if modifier is not already defined and is default if (!(node instanceof ModifierQueryNode)) { return 
new BooleanModifierNode(node, mod); - + } else { ModifierQueryNode modNode = (ModifierQueryNode) node; - + if (modNode.getModifier() == Modifier.MOD_NONE) { return new ModifierQueryNode(modNode.getChild(), mod); } - } - + return node; - } - + protected void tagModifierButDoNotOverride(QueryNode node, Modifier mod) { if (node instanceof ModifierQueryNode) { ModifierQueryNode modNode = (ModifierQueryNode) node; @@ -185,17 +168,14 @@ public class BooleanQuery2ModifierNodeProcessor implements QueryNodeProcessor { node.setTag(TAG_MODIFIER, ModifierQueryNode.Modifier.MOD_REQ); } } - + @Override public void setQueryConfigHandler(QueryConfigHandler queryConfigHandler) { this.queryConfigHandler = queryConfigHandler; - } - + @Override public QueryConfigHandler getQueryConfigHandler() { return queryConfigHandler; } - } - diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/BooleanSingleChildOptimizationQueryNodeProcessor.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/BooleanSingleChildOptimizationQueryNodeProcessor.java index e241b8dd834..8a5bb1791dd 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/BooleanSingleChildOptimizationQueryNodeProcessor.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/BooleanSingleChildOptimizationQueryNodeProcessor.java @@ -17,25 +17,22 @@ package org.apache.lucene.queryparser.flexible.standard.processors; import java.util.List; - import org.apache.lucene.queryparser.flexible.core.QueryNodeException; import org.apache.lucene.queryparser.flexible.core.nodes.BooleanQueryNode; import org.apache.lucene.queryparser.flexible.core.nodes.ModifierQueryNode; -import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; import org.apache.lucene.queryparser.flexible.core.nodes.ModifierQueryNode.Modifier; +import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; import org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessorImpl; import org.apache.lucene.queryparser.flexible.standard.nodes.BooleanModifierNode; /** - * This processor removes every {@link BooleanQueryNode} that contains only one - * child and returns this child. If this child is {@link ModifierQueryNode} that - * was defined by the user. A modifier is not defined by the user when it's a - * {@link BooleanModifierNode} - * + * This processor removes every {@link BooleanQueryNode} that contains only one child and returns + * this child. If this child is {@link ModifierQueryNode} that was defined by the user. 
A modifier + * is not defined by the user when it's a {@link BooleanModifierNode} + * * @see ModifierQueryNode */ -public class BooleanSingleChildOptimizationQueryNodeProcessor extends - QueryNodeProcessorImpl { +public class BooleanSingleChildOptimizationQueryNodeProcessor extends QueryNodeProcessorImpl { public BooleanSingleChildOptimizationQueryNodeProcessor() { // empty constructor @@ -57,34 +54,26 @@ public class BooleanSingleChildOptimizationQueryNodeProcessor extends || modNode.getModifier() == Modifier.MOD_NONE) { return child; - } } else { return child; } - } - } return node; - } @Override protected QueryNode preProcessNode(QueryNode node) throws QueryNodeException { return node; - } @Override - protected List setChildrenOrder(List children) - throws QueryNodeException { + protected List setChildrenOrder(List children) throws QueryNodeException { return children; - } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/BoostQueryNodeProcessor.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/BoostQueryNodeProcessor.java index 353d087c454..ed6fa460798 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/BoostQueryNodeProcessor.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/BoostQueryNodeProcessor.java @@ -17,7 +17,6 @@ package org.apache.lucene.queryparser.flexible.standard.processors; import java.util.List; - import org.apache.lucene.queryparser.flexible.core.QueryNodeException; import org.apache.lucene.queryparser.flexible.core.config.FieldConfig; import org.apache.lucene.queryparser.flexible.core.config.QueryConfigHandler; @@ -29,10 +28,10 @@ import org.apache.lucene.queryparser.flexible.core.util.StringUtils; import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler.ConfigurationKeys; /** - * This processor iterates the query node tree looking for every - * {@link FieldableNode} that has {@link ConfigurationKeys#BOOST} in its - * config. If there is, the boost is applied to that {@link FieldableNode}. - * + * This processor iterates the query node tree looking for every {@link FieldableNode} that has + * {@link ConfigurationKeys#BOOST} in its config. If there is, the boost is applied to that {@link + * FieldableNode}. 
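A sketch of how a per-field boost reaches this processor (hypothetical field name and boost value):

    // assumes imports: StandardQueryParser, Query, java.util.HashMap, java.util.Map
    StandardQueryParser parser = new StandardQueryParser();
    Map<String, Float> boosts = new HashMap<>();
    boosts.put("title", 2.0f);
    parser.setFieldsBoost(boosts); // surfaces as ConfigurationKeys.BOOST in the field config
    Query q = parser.parse("lucene", "title"); // expected to come back boosted by 2.0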
+ * * @see ConfigurationKeys#BOOST * @see QueryConfigHandler * @see FieldableNode @@ -42,9 +41,9 @@ public class BoostQueryNodeProcessor extends QueryNodeProcessorImpl { @Override protected QueryNode postProcessNode(QueryNode node) throws QueryNodeException { - if (node instanceof FieldableNode && - (node.getParent() == null || !(node.getParent() instanceof FieldableNode))) { - + if (node instanceof FieldableNode + && (node.getParent() == null || !(node.getParent() instanceof FieldableNode))) { + FieldableNode fieldNode = (FieldableNode) node; QueryConfigHandler config = getQueryConfigHandler(); @@ -58,30 +57,22 @@ public class BoostQueryNodeProcessor extends QueryNodeProcessorImpl { if (boost != null) { return new BoostQueryNode(node, boost); } - } - } - } return node; - } @Override protected QueryNode preProcessNode(QueryNode node) throws QueryNodeException { return node; - } @Override - protected List setChildrenOrder(List children) - throws QueryNodeException { + protected List setChildrenOrder(List children) throws QueryNodeException { return children; - } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/DefaultPhraseSlopQueryNodeProcessor.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/DefaultPhraseSlopQueryNodeProcessor.java index fdf2195dad0..c3c14b7e12b 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/DefaultPhraseSlopQueryNodeProcessor.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/DefaultPhraseSlopQueryNodeProcessor.java @@ -17,7 +17,6 @@ package org.apache.lucene.queryparser.flexible.standard.processors; import java.util.List; - import org.apache.lucene.queryparser.flexible.core.QueryNodeException; import org.apache.lucene.queryparser.flexible.core.config.QueryConfigHandler; import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; @@ -28,13 +27,12 @@ import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfi import org.apache.lucene.queryparser.flexible.standard.nodes.MultiPhraseQueryNode; /** - * This processor verifies if {@link ConfigurationKeys#PHRASE_SLOP} - * is defined in the {@link QueryConfigHandler}. If it is, it looks for every - * {@link TokenizedPhraseQueryNode} and {@link MultiPhraseQueryNode} that does - * not have any {@link SlopQueryNode} applied to it and creates an - * {@link SlopQueryNode} and apply to it. The new {@link SlopQueryNode} has the - * same slop value defined in the configuration. - * + * This processor verifies if {@link ConfigurationKeys#PHRASE_SLOP} is defined in the {@link + * QueryConfigHandler}. If it is, it looks for every {@link TokenizedPhraseQueryNode} and {@link + * MultiPhraseQueryNode} that does not have any {@link SlopQueryNode} applied to it and creates a + * {@link SlopQueryNode} to wrap it. The new {@link SlopQueryNode} has the same slop value + * defined in the configuration.
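A sketch of the default phrase slop in use (hypothetical field name and slop values):

    // assumes imports: StandardQueryParser, StandardAnalyzer, Query
    StandardQueryParser parser = new StandardQueryParser(new StandardAnalyzer());
    parser.setPhraseSlop(2); // sets ConfigurationKeys.PHRASE_SLOP
    Query implicit = parser.parse("\"apache lucene\"", "body");   // expected to get the default slop 2
    Query explicit = parser.parse("\"apache lucene\"~5", "body"); // an explicit slop is kept as-is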
+ * * @see SlopQueryNode * @see ConfigurationKeys#PHRASE_SLOP */ @@ -53,33 +51,27 @@ public class DefaultPhraseSlopQueryNodeProcessor extends QueryNodeProcessorImpl QueryConfigHandler queryConfig = getQueryConfigHandler(); if (queryConfig != null) { - Integer defaultPhraseSlop = queryConfig.get(ConfigurationKeys.PHRASE_SLOP); - + Integer defaultPhraseSlop = queryConfig.get(ConfigurationKeys.PHRASE_SLOP); + if (defaultPhraseSlop != null) { this.defaultPhraseSlop = defaultPhraseSlop; return super.process(queryTree); - } - } return queryTree; - } @Override protected QueryNode postProcessNode(QueryNode node) throws QueryNodeException { - if (node instanceof TokenizedPhraseQueryNode - || node instanceof MultiPhraseQueryNode) { + if (node instanceof TokenizedPhraseQueryNode || node instanceof MultiPhraseQueryNode) { return new SlopQueryNode(node, this.defaultPhraseSlop); - } return node; - } @Override @@ -87,11 +79,9 @@ public class DefaultPhraseSlopQueryNodeProcessor extends QueryNodeProcessorImpl if (node instanceof SlopQueryNode) { this.processChildren = false; - } return node; - } @Override @@ -103,15 +93,11 @@ public class DefaultPhraseSlopQueryNodeProcessor extends QueryNodeProcessorImpl } else { this.processChildren = true; } - } @Override - protected List setChildrenOrder(List children) - throws QueryNodeException { + protected List setChildrenOrder(List children) throws QueryNodeException { return children; - } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/FuzzyQueryNodeProcessor.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/FuzzyQueryNodeProcessor.java index 9479fcf65a3..0d0730b23f5 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/FuzzyQueryNodeProcessor.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/FuzzyQueryNodeProcessor.java @@ -17,7 +17,6 @@ package org.apache.lucene.queryparser.flexible.standard.processors; import java.util.List; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.queryparser.flexible.core.QueryNodeException; import org.apache.lucene.queryparser.flexible.core.config.QueryConfigHandler; @@ -29,13 +28,11 @@ import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfi import org.apache.lucene.search.FuzzyQuery; /** - * This processor iterates the query node tree looking for every - * {@link FuzzyQueryNode}, when this kind of node is found, it checks on the - * query configuration for - * {@link ConfigurationKeys#FUZZY_CONFIG}, gets the - * fuzzy prefix length and default similarity from it and set to the fuzzy node. - * For more information about fuzzy prefix length check: {@link FuzzyQuery}. - * + * This processor iterates the query node tree looking for every {@link FuzzyQueryNode}. When such a + * node is found, it checks the query configuration for {@link + * ConfigurationKeys#FUZZY_CONFIG}, gets the fuzzy prefix length and default similarity from it, and + * sets them on the fuzzy node. For more information about the fuzzy prefix length, check {@link FuzzyQuery}.
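A configuration sketch for the fuzzy settings described above (hypothetical field name and values; the exact interpretation of the similarity value is left to FuzzyQuery):

    // assumes imports: StandardQueryParser, Query
    StandardQueryParser parser = new StandardQueryParser();
    parser.setFuzzyPrefixLength(1); // leading characters kept out of the fuzzy expansion
    parser.setFuzzyMinSim(2f);      // default similarity applied when the query gives none
    Query q = parser.parse("roam~", "body");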
+ * * @see ConfigurationKeys#FUZZY_CONFIG * @see FuzzyQuery * @see FuzzyQueryNode @@ -46,7 +43,6 @@ public class FuzzyQueryNodeProcessor extends QueryNodeProcessorImpl { protected QueryNode postProcessNode(QueryNode node) throws QueryNodeException { return node; - } @Override @@ -58,37 +54,33 @@ public class FuzzyQueryNodeProcessor extends QueryNodeProcessorImpl { Analyzer analyzer = getQueryConfigHandler().get(ConfigurationKeys.ANALYZER); if (analyzer != null) { - // because we call utf8ToString, this will only work with the default TermToBytesRefAttribute + // because we call utf8ToString, this will only work with the default + // TermToBytesRefAttribute String text = fuzzyNode.getTextAsString(); text = analyzer.normalize(fuzzyNode.getFieldAsString(), text).utf8ToString(); fuzzyNode.setText(text); } FuzzyConfig fuzzyConfig = null; - + if ((fuzzyConfig = config.get(ConfigurationKeys.FUZZY_CONFIG)) != null) { fuzzyNode.setPrefixLength(fuzzyConfig.getPrefixLength()); if (fuzzyNode.getSimilarity() < 0) { fuzzyNode.setSimilarity(fuzzyConfig.getMinSimilarity()); } - + } else if (fuzzyNode.getSimilarity() < 0) { throw new IllegalArgumentException("No FUZZY_CONFIG set in the config"); } - } return node; - } @Override - protected List setChildrenOrder(List children) - throws QueryNodeException { + protected List setChildrenOrder(List children) throws QueryNodeException { return children; - } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/MatchAllDocsQueryNodeProcessor.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/MatchAllDocsQueryNodeProcessor.java index 89a7f82e52a..7a95d8d00a4 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/MatchAllDocsQueryNodeProcessor.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/MatchAllDocsQueryNodeProcessor.java @@ -17,7 +17,6 @@ package org.apache.lucene.queryparser.flexible.standard.processors; import java.util.List; - import org.apache.lucene.queryparser.flexible.core.QueryNodeException; import org.apache.lucene.queryparser.flexible.core.nodes.FieldQueryNode; import org.apache.lucene.queryparser.flexible.core.nodes.MatchAllDocsQueryNode; @@ -27,9 +26,9 @@ import org.apache.lucene.queryparser.flexible.standard.nodes.WildcardQueryNode; import org.apache.lucene.search.MatchAllDocsQuery; /** - * This processor converts every {@link WildcardQueryNode} that is "*:*" to - * {@link MatchAllDocsQueryNode}. - * + * This processor converts every {@link WildcardQueryNode} that is "*:*" to {@link + * MatchAllDocsQueryNode}. 
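For example (hypothetical default field):

    // assumes imports: StandardQueryParser, Query
    StandardQueryParser parser = new StandardQueryParser();
    Query all = parser.parse("*:*", "body"); // expected to rewrite to a MatchAllDocsQuery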
+ * * @see MatchAllDocsQueryNode * @see MatchAllDocsQuery */ @@ -45,32 +44,24 @@ public class MatchAllDocsQueryNodeProcessor extends QueryNodeProcessorImpl { if (node instanceof FieldQueryNode) { FieldQueryNode fqn = (FieldQueryNode) node; - if (fqn.getField().toString().equals("*") - && fqn.getText().toString().equals("*")) { + if (fqn.getField().toString().equals("*") && fqn.getText().toString().equals("*")) { return new MatchAllDocsQueryNode(); - } - } return node; - } @Override protected QueryNode preProcessNode(QueryNode node) throws QueryNodeException { return node; - } @Override - protected List setChildrenOrder(List children) - throws QueryNodeException { + protected List setChildrenOrder(List children) throws QueryNodeException { return children; - } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/MultiFieldQueryNodeProcessor.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/MultiFieldQueryNodeProcessor.java index d7303ad7abd..8cd3fade139 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/MultiFieldQueryNodeProcessor.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/MultiFieldQueryNodeProcessor.java @@ -18,7 +18,6 @@ package org.apache.lucene.queryparser.flexible.standard.processors; import java.util.ArrayList; import java.util.List; - import org.apache.lucene.queryparser.flexible.core.QueryNodeException; import org.apache.lucene.queryparser.flexible.core.config.QueryConfigHandler; import org.apache.lucene.queryparser.flexible.core.nodes.BooleanQueryNode; @@ -30,17 +29,16 @@ import org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessor import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler.ConfigurationKeys; /** - * This processor is used to expand terms so the query looks for the same term - * in different fields. It also boosts a query based on its field.
    + * This processor is used to expand terms so the query looks for the same term in different fields. + * It also boosts a query based on its field.
    *
    - * This processor looks for every {@link FieldableNode} contained in the query - * node tree. If a {@link FieldableNode} is found, it checks if there is a - * {@link ConfigurationKeys#MULTI_FIELDS} defined in the {@link QueryConfigHandler}. If - * there is, the {@link FieldableNode} is cloned N times and the clones are - * added to a {@link BooleanQueryNode} together with the original node. N is - * defined by the number of fields that it will be expanded to. The - * {@link BooleanQueryNode} is returned. - * + * This processor looks for every {@link FieldableNode} contained in the query node tree. If a + * {@link FieldableNode} is found, it checks if there is a {@link ConfigurationKeys#MULTI_FIELDS} + * defined in the {@link QueryConfigHandler}. If there is, the {@link FieldableNode} is cloned N + * times and the clones are added to a {@link BooleanQueryNode} together with the original node. N + * is defined by the number of fields that it will be expanded to. The {@link BooleanQueryNode} is + * returned. + * * @see ConfigurationKeys#MULTI_FIELDS */ public class MultiFieldQueryNodeProcessor extends QueryNodeProcessorImpl { @@ -55,7 +53,6 @@ public class MultiFieldQueryNodeProcessor extends QueryNodeProcessorImpl { protected QueryNode postProcessNode(QueryNode node) throws QueryNodeException { return node; - } @Override @@ -67,7 +64,6 @@ public class MultiFieldQueryNodeProcessor extends QueryNodeProcessorImpl { } else { this.processChildren = true; } - } @Override @@ -112,12 +108,10 @@ public class MultiFieldQueryNodeProcessor extends QueryNodeProcessorImpl { } return node; - } @Override - protected List setChildrenOrder(List children) - throws QueryNodeException { + protected List setChildrenOrder(List children) throws QueryNodeException { return children; } } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/MultiTermRewriteMethodProcessor.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/MultiTermRewriteMethodProcessor.java index a65f6801c7f..7e4068e38b9 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/MultiTermRewriteMethodProcessor.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/MultiTermRewriteMethodProcessor.java @@ -17,7 +17,6 @@ package org.apache.lucene.queryparser.flexible.standard.processors; import java.util.List; - import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; import org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessorImpl; import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler.ConfigurationKeys; @@ -27,10 +26,9 @@ import org.apache.lucene.queryparser.flexible.standard.nodes.WildcardQueryNode; import org.apache.lucene.search.MultiTermQuery; /** - * This processor instates the default - * {@link org.apache.lucene.search.MultiTermQuery.RewriteMethod}, - * {@link MultiTermQuery#CONSTANT_SCORE_REWRITE}, for multi-term - * query nodes. + * This processor instates the default {@link + * org.apache.lucene.search.MultiTermQuery.RewriteMethod}, {@link + * MultiTermQuery#CONSTANT_SCORE_REWRITE}, for multi-term query nodes. 
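A sketch touching both processors described above, multi-field expansion and the multi-term rewrite method (hypothetical field names):

    // assumes imports: StandardQueryParser, org.apache.lucene.search.MultiTermQuery, Query
    StandardQueryParser parser = new StandardQueryParser();
    parser.setMultiFields(new CharSequence[] {"title", "body"});
    Query expanded = parser.parse("lucene", null); // expected to expand to title:lucene body:lucene
    parser.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_REWRITE);
    Query wildcard = parser.parse("luc*", null);   // rewrite method tagged onto the wildcard node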
*/ public class MultiTermRewriteMethodProcessor extends QueryNodeProcessorImpl { @@ -42,9 +40,11 @@ public class MultiTermRewriteMethodProcessor extends QueryNodeProcessorImpl { // set setMultiTermRewriteMethod for WildcardQueryNode and // PrefixWildcardQueryNode if (node instanceof WildcardQueryNode - || node instanceof AbstractRangeQueryNode || node instanceof RegexpQueryNode) { - - MultiTermQuery.RewriteMethod rewriteMethod = getQueryConfigHandler().get(ConfigurationKeys.MULTI_TERM_REWRITE_METHOD); + || node instanceof AbstractRangeQueryNode + || node instanceof RegexpQueryNode) { + + MultiTermQuery.RewriteMethod rewriteMethod = + getQueryConfigHandler().get(ConfigurationKeys.MULTI_TERM_REWRITE_METHOD); if (rewriteMethod == null) { // This should not happen, this configuration is set in the @@ -55,7 +55,6 @@ public class MultiTermRewriteMethodProcessor extends QueryNodeProcessorImpl { // use a TAG to take the value to the Builder node.setTag(MultiTermRewriteMethodProcessor.TAG_ID, rewriteMethod); - } return node; diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/OpenRangeQueryNodeProcessor.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/OpenRangeQueryNodeProcessor.java index 190e9888e31..e328aef3b9c 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/OpenRangeQueryNodeProcessor.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/OpenRangeQueryNodeProcessor.java @@ -17,63 +17,57 @@ package org.apache.lucene.queryparser.flexible.standard.processors; import java.util.List; - -import org.apache.lucene.search.TermRangeQuery; // javadocs import org.apache.lucene.queryparser.flexible.core.QueryNodeException; import org.apache.lucene.queryparser.flexible.core.nodes.FieldQueryNode; import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; import org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessorImpl; import org.apache.lucene.queryparser.flexible.core.util.UnescapedCharSequence; import org.apache.lucene.queryparser.flexible.standard.nodes.TermRangeQueryNode; +import org.apache.lucene.search.TermRangeQuery; // javadocs -/** - * Processes {@link TermRangeQuery}s with open ranges. - */ +/** Processes {@link TermRangeQuery}s with open ranges. 
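For example, either endpoint can be left open with the '*' token (hypothetical field name):

    // assumes imports: StandardQueryParser, Query
    StandardQueryParser parser = new StandardQueryParser();
    Query upperOpen = parser.parse("date:[20210101 TO *]", "date");
    Query lowerOpen = parser.parse("date:[* TO 20210101]", "date");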
*/ public class OpenRangeQueryNodeProcessor extends QueryNodeProcessorImpl { - - final public static String OPEN_RANGE_TOKEN = "*"; - + + public static final String OPEN_RANGE_TOKEN = "*"; + public OpenRangeQueryNodeProcessor() {} - + @Override protected QueryNode postProcessNode(QueryNode node) throws QueryNodeException { - + if (node instanceof TermRangeQueryNode) { TermRangeQueryNode rangeNode = (TermRangeQueryNode) node; FieldQueryNode lowerNode = rangeNode.getLowerBound(); FieldQueryNode upperNode = rangeNode.getUpperBound(); CharSequence lowerText = lowerNode.getText(); CharSequence upperText = upperNode.getText(); - + if (OPEN_RANGE_TOKEN.equals(upperNode.getTextAsString()) - && (!(upperText instanceof UnescapedCharSequence) || !((UnescapedCharSequence) upperText) - .wasEscaped(0))) { + && (!(upperText instanceof UnescapedCharSequence) + || !((UnescapedCharSequence) upperText).wasEscaped(0))) { upperText = ""; } - + if (OPEN_RANGE_TOKEN.equals(lowerNode.getTextAsString()) - && (!(lowerText instanceof UnescapedCharSequence) || !((UnescapedCharSequence) lowerText) - .wasEscaped(0))) { + && (!(lowerText instanceof UnescapedCharSequence) + || !((UnescapedCharSequence) lowerText).wasEscaped(0))) { lowerText = ""; } - + lowerNode.setText(lowerText); upperNode.setText(upperText); } - + return node; - } - + @Override protected QueryNode preProcessNode(QueryNode node) throws QueryNodeException { return node; } - + @Override - protected List setChildrenOrder(List children) - throws QueryNodeException { + protected List setChildrenOrder(List children) throws QueryNodeException { return children; } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/PhraseSlopQueryNodeProcessor.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/PhraseSlopQueryNodeProcessor.java index bfec9d8faa0..cdda18e3dbd 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/PhraseSlopQueryNodeProcessor.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/PhraseSlopQueryNodeProcessor.java @@ -17,7 +17,6 @@ package org.apache.lucene.queryparser.flexible.standard.processors; import java.util.List; - import org.apache.lucene.queryparser.flexible.core.QueryNodeException; import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; import org.apache.lucene.queryparser.flexible.core.nodes.SlopQueryNode; @@ -26,10 +25,10 @@ import org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessor import org.apache.lucene.queryparser.flexible.standard.nodes.MultiPhraseQueryNode; /** - * This processor removes invalid {@link SlopQueryNode} objects in the query - * node tree. A {@link SlopQueryNode} is invalid if its child is neither a - * {@link TokenizedPhraseQueryNode} nor a {@link MultiPhraseQueryNode}. - * + * This processor removes invalid {@link SlopQueryNode} objects in the query node tree. A {@link + * SlopQueryNode} is invalid if its child is neither a {@link TokenizedPhraseQueryNode} nor a {@link + * MultiPhraseQueryNode}. 
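A sketch of an invalid slop being dropped (hypothetical field name; a standard analyzer is assumed):

    // assumes imports: StandardQueryParser, StandardAnalyzer, Query
    StandardQueryParser parser = new StandardQueryParser(new StandardAnalyzer());
    // "lucene" analyzes to a single token, so the ~3 slop ends up on a plain field node
    // and the slop node is expected to be removed by this processor:
    Query q = parser.parse("\"lucene\"~3", "body");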
+ * * @see SlopQueryNode */ public class PhraseSlopQueryNodeProcessor extends QueryNodeProcessorImpl { @@ -48,26 +47,20 @@ public class PhraseSlopQueryNodeProcessor extends QueryNodeProcessorImpl { && !(phraseSlopNode.getChild() instanceof MultiPhraseQueryNode)) { return phraseSlopNode.getChild(); } - } return node; - } @Override protected QueryNode preProcessNode(QueryNode node) throws QueryNodeException { return node; - } @Override - protected List setChildrenOrder(List children) - throws QueryNodeException { + protected List setChildrenOrder(List children) throws QueryNodeException { return children; - } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/PointQueryNodeProcessor.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/PointQueryNodeProcessor.java index 81a84496156..89d141ffbd9 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/PointQueryNodeProcessor.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/PointQueryNodeProcessor.java @@ -19,8 +19,6 @@ package org.apache.lucene.queryparser.flexible.standard.processors; import java.text.NumberFormat; import java.text.ParseException; import java.util.List; - -import org.apache.lucene.queryparser.flexible.messages.MessageImpl; import org.apache.lucene.queryparser.flexible.core.QueryNodeException; import org.apache.lucene.queryparser.flexible.core.QueryNodeParseException; import org.apache.lucene.queryparser.flexible.core.config.FieldConfig; @@ -30,74 +28,69 @@ import org.apache.lucene.queryparser.flexible.core.nodes.FieldQueryNode; import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; import org.apache.lucene.queryparser.flexible.core.nodes.RangeQueryNode; import org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessorImpl; +import org.apache.lucene.queryparser.flexible.messages.MessageImpl; import org.apache.lucene.queryparser.flexible.standard.config.PointsConfig; import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler.ConfigurationKeys; import org.apache.lucene.queryparser.flexible.standard.nodes.PointQueryNode; import org.apache.lucene.queryparser.flexible.standard.nodes.PointRangeQueryNode; /** - * This processor is used to convert {@link FieldQueryNode}s to - * {@link PointRangeQueryNode}s. It looks for - * {@link ConfigurationKeys#POINTS_CONFIG} set in the {@link FieldConfig} of - * every {@link FieldQueryNode} found. If - * {@link ConfigurationKeys#POINTS_CONFIG} is found, it considers that - * {@link FieldQueryNode} to be a numeric query and convert it to - * {@link PointRangeQueryNode} with upper and lower inclusive and lower and - * upper equals to the value represented by the {@link FieldQueryNode} converted - * to {@link Number}. It means that field:1 is converted to field:[1 - * TO 1].
+ * This processor is used to convert {@link FieldQueryNode}s to {@link PointRangeQueryNode}s. It + * looks for {@link ConfigurationKeys#POINTS_CONFIG} set in the {@link FieldConfig} of every {@link + * FieldQueryNode} found. If {@link ConfigurationKeys#POINTS_CONFIG} is found, it considers that + * {@link FieldQueryNode} to be a numeric query and converts it to a {@link PointRangeQueryNode} with + * inclusive upper and lower bounds, both equal to the value represented by the {@link + * FieldQueryNode} converted to {@link Number}. This means that field:1 is converted to + * field:[1 TO 1].
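A configuration sketch (hypothetical field name and number format):

    // assumes imports: StandardQueryParser, PointsConfig, Query,
    // java.text.NumberFormat, java.util.HashMap, java.util.Locale, java.util.Map
    StandardQueryParser parser = new StandardQueryParser();
    Map<String, PointsConfig> points = new HashMap<>();
    points.put("price", new PointsConfig(NumberFormat.getNumberInstance(Locale.ROOT), Double.class));
    parser.setPointsConfigMap(points);
    Query q = parser.parse("price:42", "body"); // expected to behave like price:[42 TO 42]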
    *
    - * Note that {@link FieldQueryNode}s children of a - * {@link RangeQueryNode} are ignored. - * + * Note that {@link FieldQueryNode}s children of a {@link RangeQueryNode} are ignored. + * * @see ConfigurationKeys#POINTS_CONFIG * @see FieldQueryNode * @see PointsConfig * @see PointQueryNode */ public class PointQueryNodeProcessor extends QueryNodeProcessorImpl { - - /** - * Constructs a {@link PointQueryNodeProcessor} object. - */ + + /** Constructs a {@link PointQueryNodeProcessor} object. */ public PointQueryNodeProcessor() { - // empty constructor + // empty constructor } - + @Override protected QueryNode postProcessNode(QueryNode node) throws QueryNodeException { - - if (node instanceof FieldQueryNode - && !(node.getParent() instanceof RangeQueryNode)) { - + + if (node instanceof FieldQueryNode && !(node.getParent() instanceof RangeQueryNode)) { + QueryConfigHandler config = getQueryConfigHandler(); - + if (config != null) { FieldQueryNode fieldNode = (FieldQueryNode) node; - FieldConfig fieldConfig = config.getFieldConfig(fieldNode - .getFieldAsString()); - + FieldConfig fieldConfig = config.getFieldConfig(fieldNode.getFieldAsString()); + if (fieldConfig != null) { PointsConfig numericConfig = fieldConfig.get(ConfigurationKeys.POINTS_CONFIG); - + if (numericConfig != null) { - + NumberFormat numberFormat = numericConfig.getNumberFormat(); String text = fieldNode.getTextAsString(); Number number = null; - + if (text.length() > 0) { - + try { number = numberFormat.parse(text); - + } catch (ParseException e) { - throw new QueryNodeParseException(new MessageImpl( - QueryParserMessages.COULD_NOT_PARSE_NUMBER, fieldNode - .getTextAsString(), numberFormat.getClass() - .getCanonicalName()), e); + throw new QueryNodeParseException( + new MessageImpl( + QueryParserMessages.COULD_NOT_PARSE_NUMBER, + fieldNode.getTextAsString(), + numberFormat.getClass().getCanonicalName()), + e); } - + if (Integer.class.equals(numericConfig.getType())) { number = number.intValue(); } else if (Long.class.equals(numericConfig.getType())) { @@ -107,15 +100,18 @@ public class PointQueryNodeProcessor extends QueryNodeProcessorImpl { } else if (Float.class.equals(numericConfig.getType())) { number = number.floatValue(); } - + } else { - throw new QueryNodeParseException(new MessageImpl( - QueryParserMessages.NUMERIC_CANNOT_BE_EMPTY, fieldNode.getFieldAsString())); + throw new QueryNodeParseException( + new MessageImpl( + QueryParserMessages.NUMERIC_CANNOT_BE_EMPTY, fieldNode.getFieldAsString())); } - - PointQueryNode lowerNode = new PointQueryNode(fieldNode.getField(), number, numberFormat); - PointQueryNode upperNode = new PointQueryNode(fieldNode.getField(), number, numberFormat); - + + PointQueryNode lowerNode = + new PointQueryNode(fieldNode.getField(), number, numberFormat); + PointQueryNode upperNode = + new PointQueryNode(fieldNode.getField(), number, numberFormat); + return new PointRangeQueryNode(lowerNode, upperNode, true, true, numericConfig); } } @@ -123,12 +119,12 @@ public class PointQueryNodeProcessor extends QueryNodeProcessorImpl { } return node; } - + @Override protected QueryNode preProcessNode(QueryNode node) throws QueryNodeException { return node; } - + @Override protected List setChildrenOrder(List children) throws QueryNodeException { return children; diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/PointRangeQueryNodeProcessor.java 
b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/PointRangeQueryNodeProcessor.java index 2ffc43735af..c8f0493f05c 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/PointRangeQueryNodeProcessor.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/PointRangeQueryNodeProcessor.java @@ -19,8 +19,6 @@ package org.apache.lucene.queryparser.flexible.standard.processors; import java.text.NumberFormat; import java.text.ParseException; import java.util.List; - -import org.apache.lucene.queryparser.flexible.messages.MessageImpl; import org.apache.lucene.queryparser.flexible.core.QueryNodeException; import org.apache.lucene.queryparser.flexible.core.QueryNodeParseException; import org.apache.lucene.queryparser.flexible.core.config.FieldConfig; @@ -30,6 +28,7 @@ import org.apache.lucene.queryparser.flexible.core.nodes.FieldQueryNode; import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; import org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessorImpl; import org.apache.lucene.queryparser.flexible.core.util.StringUtils; +import org.apache.lucene.queryparser.flexible.messages.MessageImpl; import org.apache.lucene.queryparser.flexible.standard.config.PointsConfig; import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler.ConfigurationKeys; import org.apache.lucene.queryparser.flexible.standard.nodes.PointQueryNode; @@ -37,14 +36,12 @@ import org.apache.lucene.queryparser.flexible.standard.nodes.PointRangeQueryNode import org.apache.lucene.queryparser.flexible.standard.nodes.TermRangeQueryNode; /** - * This processor is used to convert {@link TermRangeQueryNode}s to - * {@link PointRangeQueryNode}s. It looks for - * {@link ConfigurationKeys#POINTS_CONFIG} set in the {@link FieldConfig} of - * every {@link TermRangeQueryNode} found. If - * {@link ConfigurationKeys#POINTS_CONFIG} is found, it considers that - * {@link TermRangeQueryNode} to be a numeric range query and convert it to - * {@link PointRangeQueryNode}. - * + * This processor is used to convert {@link TermRangeQueryNode}s to {@link PointRangeQueryNode}s. It + * looks for {@link ConfigurationKeys#POINTS_CONFIG} set in the {@link FieldConfig} of every {@link + * TermRangeQueryNode} found. If {@link ConfigurationKeys#POINTS_CONFIG} is found, it considers that + * {@link TermRangeQueryNode} to be a numeric range query and convert it to {@link + * PointRangeQueryNode}. + * * @see ConfigurationKeys#POINTS_CONFIG * @see TermRangeQueryNode * @see PointsConfig @@ -52,9 +49,7 @@ import org.apache.lucene.queryparser.flexible.standard.nodes.TermRangeQueryNode; */ public class PointRangeQueryNodeProcessor extends QueryNodeProcessorImpl { - /** - * Constructs an empty {@link PointRangeQueryNodeProcessor} object. - */ + /** Constructs an empty {@link PointRangeQueryNodeProcessor} object. 
*/ public PointRangeQueryNodeProcessor() { // empty constructor } @@ -67,7 +62,8 @@ public class PointRangeQueryNodeProcessor extends QueryNodeProcessorImpl { if (config != null) { TermRangeQueryNode termRangeNode = (TermRangeQueryNode) node; - FieldConfig fieldConfig = config.getFieldConfig(StringUtils.toString(termRangeNode.getField())); + FieldConfig fieldConfig = + config.getFieldConfig(StringUtils.toString(termRangeNode.getField())); if (fieldConfig != null) { PointsConfig numericConfig = fieldConfig.get(ConfigurationKeys.POINTS_CONFIG); @@ -87,12 +83,13 @@ public class PointRangeQueryNodeProcessor extends QueryNodeProcessorImpl { lowerNumber = numberFormat.parse(lowerText); } catch (ParseException e) { - throw new QueryNodeParseException(new MessageImpl( - QueryParserMessages.COULD_NOT_PARSE_NUMBER, lower - .getTextAsString(), numberFormat.getClass() - .getCanonicalName()), e); + throw new QueryNodeParseException( + new MessageImpl( + QueryParserMessages.COULD_NOT_PARSE_NUMBER, + lower.getTextAsString(), + numberFormat.getClass().getCanonicalName()), + e); } - } if (upperText.length() > 0) { @@ -101,10 +98,12 @@ public class PointRangeQueryNodeProcessor extends QueryNodeProcessorImpl { upperNumber = numberFormat.parse(upperText); } catch (ParseException e) { - throw new QueryNodeParseException(new MessageImpl( - QueryParserMessages.COULD_NOT_PARSE_NUMBER, upper - .getTextAsString(), numberFormat.getClass() - .getCanonicalName()), e); + throw new QueryNodeParseException( + new MessageImpl( + QueryParserMessages.COULD_NOT_PARSE_NUMBER, + upper.getTextAsString(), + numberFormat.getClass().getCanonicalName()), + e); } } @@ -122,15 +121,18 @@ public class PointRangeQueryNodeProcessor extends QueryNodeProcessorImpl { if (lowerNumber != null) lowerNumber = lowerNumber.floatValue(); } - PointQueryNode lowerNode = new PointQueryNode(termRangeNode.getField(), lowerNumber, numberFormat); - PointQueryNode upperNode = new PointQueryNode(termRangeNode.getField(), upperNumber, numberFormat); + PointQueryNode lowerNode = + new PointQueryNode(termRangeNode.getField(), lowerNumber, numberFormat); + PointQueryNode upperNode = + new PointQueryNode(termRangeNode.getField(), upperNumber, numberFormat); boolean lowerInclusive = termRangeNode.isLowerInclusive(); boolean upperInclusive = termRangeNode.isUpperInclusive(); - return new PointRangeQueryNode(lowerNode, upperNode, lowerInclusive, upperInclusive, numericConfig); + return new PointRangeQueryNode( + lowerNode, upperNode, lowerInclusive, upperInclusive, numericConfig); } - } + } } } return node; diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/RegexpQueryNodeProcessor.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/RegexpQueryNodeProcessor.java index 652de875861..afe676b007b 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/RegexpQueryNodeProcessor.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/RegexpQueryNodeProcessor.java @@ -17,7 +17,6 @@ package org.apache.lucene.queryparser.flexible.standard.processors; import java.util.List; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.queryparser.flexible.core.QueryNodeException; import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; @@ -40,7 +39,8 @@ public class RegexpQueryNodeProcessor extends QueryNodeProcessorImpl { Analyzer analyzer = 
getQueryConfigHandler().get(ConfigurationKeys.ANALYZER); if (analyzer != null) { String text = regexpNode.getText().toString(); - // because we call utf8ToString, this will only work with the default TermToBytesRefAttribute + // because we call utf8ToString, this will only work with the default + // TermToBytesRefAttribute text = analyzer.normalize(regexpNode.getFieldAsString(), text).utf8ToString(); regexpNode.setText(text); } @@ -52,5 +52,4 @@ public class RegexpQueryNodeProcessor extends QueryNodeProcessorImpl { protected List setChildrenOrder(List children) throws QueryNodeException { return children; } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/RemoveEmptyNonLeafQueryNodeProcessor.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/RemoveEmptyNonLeafQueryNodeProcessor.java index ef183d232be..775b42a5bc8 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/RemoveEmptyNonLeafQueryNodeProcessor.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/RemoveEmptyNonLeafQueryNodeProcessor.java @@ -18,7 +18,6 @@ package org.apache.lucene.queryparser.flexible.standard.processors; import java.util.LinkedList; import java.util.List; - import org.apache.lucene.queryparser.flexible.core.QueryNodeException; import org.apache.lucene.queryparser.flexible.core.nodes.GroupQueryNode; import org.apache.lucene.queryparser.flexible.core.nodes.MatchNoDocsQueryNode; @@ -27,19 +26,16 @@ import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; import org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessorImpl; /** - * This processor removes every {@link QueryNode} that is not a leaf and has not - * children. If after processing the entire tree the root node is not a leaf and - * has no children, a {@link MatchNoDocsQueryNode} object is returned. - *
- * This processor is used at the end of a pipeline to avoid invalid query node - * tree structures like a {@link GroupQueryNode} or {@link ModifierQueryNode} - * with no children. - * + * This processor removes every {@link QueryNode} that is not a leaf and has no children. If after + * processing the entire tree the root node is not a leaf and has no children, a {@link + * MatchNoDocsQueryNode} object is returned.
    + * This processor is used at the end of a pipeline to avoid invalid query node tree structures like + * a {@link GroupQueryNode} or {@link ModifierQueryNode} with no children. + * * @see QueryNode * @see MatchNoDocsQueryNode */ -public class RemoveEmptyNonLeafQueryNodeProcessor extends - QueryNodeProcessorImpl { +public class RemoveEmptyNonLeafQueryNodeProcessor extends QueryNodeProcessorImpl { private LinkedList childrenBuffer = new LinkedList<>(); @@ -58,30 +54,25 @@ public class RemoveEmptyNonLeafQueryNodeProcessor extends if (children == null || children.size() == 0) { return new MatchNoDocsQueryNode(); } - } return queryTree; - } @Override protected QueryNode postProcessNode(QueryNode node) throws QueryNodeException { return node; - } @Override protected QueryNode preProcessNode(QueryNode node) throws QueryNodeException { return node; - } @Override - protected List setChildrenOrder(List children) - throws QueryNodeException { + protected List setChildrenOrder(List children) throws QueryNodeException { try { @@ -98,7 +89,6 @@ public class RemoveEmptyNonLeafQueryNodeProcessor extends } else { this.childrenBuffer.add(child); } - } children.clear(); @@ -109,7 +99,5 @@ public class RemoveEmptyNonLeafQueryNodeProcessor extends } return children; - } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/StandardQueryNodeProcessorPipeline.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/StandardQueryNodeProcessorPipeline.java index 15a44dfc7be..2df1f8fcdc5 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/StandardQueryNodeProcessorPipeline.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/StandardQueryNodeProcessorPipeline.java @@ -26,28 +26,26 @@ import org.apache.lucene.queryparser.flexible.standard.parser.StandardSyntaxPars import org.apache.lucene.search.Query; /** - * This pipeline has all the processors needed to process a query node tree, - * generated by {@link StandardSyntaxParser}, already assembled.
    + * This pipeline has all the processors needed to process a query node tree, generated by {@link + * StandardSyntaxParser}, already assembled.
    *
    * The order they are assembled affects the results.
    *
    - * This processor pipeline was designed to work with - * {@link StandardQueryConfigHandler}.
    + * This processor pipeline was designed to work with {@link StandardQueryConfigHandler}.
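
As an aside, a hedged sketch of the parse/process/build sequence this pipeline sits in, driven by hand; StandardQueryParser normally performs this wiring, the sample query and field names are assumptions, and the tree-builder step is the one described just below.

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
    import org.apache.lucene.queryparser.flexible.standard.builders.StandardQueryTreeBuilder;
    import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler;
    import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler.ConfigurationKeys;
    import org.apache.lucene.queryparser.flexible.standard.parser.StandardSyntaxParser;
    import org.apache.lucene.queryparser.flexible.standard.processors.StandardQueryNodeProcessorPipeline;
    import org.apache.lucene.search.Query;

    public class PipelineSketch {
      public static void main(String[] args) throws Exception {
        StandardQueryConfigHandler config = new StandardQueryConfigHandler();
        config.set(ConfigurationKeys.ANALYZER, new StandardAnalyzer());

        // 1. Parse the syntax into a query node tree.
        QueryNode tree = new StandardSyntaxParser().parse("title:(+lucene +search)", "defaultField");
        // 2. Run the assembled processor pipeline over the tree.
        QueryNode processed = new StandardQueryNodeProcessorPipeline(config).process(tree);
        // 3. Build the Lucene Query from the processed tree.
        Query query = new StandardQueryTreeBuilder().build(processed);
        System.out.println(query);
      }
    }
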
    *
    - * The result query node tree can be used to build a {@link Query} object using - * {@link StandardQueryTreeBuilder}. - * + * The result query node tree can be used to build a {@link Query} object using {@link + * StandardQueryTreeBuilder}. + * * @see StandardQueryTreeBuilder * @see StandardQueryConfigHandler * @see StandardSyntaxParser */ -public class StandardQueryNodeProcessorPipeline extends - QueryNodeProcessorPipeline { +public class StandardQueryNodeProcessorPipeline extends QueryNodeProcessorPipeline { public StandardQueryNodeProcessorPipeline(QueryConfigHandler queryConfig) { super(queryConfig); - add(new WildcardQueryNodeProcessor()); + add(new WildcardQueryNodeProcessor()); add(new MultiFieldQueryNodeProcessor()); add(new FuzzyQueryNodeProcessor()); add(new RegexpQueryNodeProcessor()); @@ -56,18 +54,17 @@ public class StandardQueryNodeProcessorPipeline extends add(new PointQueryNodeProcessor()); add(new PointRangeQueryNodeProcessor()); add(new TermRangeQueryNodeProcessor()); - add(new AllowLeadingWildcardProcessor()); + add(new AllowLeadingWildcardProcessor()); add(new AnalyzerQueryNodeProcessor()); add(new PhraseSlopQueryNodeProcessor()); - //add(new GroupQueryNodeProcessor()); + // add(new GroupQueryNodeProcessor()); add(new BooleanQuery2ModifierNodeProcessor()); add(new NoChildOptimizationQueryNodeProcessor()); add(new RemoveDeletedQueryNodesProcessor()); add(new RemoveEmptyNonLeafQueryNodeProcessor()); add(new BooleanSingleChildOptimizationQueryNodeProcessor()); add(new DefaultPhraseSlopQueryNodeProcessor()); - add(new BoostQueryNodeProcessor()); + add(new BoostQueryNodeProcessor()); add(new MultiTermRewriteMethodProcessor()); } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/TermRangeQueryNodeProcessor.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/TermRangeQueryNodeProcessor.java index 557c605c159..de55808ea19 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/TermRangeQueryNodeProcessor.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/TermRangeQueryNodeProcessor.java @@ -22,7 +22,6 @@ import java.util.Date; import java.util.List; import java.util.Locale; import java.util.TimeZone; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.document.DateTools; import org.apache.lucene.document.DateTools.Resolution; @@ -36,83 +35,79 @@ import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfi import org.apache.lucene.queryparser.flexible.standard.nodes.TermRangeQueryNode; /** - * This processors process {@link TermRangeQueryNode}s. It reads the lower and - * upper bounds value from the {@link TermRangeQueryNode} object and try - * to parse their values using a {@link DateFormat}. If the values cannot be - * parsed to a date value, it will only create the {@link TermRangeQueryNode} - * using the non-parsed values.
+ * This processor processes {@link TermRangeQueryNode}s. It reads the lower and upper bound values + * from the {@link TermRangeQueryNode} object and tries to parse them using a {@link + * DateFormat}. If the values cannot be parsed to date values, it will only create the {@link + * TermRangeQueryNode} using the non-parsed values.
    *
    - * If a {@link ConfigurationKeys#LOCALE} is defined in the - * {@link QueryConfigHandler} it will be used to parse the date, otherwise - * {@link Locale#getDefault()} will be used.
    + * If a {@link ConfigurationKeys#LOCALE} is defined in the {@link QueryConfigHandler} it will be + * used to parse the date, otherwise {@link Locale#getDefault()} will be used.
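
An illustrative sketch (not from this patch) of supplying the locale, and the date resolution covered in the next paragraph, through StandardQueryParser; the field name and the date strings (German SHORT pattern dd.MM.yy) are assumptions.

    import java.util.Locale;
    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.document.DateTools;
    import org.apache.lucene.queryparser.flexible.standard.StandardQueryParser;
    import org.apache.lucene.search.Query;

    public class DateRangeSketch {
      public static void main(String[] args) throws Exception {
        StandardQueryParser parser = new StandardQueryParser(new StandardAnalyzer());
        parser.setLocale(Locale.GERMANY); // SHORT date format dd.MM.yy
        parser.setDateResolution(DateTools.Resolution.DAY);
        // Both bounds parse as dates and are rewritten via DateTools.dateToString(...).
        Query query = parser.parse("date:[1.1.20 TO 31.12.20]", "defaultField");
        System.out.println(query);
      }
    }
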
    *
    - * If a {@link ConfigurationKeys#DATE_RESOLUTION} is defined and the - * {@link Resolution} is not null it will also be used to parse the - * date value. - * + * If a {@link ConfigurationKeys#DATE_RESOLUTION} is defined and the {@link Resolution} is not + * null it will also be used to parse the date value. + * * @see ConfigurationKeys#DATE_RESOLUTION * @see ConfigurationKeys#LOCALE * @see TermRangeQueryNode */ public class TermRangeQueryNodeProcessor extends QueryNodeProcessorImpl { - + public TermRangeQueryNodeProcessor() { - // empty constructor + // empty constructor } - + @Override protected QueryNode postProcessNode(QueryNode node) throws QueryNodeException { - + if (node instanceof TermRangeQueryNode) { TermRangeQueryNode termRangeNode = (TermRangeQueryNode) node; FieldQueryNode upper = termRangeNode.getUpperBound(); FieldQueryNode lower = termRangeNode.getLowerBound(); - + DateTools.Resolution dateRes = null; boolean inclusive = false; Locale locale = getQueryConfigHandler().get(ConfigurationKeys.LOCALE); - + if (locale == null) { locale = Locale.getDefault(); } - + TimeZone timeZone = getQueryConfigHandler().get(ConfigurationKeys.TIMEZONE); - + if (timeZone == null) { timeZone = TimeZone.getDefault(); } - + CharSequence field = termRangeNode.getField(); String fieldStr = null; - + if (field != null) { fieldStr = field.toString(); } - - FieldConfig fieldConfig = getQueryConfigHandler() - .getFieldConfig(fieldStr); - + + FieldConfig fieldConfig = getQueryConfigHandler().getFieldConfig(fieldStr); + if (fieldConfig != null) { dateRes = fieldConfig.get(ConfigurationKeys.DATE_RESOLUTION); } - + if (termRangeNode.isUpperInclusive()) { inclusive = true; } - + String part1 = lower.getTextAsString(); String part2 = upper.getTextAsString(); - + try { DateFormat df = DateFormat.getDateInstance(DateFormat.SHORT, locale); df.setLenient(true); - + if (part1.length() > 0) { Date d1 = df.parse(part1); part1 = DateTools.dateToString(d1, dateRes); lower.setText(part1); } - + if (part2.length() > 0) { Date d2 = df.parse(part2); if (inclusive) { @@ -128,43 +123,37 @@ public class TermRangeQueryNodeProcessor extends QueryNodeProcessorImpl { cal.set(Calendar.MILLISECOND, 999); d2 = cal.getTime(); } - + part2 = DateTools.dateToString(d2, dateRes); upper.setText(part2); - } - + } catch (Exception e) { // not a date Analyzer analyzer = getQueryConfigHandler().get(ConfigurationKeys.ANALYZER); if (analyzer != null) { - // because we call utf8ToString, this will only work with the default TermToBytesRefAttribute + // because we call utf8ToString, this will only work with the default + // TermToBytesRefAttribute part1 = analyzer.normalize(lower.getFieldAsString(), part1).utf8ToString(); part2 = analyzer.normalize(lower.getFieldAsString(), part2).utf8ToString(); lower.setText(part1); upper.setText(part2); } } - } - + return node; - } - + @Override protected QueryNode preProcessNode(QueryNode node) throws QueryNodeException { - + return node; - } - + @Override - protected List setChildrenOrder(List children) - throws QueryNodeException { - + protected List setChildrenOrder(List children) throws QueryNodeException { + return children; - } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/WildcardQueryNodeProcessor.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/WildcardQueryNodeProcessor.java index 39eb0df13ed..3fc52b317d3 100644 --- 
a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/WildcardQueryNodeProcessor.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/WildcardQueryNodeProcessor.java @@ -19,7 +19,6 @@ package org.apache.lucene.queryparser.flexible.standard.processors; import java.util.List; import java.util.regex.Matcher; import java.util.regex.Pattern; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.queryparser.flexible.core.QueryNodeException; import org.apache.lucene.queryparser.flexible.core.nodes.FieldQueryNode; @@ -37,12 +36,11 @@ import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.util.BytesRef; /** - * The {@link StandardSyntaxParser} creates {@link PrefixWildcardQueryNode} nodes which - * have values containing the prefixed wildcard. However, Lucene - * {@link PrefixQuery} cannot contain the prefixed wildcard. So, this processor - * basically removed the prefixed wildcard from the - * {@link PrefixWildcardQueryNode} value. - * + * The {@link StandardSyntaxParser} creates {@link PrefixWildcardQueryNode} nodes which have values + * containing the prefixed wildcard. However, Lucene {@link PrefixQuery} cannot contain the prefixed + * wildcard. So, this processor basically removed the prefixed wildcard from the {@link + * PrefixWildcardQueryNode} value. + * * @see PrefixQuery * @see PrefixWildcardQueryNode */ @@ -57,23 +55,23 @@ public class WildcardQueryNodeProcessor extends QueryNodeProcessorImpl { StringBuilder sb = new StringBuilder(); int last = 0; - while (wildcardMatcher.find()){ + while (wildcardMatcher.find()) { // continue if escaped char - if (wildcardMatcher.group(1) != null){ + if (wildcardMatcher.group(1) != null) { continue; } - if (wildcardMatcher.start() > 0){ + if (wildcardMatcher.start() > 0) { String chunk = wildcard.substring(last, wildcardMatcher.start()); BytesRef normalized = a.normalize(field, chunk); sb.append(normalized.utf8ToString()); } - //append the wildcard character + // append the wildcard character sb.append(wildcardMatcher.group(2)); last = wildcardMatcher.end(); } - if (last < wildcard.length()){ + if (last < wildcard.length()) { String chunk = wildcard.substring(last); BytesRef normalized = a.normalize(field, chunk); sb.append(normalized.utf8ToString()); @@ -88,24 +86,24 @@ public class WildcardQueryNodeProcessor extends QueryNodeProcessorImpl { @Override protected QueryNode postProcessNode(QueryNode node) throws QueryNodeException { - // the old Lucene Parser ignores FuzzyQueryNode that are also PrefixWildcardQueryNode or WildcardQueryNode + // the old Lucene Parser ignores FuzzyQueryNode that are also PrefixWildcardQueryNode or + // WildcardQueryNode // we do the same here, also ignore empty terms - if (node instanceof FieldQueryNode || node instanceof FuzzyQueryNode) { - FieldQueryNode fqn = (FieldQueryNode) node; - CharSequence text = fqn.getText(); - - // do not process wildcards for TermRangeQueryNode children and + if (node instanceof FieldQueryNode || node instanceof FuzzyQueryNode) { + FieldQueryNode fqn = (FieldQueryNode) node; + CharSequence text = fqn.getText(); + + // do not process wildcards for TermRangeQueryNode children and // QuotedFieldQueryNode to reproduce the old parser behavior - if (fqn.getParent() instanceof TermRangeQueryNode - || fqn instanceof QuotedFieldQueryNode - || text.length() <= 0){ + if (fqn.getParent() instanceof TermRangeQueryNode + || fqn instanceof QuotedFieldQueryNode + || text.length() <= 0) { // Ignore 
empty terms return node; } - + // Code below simulates the old lucene parser behavior for wildcards - - + if (isWildcard(text)) { Analyzer analyzer = getQueryConfigHandler().get(ConfigurationKeys.ANALYZER); if (analyzer != null) { @@ -117,48 +115,45 @@ public class WildcardQueryNodeProcessor extends QueryNodeProcessorImpl { return new WildcardQueryNode(fqn.getField(), text, fqn.getBegin(), fqn.getEnd()); } } - } return node; - } private boolean isWildcard(CharSequence text) { - if (text ==null || text.length() <= 0) return false; - + if (text == null || text.length() <= 0) return false; + // If a un-escaped '*' or '?' if found return true // start at the end since it's more common to put wildcards at the end - for(int i=text.length()-1; i>=0; i--){ - if ((text.charAt(i) == '*' || text.charAt(i) == '?') && !UnescapedCharSequence.wasEscaped(text, i)){ + for (int i = text.length() - 1; i >= 0; i--) { + if ((text.charAt(i) == '*' || text.charAt(i) == '?') + && !UnescapedCharSequence.wasEscaped(text, i)) { return true; } } - + return false; } private boolean isPrefixWildcard(CharSequence text) { if (text == null || text.length() <= 0 || !isWildcard(text)) return false; - + // Validate last character is a '*' and was not escaped // If single '*' is is a wildcard not prefix to simulate old queryparser - if (text.charAt(text.length()-1) != '*') return false; - if (UnescapedCharSequence.wasEscaped(text, text.length()-1)) return false; + if (text.charAt(text.length() - 1) != '*') return false; + if (UnescapedCharSequence.wasEscaped(text, text.length() - 1)) return false; if (text.length() == 1) return false; - + // Only make a prefix if there is only one single star at the end and no '?' or '*' characters // If single wildcard return false to mimic old queryparser - for(int i=0; i setChildrenOrder(List children) - throws QueryNodeException { + protected List setChildrenOrder(List children) throws QueryNodeException { return children; - } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/package-info.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/package-info.java index ec68c7ef975..5c0b3138df3 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/package-info.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/package-info.java @@ -14,16 +14,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -/** + +/** * Lucene Query Node Processors. - * + * *

    Lucene Query Node Processors

    - *

    - * The package org.apache.lucene.queryparser.flexible.standard.processors contains every processor needed to assembly a pipeline - * that modifies the query node tree according to the actual Lucene queries. - *

    - * These processors are already assembled correctly in the StandardQueryNodeProcessorPipeline. + * + *

The package org.apache.lucene.queryparser.flexible.standard.processors contains every + * processor needed to assemble a pipeline that modifies the query node tree according to the actual + * Lucene queries. + * + *

    These processors are already assembled correctly in the StandardQueryNodeProcessorPipeline. */ package org.apache.lucene.queryparser.flexible.standard.processors; - diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/simple/SimpleQueryParser.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/simple/SimpleQueryParser.java index 18fdcf7068b..95c4a7592df 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/simple/SimpleQueryParser.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/simple/SimpleQueryParser.java @@ -16,6 +16,8 @@ */ package org.apache.lucene.queryparser.simple; +import java.util.Collections; +import java.util.Map; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; @@ -30,99 +32,100 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.QueryBuilder; import org.apache.lucene.util.automaton.LevenshteinAutomata; -import java.util.Collections; -import java.util.Map; - /** * SimpleQueryParser is used to parse human readable query syntax. - *

    - * The main idea behind this parser is that a person should be able to type - * whatever they want to represent a query, and this parser will do its best - * to interpret what to search for no matter how poorly composed the request - * may be. Tokens are considered to be any of a term, phrase, or subquery for the - * operations described below. Whitespace including ' ' '\n' '\r' and '\t' - * and certain operators may be used to delimit tokens ( ) + | " . - *

    - * Any errors in query syntax will be ignored and the parser will attempt - * to decipher what it can; however, this may mean odd or unexpected results. - *

    - * Query Operators + * + *

    The main idea behind this parser is that a person should be able to type whatever they want to + * represent a query, and this parser will do its best to interpret what to search for no matter how + * poorly composed the request may be. Tokens are considered to be any of a term, phrase, or + * subquery for the operations described below. Whitespace including ' ' '\n' '\r' and '\t' and + * certain operators may be used to delimit tokens ( ) + | " . + * + *

    Any errors in query syntax will be ignored and the parser will attempt to decipher what it + * can; however, this may mean odd or unexpected results. + * + *

    Query Operators + * *

      - *
    • '{@code +}' specifies {@code AND} operation: token1+token2 - *
    • '{@code |}' specifies {@code OR} operation: token1|token2 - *
    • '{@code -}' negates a single token: -token0 - *
    • '{@code "}' creates phrases of terms: "term1 term2 ..." - *
    • '{@code *}' at the end of terms specifies prefix query: term* - *
    • '{@code ~}N' at the end of terms specifies fuzzy query: term~1 - *
    • '{@code ~}N' at the end of phrases specifies near query: "term1 term2"~5 - *
    • '{@code (}' and '{@code )}' specifies precedence: token1 + (token2 | token3) + *
    • '{@code +}' specifies {@code AND} operation: token1+token2 + *
    • '{@code |}' specifies {@code OR} operation: token1|token2 + *
    • '{@code -}' negates a single token: -token0 + *
    • '{@code "}' creates phrases of terms: "term1 term2 ..." + *
    • '{@code *}' at the end of terms specifies prefix query: term* + *
    • '{@code ~}N' at the end of terms specifies fuzzy query: term~1 + *
    • '{@code ~}N' at the end of phrases specifies near query: "term1 term2"~5 + *
    • '{@code (}' and '{@code )}' specifies precedence: token1 + (token2 | token3) *
    - *

    - * The {@link #setDefaultOperator default operator} is {@code OR} if no other operator is specified. - * For example, the following will {@code OR} {@code token1} and {@code token2} together: + * + *

    The {@link #setDefaultOperator default operator} is {@code OR} if no other operator is + * specified. For example, the following will {@code OR} {@code token1} and {@code token2} together: * token1 token2 - *

    - * Normal operator precedence will be simple order from right to left. - * For example, the following will evaluate {@code token1 OR token2} first, - * then {@code AND} with {@code token3}: - *

    token1 | token2 + token3
    + * + *

Normal operator precedence is simple right-to-left evaluation order. For example, the following + * will evaluate {@code token1 OR token2} first, then {@code AND} with {@code token3}: + * + *

    + * + * token1 | token2 + token3 + * + *
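
A hypothetical usage sketch of the parser (field weights, flag mask, and query string are assumptions, not part of this patch):

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.queryparser.simple.SimpleQueryParser;
    import org.apache.lucene.search.BooleanClause;
    import org.apache.lucene.search.Query;

    public class SimpleQueryParserSketch {
      public static void main(String[] args) {
        Map<String, Float> weights = new HashMap<>();
        weights.put("title", 2.0f); // title matches count double
        weights.put("body", 1.0f);
        // Enable only AND, OR and phrase support; other operators are treated as text.
        SimpleQueryParser parser =
            new SimpleQueryParser(
                new StandardAnalyzer(),
                weights,
                SimpleQueryParser.AND_OPERATOR
                    | SimpleQueryParser.OR_OPERATOR
                    | SimpleQueryParser.PHRASE_OPERATOR);
        parser.setDefaultOperator(BooleanClause.Occur.MUST); // implicit AND between tokens
        Query query = parser.parse("token1 | token2 + token3");
        System.out.println(query);
      }
    }
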
    + * * Escaping - *

    - * An individual term may contain any possible character with certain characters - * requiring escaping using a '{@code \}'. The following characters will need to be escaped in - * terms and phrases: - * {@code + | " ( ) ' \} - *

    - * The '{@code -}' operator is a special case. On individual terms (not phrases) the first - * character of a term that is {@code -} must be escaped; however, any '{@code -}' characters - * beyond the first character do not need to be escaped. - * For example: + * + *

    An individual term may contain any possible character with certain characters requiring + * escaping using a '{@code \}'. The following characters will need to be escaped in terms and + * phrases: {@code + | " ( ) ' \} + * + *

    The '{@code -}' operator is a special case. On individual terms (not phrases) the first + * character of a term that is {@code -} must be escaped; however, any '{@code -}' characters beyond + * the first character do not need to be escaped. For example: + * *

      - *
    • {@code -term1} -- Specifies {@code NOT} operation against {@code term1} - *
    • {@code \-term1} -- Searches for the term {@code -term1}. - *
    • {@code term-1} -- Searches for the term {@code term-1}. - *
    • {@code term\-1} -- Searches for the term {@code term-1}. + *
    • {@code -term1} -- Specifies {@code NOT} operation against {@code term1} + *
    • {@code \-term1} -- Searches for the term {@code -term1}. + *
    • {@code term-1} -- Searches for the term {@code term-1}. + *
    • {@code term\-1} -- Searches for the term {@code term-1}. *
    - *

    - * The '{@code *}' operator is a special case. On individual terms (not phrases) the last + * + *

    The '{@code *}' operator is a special case. On individual terms (not phrases) the last * character of a term that is '{@code *}' must be escaped; however, any '{@code *}' characters * before the last character do not need to be escaped: + * *

      - *
    • {@code term1*} -- Searches for the prefix {@code term1} - *
    • {@code term1\*} -- Searches for the term {@code term1*} - *
    • {@code term*1} -- Searches for the term {@code term*1} - *
    • {@code term\*1} -- Searches for the term {@code term*1} + *
    • {@code term1*} -- Searches for the prefix {@code term1} + *
    • {@code term1\*} -- Searches for the term {@code term1*} + *
    • {@code term*1} -- Searches for the term {@code term*1} + *
    • {@code term\*1} -- Searches for the term {@code term*1} *
    - *

    - * Note that above examples consider the terms before text processing. + * + *

    Note that above examples consider the terms before text processing. */ public class SimpleQueryParser extends QueryBuilder { /** Map of fields to query against with their weights */ - protected final Map weights; + protected final Map weights; /** flags to the parser (to turn features on/off) */ protected final int flags; /** Enables {@code AND} operator (+) */ - public static final int AND_OPERATOR = 1<<0; + public static final int AND_OPERATOR = 1 << 0; /** Enables {@code NOT} operator (-) */ - public static final int NOT_OPERATOR = 1<<1; + public static final int NOT_OPERATOR = 1 << 1; /** Enables {@code OR} operator (|) */ - public static final int OR_OPERATOR = 1<<2; + public static final int OR_OPERATOR = 1 << 2; /** Enables {@code PREFIX} operator (*) */ - public static final int PREFIX_OPERATOR = 1<<3; + public static final int PREFIX_OPERATOR = 1 << 3; /** Enables {@code PHRASE} operator (") */ - public static final int PHRASE_OPERATOR = 1<<4; + public static final int PHRASE_OPERATOR = 1 << 4; /** Enables {@code PRECEDENCE} operators: {@code (} and {@code )} */ - public static final int PRECEDENCE_OPERATORS = 1<<5; + public static final int PRECEDENCE_OPERATORS = 1 << 5; /** Enables {@code ESCAPE} operator (\) */ - public static final int ESCAPE_OPERATOR = 1<<6; + public static final int ESCAPE_OPERATOR = 1 << 6; /** Enables {@code WHITESPACE} operators: ' ' '\n' '\r' '\t' */ - public static final int WHITESPACE_OPERATOR = 1<<7; + public static final int WHITESPACE_OPERATOR = 1 << 7; /** Enables {@code FUZZY} operators: (~) on single terms */ - public static final int FUZZY_OPERATOR = 1<<8; + public static final int FUZZY_OPERATOR = 1 << 8; /** Enables {@code NEAR} operators: (~) on phrases */ - public static final int NEAR_OPERATOR = 1<<9; - + public static final int NEAR_OPERATOR = 1 << 9; private BooleanClause.Occur defaultOperator = BooleanClause.Occur.SHOULD; @@ -204,9 +207,10 @@ public class SimpleQueryParser extends QueryBuilder { // before the next character is determined continue; } else if ((state.data[state.index] == ' ' - || state.data[state.index] == '\t' - || state.data[state.index] == '\n' - || state.data[state.index] == '\r') && (flags & WHITESPACE_OPERATOR) != 0) { + || state.data[state.index] == '\t' + || state.data[state.index] == '\n' + || state.data[state.index] == '\r') + && (flags & WHITESPACE_OPERATOR) != 0) { // ignore any whitespace found as it may have already been // used a delimiter across a term (or phrase or subquery) // or is simply extraneous @@ -302,9 +306,9 @@ public class SimpleQueryParser extends QueryBuilder { } else if (state.data[state.index] == '"') { // if there are still characters after the closing ", check for a // tilde - if (state.length > (state.index + 1) && - state.data[state.index+1] == '~' && - (flags & NEAR_OPERATOR) != 0) { + if (state.length > (state.index + 1) + && state.data[state.index + 1] == '~' + && (flags & NEAR_OPERATOR) != 0) { state.index++; // check for characters after the tilde if (state.length > (state.index + 1)) { @@ -477,6 +481,7 @@ public class SimpleQueryParser extends QueryBuilder { /** * Helper parsing fuzziness from parsing state + * * @return slop/edit distance, 0 in the case of non-parsing slop/edit string */ private int parseFuzziness(State state) { @@ -498,7 +503,7 @@ public class SimpleQueryParser extends QueryBuilder { } int fuzziness = 0; try { - String fuzzyString = new String(slopText, 0, slopLength); + String fuzzyString = new String(slopText, 0, slopLength); if ("".equals(fuzzyString)) { 
// Use automatic fuzziness, ~2 fuzziness = 2; @@ -517,9 +522,7 @@ public class SimpleQueryParser extends QueryBuilder { return 0; } - /** - * Helper returning true if the state has reached the end of token. - */ + /** Helper returning true if the state has reached the end of token. */ private boolean tokenFinished(State state) { if ((state.data[state.index] == '"' && (flags & PHRASE_OPERATOR) != 0) || (state.data[state.index] == '|' && (flags & OR_OPERATOR) != 0) @@ -527,20 +530,19 @@ public class SimpleQueryParser extends QueryBuilder { || (state.data[state.index] == '(' && (flags & PRECEDENCE_OPERATORS) != 0) || (state.data[state.index] == ')' && (flags & PRECEDENCE_OPERATORS) != 0) || ((state.data[state.index] == ' ' - || state.data[state.index] == '\t' - || state.data[state.index] == '\n' - || state.data[state.index] == '\r') && (flags & WHITESPACE_OPERATOR) != 0)) { + || state.data[state.index] == '\t' + || state.data[state.index] == '\n' + || state.data[state.index] == '\r') + && (flags & WHITESPACE_OPERATOR) != 0)) { return true; } return false; } - /** - * Factory method to generate a standard query (no phrase or prefix operators). - */ + /** Factory method to generate a standard query (no phrase or prefix operators). */ protected Query newDefaultQuery(String text) { BooleanQuery.Builder bq = new BooleanQuery.Builder(); - for (Map.Entry entry : weights.entrySet()) { + for (Map.Entry entry : weights.entrySet()) { Query q = createBooleanQuery(entry.getKey(), text, defaultOperator); if (q != null) { float boost = entry.getValue(); @@ -553,12 +555,10 @@ public class SimpleQueryParser extends QueryBuilder { return simplify(bq.build()); } - /** - * Factory method to generate a fuzzy query. - */ + /** Factory method to generate a fuzzy query. */ protected Query newFuzzyQuery(String text, int fuzziness) { BooleanQuery.Builder bq = new BooleanQuery.Builder(); - for (Map.Entry entry : weights.entrySet()) { + for (Map.Entry entry : weights.entrySet()) { final String fieldName = entry.getKey(); final BytesRef term = getAnalyzer().normalize(fieldName, text); Query q = new FuzzyQuery(new Term(fieldName, term), fuzziness); @@ -571,12 +571,10 @@ public class SimpleQueryParser extends QueryBuilder { return simplify(bq.build()); } - /** - * Factory method to generate a phrase query with slop. - */ + /** Factory method to generate a phrase query with slop. */ protected Query newPhraseQuery(String text, int slop) { BooleanQuery.Builder bq = new BooleanQuery.Builder(); - for (Map.Entry entry : weights.entrySet()) { + for (Map.Entry entry : weights.entrySet()) { Query q = createPhraseQuery(entry.getKey(), text, slop); if (q != null) { float boost = entry.getValue(); @@ -589,12 +587,10 @@ public class SimpleQueryParser extends QueryBuilder { return simplify(bq.build()); } - /** - * Factory method to generate a prefix query. - */ + /** Factory method to generate a prefix query. 
*/ protected Query newPrefixQuery(String text) { BooleanQuery.Builder bq = new BooleanQuery.Builder(); - for (Map.Entry entry : weights.entrySet()) { + for (Map.Entry entry : weights.entrySet()) { final String fieldName = entry.getKey(); final BytesRef term = getAnalyzer().normalize(fieldName, text); Query q = new PrefixQuery(new Term(fieldName, term)); @@ -607,9 +603,7 @@ public class SimpleQueryParser extends QueryBuilder { return simplify(bq.build()); } - /** - * Helper to simplify boolean queries with 0 or 1 clause - */ + /** Helper to simplify boolean queries with 0 or 1 clause */ protected Query simplify(BooleanQuery bq) { if (bq.clauses().isEmpty()) { return null; @@ -620,18 +614,12 @@ public class SimpleQueryParser extends QueryBuilder { } } - /** - * Returns the implicit operator setting, which will be - * either {@code SHOULD} or {@code MUST}. - */ + /** Returns the implicit operator setting, which will be either {@code SHOULD} or {@code MUST}. */ public BooleanClause.Occur getDefaultOperator() { return defaultOperator; } - /** - * Sets the implicit operator setting, which must be - * either {@code SHOULD} or {@code MUST}. - */ + /** Sets the implicit operator setting, which must be either {@code SHOULD} or {@code MUST}. */ public void setDefaultOperator(BooleanClause.Occur operator) { if (operator != BooleanClause.Occur.SHOULD && operator != BooleanClause.Occur.MUST) { throw new IllegalArgumentException("invalid operator: only SHOULD or MUST are allowed"); @@ -640,7 +628,7 @@ public class SimpleQueryParser extends QueryBuilder { } static class State { - final char[] data; // the characters in the query string + final char[] data; // the characters in the query string final char[] buffer; // a temporary buffer used to reduce necessary allocations int index; int length; diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/simple/package-info.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/simple/package-info.java index 926ed1795d5..fe913bcdbd0 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/simple/package-info.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/simple/package-info.java @@ -14,8 +14,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -/** - * A simple query parser for human-entered queries. - */ + +/** A simple query parser for human-entered queries. */ package org.apache.lucene.queryparser.simple; diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/package-info.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/package-info.java index 78a8e71b794..35c64ba116d 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/package-info.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/package-info.java @@ -14,12 +14,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -/** + +/** * This package contains the QueryParser.jj source file for the Surround parser. - *

    - * Parsing the text of a query results in a SrndQuery in the + * + *

    Parsing the text of a query results in a SrndQuery in the * org.apache.lucene.queryparser.surround.query package. */ package org.apache.lucene.queryparser.surround.parser; - diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/AndQuery.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/AndQuery.java index 2ed8d796024..3c20d8b24fc 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/AndQuery.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/AndQuery.java @@ -15,21 +15,21 @@ * limitations under the License. */ package org.apache.lucene.queryparser.surround.query; -import java.util.List; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.BooleanClause; -/** - * Factory for conjunctions - */ -public class AndQuery extends ComposedQuery { - public AndQuery(List queries, boolean inf, String opName) { +import java.util.List; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.Query; + +/** Factory for conjunctions */ +public class AndQuery extends ComposedQuery { + public AndQuery(List queries, boolean inf, String opName) { super(queries, inf, opName); } - + @Override public Query makeLuceneQueryFieldNoBoost(String fieldName, BasicQueryFactory qf) { - return SrndBooleanQuery.makeBooleanQuery( /* subqueries can be individually boosted */ - makeLuceneSubQueriesField(fieldName, qf), BooleanClause.Occur.MUST); + return SrndBooleanQuery.makeBooleanQuery( + /* subqueries can be individually boosted */ + makeLuceneSubQueriesField(fieldName, qf), BooleanClause.Occur.MUST); } } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/BasicQueryFactory.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/BasicQueryFactory.java index 4f01a2d36e1..fab0be888b9 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/BasicQueryFactory.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/BasicQueryFactory.java @@ -23,7 +23,7 @@ package org.apache.lucene.queryparser.surround.query; * Use this class to limit the buffer usage for reading terms from an index. * Default is 1024, the same as the max. number of subqueries for a BooleanQuery. 
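
By way of example, a sketch under assumptions (the field name "body" and the sample surround query are illustrative): parsing surround syntax and converting the result into a Lucene query, with this factory capping how many basic term/span queries the rewrite may create.

    import org.apache.lucene.queryparser.surround.parser.QueryParser;
    import org.apache.lucene.queryparser.surround.query.BasicQueryFactory;
    import org.apache.lucene.queryparser.surround.query.SrndQuery;
    import org.apache.lucene.search.Query;

    public class SurroundSketch {
      public static void main(String[] args) throws Exception {
        // "3w" is ordered proximity within distance 3 in surround syntax.
        SrndQuery srnd = QueryParser.parse("3w(apache, lucene)");
        // The factory limits the number of basic queries produced during rewriting.
        Query query = srnd.makeLuceneQueryField("body", new BasicQueryFactory(1024));
        System.out.println(query);
      }
    }
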
*/ - + import org.apache.lucene.index.Term; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.spans.SpanTermQuery; @@ -34,23 +34,30 @@ public class BasicQueryFactory { this.maxBasicQueries = maxBasicQueries; this.queriesMade = 0; } - + public BasicQueryFactory() { this(1024); } - + private int maxBasicQueries; private int queriesMade; - - public int getNrQueriesMade() {return queriesMade;} - public int getMaxBasicQueries() {return maxBasicQueries;} - + + public int getNrQueriesMade() { + return queriesMade; + } + + public int getMaxBasicQueries() { + return maxBasicQueries; + } + @Override public String toString() { return getClass().getName() - + "(maxBasicQueries: " + maxBasicQueries - + ", queriesMade: " + queriesMade - + ")"; + + "(maxBasicQueries: " + + maxBasicQueries + + ", queriesMade: " + + queriesMade + + ")"; } private boolean atMax() { @@ -58,16 +65,15 @@ public class BasicQueryFactory { } protected synchronized void checkMax() throws TooManyBasicQueries { - if (atMax()) - throw new TooManyBasicQueries(getMaxBasicQueries()); + if (atMax()) throw new TooManyBasicQueries(getMaxBasicQueries()); queriesMade++; } - + public TermQuery newTermQuery(Term term) throws TooManyBasicQueries { checkMax(); return new TermQuery(term); } - + public SpanTermQuery newSpanTermQuery(Term term) throws TooManyBasicQueries { checkMax(); return new SpanTermQuery(term); @@ -75,19 +81,17 @@ public class BasicQueryFactory { @Override public int hashCode() { - return getClass().hashCode() ^ (atMax() ? 7 : 31*32); + return getClass().hashCode() ^ (atMax() ? 7 : 31 * 32); } - /** Two BasicQueryFactory's are equal when they generate - * the same types of basic queries, or both cannot generate queries anymore. + /** + * Two BasicQueryFactory's are equal when they generate the same types of basic queries, or both + * cannot generate queries anymore. */ @Override public boolean equals(Object obj) { - if (! (obj instanceof BasicQueryFactory)) - return false; + if (!(obj instanceof BasicQueryFactory)) return false; BasicQueryFactory other = (BasicQueryFactory) obj; return atMax() == other.atMax(); } } - - diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/ComposedQuery.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/ComposedQuery.java index e44c41fd029..e49da7928fc 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/ComposedQuery.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/ComposedQuery.java @@ -15,45 +15,57 @@ * limitations under the License. 
*/ package org.apache.lucene.queryparser.surround.query; -import java.util.List; + import java.util.ArrayList; import java.util.Iterator; - +import java.util.List; import org.apache.lucene.search.Query; /** Base class for composite queries (such as AND/OR/NOT) */ -public abstract class ComposedQuery extends SrndQuery { - +public abstract class ComposedQuery extends SrndQuery { + public ComposedQuery(List qs, boolean operatorInfix, String opName) { recompose(qs); this.operatorInfix = operatorInfix; this.opName = opName; } - + protected void recompose(List queries) { - if (queries.size() < 2) throw new AssertionError("Too few subqueries"); + if (queries.size() < 2) throw new AssertionError("Too few subqueries"); this.queries = queries; } - + protected String opName; - public String getOperatorName() {return opName;} - + + public String getOperatorName() { + return opName; + } + protected List queries; - - public Iterator getSubQueriesIterator() {return queries.listIterator();} - public int getNrSubQueries() {return queries.size();} - - public SrndQuery getSubQuery(int qn) {return queries.get(qn);} + public Iterator getSubQueriesIterator() { + return queries.listIterator(); + } + + public int getNrSubQueries() { + return queries.size(); + } + + public SrndQuery getSubQuery(int qn) { + return queries.get(qn); + } + + private boolean operatorInfix; + + public boolean isOperatorInfix() { + return operatorInfix; + } /* else prefix operator */ - private boolean operatorInfix; - public boolean isOperatorInfix() { return operatorInfix; } /* else prefix operator */ - public List makeLuceneSubQueriesField(String fn, BasicQueryFactory qf) { List luceneSubQueries = new ArrayList<>(); Iterator sqi = getSubQueriesIterator(); while (sqi.hasNext()) { - luceneSubQueries.add( (sqi.next()).makeLuceneQueryField(fn, qf)); + luceneSubQueries.add((sqi.next()).makeLuceneQueryField(fn, qf)); } return luceneSubQueries; } @@ -71,10 +83,18 @@ public abstract class ComposedQuery extends SrndQuery { } /* Override for different spacing */ - protected String getPrefixSeparator() { return ", ";} - protected String getBracketOpen() { return "(";} - protected String getBracketClose() { return ")";} - + protected String getPrefixSeparator() { + return ", "; + } + + protected String getBracketOpen() { + return "("; + } + + protected String getBracketClose() { + return ")"; + } + protected void infixToString(StringBuilder r) { /* Brackets are possibly redundant in the result. */ Iterator sqi = getSubQueriesIterator(); @@ -104,8 +124,7 @@ public abstract class ComposedQuery extends SrndQuery { } r.append(getBracketClose()); } - - + @Override public boolean isFieldsSubQueryAcceptable() { /* at least one subquery should be acceptable */ @@ -118,4 +137,3 @@ public abstract class ComposedQuery extends SrndQuery { return false; } } - diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/DistanceQuery.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/DistanceQuery.java index 74fd3ae90b1..69b819f631b 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/DistanceQuery.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/DistanceQuery.java @@ -15,10 +15,10 @@ * limitations under the License. 
*/ package org.apache.lucene.queryparser.surround.query; -import java.util.List; -import java.util.Iterator; -import java.io.IOException; +import java.io.IOException; +import java.util.Iterator; +import java.util.List; import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; @@ -28,23 +28,24 @@ import org.apache.lucene.search.spans.SpanQuery; /** Factory for NEAR queries */ public class DistanceQuery extends ComposedQuery implements DistanceSubQuery { public DistanceQuery( - List queries, - boolean infix, - int opDistance, - String opName, - boolean ordered) { + List queries, boolean infix, int opDistance, String opName, boolean ordered) { super(queries, infix, opName); this.opDistance = opDistance; /* the distance indicated in the operator */ this.ordered = ordered; } - private int opDistance; - public int getOpDistance() {return opDistance;} - + + public int getOpDistance() { + return opDistance; + } + private boolean ordered; - public boolean subQueriesOrdered() {return ordered;} - + + public boolean subQueriesOrdered() { + return ordered; + } + @Override public String distanceSubQueryNotAllowed() { Iterator sqi = getSubQueriesIterator(); @@ -54,7 +55,7 @@ public class DistanceQuery extends ComposedQuery implements DistanceSubQuery { DistanceSubQuery dsq = (DistanceSubQuery) leq; String m = dsq.distanceSubQueryNotAllowed(); if (m != null) { - return m; + return m; } } else { return "Operator " + getOperatorName() + " does not allow subquery " + leq.toString(); @@ -62,34 +63,33 @@ public class DistanceQuery extends ComposedQuery implements DistanceSubQuery { } return null; /* subqueries acceptable */ } - + @Override public void addSpanQueries(SpanNearClauseFactory sncf) throws IOException { - Query snq = getSpanNearQuery(sncf.getIndexReader(), - sncf.getFieldName(), - sncf.getBasicQueryFactory()); + Query snq = + getSpanNearQuery(sncf.getIndexReader(), sncf.getFieldName(), sncf.getBasicQueryFactory()); sncf.addSpanQuery(snq); } - - public Query getSpanNearQuery( - IndexReader reader, - String fieldName, - BasicQueryFactory qf) throws IOException { + + public Query getSpanNearQuery(IndexReader reader, String fieldName, BasicQueryFactory qf) + throws IOException { SpanQuery[] spanClauses = new SpanQuery[getNrSubQueries()]; Iterator sqi = getSubQueriesIterator(); int qi = 0; while (sqi.hasNext()) { SpanNearClauseFactory sncf = new SpanNearClauseFactory(reader, fieldName, qf); - - ((DistanceSubQuery)sqi.next()).addSpanQueries(sncf); - if (sncf.size() == 0) { /* distance operator requires all sub queries */ - while (sqi.hasNext()) { /* produce evt. error messages but ignore results */ - ((DistanceSubQuery)sqi.next()).addSpanQueries(sncf); + + ((DistanceSubQuery) sqi.next()).addSpanQueries(sncf); + if (sncf.size() == 0) { + /* distance operator requires all sub queries */ + while (sqi.hasNext()) { + /* produce evt. 
error messages but ignore results */ + ((DistanceSubQuery) sqi.next()).addSpanQueries(sncf); sncf.clear(); } return new MatchNoDocsQuery(); } - + spanClauses[qi] = sncf.makeSpanClause(); qi++; } @@ -102,4 +102,3 @@ public class DistanceQuery extends ComposedQuery implements DistanceSubQuery { return new DistanceRewriteQuery(this, fieldName, qf); } } - diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/DistanceRewriteQuery.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/DistanceRewriteQuery.java index adbbd0a580b..d739bc022e6 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/DistanceRewriteQuery.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/DistanceRewriteQuery.java @@ -17,17 +17,13 @@ package org.apache.lucene.queryparser.surround.query; import java.io.IOException; - import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryVisitor; class DistanceRewriteQuery extends RewriteQuery { - DistanceRewriteQuery( - DistanceQuery srndQuery, - String fieldName, - BasicQueryFactory qf) { + DistanceRewriteQuery(DistanceQuery srndQuery, String fieldName, BasicQueryFactory qf) { super(srndQuery, fieldName, qf); } @@ -42,4 +38,3 @@ class DistanceRewriteQuery extends RewriteQuery { visitor.visitLeaf(this); } } - diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/DistanceSubQuery.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/DistanceSubQuery.java index 2ec384d3f1d..93f68e5ee37 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/DistanceSubQuery.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/DistanceSubQuery.java @@ -15,20 +15,18 @@ * limitations under the License. */ package org.apache.lucene.queryparser.surround.query; + import java.io.IOException; -/** - * Interface for queries that can be nested as subqueries - * into a span near. - */ +/** Interface for queries that can be nested as subqueries into a span near. */ public interface DistanceSubQuery { - /** When distanceSubQueryNotAllowed() returns non null, the reason why the subquery - * is not allowed as a distance subquery is returned. - *
<br>When distanceSubQueryNotAllowed() returns null addSpanNearQueries() can be used - * in the creation of the span near clause for the subquery. + /** + * When distanceSubQueryNotAllowed() returns non null, the reason why the subquery is not allowed + * as a distance subquery is returned. <br>
    + * When distanceSubQueryNotAllowed() returns null addSpanNearQueries() can be used in the creation + * of the span near clause for the subquery. */ String distanceSubQueryNotAllowed(); - + void addSpanQueries(SpanNearClauseFactory sncf) throws IOException; } - diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/FieldsQuery.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/FieldsQuery.java index 4d933b7b0a4..0764020338a 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/FieldsQuery.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/FieldsQuery.java @@ -15,54 +15,55 @@ * limitations under the License. */ package org.apache.lucene.queryparser.surround.query; -import java.util.ArrayList; -import java.util.List; -import java.util.Iterator; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; import org.apache.lucene.search.Query; -/** - * Forms an OR query of the provided query across multiple fields. - */ -public class FieldsQuery extends SrndQuery { /* mostly untested */ +/** Forms an OR query of the provided query across multiple fields. */ +public class FieldsQuery extends SrndQuery { + /* mostly untested */ private SrndQuery q; private List fieldNames; private final char fieldOp; - private static final String OR_OPERATOR_NAME = "OR"; /* for expanded queries, not normally visible */ - + private static final String OR_OPERATOR_NAME = + "OR"; /* for expanded queries, not normally visible */ + public FieldsQuery(SrndQuery q, List fieldNames, char fieldOp) { this.q = q; this.fieldNames = fieldNames; this.fieldOp = fieldOp; } - + public FieldsQuery(SrndQuery q, String fieldName, char fieldOp) { this.q = q; fieldNames = new ArrayList<>(); fieldNames.add(fieldName); this.fieldOp = fieldOp; } - + @Override public boolean isFieldsSubQueryAcceptable() { return false; } - + public Query makeLuceneQueryNoBoost(BasicQueryFactory qf) { - if (fieldNames.size() == 1) { /* single field name: no new queries needed */ + if (fieldNames.size() == 1) { + /* single field name: no new queries needed */ return q.makeLuceneQueryFieldNoBoost(fieldNames.get(0), qf); - } else { /* OR query over the fields */ + } else { + /* OR query over the fields */ List queries = new ArrayList<>(); Iterator fni = getFieldNames().listIterator(); SrndQuery qc; while (fni.hasNext()) { qc = q.clone(); - queries.add( new FieldsQuery( qc, fni.next(), fieldOp)); + queries.add(new FieldsQuery(qc, fni.next(), fieldOp)); } - OrQuery oq = new OrQuery(queries, - true /* infix OR for field names */, - OR_OPERATOR_NAME); - // System.out.println(getClass().toString() + ", fields expanded: " + oq.toString()); /* needs testing */ + OrQuery oq = new OrQuery(queries, true /* infix OR for field names */, OR_OPERATOR_NAME); + // System.out.println(getClass().toString() + ", fields expanded: " + oq.toString()); /* needs + // testing */ return oq.makeLuceneQueryField(null, qf); } } @@ -72,11 +73,14 @@ public class FieldsQuery extends SrndQuery { /* mostly untested */ return makeLuceneQueryNoBoost(qf); /* use this.fieldNames instead of fieldName */ } - - public List getFieldNames() {return fieldNames;} + public List getFieldNames() { + return fieldNames; + } + + public char getFieldOperator() { + return fieldOp; + } - public char getFieldOperator() { return fieldOp;} - @Override public String toString() { StringBuilder r = new StringBuilder(); @@ -86,7 +90,7 @@ public class FieldsQuery extends 
SrndQuery { /* mostly untested */ r.append(")"); return r.toString(); } - + protected void fieldNamesToString(StringBuilder r) { Iterator fni = getFieldNames().listIterator(); while (fni.hasNext()) { @@ -95,4 +99,3 @@ public class FieldsQuery extends SrndQuery { /* mostly untested */ } } } - diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/NotQuery.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/NotQuery.java index 897d3a8e385..23c20e5bdf5 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/NotQuery.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/NotQuery.java @@ -15,27 +15,29 @@ * limitations under the License. */ package org.apache.lucene.queryparser.surround.query; -import java.util.List; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.BooleanQuery; -import org.apache.lucene.search.BooleanClause; -/** - * Factory for prohibited clauses - */ -public class NotQuery extends ComposedQuery { - public NotQuery(List queries, String opName) { super(queries, true /* infix */, opName); } - +import java.util.List; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.Query; + +/** Factory for prohibited clauses */ +public class NotQuery extends ComposedQuery { + public NotQuery(List queries, String opName) { + super(queries, true /* infix */, opName); + } + @Override public Query makeLuceneQueryFieldNoBoost(String fieldName, BasicQueryFactory qf) { List luceneSubQueries = makeLuceneSubQueriesField(fieldName, qf); BooleanQuery.Builder bq = new BooleanQuery.Builder(); - bq.add( luceneSubQueries.get(0), BooleanClause.Occur.MUST); - SrndBooleanQuery.addQueriesToBoolean(bq, - // FIXME: do not allow weights on prohibited subqueries. - luceneSubQueries.subList(1, luceneSubQueries.size()), - // later subqueries: not required, prohibited - BooleanClause.Occur.MUST_NOT); + bq.add(luceneSubQueries.get(0), BooleanClause.Occur.MUST); + SrndBooleanQuery.addQueriesToBoolean( + bq, + // FIXME: do not allow weights on prohibited subqueries. + luceneSubQueries.subList(1, luceneSubQueries.size()), + // later subqueries: not required, prohibited + BooleanClause.Occur.MUST_NOT); return bq.build(); } } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/OrQuery.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/OrQuery.java index 76daa19d979..bdf71abc56a 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/OrQuery.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/OrQuery.java @@ -15,35 +15,33 @@ * limitations under the License. 
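NotQuery above shows the translation rule for prohibited clauses: the first operand becomes a MUST clause and every later operand a MUST_NOT clause of a single BooleanQuery. A sketch with the same assumed field and factory as before, and assuming the parser accepts the infix NOT form:

    // "apache NOT solr" -> BooleanQuery: +contents:apache -contents:solr
    Query bq =
        QueryParser.parse("apache NOT solr")
            .makeLuceneQueryField("contents", new BasicQueryFactory(1024));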
*/ package org.apache.lucene.queryparser.surround.query; -import java.util.List; -import java.util.Iterator; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.BooleanClause; import java.io.IOException; +import java.util.Iterator; +import java.util.List; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.Query; -/** - * Factory for disjunctions - */ -public class OrQuery extends ComposedQuery implements DistanceSubQuery { +/** Factory for disjunctions */ +public class OrQuery extends ComposedQuery implements DistanceSubQuery { public OrQuery(List queries, boolean infix, String opName) { super(queries, infix, opName); } - + @Override public Query makeLuceneQueryFieldNoBoost(String fieldName, BasicQueryFactory qf) { return SrndBooleanQuery.makeBooleanQuery( - /* subqueries can be individually boosted */ - makeLuceneSubQueriesField(fieldName, qf), BooleanClause.Occur.SHOULD); + /* subqueries can be individually boosted */ + makeLuceneSubQueriesField(fieldName, qf), BooleanClause.Occur.SHOULD); } - + @Override public String distanceSubQueryNotAllowed() { Iterator sqi = getSubQueriesIterator(); while (sqi.hasNext()) { SrndQuery leq = sqi.next(); if (leq instanceof DistanceSubQuery) { - String m = ((DistanceSubQuery)leq).distanceSubQueryNotAllowed(); + String m = ((DistanceSubQuery) leq).distanceSubQueryNotAllowed(); if (m != null) { return m; } @@ -53,7 +51,7 @@ public class OrQuery extends ComposedQuery implements DistanceSubQuery { } return null; } - + @Override public void addSpanQueries(SpanNearClauseFactory sncf) throws IOException { Iterator sqi = getSubQueriesIterator(); @@ -63,4 +61,3 @@ public class OrQuery extends ComposedQuery implements DistanceSubQuery { } } } - diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/RewriteQuery.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/RewriteQuery.java index 438535ff5fe..69005e6dd00 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/RewriteQuery.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/RewriteQuery.java @@ -15,9 +15,9 @@ * limitations under the License. */ package org.apache.lucene.queryparser.surround.query; + import java.io.IOException; import java.util.Objects; - import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.Query; @@ -26,46 +26,41 @@ abstract class RewriteQuery extends Query { protected final String fieldName; protected final BasicQueryFactory qf; - RewriteQuery( - SQ srndQuery, - String fieldName, - BasicQueryFactory qf) { + RewriteQuery(SQ srndQuery, String fieldName, BasicQueryFactory qf) { this.srndQuery = Objects.requireNonNull(srndQuery); this.fieldName = Objects.requireNonNull(fieldName); this.qf = Objects.requireNonNull(qf); } @Override - abstract public Query rewrite(IndexReader reader) throws IOException; + public abstract Query rewrite(IndexReader reader) throws IOException; @Override public String toString(String field) { return getClass().getName() - + (field.isEmpty() ? "" : "(unused: " + field + ")") - + "(" + fieldName - + ", " + srndQuery.toString() - + ", " + qf.toString() - + ")"; + + (field.isEmpty() ? 
"" : "(unused: " + field + ")") + + "(" + + fieldName + + ", " + + srndQuery.toString() + + ", " + + qf.toString() + + ")"; } @Override public int hashCode() { - return classHash() - ^ fieldName.hashCode() - ^ qf.hashCode() - ^ srndQuery.hashCode(); + return classHash() ^ fieldName.hashCode() ^ qf.hashCode() ^ srndQuery.hashCode(); } @Override public boolean equals(Object other) { - return sameClassAs(other) && - equalsTo(getClass().cast(other)); + return sameClassAs(other) && equalsTo(getClass().cast(other)); } private boolean equalsTo(RewriteQuery other) { - return fieldName.equals(other.fieldName) && - qf.equals(other.qf) && - srndQuery.equals(other.srndQuery); + return fieldName.equals(other.fieldName) + && qf.equals(other.qf) + && srndQuery.equals(other.srndQuery); } } - diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SimpleTerm.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SimpleTerm.java index f574feb9a3b..81219501b06 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SimpleTerm.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SimpleTerm.java @@ -15,41 +15,48 @@ * limitations under the License. */ package org.apache.lucene.queryparser.surround.query; -import java.io.IOException; +import java.io.IOException; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; import org.apache.lucene.search.Query; -/** - * Base class for queries that expand to sets of simple terms. - */ -public abstract class SimpleTerm - extends SrndQuery - implements DistanceSubQuery, Comparable -{ - public SimpleTerm(boolean q) {quoted = q;} - +/** Base class for queries that expand to sets of simple terms. */ +public abstract class SimpleTerm extends SrndQuery + implements DistanceSubQuery, Comparable { + public SimpleTerm(boolean q) { + quoted = q; + } + private boolean quoted; - boolean isQuoted() {return quoted;} - - public String getQuote() {return "\"";} - public String getFieldOperator() {return "/";} - + + boolean isQuoted() { + return quoted; + } + + public String getQuote() { + return "\""; + } + + public String getFieldOperator() { + return "/"; + } + public abstract String toStringUnquoted(); - /** @deprecated (March 2011) Not normally used, to be removed from Lucene 4.0. - * This class implementing Comparable is to be removed at the same time. + /** + * @deprecated (March 2011) Not normally used, to be removed from Lucene 4.0. This class + * implementing Comparable is to be removed at the same time. 
*/ @Override @Deprecated public int compareTo(SimpleTerm ost) { /* for ordering terms and prefixes before using an index, not used */ - return this.toStringUnquoted().compareTo( ost.toStringUnquoted()); + return this.toStringUnquoted().compareTo(ost.toStringUnquoted()); } - + protected void suffixToString(StringBuilder r) {} /* override for prefix query */ - + @Override public String toString() { StringBuilder r = new StringBuilder(); @@ -64,34 +71,31 @@ public abstract class SimpleTerm weightToString(r); return r.toString(); } - + public abstract void visitMatchingTerms( - IndexReader reader, - String fieldName, - MatchingTermVisitor mtv) throws IOException; - - /** - * Callback to visit each matching term during "rewrite" - * in {@link #visitMatchingTerm(Term)} - */ + IndexReader reader, String fieldName, MatchingTermVisitor mtv) throws IOException; + + /** Callback to visit each matching term during "rewrite" in {@link #visitMatchingTerm(Term)} */ public interface MatchingTermVisitor { - void visitMatchingTerm(Term t)throws IOException; + void visitMatchingTerm(Term t) throws IOException; } @Override - public String distanceSubQueryNotAllowed() {return null;} - + public String distanceSubQueryNotAllowed() { + return null; + } + @Override public void addSpanQueries(final SpanNearClauseFactory sncf) throws IOException { visitMatchingTerms( - sncf.getIndexReader(), - sncf.getFieldName(), - new MatchingTermVisitor() { - @Override - public void visitMatchingTerm(Term term) throws IOException { - sncf.addTermWeighted(term, getWeight()); - } - }); + sncf.getIndexReader(), + sncf.getFieldName(), + new MatchingTermVisitor() { + @Override + public void visitMatchingTerm(Term term) throws IOException { + sncf.addTermWeighted(term, getWeight()); + } + }); } @Override @@ -99,6 +103,3 @@ public abstract class SimpleTerm return new SimpleTermRewriteQuery(this, fieldName, qf); } } - - - diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SimpleTermRewriteQuery.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SimpleTermRewriteQuery.java index 50203a8af94..22f1118beb4 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SimpleTermRewriteQuery.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SimpleTermRewriteQuery.java @@ -19,7 +19,6 @@ package org.apache.lucene.queryparser.surround.query; import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; @@ -29,28 +28,29 @@ import org.apache.lucene.search.QueryVisitor; class SimpleTermRewriteQuery extends RewriteQuery { - SimpleTermRewriteQuery( - SimpleTerm srndQuery, - String fieldName, - BasicQueryFactory qf) { + SimpleTermRewriteQuery(SimpleTerm srndQuery, String fieldName, BasicQueryFactory qf) { super(srndQuery, fieldName, qf); } @Override public Query rewrite(IndexReader reader) throws IOException { final List luceneSubQueries = new ArrayList<>(); - srndQuery.visitMatchingTerms(reader, fieldName, - new SimpleTerm.MatchingTermVisitor() { - @Override - public void visitMatchingTerm(Term term) throws IOException { - luceneSubQueries.add(qf.newTermQuery(term)); - } - }); - return (luceneSubQueries.size() == 0) ? new MatchNoDocsQuery() - : (luceneSubQueries.size() == 1) ? 
luceneSubQueries.get(0) - : SrndBooleanQuery.makeBooleanQuery( - /* luceneSubQueries all have default weight */ - luceneSubQueries, BooleanClause.Occur.SHOULD); /* OR the subquery terms */ + srndQuery.visitMatchingTerms( + reader, + fieldName, + new SimpleTerm.MatchingTermVisitor() { + @Override + public void visitMatchingTerm(Term term) throws IOException { + luceneSubQueries.add(qf.newTermQuery(term)); + } + }); + return (luceneSubQueries.size() == 0) + ? new MatchNoDocsQuery() + : (luceneSubQueries.size() == 1) + ? luceneSubQueries.get(0) + : SrndBooleanQuery.makeBooleanQuery( + /* luceneSubQueries all have default weight */ + luceneSubQueries, BooleanClause.Occur.SHOULD); /* OR the subquery terms */ } @Override @@ -59,4 +59,3 @@ class SimpleTermRewriteQuery extends RewriteQuery { visitor.visitLeaf(this); } } - diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SpanNearClauseFactory.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SpanNearClauseFactory.java index 2db315f617f..682d16c4906 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SpanNearClauseFactory.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SpanNearClauseFactory.java @@ -25,8 +25,8 @@ Operations: - add a weighted Term this should add a corresponding SpanTermQuery, or increase the weight of an existing one. - -- add a weighted subquery SpanNearQuery + +- add a weighted subquery SpanNearQuery - create a clause for SpanNearQuery from the things added above. For this, create an array of SpanQuery's from the added ones. @@ -35,26 +35,25 @@ Operations: */ /* When it is necessary to suppress double subqueries as much as possible: - hashCode() and equals() on unweighted SpanQuery are needed (possibly via getTerms(), - the terms are individually hashable). - Idem SpanNearQuery: hash on the subqueries and the slop. - Evt. merge SpanNearQuery's by adding the weights of the corresponding subqueries. - */ - + hashCode() and equals() on unweighted SpanQuery are needed (possibly via getTerms(), + the terms are individually hashable). + Idem SpanNearQuery: hash on the subqueries and the slop. + Evt. merge SpanNearQuery's by adding the weights of the corresponding subqueries. +*/ + /* To be determined: - Are SpanQuery weights handled correctly during search by Lucene? - Should the resulting SpanOrQuery be sorted? - Could other SpanQueries be added for use in this factory: - - SpanOrQuery: in principle yes, but it only has access to its terms - via getTerms(); are the corresponding weights available? - - SpanFirstQuery: treat similar to subquery SpanNearQuery. (ok?) - - SpanNotQuery: treat similar to subquery SpanNearQuery. (ok?) - */ + Are SpanQuery weights handled correctly during search by Lucene? + Should the resulting SpanOrQuery be sorted? + Could other SpanQueries be added for use in this factory: + - SpanOrQuery: in principle yes, but it only has access to its terms + via getTerms(); are the corresponding weights available? + - SpanFirstQuery: treat similar to subquery SpanNearQuery. (ok?) + - SpanNotQuery: treat similar to subquery SpanNearQuery. (ok?) 
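The operations listed above reduce to: accumulate weighted span clauses for one field, then emit either the single clause or a SpanOrQuery over them. A sketch against an open IndexReader; the reader, terms and weights are assumptions:

    // inside a method that may throw IOException; Term is org.apache.lucene.index.Term
    SpanNearClauseFactory sncf =
        new SpanNearClauseFactory(reader, "contents", new BasicQueryFactory(1024));
    sncf.addTermWeighted(new Term("contents", "apache"), 1.0f);
    sncf.addTermWeighted(new Term("contents", "apache"), 1.0f); // weights of equal terms add up
    sncf.addTermWeighted(new Term("contents", "lucene"), 2.0f);
    SpanQuery clause = sncf.makeSpanClause(); // SpanOrQuery over the two distinct terms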
+*/ import java.io.IOException; import java.util.HashMap; import java.util.Iterator; - import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; import org.apache.lucene.search.MatchNoDocsQuery; @@ -64,10 +63,7 @@ import org.apache.lucene.search.spans.SpanOrQuery; import org.apache.lucene.search.spans.SpanQuery; import org.apache.lucene.search.spans.SpanTermQuery; - -/** - * Factory for {@link SpanOrQuery} - */ +/** Factory for {@link SpanOrQuery} */ public class SpanNearClauseFactory { // FIXME: rename to SpanClauseFactory public SpanNearClauseFactory(IndexReader reader, String fieldName, BasicQueryFactory qf) { this.reader = reader; @@ -75,40 +71,48 @@ public class SpanNearClauseFactory { // FIXME: rename to SpanClauseFactory this.weightBySpanQuery = new HashMap<>(); this.qf = qf; } + private IndexReader reader; private String fieldName; private HashMap weightBySpanQuery; private BasicQueryFactory qf; - - public IndexReader getIndexReader() {return reader;} - - public String getFieldName() {return fieldName;} - public BasicQueryFactory getBasicQueryFactory() {return qf;} - - public int size() {return weightBySpanQuery.size();} - - public void clear() {weightBySpanQuery.clear();} + public IndexReader getIndexReader() { + return reader; + } + + public String getFieldName() { + return fieldName; + } + + public BasicQueryFactory getBasicQueryFactory() { + return qf; + } + + public int size() { + return weightBySpanQuery.size(); + } + + public void clear() { + weightBySpanQuery.clear(); + } protected void addSpanQueryWeighted(SpanQuery sq, float weight) { Float w = weightBySpanQuery.get(sq); - if (w != null) - w = Float.valueOf(w.floatValue() + weight); - else - w = Float.valueOf(weight); - weightBySpanQuery.put(sq, w); + if (w != null) w = Float.valueOf(w.floatValue() + weight); + else w = Float.valueOf(weight); + weightBySpanQuery.put(sq, w); } - - public void addTermWeighted(Term t, float weight) throws IOException { + + public void addTermWeighted(Term t, float weight) throws IOException { SpanTermQuery stq = qf.newSpanTermQuery(t); /* CHECKME: wrap in Hashable...? */ addSpanQueryWeighted(stq, weight); } public void addSpanQuery(Query q) { - if (q.getClass() == MatchNoDocsQuery.class) - return; - if (! 
(q instanceof SpanQuery)) + if (q.getClass() == MatchNoDocsQuery.class) return; + if (!(q instanceof SpanQuery)) throw new AssertionError("Expected SpanQuery: " + q.toString(getFieldName())); float boost = 1f; if (q instanceof SpanBoostQuery) { @@ -116,11 +120,11 @@ public class SpanNearClauseFactory { // FIXME: rename to SpanClauseFactory boost = bq.getBoost(); q = bq.getQuery(); } - addSpanQueryWeighted((SpanQuery)q, boost); + addSpanQueryWeighted((SpanQuery) q, boost); } public SpanQuery makeSpanClause() { - SpanQuery [] spanQueries = new SpanQuery[size()]; + SpanQuery[] spanQueries = new SpanQuery[size()]; Iterator sqi = weightBySpanQuery.keySet().iterator(); int i = 0; while (sqi.hasNext()) { @@ -131,11 +135,8 @@ public class SpanNearClauseFactory { // FIXME: rename to SpanClauseFactory } spanQueries[i++] = sq; } - - if (spanQueries.length == 1) - return spanQueries[0]; - else - return new SpanOrQuery(spanQueries); + + if (spanQueries.length == 1) return spanQueries[0]; + else return new SpanOrQuery(spanQueries); } } - diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndBooleanQuery.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndBooleanQuery.java index 501034edd99..4741e7ce4ff 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndBooleanQuery.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndBooleanQuery.java @@ -15,25 +15,21 @@ * limitations under the License. */ package org.apache.lucene.queryparser.surround.query; -import java.util.List; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.BooleanQuery; +import java.util.List; import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.Query; class SrndBooleanQuery { public static void addQueriesToBoolean( - BooleanQuery.Builder bq, - List queries, - BooleanClause.Occur occur) { + BooleanQuery.Builder bq, List queries, BooleanClause.Occur occur) { for (int i = 0; i < queries.size(); i++) { - bq.add( queries.get(i), occur); + bq.add(queries.get(i), occur); } } - - public static Query makeBooleanQuery( - List queries, - BooleanClause.Occur occur) { + + public static Query makeBooleanQuery(List queries, BooleanClause.Occur occur) { if (queries.size() <= 1) { throw new AssertionError("Too few subqueries: " + queries.size()); } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndPrefixQuery.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndPrefixQuery.java index b83b525f60e..7a5754e99e1 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndPrefixQuery.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndPrefixQuery.java @@ -15,22 +15,20 @@ * limitations under the License. 
*/ package org.apache.lucene.queryparser.surround.query; + +import java.io.IOException; +import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.MultiTerms; import org.apache.lucene.index.Term; import org.apache.lucene.index.Terms; +import org.apache.lucene.index.TermsEnum; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.StringHelper; -import org.apache.lucene.index.TermsEnum; -import org.apache.lucene.index.IndexReader; -import java.io.IOException; - - -/** - * Query that matches String prefixes - */ +/** Query that matches String prefixes */ public class SrndPrefixQuery extends SimpleTerm { private final BytesRef prefixRef; + public SrndPrefixQuery(String prefix, boolean quoted, char truncator) { super(quoted); this.prefix = prefix; @@ -39,27 +37,34 @@ public class SrndPrefixQuery extends SimpleTerm { } private final String prefix; - public String getPrefix() {return prefix;} - + + public String getPrefix() { + return prefix; + } + private final char truncator; - public char getSuffixOperator() {return truncator;} - + + public char getSuffixOperator() { + return truncator; + } + public Term getLucenePrefixTerm(String fieldName) { return new Term(fieldName, getPrefix()); } - + @Override - public String toStringUnquoted() {return getPrefix();} - + public String toStringUnquoted() { + return getPrefix(); + } + @Override - protected void suffixToString(StringBuilder r) {r.append(getSuffixOperator());} - + protected void suffixToString(StringBuilder r) { + r.append(getSuffixOperator()); + } + @Override - public void visitMatchingTerms( - IndexReader reader, - String fieldName, - MatchingTermVisitor mtv) throws IOException - { + public void visitMatchingTerms(IndexReader reader, String fieldName, MatchingTermVisitor mtv) + throws IOException { /* inspired by PrefixQuery.rewrite(): */ Terms terms = MultiTerms.getTerms(reader, fieldName); if (terms != null) { @@ -81,7 +86,7 @@ public class SrndPrefixQuery extends SimpleTerm { } if (!skip) { - while(true) { + while (true) { BytesRef text = termsEnum.next(); if (text != null && StringHelper.startsWith(text, prefixRef)) { mtv.visitMatchingTerm(new Term(fieldName, text.utf8ToString())); diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndQuery.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndQuery.java index 4ecb30f6e5f..9719b1dafc9 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndQuery.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndQuery.java @@ -15,83 +15,94 @@ * limitations under the License. 
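visitMatchingTerms() is the term-enumeration hook shared by all SimpleTerm subclasses, and MatchingTermVisitor has a single method, so a lambda fits. A sketch that prints the index terms starting with "luc"; the open reader and the field name are assumptions:

    SrndPrefixQuery pq = new SrndPrefixQuery("luc", false /* quoted */, '*');
    pq.visitMatchingTerms(reader, "contents", term -> System.out.println(term.text()));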
*/ package org.apache.lucene.queryparser.surround.query; + import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.Query; /** Lowest level base class for surround queries */ public abstract class SrndQuery implements Cloneable { public SrndQuery() {} - + private float weight = (float) 1.0; private boolean weighted = false; public void setWeight(float w) { weight = w; /* as parsed from the query text */ weighted = true; - } - public boolean isWeighted() {return weighted;} - public float getWeight() { return weight; } - public String getWeightString() {return Float.toString(getWeight());} + } - public String getWeightOperator() {return "^";} + public boolean isWeighted() { + return weighted; + } - protected void weightToString(StringBuilder r) { /* append the weight part of a query */ + public float getWeight() { + return weight; + } + + public String getWeightString() { + return Float.toString(getWeight()); + } + + public String getWeightOperator() { + return "^"; + } + + protected void weightToString(StringBuilder r) { + /* append the weight part of a query */ if (isWeighted()) { r.append(getWeightOperator()); r.append(getWeightString()); } } - - public Query makeLuceneQueryField(String fieldName, BasicQueryFactory qf){ + + public Query makeLuceneQueryField(String fieldName, BasicQueryFactory qf) { Query q = makeLuceneQueryFieldNoBoost(fieldName, qf); if (isWeighted()) { q = new BoostQuery(q, getWeight()); /* weight may be at any level in a SrndQuery */ } return q; } - + public abstract Query makeLuceneQueryFieldNoBoost(String fieldName, BasicQueryFactory qf); - - /** This method is used by {@link #hashCode()} and {@link #equals(Object)}, - * see LUCENE-2945. - */ + + /** This method is used by {@link #hashCode()} and {@link #equals(Object)}, see LUCENE-2945. */ @Override public abstract String toString(); - - public boolean isFieldsSubQueryAcceptable() {return true;} - + + public boolean isFieldsSubQueryAcceptable() { + return true; + } + @Override public SrndQuery clone() { try { - return (SrndQuery)super.clone(); + return (SrndQuery) super.clone(); } catch (CloneNotSupportedException cns) { throw new Error(cns); } } - /** For subclasses of {@link SrndQuery} within the package - * {@link org.apache.lucene.queryparser.surround.query} - * it is not necessary to override this method, - * @see #toString() + /** + * For subclasses of {@link SrndQuery} within the package {@link + * org.apache.lucene.queryparser.surround.query} it is not necessary to override this method, + * + * @see #toString() */ @Override public int hashCode() { return getClass().hashCode() ^ toString().hashCode(); } - /** For subclasses of {@link SrndQuery} within the package - * {@link org.apache.lucene.queryparser.surround.query} - * it is not necessary to override this method, - * @see #toString() + /** + * For subclasses of {@link SrndQuery} within the package {@link + * org.apache.lucene.queryparser.surround.query} it is not necessary to override this method, + * + * @see #toString() */ @Override public boolean equals(Object obj) { - if (obj == null) - return false; - if (! 
getClass().equals(obj.getClass())) - return false; + if (obj == null) return false; + if (!getClass().equals(obj.getClass())) return false; return toString().equals(obj.toString()); } - } - diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndTermQuery.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndTermQuery.java index fc83bd110a0..c7cd51d6e32 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndTermQuery.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndTermQuery.java @@ -15,19 +15,16 @@ * limitations under the License. */ package org.apache.lucene.queryparser.surround.query; -import java.io.IOException; +import java.io.IOException; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.MultiTerms; import org.apache.lucene.index.Term; -import org.apache.lucene.index.TermsEnum; import org.apache.lucene.index.Terms; +import org.apache.lucene.index.TermsEnum; import org.apache.lucene.util.BytesRef; - -/** - * Simple single-term clause - */ +/** Simple single-term clause */ public class SrndTermQuery extends SimpleTerm { public SrndTermQuery(String termText, boolean quoted) { super(quoted); @@ -35,21 +32,23 @@ public class SrndTermQuery extends SimpleTerm { } private final String termText; - public String getTermText() {return termText;} - + + public String getTermText() { + return termText; + } + public Term getLuceneTerm(String fieldName) { return new Term(fieldName, getTermText()); } - + @Override - public String toStringUnquoted() {return getTermText();} - + public String toStringUnquoted() { + return getTermText(); + } + @Override - public void visitMatchingTerms( - IndexReader reader, - String fieldName, - MatchingTermVisitor mtv) throws IOException - { + public void visitMatchingTerms(IndexReader reader, String fieldName, MatchingTermVisitor mtv) + throws IOException { /* check term presence in index here for symmetry with other SimpleTerm's */ Terms terms = MultiTerms.getTerms(reader, fieldName); if (terms != null) { @@ -62,6 +61,3 @@ public class SrndTermQuery extends SimpleTerm { } } } - - - diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndTruncQuery.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndTruncQuery.java index e96e80e8492..ce7f19c4ce4 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndTruncQuery.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndTruncQuery.java @@ -15,22 +15,19 @@ * limitations under the License. 
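setWeight() and makeLuceneQueryField() above are what turn a weight parsed from the query text into a BoostQuery around the translated subquery. A sketch, assuming the parser's '^' weight syntax and the same illustrative field:

    SrndQuery weighted = QueryParser.parse("lucene^2.0"); // parser calls setWeight(2.0f)
    Query q = weighted.makeLuceneQueryField("contents", new BasicQueryFactory(1024));
    // q is a BoostQuery with boost 2.0 around the term's rewrite query.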
*/ package org.apache.lucene.queryparser.surround.query; -import org.apache.lucene.index.MultiTerms; -import org.apache.lucene.index.Term; -import org.apache.lucene.index.TermsEnum; -import org.apache.lucene.index.Terms; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.StringHelper; -import org.apache.lucene.index.IndexReader; import java.io.IOException; - -import java.util.regex.Pattern; import java.util.regex.Matcher; +import java.util.regex.Pattern; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.MultiTerms; +import org.apache.lucene.index.Term; +import org.apache.lucene.index.Terms; +import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.StringHelper; -/** - * Query that matches wildcards - */ +/** Query that matches wildcards */ public class SrndTruncQuery extends SimpleTerm { public SrndTruncQuery(String truncated, char unlimited, char mask) { super(false); /* not quoted */ @@ -39,35 +36,34 @@ public class SrndTruncQuery extends SimpleTerm { this.mask = mask; truncatedToPrefixAndPattern(); } - + private final String truncated; private final char unlimited; private final char mask; - + private String prefix; private BytesRef prefixRef; private Pattern pattern; - - - public String getTruncated() {return truncated;} - - @Override - public String toStringUnquoted() {return getTruncated();} - + public String getTruncated() { + return truncated; + } + + @Override + public String toStringUnquoted() { + return getTruncated(); + } + protected boolean matchingChar(char c) { return (c != unlimited) && (c != mask); } protected void appendRegExpForChar(char c, StringBuilder re) { - if (c == unlimited) - re.append(".*"); - else if (c == mask) - re.append("."); - else - re.append(c); + if (c == unlimited) re.append(".*"); + else if (c == mask) re.append("."); + else re.append(c); } - + protected void truncatedToPrefixAndPattern() { int i = 0; while ((i < truncated.length()) && matchingChar(truncated.charAt(i))) { @@ -75,7 +71,7 @@ public class SrndTruncQuery extends SimpleTerm { } prefix = truncated.substring(0, i); prefixRef = new BytesRef(prefix); - + StringBuilder re = new StringBuilder(); while (i < truncated.length()) { appendRegExpForChar(truncated.charAt(i), re); @@ -83,13 +79,10 @@ public class SrndTruncQuery extends SimpleTerm { } pattern = Pattern.compile(re.toString()); } - + @Override - public void visitMatchingTerms( - IndexReader reader, - String fieldName, - MatchingTermVisitor mtv) throws IOException - { + public void visitMatchingTerms(IndexReader reader, String fieldName, MatchingTermVisitor mtv) + throws IOException { int prefixLength = prefix.length(); Terms terms = MultiTerms.getTerms(reader, fieldName); if (terms != null) { @@ -107,7 +100,7 @@ public class SrndTruncQuery extends SimpleTerm { text = null; } - while(text != null) { + while (text != null) { if (text != null && StringHelper.startsWith(text, prefixRef)) { String textString = text.utf8ToString(); matcher.reset(textString.substring(prefixLength)); diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/TooManyBasicQueries.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/TooManyBasicQueries.java index cce7b87e16e..613946ff8ee 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/TooManyBasicQueries.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/TooManyBasicQueries.java @@ -15,16 
+15,14 @@ * limitations under the License. */ package org.apache.lucene.queryparser.surround.query; -import java.io.IOException; /* subclass to be usable from within Query.rewrite() */ -/** - * Exception thrown when {@link BasicQueryFactory} would exceed the limit - * of query clauses. - */ +import java.io.IOException; + +/* subclass to be usable from within Query.rewrite() */ + +/** Exception thrown when {@link BasicQueryFactory} would exceed the limit of query clauses. */ public class TooManyBasicQueries extends IOException { public TooManyBasicQueries(int maxBasicQueries) { super("Exceeded maximum of " + maxBasicQueries + " basic queries."); } } - - diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/package-info.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/package-info.java index d0c7fae8c0e..1604bcee809 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/package-info.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/package-info.java @@ -14,16 +14,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -/** - * This package contains SrndQuery and its subclasses. - *
<p>
- * The parser in the org.apache.lucene.queryparser.surround.parser package - * normally generates a SrndQuery. - * <p>
- * For searching an org.apache.lucene.search.Query is provided by - * the SrndQuery.makeLuceneQueryField method. - * For this, TermQuery, BooleanQuery and SpanQuery are used from Lucene. + +/** + * This package contains SrndQuery and its subclasses. + * + * <p>The parser in the org.apache.lucene.queryparser.surround.parser package normally generates a + * SrndQuery. + * + * <p>
    For searching an org.apache.lucene.search.Query is provided by the + * SrndQuery.makeLuceneQueryField method. For this, TermQuery, BooleanQuery and SpanQuery are used + * from Lucene. */ package org.apache.lucene.queryparser.surround.query; - diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CoreParser.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CoreParser.java index b4703120dd5..9db8d04a6a9 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CoreParser.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CoreParser.java @@ -16,6 +16,12 @@ */ package org.apache.lucene.queryparser.xml; +import java.io.InputStream; +import java.util.Locale; +import javax.xml.XMLConstants; +import javax.xml.parsers.DocumentBuilder; +import javax.xml.parsers.DocumentBuilderFactory; +import javax.xml.parsers.ParserConfigurationException; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.queryparser.classic.QueryParser; import org.apache.lucene.queryparser.xml.builders.*; @@ -27,29 +33,18 @@ import org.xml.sax.EntityResolver; import org.xml.sax.ErrorHandler; import org.xml.sax.SAXException; -import javax.xml.XMLConstants; -import javax.xml.parsers.DocumentBuilder; -import javax.xml.parsers.DocumentBuilderFactory; -import javax.xml.parsers.ParserConfigurationException; - -import java.io.InputStream; -import java.util.Locale; - -/** - * Assembles a QueryBuilder which uses only core Lucene Query objects - */ +/** Assembles a QueryBuilder which uses only core Lucene Query objects */ public class CoreParser implements QueryBuilder, SpanQueryBuilder { protected String defaultField; protected Analyzer analyzer; protected QueryParser parser; protected QueryBuilderFactory queryFactory; - final protected SpanQueryBuilderFactory spanFactory; - + protected final SpanQueryBuilderFactory spanFactory; /** - * Construct an XML parser that uses a single instance QueryParser for handling - * UserQuery tags - all parse operations are synchronised on this parser + * Construct an XML parser that uses a single instance QueryParser for handling UserQuery tags - + * all parse operations are synchronised on this parser * * @param parser A QueryParser which will be synchronized on during parse calls. */ @@ -122,8 +117,8 @@ public class CoreParser implements QueryBuilder, SpanQueryBuilder { } /** - * Parses the given stream as XML file and returns a {@link Query}. - * By default this disallows external entities for security reasons. + * Parses the given stream as XML file and returns a {@link Query}. By default this disallows + * external entities for security reasons. */ public Query parse(InputStream xmlStream) throws ParserException { return getQuery(parseXML(xmlStream).getDocumentElement()); @@ -148,19 +143,19 @@ public class CoreParser implements QueryBuilder, SpanQueryBuilder { } /** - * Returns a SAX {@link EntityResolver} to be used by {@link DocumentBuilder}. - * By default this returns {@link #DISALLOW_EXTERNAL_ENTITY_RESOLVER}, which disallows the - * expansion of external entities (for security reasons). To restore legacy behavior, - * override this method to return {@code null}. + * Returns a SAX {@link EntityResolver} to be used by {@link DocumentBuilder}. By default this + * returns {@link #DISALLOW_EXTERNAL_ENTITY_RESOLVER}, which disallows the expansion of external + * entities (for security reasons). To restore legacy behavior, override this method to return + * {@code null}. 
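A typical entry point for CoreParser: construct it once, then feed it XML streams; as the javadoc above notes, external entity expansion is refused unless getEntityResolver() is overridden. A sketch; the field name, the analyzer choice and the file name are assumptions:

    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import org.apache.lucene.analysis.standard.StandardAnalyzer;

    static Query parseXmlQuery(Path file) throws Exception {
      CoreParser coreParser = new CoreParser("contents", new StandardAnalyzer());
      try (InputStream xml = Files.newInputStream(file)) {
        return coreParser.parse(xml); // ParserException on invalid syntax/config
      }
    }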
*/ protected EntityResolver getEntityResolver() { return DISALLOW_EXTERNAL_ENTITY_RESOLVER; } /** - * Subclass and override to return a SAX {@link ErrorHandler} to be used by {@link DocumentBuilder}. - * By default this returns {@code null} so no error handler is used. - * This method can be used to redirect XML parse errors/warnings to a custom logger. + * Subclass and override to return a SAX {@link ErrorHandler} to be used by {@link + * DocumentBuilder}. By default this returns {@code null} so no error handler is used. This method + * can be used to redirect XML parse errors/warnings to a custom logger. */ protected ErrorHandler getErrorHandler() { return null; @@ -199,10 +194,13 @@ public class CoreParser implements QueryBuilder, SpanQueryBuilder { return spanFactory.getSpanQuery(e); } - public static final EntityResolver DISALLOW_EXTERNAL_ENTITY_RESOLVER = (String publicId, String systemId) -> { - throw new SAXException(String.format(Locale.ENGLISH, - "External Entity resolving unsupported: publicId=\"%s\" systemId=\"%s\"", - publicId, systemId)); - }; - + public static final EntityResolver DISALLOW_EXTERNAL_ENTITY_RESOLVER = + (String publicId, String systemId) -> { + throw new SAXException( + String.format( + Locale.ENGLISH, + "External Entity resolving unsupported: publicId=\"%s\" systemId=\"%s\"", + publicId, + systemId)); + }; } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CorePlusExtensionsParser.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CorePlusExtensionsParser.java index c366ee9e1f5..fa103e4ac47 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CorePlusExtensionsParser.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CorePlusExtensionsParser.java @@ -21,15 +21,14 @@ import org.apache.lucene.queryparser.classic.QueryParser; import org.apache.lucene.queryparser.xml.builders.FuzzyLikeThisQueryBuilder; /** - * Assembles a QueryBuilder which uses Query objects from - * Lucene's sandbox and queries - * modules in addition to core queries. + * Assembles a QueryBuilder which uses Query objects from Lucene's sandbox and + * queries modules in addition to core queries. */ public class CorePlusExtensionsParser extends CorePlusQueriesParser { /** - * Construct an XML parser that uses a single instance QueryParser for handling - * UserQuery tags - all parse operations are synchronized on this parser + * Construct an XML parser that uses a single instance QueryParser for handling UserQuery tags - + * all parse operations are synchronized on this parser * * @param parser A QueryParser which will be synchronized on during parse calls. 
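The two subclasses below differ from CoreParser only in the extra builders they register on queryFactory, so they are drop-in replacements. A sketch with the same assumed field and analyzer:

    // CorePlusQueriesParser adds "LikeThisQuery" (queries module);
    // CorePlusExtensionsParser additionally adds "FuzzyLikeThisQuery" (sandbox).
    CoreParser xmlParser = new CorePlusExtensionsParser("contents", new StandardAnalyzer());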
*/ @@ -49,6 +48,5 @@ public class CorePlusExtensionsParser extends CorePlusQueriesParser { private CorePlusExtensionsParser(String defaultField, Analyzer analyzer, QueryParser parser) { super(defaultField, analyzer, parser); queryFactory.addBuilder("FuzzyLikeThisQuery", new FuzzyLikeThisQueryBuilder(analyzer)); - } } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CorePlusQueriesParser.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CorePlusQueriesParser.java index 3243928d617..cb7c8ad12ba 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CorePlusQueriesParser.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CorePlusQueriesParser.java @@ -21,14 +21,14 @@ import org.apache.lucene.queryparser.classic.QueryParser; import org.apache.lucene.queryparser.xml.builders.LikeThisQueryBuilder; /** - * Assembles a QueryBuilder which uses Query objects from - * Lucene's queries module in addition to core queries. + * Assembles a QueryBuilder which uses Query objects from Lucene's queries module in + * addition to core queries. */ public class CorePlusQueriesParser extends CoreParser { /** - * Construct an XML parser that uses a single instance QueryParser for handling - * UserQuery tags - all parse operations are synchronized on this parser + * Construct an XML parser that uses a single instance QueryParser for handling UserQuery tags - + * all parse operations are synchronized on this parser * * @param parser A QueryParser which will be synchronized on during parse calls. */ @@ -49,6 +49,5 @@ public class CorePlusQueriesParser extends CoreParser { super(defaultField, analyzer, parser); String fields[] = {"contents"}; queryFactory.addBuilder("LikeThisQuery", new LikeThisQueryBuilder(analyzer, fields)); - } } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/DOMUtils.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/DOMUtils.java index 5d98fbaec74..7376125b823 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/DOMUtils.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/DOMUtils.java @@ -16,24 +16,21 @@ */ package org.apache.lucene.queryparser.xml; +import java.io.Reader; +import javax.xml.parsers.DocumentBuilder; +import javax.xml.parsers.DocumentBuilderFactory; import org.w3c.dom.Document; import org.w3c.dom.Element; import org.w3c.dom.Node; import org.xml.sax.InputSource; -import javax.xml.parsers.DocumentBuilder; -import javax.xml.parsers.DocumentBuilderFactory; -import java.io.Reader; -/** - * Helper methods for parsing XML - */ +/** Helper methods for parsing XML */ public class DOMUtils { public static Element getChildByTagOrFail(Element e, String name) throws ParserException { Element kid = getChildByTagName(e, name); if (null == kid) { - throw new ParserException(e.getTagName() + " missing \"" + name - + "\" child element"); + throw new ParserException(e.getTagName() + " missing \"" + name + "\" child element"); } return kid; } @@ -41,8 +38,7 @@ public class DOMUtils { public static Element getFirstChildOrFail(Element e) throws ParserException { Element kid = getFirstChildElement(e); if (null == kid) { - throw new ParserException(e.getTagName() - + " does not contain a child element"); + throw new ParserException(e.getTagName() + " does not contain a child element"); } return kid; } @@ -50,25 +46,23 @@ public class DOMUtils { public static String getAttributeOrFail(Element e, String name) throws ParserException { 
String v = e.getAttribute(name); if (null == v) { - throw new ParserException(e.getTagName() + " missing \"" + name - + "\" attribute"); + throw new ParserException(e.getTagName() + " missing \"" + name + "\" attribute"); } return v; } - public static String getAttributeWithInheritanceOrFail(Element e, String name) throws ParserException { + public static String getAttributeWithInheritanceOrFail(Element e, String name) + throws ParserException { String v = getAttributeWithInheritance(e, name); if (null == v) { - throw new ParserException(e.getTagName() + " missing \"" + name - + "\" attribute"); + throw new ParserException(e.getTagName() + " missing \"" + name + "\" attribute"); } return v; } public static String getNonBlankTextOrFail(Element e) throws ParserException { String v = getText(e); - if (null != v) - v = v.trim(); + if (null != v) v = v.trim(); if (null == v || 0 == v.length()) { throw new ParserException(e.getTagName() + " has no text"); } @@ -101,12 +95,11 @@ public class DOMUtils { Element parent = (Element) n; return getAttributeWithInheritance(parent, attributeName); } - return null; //we reached the top level of the document without finding attribute + return null; // we reached the top level of the document without finding attribute } return result; } - /* Convenience method where there is only one child Element of a given name */ public static String getChildTextByTagName(Element e, String tagName) { Element child = getChildByTagName(e, tagName); @@ -138,14 +131,13 @@ public class DOMUtils { return (result == null) || ("".equals(result)) ? deflt : Integer.parseInt(result); } - public static boolean getAttribute(Element element, String attributeName, - boolean deflt) { + public static boolean getAttribute(Element element, String attributeName, boolean deflt) { String result = element.getAttribute(attributeName); return (result == null) || ("".equals(result)) ? 
deflt : Boolean.valueOf(result); } /* Returns text of node and all child nodes - without markup */ - //MH changed to Node from Element 25/11/2005 + // MH changed to Node from Element 25/11/2005 public static String getText(Node e) { StringBuilder sb = new StringBuilder(); @@ -165,18 +157,21 @@ public class DOMUtils { private static void getTextBuffer(Node e, StringBuilder sb) { for (Node kid = e.getFirstChild(); kid != null; kid = kid.getNextSibling()) { switch (kid.getNodeType()) { - case Node.TEXT_NODE: { - sb.append(kid.getNodeValue()); - break; - } - case Node.ELEMENT_NODE: { - getTextBuffer(kid, sb); - break; - } - case Node.ENTITY_REFERENCE_NODE: { - getTextBuffer(kid, sb); - break; - } + case Node.TEXT_NODE: + { + sb.append(kid.getNodeValue()); + break; + } + case Node.ELEMENT_NODE: + { + getTextBuffer(kid, sb); + break; + } + case Node.ENTITY_REFERENCE_NODE: + { + getTextBuffer(kid, sb); + break; + } } } } @@ -193,8 +188,7 @@ public class DOMUtils { try { db = dbf.newDocumentBuilder(); - } - catch (Exception se) { + } catch (Exception se) { throw new RuntimeException("Parser configuration error", se); } @@ -202,15 +196,11 @@ public class DOMUtils { org.w3c.dom.Document doc = null; try { doc = db.parse(new InputSource(is)); - //doc = db.parse(is); - } - catch (Exception se) { + // doc = db.parse(is); + } catch (Exception se) { throw new RuntimeException("Error parsing file:" + se, se); } return doc; } } - - - diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/ParserException.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/ParserException.java index d55932c974e..643f07f1a75 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/ParserException.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/ParserException.java @@ -16,10 +16,7 @@ */ package org.apache.lucene.queryparser.xml; -/** - * Thrown when the xml queryparser encounters - * invalid syntax/configuration. - */ +/** Thrown when the xml queryparser encounters invalid syntax/configuration. */ public class ParserException extends Exception { public ParserException() { diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/QueryBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/QueryBuilder.java index 663b81d5c22..db820e07e1a 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/QueryBuilder.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/QueryBuilder.java @@ -18,9 +18,11 @@ package org.apache.lucene.queryparser.xml; import org.apache.lucene.search.Query; import org.w3c.dom.Element; + /** * Implemented by objects that produce Lucene Query objects from XML streams. Implementations are - * expected to be thread-safe so that they can be used to simultaneously parse multiple XML documents. + * expected to be thread-safe so that they can be used to simultaneously parse multiple XML + * documents. 
*/ public interface QueryBuilder { diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/QueryBuilderFactory.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/QueryBuilderFactory.java index 5b9d5f3190b..458854ca099 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/QueryBuilderFactory.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/QueryBuilderFactory.java @@ -16,14 +16,11 @@ */ package org.apache.lucene.queryparser.xml; +import java.util.HashMap; import org.apache.lucene.search.Query; import org.w3c.dom.Element; -import java.util.HashMap; - -/** - * Factory for {@link QueryBuilder} - */ +/** Factory for {@link QueryBuilder} */ public class QueryBuilderFactory implements QueryBuilder { HashMap builders = new HashMap<>(); @@ -44,5 +41,4 @@ public class QueryBuilderFactory implements QueryBuilder { public QueryBuilder getQueryBuilder(String nodeName) { return builders.get(nodeName); } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/BooleanQueryBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/BooleanQueryBuilder.java index 9c4e69e5c6e..974a97f29c6 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/BooleanQueryBuilder.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/BooleanQueryBuilder.java @@ -16,20 +16,18 @@ */ package org.apache.lucene.queryparser.xml.builders; +import org.apache.lucene.queryparser.xml.DOMUtils; +import org.apache.lucene.queryparser.xml.ParserException; +import org.apache.lucene.queryparser.xml.QueryBuilder; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.Query; -import org.apache.lucene.queryparser.xml.DOMUtils; -import org.apache.lucene.queryparser.xml.ParserException; -import org.apache.lucene.queryparser.xml.QueryBuilder; import org.w3c.dom.Element; import org.w3c.dom.Node; import org.w3c.dom.NodeList; -/** - * Builder for {@link BooleanQuery} - */ +/** Builder for {@link BooleanQuery} */ public class BooleanQueryBuilder implements QueryBuilder { private final QueryBuilder factory; @@ -39,8 +37,8 @@ public class BooleanQueryBuilder implements QueryBuilder { } /* (non-Javadoc) - * @see org.apache.lucene.xmlparser.QueryObjectBuilder#process(org.w3c.dom.Element) - */ + * @see org.apache.lucene.xmlparser.QueryObjectBuilder#process(org.w3c.dom.Element) + */ @Override public Query getQuery(Element e) throws ParserException { @@ -82,5 +80,4 @@ public class BooleanQueryBuilder implements QueryBuilder { } throw new ParserException("Invalid value for \"occurs\" attribute of clause:" + occs); } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/BoostingTermBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/BoostingTermBuilder.java index 65dcb2a8a19..200e610c5c1 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/BoostingTermBuilder.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/BoostingTermBuilder.java @@ -27,9 +27,7 @@ import org.apache.lucene.search.spans.SpanQuery; import org.apache.lucene.search.spans.SpanTermQuery; import org.w3c.dom.Element; -/** - * Builder for {@link PayloadScoreQuery} - */ +/** Builder for {@link PayloadScoreQuery} */ public class BoostingTermBuilder extends 
SpanBuilderBase { @Override @@ -38,10 +36,12 @@ public class BoostingTermBuilder extends SpanBuilderBase { String value = DOMUtils.getNonBlankTextOrFail(e); // TODO make function and decoder pluggable somehow? - SpanQuery btq = new PayloadScoreQuery(new SpanTermQuery(new Term(fieldName, value)), - new AveragePayloadFunction(), PayloadDecoder.FLOAT_DECODER); + SpanQuery btq = + new PayloadScoreQuery( + new SpanTermQuery(new Term(fieldName, value)), + new AveragePayloadFunction(), + PayloadDecoder.FLOAT_DECODER); btq = new SpanBoostQuery(btq, DOMUtils.getAttribute(e, "boost", 1.0f)); return btq; } - -} \ No newline at end of file +} diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/ConstantScoreQueryBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/ConstantScoreQueryBuilder.java index 7521514d971..6e0051a7361 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/ConstantScoreQueryBuilder.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/ConstantScoreQueryBuilder.java @@ -24,9 +24,8 @@ import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.Query; import org.w3c.dom.Element; -/** - * Builder for {@link ConstantScoreQuery} - */ + +/** Builder for {@link ConstantScoreQuery} */ public class ConstantScoreQueryBuilder implements QueryBuilder { private final QueryBuilderFactory queryFactory; @@ -46,5 +45,4 @@ public class ConstantScoreQueryBuilder implements QueryBuilder { } return q; } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/DisjunctionMaxQueryBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/DisjunctionMaxQueryBuilder.java index 0dc9e05b8e3..62dd72d83cf 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/DisjunctionMaxQueryBuilder.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/DisjunctionMaxQueryBuilder.java @@ -15,9 +15,9 @@ * limitations under the License. 
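
Aside: ConstantScoreQueryBuilder above shows the boost idiom used throughout these builders; the parsed query is wrapped in a BoostQuery only when the "boost" attribute differs from the 1.0 default. A compact sketch of the pattern (helper class invented for illustration):

    import org.apache.lucene.search.BoostQuery;
    import org.apache.lucene.search.ConstantScoreQuery;
    import org.apache.lucene.search.Query;

    // Illustrative helper, not part of the patch.
    final class BoostWrapSketch {
      static Query constantScore(Query inner, float boost) {
        Query q = new ConstantScoreQuery(inner); // all matches score identically
        if (boost != 1.0f) {
          q = new BoostQuery(q, boost); // wrap only for a non-default boost
        }
        return q;
      }
    }
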
*/ package org.apache.lucene.queryparser.xml.builders; + import java.util.ArrayList; import java.util.List; - import org.apache.lucene.queryparser.xml.DOMUtils; import org.apache.lucene.queryparser.xml.ParserException; import org.apache.lucene.queryparser.xml.QueryBuilder; @@ -28,9 +28,7 @@ import org.w3c.dom.Element; import org.w3c.dom.Node; import org.w3c.dom.NodeList; -/** - * Builder for {@link DisjunctionMaxQuery} - */ +/** Builder for {@link DisjunctionMaxQuery} */ public class DisjunctionMaxQueryBuilder implements QueryBuilder { private final QueryBuilder factory; @@ -40,12 +38,12 @@ public class DisjunctionMaxQueryBuilder implements QueryBuilder { } /* (non-Javadoc) - * @see org.apache.lucene.xmlparser.QueryObjectBuilder#process(org.w3c.dom.Element) - */ + * @see org.apache.lucene.xmlparser.QueryObjectBuilder#process(org.w3c.dom.Element) + */ @Override public Query getQuery(Element e) throws ParserException { - float tieBreaker = DOMUtils.getAttribute(e, "tieBreaker", 0.0f); + float tieBreaker = DOMUtils.getAttribute(e, "tieBreaker", 0.0f); List disjuncts = new ArrayList<>(); NodeList nl = e.getChildNodes(); diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/FuzzyLikeThisQueryBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/FuzzyLikeThisQueryBuilder.java index 3cae565ccc4..31ca7e19714 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/FuzzyLikeThisQueryBuilder.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/FuzzyLikeThisQueryBuilder.java @@ -27,9 +27,7 @@ import org.apache.lucene.search.Query; import org.w3c.dom.Element; import org.w3c.dom.NodeList; -/** - * Builder for {@link FuzzyLikeThisQuery} - */ +/** Builder for {@link FuzzyLikeThisQuery} */ public class FuzzyLikeThisQueryBuilder implements QueryBuilder { private static final int DEFAULT_MAX_NUM_TERMS = 50; @@ -53,7 +51,8 @@ public class FuzzyLikeThisQueryBuilder implements QueryBuilder { final int nlLen = nl.getLength(); for (int i = 0; i < nlLen; i++) { Element fieldElem = (Element) nl.item(i); - float minSimilarity = DOMUtils.getAttribute(fieldElem, "minSimilarity", DEFAULT_MIN_SIMILARITY); + float minSimilarity = + DOMUtils.getAttribute(fieldElem, "minSimilarity", DEFAULT_MIN_SIMILARITY); int prefixLength = DOMUtils.getAttribute(fieldElem, "prefixLength", DEFAULT_PREFIX_LENGTH); String fieldName = DOMUtils.getAttributeWithInheritance(fieldElem, "fieldName"); @@ -68,5 +67,4 @@ public class FuzzyLikeThisQueryBuilder implements QueryBuilder { } return q; } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/LikeThisQueryBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/LikeThisQueryBuilder.java index 2812043c50a..de45d724cbd 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/LikeThisQueryBuilder.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/LikeThisQueryBuilder.java @@ -19,26 +19,24 @@ package org.apache.lucene.queryparser.xml.builders; import java.io.IOException; import java.util.HashSet; import java.util.Set; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.queries.mlt.MoreLikeThisQuery; +import org.apache.lucene.queryparser.xml.DOMUtils; +import org.apache.lucene.queryparser.xml.ParserException; import 
org.apache.lucene.queryparser.xml.QueryBuilder; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.Query; -import org.apache.lucene.queryparser.xml.DOMUtils; -import org.apache.lucene.queryparser.xml.ParserException; import org.w3c.dom.Element; -/** - * Builder for {@link MoreLikeThisQuery} - */ +/** Builder for {@link MoreLikeThisQuery} */ public class LikeThisQueryBuilder implements QueryBuilder { private static final int DEFAULT_MAX_QUERY_TERMS = 20; private static final int DEFAULT_MIN_TERM_FREQUENCY = 1; - private static final float DEFAULT_PERCENT_TERMS_TO_MATCH = 30; //default is a 3rd of selected terms must match + private static final float DEFAULT_PERCENT_TERMS_TO_MATCH = + 30; // default is a 3rd of selected terms must match private final Analyzer analyzer; private final String defaultFieldNames[]; @@ -49,23 +47,23 @@ public class LikeThisQueryBuilder implements QueryBuilder { } /* (non-Javadoc) - * @see org.apache.lucene.xmlparser.QueryObjectBuilder#process(org.w3c.dom.Element) - */ + * @see org.apache.lucene.xmlparser.QueryObjectBuilder#process(org.w3c.dom.Element) + */ @Override public Query getQuery(Element e) throws ParserException { - String fieldsList = e.getAttribute("fieldNames"); //a comma-delimited list of fields + String fieldsList = e.getAttribute("fieldNames"); // a comma-delimited list of fields String fields[] = defaultFieldNames; if ((fieldsList != null) && (fieldsList.trim().length() > 0)) { fields = fieldsList.trim().split(","); - //trim the fieldnames + // trim the fieldnames for (int i = 0; i < fields.length; i++) { fields[i] = fields[i].trim(); } } - //Parse any "stopWords" attribute - //TODO MoreLikeThis needs to ideally have per-field stopWords lists - until then - //I use all analyzers/fields to generate multi-field compatible stop list + // Parse any "stopWords" attribute + // TODO MoreLikeThis needs to ideally have per-field stopWords lists - until then + // I use all analyzers/fields to generate multi-field compatible stop list String stopWords = e.getAttribute("stopWords"); Set stopWordsSet = null; if ((stopWords != null) && (fields != null)) { @@ -79,17 +77,21 @@ public class LikeThisQueryBuilder implements QueryBuilder { } ts.end(); } catch (IOException ioe) { - throw new ParserException("IoException parsing stop words list in " - + getClass().getName() + ":" + ioe.getLocalizedMessage()); + throw new ParserException( + "IoException parsing stop words list in " + + getClass().getName() + + ":" + + ioe.getLocalizedMessage()); } } } - MoreLikeThisQuery mlt = new MoreLikeThisQuery(DOMUtils.getText(e), fields, analyzer, fields[0]); mlt.setMaxQueryTerms(DOMUtils.getAttribute(e, "maxQueryTerms", DEFAULT_MAX_QUERY_TERMS)); - mlt.setMinTermFrequency(DOMUtils.getAttribute(e, "minTermFrequency", DEFAULT_MIN_TERM_FREQUENCY)); - mlt.setPercentTermsToMatch(DOMUtils.getAttribute(e, "percentTermsToMatch", DEFAULT_PERCENT_TERMS_TO_MATCH) / 100); + mlt.setMinTermFrequency( + DOMUtils.getAttribute(e, "minTermFrequency", DEFAULT_MIN_TERM_FREQUENCY)); + mlt.setPercentTermsToMatch( + DOMUtils.getAttribute(e, "percentTermsToMatch", DEFAULT_PERCENT_TERMS_TO_MATCH) / 100); mlt.setStopWords(stopWordsSet); int minDocFreq = DOMUtils.getAttribute(e, "minDocFreq", -1); if (minDocFreq >= 0) { @@ -103,5 +105,4 @@ public class LikeThisQueryBuilder implements QueryBuilder { } return q; } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/MatchAllDocsQueryBuilder.java 
b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/MatchAllDocsQueryBuilder.java index 92c28921a72..005b32873d4 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/MatchAllDocsQueryBuilder.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/MatchAllDocsQueryBuilder.java @@ -16,16 +16,15 @@ */ package org.apache.lucene.queryparser.xml.builders; -import org.apache.lucene.search.MatchAllDocsQuery; -import org.apache.lucene.search.Query; import org.apache.lucene.queryparser.xml.ParserException; import org.apache.lucene.queryparser.xml.QueryBuilder; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; import org.w3c.dom.Element; -/** - * Builder for {@link MatchAllDocsQuery} - */ + +/** Builder for {@link MatchAllDocsQuery} */ public class MatchAllDocsQueryBuilder implements QueryBuilder { - + @Override public Query getQuery(Element e) throws ParserException { return new MatchAllDocsQuery(); diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/PointRangeQueryBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/PointRangeQueryBuilder.java index 09cef86ee6f..25df87628fa 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/PointRangeQueryBuilder.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/PointRangeQueryBuilder.java @@ -16,7 +16,6 @@ */ package org.apache.lucene.queryparser.xml.builders; -import org.apache.lucene.search.Query; import org.apache.lucene.document.DoublePoint; import org.apache.lucene.document.FloatPoint; import org.apache.lucene.document.IntPoint; @@ -25,11 +24,13 @@ import org.apache.lucene.index.PointValues; import org.apache.lucene.queryparser.xml.DOMUtils; import org.apache.lucene.queryparser.xml.ParserException; import org.apache.lucene.queryparser.xml.QueryBuilder; +import org.apache.lucene.search.Query; import org.w3c.dom.Element; /** * Creates a range query across 1D {@link PointValues}. The table below specifies the required * attributes and the defaults if optional attributes are omitted: + * * * * @@ -63,10 +64,9 @@ import org.w3c.dom.Element; * * *
    [garbled javadoc table: extraction stripped the HTML of the "supported attributes" table, which lists each supported attribute with its default; only the cell value "int" (the default for the type attribute) survived]
    - * A {@link ParserException} will be thrown if an error occurs parsing the - * supplied lowerTerm or upperTerm into the numeric type - * specified by type. + * + *

    A {@link ParserException} will be thrown if an error occurs parsing the supplied + * lowerTerm or upperTerm into the numeric type specified by type. */ public class PointRangeQueryBuilder implements QueryBuilder { @@ -79,19 +79,23 @@ public class PointRangeQueryBuilder implements QueryBuilder { String type = DOMUtils.getAttribute(e, "type", "int"); try { if (type.equalsIgnoreCase("int")) { - return IntPoint.newRangeQuery(field, + return IntPoint.newRangeQuery( + field, (lowerTerm == null ? Integer.MIN_VALUE : Integer.parseInt(lowerTerm)), (upperTerm == null ? Integer.MAX_VALUE : Integer.parseInt(upperTerm))); } else if (type.equalsIgnoreCase("long")) { - return LongPoint.newRangeQuery(field, + return LongPoint.newRangeQuery( + field, (lowerTerm == null ? Long.MIN_VALUE : Long.parseLong(lowerTerm)), (upperTerm == null ? Long.MAX_VALUE : Long.parseLong(upperTerm))); } else if (type.equalsIgnoreCase("double")) { - return DoublePoint.newRangeQuery(field, + return DoublePoint.newRangeQuery( + field, (lowerTerm == null ? Double.NEGATIVE_INFINITY : Double.parseDouble(lowerTerm)), (upperTerm == null ? Double.POSITIVE_INFINITY : Double.parseDouble(upperTerm))); } else if (type.equalsIgnoreCase("float")) { - return FloatPoint.newRangeQuery(field, + return FloatPoint.newRangeQuery( + field, (lowerTerm == null ? Float.NEGATIVE_INFINITY : Float.parseFloat(lowerTerm)), (upperTerm == null ? Float.POSITIVE_INFINITY : Float.parseFloat(upperTerm))); } else { diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/RangeQueryBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/RangeQueryBuilder.java index 8b054245d4e..c2afee2fc67 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/RangeQueryBuilder.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/RangeQueryBuilder.java @@ -23,9 +23,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermRangeQuery; import org.w3c.dom.Element; -/** - * Builder for {@link TermRangeQuery} - */ +/** Builder for {@link TermRangeQuery} */ public class RangeQueryBuilder implements QueryBuilder { @Override @@ -36,7 +34,7 @@ public class RangeQueryBuilder implements QueryBuilder { String upperTerm = e.getAttribute("upperTerm"); boolean includeLower = DOMUtils.getAttribute(e, "includeLower", true); boolean includeUpper = DOMUtils.getAttribute(e, "includeUpper", true); - return TermRangeQuery.newStringRange(fieldName, lowerTerm, upperTerm, includeLower, includeUpper); + return TermRangeQuery.newStringRange( + fieldName, lowerTerm, upperTerm, includeLower, includeUpper); } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanBuilderBase.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanBuilderBase.java index 48b7450aede..4733fe10d70 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanBuilderBase.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanBuilderBase.java @@ -16,18 +16,16 @@ */ package org.apache.lucene.queryparser.xml.builders; +import org.apache.lucene.queryparser.xml.ParserException; import org.apache.lucene.search.Query; import org.apache.lucene.search.spans.SpanQuery; // javadocs -import org.apache.lucene.queryparser.xml.ParserException; import org.w3c.dom.Element; -/** - * Base class for building {@link SpanQuery}s - */ + +/** Base class for building {@link SpanQuery}s */ public 
abstract class SpanBuilderBase implements SpanQueryBuilder { - + @Override public Query getQuery(Element e) throws ParserException { return getSpanQuery(e); } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanFirstBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanFirstBuilder.java index 52475113830..15bf0ef5302 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanFirstBuilder.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanFirstBuilder.java @@ -16,15 +16,14 @@ */ package org.apache.lucene.queryparser.xml.builders; +import org.apache.lucene.queryparser.xml.DOMUtils; +import org.apache.lucene.queryparser.xml.ParserException; import org.apache.lucene.search.spans.SpanBoostQuery; import org.apache.lucene.search.spans.SpanFirstQuery; import org.apache.lucene.search.spans.SpanQuery; -import org.apache.lucene.queryparser.xml.DOMUtils; -import org.apache.lucene.queryparser.xml.ParserException; import org.w3c.dom.Element; -/** - * Builder for {@link SpanFirstQuery} - */ + +/** Builder for {@link SpanFirstQuery} */ public class SpanFirstBuilder extends SpanBuilderBase { private final SpanQueryBuilder factory; @@ -44,5 +43,4 @@ public class SpanFirstBuilder extends SpanBuilderBase { float boost = DOMUtils.getAttribute(e, "boost", 1.0f); return new SpanBoostQuery(sfq, boost); } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanNearBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanNearBuilder.java index c6b6c813b41..06055104577 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanNearBuilder.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanNearBuilder.java @@ -16,19 +16,17 @@ */ package org.apache.lucene.queryparser.xml.builders; +import java.util.ArrayList; +import java.util.List; +import org.apache.lucene.queryparser.xml.DOMUtils; +import org.apache.lucene.queryparser.xml.ParserException; import org.apache.lucene.search.spans.SpanBoostQuery; import org.apache.lucene.search.spans.SpanNearQuery; import org.apache.lucene.search.spans.SpanQuery; -import org.apache.lucene.queryparser.xml.DOMUtils; -import org.apache.lucene.queryparser.xml.ParserException; import org.w3c.dom.Element; import org.w3c.dom.Node; -import java.util.ArrayList; -import java.util.List; -/** - * Builder for {@link SpanNearQuery} - */ +/** Builder for {@link SpanNearQuery} */ public class SpanNearBuilder extends SpanBuilderBase { private final SpanQueryBuilder factory; @@ -53,5 +51,4 @@ public class SpanNearBuilder extends SpanBuilderBase { float boost = DOMUtils.getAttribute(e, "boost", 1.0f); return new SpanBoostQuery(snq, boost); } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanNotBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanNotBuilder.java index 9bac89db7f4..fbe2bffcc21 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanNotBuilder.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanNotBuilder.java @@ -16,15 +16,14 @@ */ package org.apache.lucene.queryparser.xml.builders; +import org.apache.lucene.queryparser.xml.DOMUtils; +import org.apache.lucene.queryparser.xml.ParserException; import org.apache.lucene.search.spans.SpanBoostQuery; import 
org.apache.lucene.search.spans.SpanNotQuery; import org.apache.lucene.search.spans.SpanQuery; -import org.apache.lucene.queryparser.xml.DOMUtils; -import org.apache.lucene.queryparser.xml.ParserException; import org.w3c.dom.Element; -/** - * Builder for {@link SpanNotQuery} - */ + +/** Builder for {@link SpanNotQuery} */ public class SpanNotBuilder extends SpanBuilderBase { private final SpanQueryBuilder factory; @@ -49,5 +48,4 @@ public class SpanNotBuilder extends SpanBuilderBase { float boost = DOMUtils.getAttribute(e, "boost", 1.0f); return new SpanBoostQuery(snq, boost); } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrBuilder.java index a5d5f6a9934..01ce8aee6c8 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrBuilder.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrBuilder.java @@ -16,19 +16,17 @@ */ package org.apache.lucene.queryparser.xml.builders; +import java.util.ArrayList; +import java.util.List; +import org.apache.lucene.queryparser.xml.DOMUtils; +import org.apache.lucene.queryparser.xml.ParserException; import org.apache.lucene.search.spans.SpanBoostQuery; import org.apache.lucene.search.spans.SpanOrQuery; import org.apache.lucene.search.spans.SpanQuery; -import org.apache.lucene.queryparser.xml.DOMUtils; -import org.apache.lucene.queryparser.xml.ParserException; import org.w3c.dom.Element; import org.w3c.dom.Node; -import java.util.ArrayList; -import java.util.List; -/** - * Builder for {@link SpanOrQuery} - */ +/** Builder for {@link SpanOrQuery} */ public class SpanOrBuilder extends SpanBuilderBase { private final SpanQueryBuilder factory; @@ -51,5 +49,4 @@ public class SpanOrBuilder extends SpanBuilderBase { float boost = DOMUtils.getAttribute(e, "boost", 1.0f); return new SpanBoostQuery(soq, boost); } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrTermsBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrTermsBuilder.java index d30653e2982..5475758d49b 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrTermsBuilder.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrTermsBuilder.java @@ -16,26 +16,23 @@ */ package org.apache.lucene.queryparser.xml.builders; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute; import org.apache.lucene.index.Term; +import org.apache.lucene.queryparser.xml.DOMUtils; +import org.apache.lucene.queryparser.xml.ParserException; import org.apache.lucene.search.spans.SpanBoostQuery; import org.apache.lucene.search.spans.SpanOrQuery; import org.apache.lucene.search.spans.SpanQuery; import org.apache.lucene.search.spans.SpanTermQuery; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.queryparser.xml.DOMUtils; -import org.apache.lucene.queryparser.xml.ParserException; import org.w3c.dom.Element; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -/** - * Builder that analyzes the text into a {@link SpanOrQuery} - */ +/** Builder that analyzes the text into a {@link SpanOrQuery} */ public class SpanOrTermsBuilder extends SpanBuilderBase 
{ private final Analyzer analyzer; @@ -55,17 +52,16 @@ public class SpanOrTermsBuilder extends SpanBuilderBase { TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class); ts.reset(); while (ts.incrementToken()) { - SpanTermQuery stq = new SpanTermQuery(new Term(fieldName, BytesRef.deepCopyOf(termAtt.getBytesRef()))); + SpanTermQuery stq = + new SpanTermQuery(new Term(fieldName, BytesRef.deepCopyOf(termAtt.getBytesRef()))); clausesList.add(stq); } ts.end(); SpanOrQuery soq = new SpanOrQuery(clausesList.toArray(new SpanQuery[clausesList.size()])); float boost = DOMUtils.getAttribute(e, "boost", 1.0f); return new SpanBoostQuery(soq, boost); - } - catch (IOException ioe) { + } catch (IOException ioe) { throw new ParserException("IOException parsing value:" + value); } } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanPositionRangeBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanPositionRangeBuilder.java index a2097ca3aae..9cb544c90c2 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanPositionRangeBuilder.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanPositionRangeBuilder.java @@ -23,9 +23,7 @@ import org.apache.lucene.search.spans.SpanPositionRangeQuery; import org.apache.lucene.search.spans.SpanQuery; import org.w3c.dom.Element; -/** - * Builder for {@link SpanPositionRangeQuery} - */ +/** Builder for {@link SpanPositionRangeQuery} */ public class SpanPositionRangeBuilder extends SpanBuilderBase { private final SpanQueryBuilder factory; @@ -46,5 +44,4 @@ public class SpanPositionRangeBuilder extends SpanBuilderBase { float boost = DOMUtils.getAttribute(e, "boost", 1.0f); return new SpanBoostQuery(query, boost); } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanQueryBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanQueryBuilder.java index 74d03b33304..ee3a4515403 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanQueryBuilder.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanQueryBuilder.java @@ -15,14 +15,13 @@ * limitations under the License. */ package org.apache.lucene.queryparser.xml.builders; -import org.apache.lucene.search.spans.SpanQuery; + import org.apache.lucene.queryparser.xml.ParserException; import org.apache.lucene.queryparser.xml.QueryBuilder; +import org.apache.lucene.search.spans.SpanQuery; import org.w3c.dom.Element; -/** - * Interface for retrieving a {@link SpanQuery}. - */ +/** Interface for retrieving a {@link SpanQuery}. 
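
Aside: the loop in SpanOrTermsBuilder.getSpanQuery above is the standard recipe for turning analyzed text into one span clause per token; the deepCopyOf call matters because the token stream reuses a single BytesRef for every token. A self-contained hedged sketch (class and method names invented):

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.spans.SpanQuery;
    import org.apache.lucene.search.spans.SpanTermQuery;
    import org.apache.lucene.util.BytesRef;

    final class SpanTermsSketch {
      static List<SpanQuery> analyzeToSpanTerms(Analyzer analyzer, String field, String text)
          throws IOException {
        List<SpanQuery> clauses = new ArrayList<>();
        try (TokenStream ts = analyzer.tokenStream(field, text)) {
          TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class);
          ts.reset();
          while (ts.incrementToken()) {
            // deep-copy: the attribute reuses its BytesRef between tokens
            clauses.add(
                new SpanTermQuery(new Term(field, BytesRef.deepCopyOf(termAtt.getBytesRef()))));
          }
          ts.end();
        }
        return clauses;
      }
    }
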
*/ public interface SpanQueryBuilder extends QueryBuilder { public SpanQuery getSpanQuery(Element e) throws ParserException; diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanQueryBuilderFactory.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanQueryBuilderFactory.java index 6f41428c6a8..d72f6cf4849 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanQueryBuilderFactory.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanQueryBuilderFactory.java @@ -16,16 +16,14 @@ */ package org.apache.lucene.queryparser.xml.builders; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.spans.SpanQuery; -import org.apache.lucene.queryparser.xml.ParserException; -import org.w3c.dom.Element; - import java.util.HashMap; import java.util.Map; -/** - * Factory for {@link SpanQueryBuilder}s - */ +import org.apache.lucene.queryparser.xml.ParserException; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.spans.SpanQuery; +import org.w3c.dom.Element; + +/** Factory for {@link SpanQueryBuilder}s */ public class SpanQueryBuilderFactory implements SpanQueryBuilder { private final Map builders = new HashMap<>(); @@ -47,5 +45,4 @@ public class SpanQueryBuilderFactory implements SpanQueryBuilder { } return builder.getSpanQuery(e); } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanTermBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanTermBuilder.java index 560a8accf02..907ff239773 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanTermBuilder.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanTermBuilder.java @@ -17,15 +17,14 @@ package org.apache.lucene.queryparser.xml.builders; import org.apache.lucene.index.Term; +import org.apache.lucene.queryparser.xml.DOMUtils; +import org.apache.lucene.queryparser.xml.ParserException; import org.apache.lucene.search.spans.SpanBoostQuery; import org.apache.lucene.search.spans.SpanQuery; import org.apache.lucene.search.spans.SpanTermQuery; -import org.apache.lucene.queryparser.xml.DOMUtils; -import org.apache.lucene.queryparser.xml.ParserException; import org.w3c.dom.Element; -/** - * Builder for {@link SpanTermQuery} - */ + +/** Builder for {@link SpanTermQuery} */ public class SpanTermBuilder extends SpanBuilderBase { @Override @@ -37,5 +36,4 @@ public class SpanTermBuilder extends SpanBuilderBase { float boost = DOMUtils.getAttribute(e, "boost", 1.0f); return new SpanBoostQuery(stq, boost); } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermQueryBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermQueryBuilder.java index 221c596d1e0..22353ddd84a 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermQueryBuilder.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermQueryBuilder.java @@ -17,16 +17,15 @@ package org.apache.lucene.queryparser.xml.builders; import org.apache.lucene.index.Term; -import org.apache.lucene.search.BoostQuery; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.TermQuery; import org.apache.lucene.queryparser.xml.DOMUtils; import org.apache.lucene.queryparser.xml.ParserException; import org.apache.lucene.queryparser.xml.QueryBuilder; +import 
org.apache.lucene.search.BoostQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermQuery; import org.w3c.dom.Element; -/** - * Builder for {@link TermQuery} - */ + +/** Builder for {@link TermQuery} */ public class TermQueryBuilder implements QueryBuilder { @Override @@ -40,5 +39,4 @@ public class TermQueryBuilder implements QueryBuilder { } return tq; } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsQueryBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsQueryBuilder.java index 0b295c079a6..547a80fd5a2 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsQueryBuilder.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsQueryBuilder.java @@ -16,23 +16,22 @@ */ package org.apache.lucene.queryparser.xml.builders; +import java.io.IOException; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute; import org.apache.lucene.index.Term; +import org.apache.lucene.queryparser.xml.DOMUtils; +import org.apache.lucene.queryparser.xml.ParserException; +import org.apache.lucene.queryparser.xml.QueryBuilder; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.queryparser.xml.DOMUtils; -import org.apache.lucene.queryparser.xml.ParserException; -import org.apache.lucene.queryparser.xml.QueryBuilder; import org.w3c.dom.Element; -import java.io.IOException; - /** * Builds a BooleanQuery from all of the terms found in the XML element using the choice of analyzer */ @@ -60,8 +59,7 @@ public class TermsQueryBuilder implements QueryBuilder { bq.add(new BooleanClause(new TermQuery(term), BooleanClause.Occur.SHOULD)); } ts.end(); - } - catch (IOException ioe) { + } catch (IOException ioe) { throw new RuntimeException("Error constructing terms from index:" + ioe); } @@ -69,5 +67,4 @@ public class TermsQueryBuilder implements QueryBuilder { float boost = DOMUtils.getAttribute(e, "boost", 1.0f); return new BoostQuery(q, boost); } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/UserInputQueryBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/UserInputQueryBuilder.java index ea446429a10..9254284940a 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/UserInputQueryBuilder.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/UserInputQueryBuilder.java @@ -19,18 +19,17 @@ package org.apache.lucene.queryparser.xml.builders; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.queryparser.classic.ParseException; import org.apache.lucene.queryparser.classic.QueryParser; -import org.apache.lucene.search.BoostQuery; -import org.apache.lucene.search.Query; import org.apache.lucene.queryparser.xml.DOMUtils; import org.apache.lucene.queryparser.xml.ParserException; import org.apache.lucene.queryparser.xml.QueryBuilder; +import org.apache.lucene.search.BoostQuery; +import org.apache.lucene.search.Query; import org.w3c.dom.Element; /** - * UserInputQueryBuilder uses 1 of 2 strategies for thread-safe parsing: - * 1) Synchronizing access to "parse" calls on a 
previously supplied QueryParser - * or.. - * 2) creating a new QueryParser object for each parse request + * UserInputQueryBuilder uses 1 of 2 strategies for thread-safe parsing: 1) Synchronizing access to + * "parse" calls on a previously supplied QueryParser or.. 2) creating a new QueryParser object for + * each parse request */ public class UserInputQueryBuilder implements QueryBuilder { @@ -53,8 +52,8 @@ public class UserInputQueryBuilder implements QueryBuilder { } /* (non-Javadoc) - * @see org.apache.lucene.xmlparser.QueryObjectBuilder#process(org.w3c.dom.Element) - */ + * @see org.apache.lucene.xmlparser.QueryObjectBuilder#process(org.w3c.dom.Element) + */ @Override public Query getQuery(Element e) throws ParserException { @@ -62,13 +61,13 @@ public class UserInputQueryBuilder implements QueryBuilder { try { Query q = null; if (unSafeParser != null) { - //synchronize on unsafe parser + // synchronize on unsafe parser synchronized (unSafeParser) { q = unSafeParser.parse(text); } } else { String fieldName = DOMUtils.getAttribute(e, "fieldName", defaultField); - //Create new parser + // Create new parser QueryParser parser = createQueryParser(fieldName, analyzer); q = parser.parse(text); } @@ -87,5 +86,4 @@ public class UserInputQueryBuilder implements QueryBuilder { protected QueryParser createQueryParser(String fieldName, Analyzer analyzer) { return new QueryParser(fieldName, analyzer); } - } diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/package-info.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/package-info.java index 0d570fb86ae..d5875784ccb 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/package-info.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/package-info.java @@ -14,9 +14,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -/** - * XML Parser factories for different Lucene Query/Filters. - */ -package org.apache.lucene.queryparser.xml.builders; +/** XML Parser factories for different Lucene Query/Filters. */ +package org.apache.lucene.queryparser.xml.builders; diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/package-info.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/package-info.java index 2b1abee54f3..75a7fcb628a 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/package-info.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/package-info.java @@ -14,9 +14,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -/** - * Parser that produces Lucene Query objects from XML streams. - */ -package org.apache.lucene.queryparser.xml; +/** Parser that produces Lucene Query objects from XML streams. 
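
Aside: the two strategies described in the UserInputQueryBuilder javadoc above reduce to a shared parser behind a lock versus a throwaway parser per call, since QueryParser instances are not thread-safe on their own. A hedged sketch of both (class and method names invented):

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.queryparser.classic.ParseException;
    import org.apache.lucene.queryparser.classic.QueryParser;
    import org.apache.lucene.search.Query;

    final class ParseStrategiesSketch {
      private final QueryParser sharedParser; // strategy 1: shared instance, guarded
      private final Analyzer analyzer; // strategy 2: fresh parser per request

      ParseStrategiesSketch(QueryParser sharedParser, Analyzer analyzer) {
        this.sharedParser = sharedParser;
        this.analyzer = analyzer;
      }

      Query parseShared(String text) throws ParseException {
        synchronized (sharedParser) { // serialize access to the unsafe parser
          return sharedParser.parse(text);
        }
      }

      Query parseFresh(String field, String text) throws ParseException {
        return new QueryParser(field, analyzer).parse(text); // no shared state, no lock
      }
    }
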
*/ +package org.apache.lucene.queryparser.xml; diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiAnalyzer.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiAnalyzer.java index 15dd3c5b3a4..0f9d9fd4e73 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiAnalyzer.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiAnalyzer.java @@ -18,7 +18,6 @@ package org.apache.lucene.queryparser.classic; import java.io.IOException; import java.util.Objects; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; @@ -33,10 +32,8 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryVisitor; /** - * Test QueryParser's ability to deal with Analyzers that return more - * than one token per position or that return tokens with a position - * increment > 1. - * + * Test QueryParser's ability to deal with Analyzers that return more than one token per position or + * that return tokens with a position increment > 1. */ public class TestMultiAnalyzer extends BaseTokenStreamTestCase { @@ -57,24 +54,30 @@ public class TestMultiAnalyzer extends BaseTokenStreamTestCase { assertEquals("Synonym(multi multi2) foo", qp.parse("multi foo").toString()); assertEquals("foo Synonym(multi multi2)", qp.parse("foo multi").toString()); assertEquals("Synonym(multi multi2) Synonym(multi multi2)", qp.parse("multi multi").toString()); - assertEquals("+(foo Synonym(multi multi2)) +(bar Synonym(multi multi2))", + assertEquals( + "+(foo Synonym(multi multi2)) +(bar Synonym(multi multi2))", qp.parse("+(foo multi) +(bar multi)").toString()); - assertEquals("+(foo Synonym(multi multi2)) field:\"bar (multi multi2)\"", + assertEquals( + "+(foo Synonym(multi multi2)) field:\"bar (multi multi2)\"", qp.parse("+(foo multi) field:\"bar multi\"").toString()); // phrases: assertEquals("\"(multi multi2) foo\"", qp.parse("\"multi foo\"").toString()); assertEquals("\"foo (multi multi2)\"", qp.parse("\"foo multi\"").toString()); - assertEquals("\"foo (multi multi2) foobar (multi multi2)\"", + assertEquals( + "\"foo (multi multi2) foobar (multi multi2)\"", qp.parse("\"foo multi foobar multi\"").toString()); // fields: - assertEquals("Synonym(field:multi field:multi2) field:foo", qp.parse("field:multi field:foo").toString()); + assertEquals( + "Synonym(field:multi field:multi2) field:foo", + qp.parse("field:multi field:foo").toString()); assertEquals("field:\"(multi multi2) foo\"", qp.parse("field:\"multi foo\"").toString()); // three tokens at one position: assertEquals("Synonym(multi2 multi3 triplemulti)", qp.parse("triplemulti").toString()); - assertEquals("foo Synonym(multi2 multi3 triplemulti) foobar", + assertEquals( + "foo Synonym(multi2 multi3 triplemulti) foobar", qp.parse("foo triplemulti foobar").toString()); // phrase with non-default slop: @@ -85,16 +88,15 @@ public class TestMultiAnalyzer extends BaseTokenStreamTestCase { // phrase after changing default slop qp.setPhraseSlop(99); - assertEquals("\"(multi multi2) foo\"~99 bar", - qp.parse("\"multi foo\" bar").toString()); - assertEquals("\"(multi multi2) foo\"~99 \"foo bar\"~2", - qp.parse("\"multi foo\" \"foo bar\"~2").toString()); + assertEquals("\"(multi multi2) foo\"~99 bar", qp.parse("\"multi foo\" bar").toString()); + assertEquals( + "\"(multi multi2) foo\"~99 \"foo bar\"~2", + qp.parse("\"multi foo\" \"foo 
bar\"~2").toString()); qp.setPhraseSlop(0); // non-default operator: qp.setDefaultOperator(QueryParserBase.AND_OPERATOR); assertEquals("+Synonym(multi multi2) +foo", qp.parse("multi foo").toString()); - } public void testMultiAnalyzerWithSubclassOfQueryParser() throws ParseException { @@ -104,16 +106,12 @@ public class TestMultiAnalyzer extends BaseTokenStreamTestCase { // direct call to (super's) getFieldQuery to demonstrate differnce // between phrase and multiphrase with modified default slop - assertEquals("\"foo bar\"~99", - qp.getSuperFieldQuery("","foo bar", true).toString()); - assertEquals("\"(multi multi2) bar\"~99", - qp.getSuperFieldQuery("","multi bar", true).toString()); - + assertEquals("\"foo bar\"~99", qp.getSuperFieldQuery("", "foo bar", true).toString()); + assertEquals( + "\"(multi multi2) bar\"~99", qp.getSuperFieldQuery("", "multi bar", true).toString()); // ask sublcass to parse phrase with modified default slop - assertEquals("\"(multi multi2) foo\"~99 bar", - qp.parse("\"multi foo\" bar").toString()); - + assertEquals("\"(multi multi2) foo\"~99 bar", qp.parse("\"multi foo\" bar").toString()); } public void testPosIncrementAnalyzer() throws ParseException { @@ -123,8 +121,8 @@ public class TestMultiAnalyzer extends BaseTokenStreamTestCase { } /** - * Expands "multi" to "multi" and "multi2", both at the same position, - * and expands "triplemulti" to "triplemulti", "multi3", and "multi2". + * Expands "multi" to "multi" and "multi2", both at the same position, and expands "triplemulti" + * to "triplemulti", "multi3", and "multi2". */ private static class MultiAnalyzer extends Analyzer { @@ -157,7 +155,7 @@ public class TestMultiAnalyzer extends BaseTokenStreamTestCase { @Override public final boolean incrementToken() throws java.io.IOException { if (multiToken > 0) { - termAtt.setEmpty().append("multi"+(multiToken+1)); + termAtt.setEmpty().append("multi" + (multiToken + 1)); offsetAtt.setOffset(prevStartOffset, prevEndOffset); typeAtt.setType(prevType); posIncrAtt.setPositionIncrement(0); @@ -194,8 +192,8 @@ public class TestMultiAnalyzer extends BaseTokenStreamTestCase { } /** - * Analyzes "the quick brown" as: quick(incr=2) brown(incr=1). - * Does not work correctly for input other than "the quick brown ...". + * Analyzes "the quick brown" as: quick(incr=2) brown(incr=1). Does not work correctly for input + * other than "the quick brown ...". 
*/ private static class PosIncrementAnalyzer extends Analyzer { @@ -218,8 +216,8 @@ public class TestMultiAnalyzer extends BaseTokenStreamTestCase { } @Override - public final boolean incrementToken () throws java.io.IOException { - while(input.incrementToken()) { + public final boolean incrementToken() throws java.io.IOException { + while (input.incrementToken()) { if (termAtt.toString().equals("the")) { // stopword, do nothing } else if (termAtt.toString().equals("quick")) { @@ -234,56 +232,52 @@ public class TestMultiAnalyzer extends BaseTokenStreamTestCase { } } - /** a very simple subclass of QueryParser */ - private final static class DumbQueryParser extends QueryParser { + /** a very simple subclass of QueryParser */ + private static final class DumbQueryParser extends QueryParser { - public DumbQueryParser(String f, Analyzer a) { - super(f, a); - } - - /** expose super's version */ - public Query getSuperFieldQuery(String f, String t, boolean quoted) - throws ParseException { - return super.getFieldQuery(f,t,quoted); - } - /** wrap super's version */ - @Override - protected Query getFieldQuery(String f, String t, boolean quoted) - throws ParseException { - return new DumbQueryWrapper(getSuperFieldQuery(f,t,quoted)); - } + public DumbQueryParser(String f, Analyzer a) { + super(f, a); } - /** - * A very simple wrapper to prevent instanceof checks but uses - * the toString of the query it wraps. - */ - private final static class DumbQueryWrapper extends Query { - private Query q; + /** expose super's version */ + public Query getSuperFieldQuery(String f, String t, boolean quoted) throws ParseException { + return super.getFieldQuery(f, t, quoted); + } + /** wrap super's version */ + @Override + protected Query getFieldQuery(String f, String t, boolean quoted) throws ParseException { + return new DumbQueryWrapper(getSuperFieldQuery(f, t, quoted)); + } + } - public DumbQueryWrapper(Query q) { - this.q = Objects.requireNonNull(q); - } - @Override - public String toString(String f) { - return q.toString(f); - } + /** + * A very simple wrapper to prevent instanceof checks but uses the toString of the query it wraps. 
+ */ + private static final class DumbQueryWrapper extends Query { + private Query q; - @Override - public void visit(QueryVisitor visitor) { - q.visit(visitor); - } - - @Override - public boolean equals(Object other) { - return sameClassAs(other) && - Objects.equals(q, ((DumbQueryWrapper) other).q); - } - - @Override - public int hashCode() { - return classHash() & q.hashCode(); - } + public DumbQueryWrapper(Query q) { + this.q = Objects.requireNonNull(q); } + @Override + public String toString(String f) { + return q.toString(f); + } + + @Override + public void visit(QueryVisitor visitor) { + q.visit(visitor); + } + + @Override + public boolean equals(Object other) { + return sameClassAs(other) && Objects.equals(q, ((DumbQueryWrapper) other).q); + } + + @Override + public int hashCode() { + return classHash() & q.hashCode(); + } + } } diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java index cc46599dc0e..367b5c2e818 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java @@ -20,7 +20,6 @@ import java.io.Reader; import java.io.StringReader; import java.util.HashMap; import java.util.Map; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockSynonymFilter; @@ -43,53 +42,53 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; -/** - * Tests QueryParser. - */ +/** Tests QueryParser. */ public class TestMultiFieldQueryParser extends LuceneTestCase { - /** test stop words parsing for both the non static form, and for the - * corresponding static form (qtxt, fields[]). */ + /** + * test stop words parsing for both the non static form, and for the corresponding static form + * (qtxt, fields[]). 
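
Aside: the stopword assertions below rely on the test's QPTestAnalyzer; the same behavior can be reproduced with an ordinary StopFilter-based analyzer. A hedged usage sketch (the analyzer wiring is illustrative, and WhitespaceTokenizer comes from the common analysis module):

    import java.util.List;
    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.CharArraySet;
    import org.apache.lucene.analysis.StopFilter;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.core.WhitespaceTokenizer;
    import org.apache.lucene.queryparser.classic.MultiFieldQueryParser;
    import org.apache.lucene.search.Query;

    public class StopwordParseSketch {
      public static void main(String[] args) throws Exception {
        CharArraySet stops = new CharArraySet(List.of("stop"), true);
        Analyzer a =
            new Analyzer() {
              @Override
              protected TokenStreamComponents createComponents(String fieldName) {
                WhitespaceTokenizer tok = new WhitespaceTokenizer();
                TokenStream filtered = new StopFilter(tok, stops);
                return new TokenStreamComponents(tok, filtered);
              }
            };
        MultiFieldQueryParser parser = new MultiFieldQueryParser(new String[] {"b", "t"}, a);
        Query q = parser.parse("one stop");
        System.out.println(q); // expected: b:one t:one, the stopword drops out
      }
    }
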
+ */ public void testStopwordsParsing() throws Exception { - assertStopQueryEquals("one", "b:one t:one"); - assertStopQueryEquals("one stop", "b:one t:one"); - assertStopQueryEquals("one (stop)", "b:one t:one"); - assertStopQueryEquals("one ((stop))", "b:one t:one"); - assertStopQueryEquals("stop", ""); - assertStopQueryEquals("(stop)", ""); - assertStopQueryEquals("((stop))", ""); + assertStopQueryEquals("one", "b:one t:one"); + assertStopQueryEquals("one stop", "b:one t:one"); + assertStopQueryEquals("one (stop)", "b:one t:one"); + assertStopQueryEquals("one ((stop))", "b:one t:one"); + assertStopQueryEquals("stop", ""); + assertStopQueryEquals("(stop)", ""); + assertStopQueryEquals("((stop))", ""); } - // verify parsing of query using a stopping analyzer - private void assertStopQueryEquals (String qtxt, String expectedRes) throws Exception { + // verify parsing of query using a stopping analyzer + private void assertStopQueryEquals(String qtxt, String expectedRes) throws Exception { String[] fields = {"b", "t"}; Occur occur[] = {Occur.SHOULD, Occur.SHOULD}; TestQueryParser.QPTestAnalyzer a = new TestQueryParser.QPTestAnalyzer(); MultiFieldQueryParser mfqp = new MultiFieldQueryParser(fields, a); - + Query q = mfqp.parse(qtxt); assertEquals(expectedRes, q.toString()); - + q = MultiFieldQueryParser.parse(qtxt, fields, occur, a); assertEquals(expectedRes, q.toString()); } - + public void testSimple() throws Exception { String[] fields = {"b", "t"}; MultiFieldQueryParser mfqp = new MultiFieldQueryParser(fields, new MockAnalyzer(random())); - + Query q = mfqp.parse("one"); assertEquals("b:one t:one", q.toString()); - + q = mfqp.parse("one two"); assertEquals("(b:one t:one) (b:two t:two)", q.toString()); - + q = mfqp.parse("+one +two"); assertEquals("+(b:one t:one) +(b:two t:two)", q.toString()); q = mfqp.parse("+one -two -three"); assertEquals("+(b:one t:one) -(b:two t:two) -(b:three t:three)", q.toString()); - + q = mfqp.parse("one^2 two"); assertEquals("(b:one t:one)^2.0 (b:two t:two)", q.toString()); @@ -118,7 +117,7 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { assertEquals("b:\"foo bar\"~4 t:\"foo bar\"~4", q.toString()); // LUCENE-1213: MultiFieldQueryParser was ignoring slop when phrase had a field. 
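
Aside: the expansions exercised by testSimple above are easy to reproduce outside the test harness. A hedged usage sketch with StandardAnalyzer standing in for the test's MockAnalyzer:

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.queryparser.classic.MultiFieldQueryParser;
    import org.apache.lucene.search.Query;

    public class MultiFieldSketch {
      public static void main(String[] args) throws Exception {
        String[] fields = {"b", "t"};
        MultiFieldQueryParser parser = new MultiFieldQueryParser(fields, new StandardAnalyzer());
        // Each bare term is expanded across every field:
        Query q = parser.parse("one two");
        System.out.println(q); // (b:one t:one) (b:two t:two)
        // A term with an explicit field is left untouched:
        System.out.println(parser.parse("f:one")); // f:one
      }
    }
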
- q = mfqp.parse("b:\"foo bar\"~4"); + q = mfqp.parse("b:\"foo bar\"~4"); assertEquals("b:\"foo bar\"~4", q.toString()); // make sure that terms which have a field are not touched: @@ -131,35 +130,34 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { assertEquals("+(b:one t:one) +(b:two t:two)", q.toString()); q = mfqp.parse("\"aa bb cc\" \"dd ee\""); assertEquals("+(b:\"aa bb cc\" t:\"aa bb cc\") +(b:\"dd ee\" t:\"dd ee\")", q.toString()); - } - + public void testBoostsSimple() throws Exception { - Map boosts = new HashMap<>(); - boosts.put("b", Float.valueOf(5)); - boosts.put("t", Float.valueOf(10)); - String[] fields = {"b", "t"}; - MultiFieldQueryParser mfqp = new MultiFieldQueryParser(fields, new MockAnalyzer(random()), boosts); - - - //Check for simple - Query q = mfqp.parse("one"); - assertEquals("(b:one)^5.0 (t:one)^10.0", q.toString()); - - //Check for AND - q = mfqp.parse("one AND two"); - assertEquals("+((b:one)^5.0 (t:one)^10.0) +((b:two)^5.0 (t:two)^10.0)", q.toString()); - - //Check for OR - q = mfqp.parse("one OR two"); - assertEquals("((b:one)^5.0 (t:one)^10.0) ((b:two)^5.0 (t:two)^10.0)", q.toString()); - - //Check for AND and a field - q = mfqp.parse("one AND two AND foo:test"); - assertEquals("+((b:one)^5.0 (t:one)^10.0) +((b:two)^5.0 (t:two)^10.0) +foo:test", q.toString()); - - q = mfqp.parse("one^3 AND two^4"); - assertEquals("+((b:one)^5.0 (t:one)^10.0)^3.0 +((b:two)^5.0 (t:two)^10.0)^4.0", q.toString()); + Map boosts = new HashMap<>(); + boosts.put("b", Float.valueOf(5)); + boosts.put("t", Float.valueOf(10)); + String[] fields = {"b", "t"}; + MultiFieldQueryParser mfqp = + new MultiFieldQueryParser(fields, new MockAnalyzer(random()), boosts); + + // Check for simple + Query q = mfqp.parse("one"); + assertEquals("(b:one)^5.0 (t:one)^10.0", q.toString()); + + // Check for AND + q = mfqp.parse("one AND two"); + assertEquals("+((b:one)^5.0 (t:one)^10.0) +((b:two)^5.0 (t:two)^10.0)", q.toString()); + + // Check for OR + q = mfqp.parse("one OR two"); + assertEquals("((b:one)^5.0 (t:one)^10.0) ((b:two)^5.0 (t:two)^10.0)", q.toString()); + + // Check for AND and a field + q = mfqp.parse("one AND two AND foo:test"); + assertEquals("+((b:one)^5.0 (t:one)^10.0) +((b:two)^5.0 (t:two)^10.0) +foo:test", q.toString()); + + q = mfqp.parse("one^3 AND two^4"); + assertEquals("+((b:one)^5.0 (t:one)^10.0)^3.0 +((b:two)^5.0 (t:two)^10.0)^4.0", q.toString()); } public void testStaticMethod1() throws ParseException { @@ -182,21 +180,22 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { String[] queries5 = {"blah"}; // expected exception, array length differs - expectThrows(IllegalArgumentException.class, () -> { - MultiFieldQueryParser.parse(queries5, fields, new MockAnalyzer(random())); - }); - + expectThrows( + IllegalArgumentException.class, + () -> { + MultiFieldQueryParser.parse(queries5, fields, new MockAnalyzer(random())); + }); + // check also with stop words for this static form (qtxts[], fields[]). 
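
Aside: the per-field boost map from testBoostsSimple above is the usual way to weight fields at parse time. A hedged usage sketch (boost values illustrative):

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.queryparser.classic.MultiFieldQueryParser;
    import org.apache.lucene.search.Query;

    public class BoostedFieldsSketch {
      public static void main(String[] args) throws Exception {
        Map<String, Float> boosts = new HashMap<>();
        boosts.put("b", 5f);
        boosts.put("t", 10f);
        MultiFieldQueryParser parser =
            new MultiFieldQueryParser(new String[] {"b", "t"}, new StandardAnalyzer(), boosts);
        Query q = parser.parse("one");
        System.out.println(q); // (b:one)^5.0 (t:one)^10.0, matching the assertions above
      }
    }
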
TestQueryParser.QPTestAnalyzer stopA = new TestQueryParser.QPTestAnalyzer(); - + String[] queries6 = {"((+stop))", "+((stop))"}; q = MultiFieldQueryParser.parse(queries6, fields, stopA); assertEquals("", q.toString()); - + String[] queries7 = {"one ((+stop)) +more", "+((stop)) +two"}; q = MultiFieldQueryParser.parse(queries7, fields, stopA); assertEquals("(b:one +b:more) (+t:two)", q.toString()); - } public void testStaticMethod2() throws ParseException { @@ -209,61 +208,75 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { assertEquals("+(b:one b:two) -(t:one t:two)", q.toString()); // expected exception, array length differs - expectThrows(IllegalArgumentException.class, () -> { - BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST}; - MultiFieldQueryParser.parse("blah", fields, flags2, new MockAnalyzer(random())); - }); + expectThrows( + IllegalArgumentException.class, + () -> { + BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST}; + MultiFieldQueryParser.parse("blah", fields, flags2, new MockAnalyzer(random())); + }); } public void testStaticMethod2Old() throws ParseException { String[] fields = {"b", "t"}; - //int[] flags = {MultiFieldQueryParser.REQUIRED_FIELD, MultiFieldQueryParser.PROHIBITED_FIELD}; - BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT}; + // int[] flags = {MultiFieldQueryParser.REQUIRED_FIELD, MultiFieldQueryParser.PROHIBITED_FIELD}; + BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT}; - Query q = MultiFieldQueryParser.parse("one", fields, flags, new MockAnalyzer(random()));//, fields, flags, new MockAnalyzer(random)); + Query q = + MultiFieldQueryParser.parse( + "one", + fields, + flags, + new MockAnalyzer(random())); // , fields, flags, new MockAnalyzer(random)); assertEquals("+b:one -t:one", q.toString()); q = MultiFieldQueryParser.parse("one two", fields, flags, new MockAnalyzer(random())); assertEquals("+(b:one b:two) -(t:one t:two)", q.toString()); // expected exception, array length differs - expectThrows(IllegalArgumentException.class, () -> { - BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST}; - MultiFieldQueryParser.parse("blah", fields, flags2, new MockAnalyzer(random())); - }); + expectThrows( + IllegalArgumentException.class, + () -> { + BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST}; + MultiFieldQueryParser.parse("blah", fields, flags2, new MockAnalyzer(random())); + }); } public void testStaticMethod3() throws ParseException { String[] queries = {"one", "two", "three"}; String[] fields = {"f1", "f2", "f3"}; - BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, - BooleanClause.Occur.MUST_NOT, BooleanClause.Occur.SHOULD}; + BooleanClause.Occur[] flags = { + BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT, BooleanClause.Occur.SHOULD + }; Query q = MultiFieldQueryParser.parse(queries, fields, flags, new MockAnalyzer(random())); assertEquals("+f1:one -f2:two f3:three", q.toString()); // expected exception, array length differs - expectThrows(IllegalArgumentException.class, () -> { - BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST}; - MultiFieldQueryParser.parse(queries, fields, flags2, new MockAnalyzer(random())); - }); + expectThrows( + IllegalArgumentException.class, + () -> { + BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST}; + MultiFieldQueryParser.parse(queries, fields, flags2, new MockAnalyzer(random())); + }); } public void testStaticMethod3Old() throws ParseException { String[] queries = {"one", 
"two"}; String[] fields = {"b", "t"}; - BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT}; + BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT}; Query q = MultiFieldQueryParser.parse(queries, fields, flags, new MockAnalyzer(random())); assertEquals("+b:one -t:two", q.toString()); // expected exception, array length differs - expectThrows(IllegalArgumentException.class, () -> { - BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST}; - MultiFieldQueryParser.parse(queries, fields, flags2, new MockAnalyzer(random())); - }); + expectThrows( + IllegalArgumentException.class, + () -> { + BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST}; + MultiFieldQueryParser.parse(queries, fields, flags2, new MockAnalyzer(random())); + }); } public void testAnalyzerReturningNull() throws ParseException { - String[] fields = new String[] { "f1", "f2", "f3" }; + String[] fields = new String[] {"f1", "f2", "f3"}; MultiFieldQueryParser parser = new MultiFieldQueryParser(fields, new AnalyzerReturningNull()); Query q = parser.parse("bla AND blo"); assertEquals("+(f2:bla f3:bla) +(f2:blo f3:blo)", q.toString()); @@ -279,14 +292,13 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { public void testStopWordSearching() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); Directory ramDir = newDirectory(); - IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(analyzer)); + IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(analyzer)); Document doc = new Document(); doc.add(newTextField("body", "blah the footest blah", Field.Store.NO)); iw.addDocument(doc); iw.close(); - - MultiFieldQueryParser mfqp = - new MultiFieldQueryParser(new String[] {"body"}, analyzer); + + MultiFieldQueryParser mfqp = new MultiFieldQueryParser(new String[] {"body"}, analyzer); mfqp.setDefaultOperator(QueryParser.Operator.AND); Query q = mfqp.parse("the footest"); IndexReader ir = DirectoryReader.open(ramDir); @@ -296,10 +308,8 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { ir.close(); ramDir.close(); } - - /** - * Return no tokens for field "f1". - */ + + /** Return no tokens for field "f1". 
*/ private static class AnalyzerReturningNull extends Analyzer { MockAnalyzer stdAnalyzer = new MockAnalyzer(random()); @@ -324,7 +334,7 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { return stdAnalyzer.createComponents(fieldName); } } - + public void testSimpleRegex() throws ParseException { String[] fields = new String[] {"a", "b"}; MultiFieldQueryParser mfqp = new MultiFieldQueryParser(fields, new MockAnalyzer(random())); diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiPhraseQueryParsing.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiPhraseQueryParsing.java index 043f597f487..92c11e1bbda 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiPhraseQueryParsing.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiPhraseQueryParsing.java @@ -16,6 +16,7 @@ */ package org.apache.lucene.queryparser.classic; +import java.io.IOException; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; @@ -25,18 +26,17 @@ import org.apache.lucene.search.MultiPhraseQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.LuceneTestCase; -import java.io.IOException; - public class TestMultiPhraseQueryParsing extends LuceneTestCase { private static class TokenAndPos { - public final String token; - public final int pos; - public TokenAndPos(String token, int pos) { - this.token = token; - this.pos = pos; - } + public final String token; + public final int pos; + + public TokenAndPos(String token, int pos) { + this.token = token; + this.pos = pos; } + } private static class CannedAnalyzer extends Analyzer { private final TokenAndPos[] tokens; @@ -56,7 +56,8 @@ public class TestMultiPhraseQueryParsing extends LuceneTestCase { private int upto = 0; private int lastPos = 0; private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); - private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class); + private final PositionIncrementAttribute posIncrAtt = + addAttribute(PositionIncrementAttribute.class); public CannedTokenizer(TokenAndPos[] tokens) { super(); @@ -87,24 +88,24 @@ public class TestMultiPhraseQueryParsing extends LuceneTestCase { } public void testMultiPhraseQueryParsing() throws Exception { - TokenAndPos[] INCR_0_QUERY_TOKENS_AND = new TokenAndPos[]{ - new TokenAndPos("a", 0), - new TokenAndPos("1", 0), - new TokenAndPos("b", 1), - new TokenAndPos("1", 1), - new TokenAndPos("c", 2) - }; + TokenAndPos[] INCR_0_QUERY_TOKENS_AND = + new TokenAndPos[] { + new TokenAndPos("a", 0), + new TokenAndPos("1", 0), + new TokenAndPos("b", 1), + new TokenAndPos("1", 1), + new TokenAndPos("c", 2) + }; QueryParser qp = new QueryParser("field", new CannedAnalyzer(INCR_0_QUERY_TOKENS_AND)); Query q = qp.parse("\"this text is acually ignored\""); assertTrue("wrong query type!", q instanceof MultiPhraseQuery); MultiPhraseQuery.Builder multiPhraseQueryBuilder = new MultiPhraseQuery.Builder(); - multiPhraseQueryBuilder.add(new Term[]{ new Term("field", "a"), new Term("field", "1") }, -1); - multiPhraseQueryBuilder.add(new Term[]{ new Term("field", "b"), new Term("field", "1") }, 0); - multiPhraseQueryBuilder.add(new Term[]{ new Term("field", "c") }, 1); + multiPhraseQueryBuilder.add(new Term[] {new Term("field", "a"), new Term("field", "1")}, -1); + multiPhraseQueryBuilder.add(new 
Term[] {new Term("field", "b"), new Term("field", "1")}, 0); + multiPhraseQueryBuilder.add(new Term[] {new Term("field", "c")}, 1); assertEquals(multiPhraseQueryBuilder.build(), q); } - } diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestQueryParser.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestQueryParser.java index 2338c1e9bfd..916b8799add 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestQueryParser.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestQueryParser.java @@ -17,7 +17,6 @@ package org.apache.lucene.queryparser.classic; import java.io.IOException; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockBytesAnalyzer; @@ -54,9 +53,7 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; import org.apache.lucene.util.automaton.TooComplexToDeterminizeException; -/** - * Tests QueryParser. - */ +/** Tests QueryParser. */ public class TestQueryParser extends QueryParserTestBase { protected boolean splitOnWhitespace = QueryParser.DEFAULT_SPLIT_ON_WHITESPACE; @@ -66,20 +63,19 @@ public class TestQueryParser extends QueryParserTestBase { public QPTestParser(String f, Analyzer a) { super(f, a); } - + @Override - protected Query getFuzzyQuery(String field, String termStr, - float minSimilarity) throws ParseException { + protected Query getFuzzyQuery(String field, String termStr, float minSimilarity) + throws ParseException { throw new ParseException("Fuzzy queries not allowed"); } - + @Override - protected Query getWildcardQuery(String field, String termStr) - throws ParseException { + protected Query getWildcardQuery(String field, String termStr) throws ParseException { throw new ParseException("Wildcard queries not allowed"); } } - + public QueryParser getParser(Analyzer a) throws Exception { if (a == null) a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true); QueryParser qp = new QueryParser(getDefaultField(), a); @@ -87,62 +83,59 @@ public class TestQueryParser extends QueryParserTestBase { qp.setSplitOnWhitespace(splitOnWhitespace); return qp; } - + @Override - public CommonQueryParserConfiguration getParserConfig(Analyzer a) - throws Exception { + public CommonQueryParserConfiguration getParserConfig(Analyzer a) throws Exception { return getParser(a); } - + @Override - public Query getQuery(String query, CommonQueryParserConfiguration cqpC) - throws Exception { + public Query getQuery(String query, CommonQueryParserConfiguration cqpC) throws Exception { assert cqpC != null : "Parameter must not be null"; assert (cqpC instanceof QueryParser) : "Parameter must be instance of QueryParser"; QueryParser qp = (QueryParser) cqpC; return qp.parse(query); } - + @Override public Query getQuery(String query, Analyzer a) throws Exception { return getParser(a).parse(query); } - + @Override public boolean isQueryParserException(Exception exception) { return exception instanceof ParseException; } - + @Override public void setDefaultOperatorOR(CommonQueryParserConfiguration cqpC) { assert (cqpC instanceof QueryParser); QueryParser qp = (QueryParser) cqpC; qp.setDefaultOperator(Operator.OR); } - + @Override public void setDefaultOperatorAND(CommonQueryParserConfiguration cqpC) { assert (cqpC instanceof QueryParser); QueryParser qp = (QueryParser) cqpC; qp.setDefaultOperator(Operator.AND); } - + @Override - public void 
setAutoGeneratePhraseQueries(CommonQueryParserConfiguration cqpC, - boolean value) { + public void setAutoGeneratePhraseQueries(CommonQueryParserConfiguration cqpC, boolean value) { assert (cqpC instanceof QueryParser); QueryParser qp = (QueryParser) cqpC; qp.setAutoGeneratePhraseQueries(value); } - + @Override - public void setDateResolution(CommonQueryParserConfiguration cqpC, - CharSequence field, Resolution value) { + public void setDateResolution( + CommonQueryParserConfiguration cqpC, CharSequence field, Resolution value) { assert (cqpC instanceof QueryParser); QueryParser qp = (QueryParser) cqpC; qp.setDateResolution(field.toString(), value); } - + @Override public void testDefaultOperator() throws Exception { QueryParser qp = getParser(new MockAnalyzer(random())); @@ -153,7 +146,7 @@ public class TestQueryParser extends QueryParserTestBase { setDefaultOperatorOR(qp); assertEquals(QueryParserBase.OR_OPERATOR, qp.getDefaultOperator()); } - + // LUCENE-2002: when we run javacc to regen QueryParser, // we also run a replaceregexp step to fix 2 of the public // ctors (change them to protected): @@ -166,61 +159,70 @@ public class TestQueryParser extends QueryParserTestBase { // doesn't work for some reason. @SuppressWarnings("rawtype") public void testProtectedCtors() throws Exception { - expectThrows(NoSuchMethodException.class, () -> QueryParser.class.getConstructor(CharStream.class)); - expectThrows(NoSuchMethodException.class, () -> QueryParser.class.getConstructor(QueryParserTokenManager.class)); + expectThrows( + NoSuchMethodException.class, () -> QueryParser.class.getConstructor(CharStream.class)); + expectThrows( + NoSuchMethodException.class, + () -> QueryParser.class.getConstructor(QueryParserTokenManager.class)); } - - public void testFuzzySlopeExtendability() throws ParseException { - QueryParser qp = new QueryParser("a", new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)) { - @Override - Query handleBareFuzzy(String qfield, Token fuzzySlop, String termImage) - throws ParseException { - - if(fuzzySlop.image.endsWith("€")) { - float fms = fuzzyMinSim; - try { - fms = Float.parseFloat(fuzzySlop.image.substring(1, fuzzySlop.image.length()-1)); - } catch (Exception ignored) { } - float value = Float.parseFloat(termImage); - return getRangeQuery(qfield, Float.toString(value-fms/2.f), Float.toString(value+fms/2.f), true, true); - } - return super.handleBareFuzzy(qfield, fuzzySlop, termImage); - } - - }; + public void testFuzzySlopeExtendability() throws ParseException { + QueryParser qp = + new QueryParser("a", new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)) { + + @Override + Query handleBareFuzzy(String qfield, Token fuzzySlop, String termImage) + throws ParseException { + + if (fuzzySlop.image.endsWith("€")) { + float fms = fuzzyMinSim; + try { + fms = Float.parseFloat(fuzzySlop.image.substring(1, fuzzySlop.image.length() - 1)); + } catch (Exception ignored) { + } + float value = Float.parseFloat(termImage); + return getRangeQuery( + qfield, + Float.toString(value - fms / 2.f), + Float.toString(value + fms / 2.f), + true, + true); + } + return super.handleBareFuzzy(qfield, fuzzySlop, termImage); + } + }; assertEquals(qp.parse("a:[11.95 TO 12.95]"), qp.parse("12.45~1€")); } - + @Override public void testStarParsing() throws Exception { final int[] type = new int[1]; - QueryParser qp = new QueryParser(FIELD, - new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)) { - @Override - protected Query getWildcardQuery(String field, String termStr) { - // 
override error checking of superclass - type[0] = 1; - return new TermQuery(new Term(field, termStr)); - } - - @Override - protected Query getPrefixQuery(String field, String termStr) { - // override error checking of superclass - type[0] = 2; - return new TermQuery(new Term(field, termStr)); - } - - @Override - protected Query getFieldQuery(String field, String queryText, - boolean quoted) throws ParseException { - type[0] = 3; - return super.getFieldQuery(field, queryText, quoted); - } - }; - + QueryParser qp = + new QueryParser(FIELD, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)) { + @Override + protected Query getWildcardQuery(String field, String termStr) { + // override error checking of superclass + type[0] = 1; + return new TermQuery(new Term(field, termStr)); + } + + @Override + protected Query getPrefixQuery(String field, String termStr) { + // override error checking of superclass + type[0] = 2; + return new TermQuery(new Term(field, termStr)); + } + + @Override + protected Query getFieldQuery(String field, String queryText, boolean quoted) + throws ParseException { + type[0] = 3; + return super.getFieldQuery(field, queryText, quoted); + } + }; + TermQuery tq; - + tq = (TermQuery) qp.parse("foo:zoo*"); assertEquals("zoo", tq.getTerm().text()); assertEquals(2, type[0]); @@ -230,59 +232,62 @@ public class TestQueryParser extends QueryParserTestBase { assertEquals("zoo", tq.getTerm().text()); assertEquals(2, type[0]); assertEquals(bq.getBoost(), 2, 0); - + tq = (TermQuery) qp.parse("foo:*"); assertEquals("*", tq.getTerm().text()); assertEquals(1, type[0]); // could be a valid prefix query in the future too - + bq = (BoostQuery) qp.parse("foo:*^2"); tq = (TermQuery) bq.getQuery(); assertEquals("*", tq.getTerm().text()); assertEquals(1, type[0]); assertEquals(bq.getBoost(), 2, 0); - + tq = (TermQuery) qp.parse("*:foo"); assertEquals("*", tq.getTerm().field()); assertEquals("foo", tq.getTerm().text()); assertEquals(3, type[0]); - + tq = (TermQuery) qp.parse("*:*"); assertEquals("*", tq.getTerm().field()); assertEquals("*", tq.getTerm().text()); assertEquals(1, type[0]); // could be handled as a prefix query in the - // future - + // future + tq = (TermQuery) qp.parse("(*:*)"); assertEquals("*", tq.getTerm().field()); assertEquals("*", tq.getTerm().text()); assertEquals(1, type[0]); - } - + // Wildcard queries should not be allowed public void testCustomQueryParserWildcard() { - expectThrows(ParseException.class, () -> { - new QPTestParser("contents", new MockAnalyzer(random(), - MockTokenizer.WHITESPACE, false)).parse("a?t"); - }); + expectThrows( + ParseException.class, + () -> { + new QPTestParser("contents", new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)) + .parse("a?t"); + }); } - + // Fuzzy queries should not be allowed public void testCustomQueryParserFuzzy() throws Exception { - expectThrows(ParseException.class, () -> { - new QPTestParser("contents", new MockAnalyzer(random(), - MockTokenizer.WHITESPACE, false)).parse("xunit~"); - }); + expectThrows( + ParseException.class, + () -> { + new QPTestParser("contents", new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)) + .parse("xunit~"); + }); } - + /** query parser that doesn't expand synonyms when users use double quotes */ private class SmartQueryParser extends QueryParser { Analyzer morePrecise = new Analyzer2(); - + public SmartQueryParser() { super(FIELD, new Analyzer1()); } - + @Override protected Query getFieldQuery(String field, String queryText, boolean quoted) throws ParseException 
{ @@ -290,36 +295,35 @@ public class TestQueryParser extends QueryParserTestBase { else return super.getFieldQuery(field, queryText, quoted); } } - + @Override public void testNewFieldQuery() throws Exception { /** ordinary behavior, synonyms form uncoordinated boolean query */ - QueryParser dumb = new QueryParser(FIELD, - new Analyzer1()); - Query expanded = new SynonymQuery.Builder(FIELD) - .addTerm(new Term(FIELD, "dogs")) - .addTerm(new Term(FIELD, "dog")) - .build(); + QueryParser dumb = new QueryParser(FIELD, new Analyzer1()); + Query expanded = + new SynonymQuery.Builder(FIELD) + .addTerm(new Term(FIELD, "dogs")) + .addTerm(new Term(FIELD, "dog")) + .build(); assertEquals(expanded, dumb.parse("\"dogs\"")); /** even with the phrase operator the behavior is the same */ assertEquals(expanded, dumb.parse("dogs")); - - /** - * custom behavior, the synonyms are expanded, unless you use quote operator - */ + + /** custom behavior, the synonyms are expanded, unless you use quote operator */ QueryParser smart = new SmartQueryParser(); assertEquals(expanded, smart.parse("dogs")); - + Query unexpanded = new TermQuery(new Term(FIELD, "dogs")); assertEquals(unexpanded, smart.parse("\"dogs\"")); } /** simple synonyms test */ public void testSynonyms() throws Exception { - Query expected = new SynonymQuery.Builder(FIELD) - .addTerm(new Term(FIELD, "dogs")) - .addTerm(new Term(FIELD, "dog")) - .build(); + Query expected = + new SynonymQuery.Builder(FIELD) + .addTerm(new Term(FIELD, "dogs")) + .addTerm(new Term(FIELD, "dog")) + .build(); QueryParser qp = new QueryParser(FIELD, new MockSynonymAnalyzer()); assertEquals(expected, qp.parse("dogs")); assertEquals(expected, qp.parse("\"dogs\"")); @@ -330,12 +334,12 @@ public class TestQueryParser extends QueryParserTestBase { assertEquals(expected, qp.parse("dogs^2")); assertEquals(expected, qp.parse("\"dogs\"^2")); } - + /** forms multiphrase query */ public void testSynonymsPhrase() throws Exception { MultiPhraseQuery.Builder expectedQBuilder = new MultiPhraseQuery.Builder(); expectedQBuilder.add(new Term(FIELD, "old")); - expectedQBuilder.add(new Term[] { new Term(FIELD, "dogs"), new Term(FIELD, "dog") }); + expectedQBuilder.add(new Term[] {new Term(FIELD, "dogs"), new Term(FIELD, "dog")}); QueryParser qp = new QueryParser(FIELD, new MockSynonymAnalyzer()); assertEquals(expectedQBuilder.build(), qp.parse("\"old dogs\"")); qp.setDefaultOperator(Operator.AND); @@ -346,15 +350,13 @@ public class TestQueryParser extends QueryParserTestBase { expected = new BoostQuery(expectedQBuilder.build(), 2f); assertEquals(expected, qp.parse("\"old dogs\"~3^2")); } - - /** - * adds synonym of "國" for "国". - */ + + /** adds synonym of "國" for "国". 
*/ protected static class MockCJKSynonymFilter extends TokenFilter { CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class); boolean addSynonym = false; - + public MockCJKSynonymFilter(TokenStream input) { super(input); } @@ -368,16 +370,16 @@ public class TestQueryParser extends QueryParserTestBase { addSynonym = false; return true; } - + if (input.incrementToken()) { addSynonym = termAtt.toString().equals("国"); return true; } else { return false; } - } + } } - + static class MockCJKSynonymAnalyzer extends Analyzer { @Override protected TokenStreamComponents createComponents(String fieldName) { @@ -385,13 +387,14 @@ public class TestQueryParser extends QueryParserTestBase { return new TokenStreamComponents(tokenizer, new MockCJKSynonymFilter(tokenizer)); } } - + /** simple CJK synonym test */ public void testCJKSynonym() throws Exception { - Query expected = new SynonymQuery.Builder(FIELD) - .addTerm(new Term(FIELD, "国")) - .addTerm(new Term(FIELD, "國")) - .build(); + Query expected = + new SynonymQuery.Builder(FIELD) + .addTerm(new Term(FIELD, "国")) + .addTerm(new Term(FIELD, "國")) + .build(); QueryParser qp = new QueryParser(FIELD, new MockCJKSynonymAnalyzer()); assertEquals(expected, qp.parse("国")); qp.setDefaultOperator(Operator.AND); @@ -399,15 +402,16 @@ public class TestQueryParser extends QueryParserTestBase { expected = new BoostQuery(expected, 2f); assertEquals(expected, qp.parse("国^2")); } - + /** synonyms with default OR operator */ public void testCJKSynonymsOR() throws Exception { BooleanQuery.Builder expectedB = new BooleanQuery.Builder(); expectedB.add(new TermQuery(new Term(FIELD, "中")), BooleanClause.Occur.SHOULD); - Query inner = new SynonymQuery.Builder(FIELD) - .addTerm(new Term(FIELD, "国")) - .addTerm(new Term(FIELD, "國")) - .build(); + Query inner = + new SynonymQuery.Builder(FIELD) + .addTerm(new Term(FIELD, "国")) + .addTerm(new Term(FIELD, "國")) + .build(); expectedB.add(inner, BooleanClause.Occur.SHOULD); Query expected = expectedB.build(); QueryParser qp = new QueryParser(FIELD, new MockCJKSynonymAnalyzer()); @@ -415,20 +419,22 @@ public class TestQueryParser extends QueryParserTestBase { expected = new BoostQuery(expected, 2f); assertEquals(expected, qp.parse("中国^2")); } - + /** more complex synonyms with default OR operator */ public void testCJKSynonymsOR2() throws Exception { BooleanQuery.Builder expectedB = new BooleanQuery.Builder(); expectedB.add(new TermQuery(new Term(FIELD, "中")), BooleanClause.Occur.SHOULD); - SynonymQuery inner = new SynonymQuery.Builder(FIELD) - .addTerm(new Term(FIELD, "国")) - .addTerm(new Term(FIELD, "國")) - .build(); + SynonymQuery inner = + new SynonymQuery.Builder(FIELD) + .addTerm(new Term(FIELD, "国")) + .addTerm(new Term(FIELD, "國")) + .build(); expectedB.add(inner, BooleanClause.Occur.SHOULD); - SynonymQuery inner2 = new SynonymQuery.Builder(FIELD) - .addTerm(new Term(FIELD, "国")) - .addTerm(new Term(FIELD, "國")) - .build(); + SynonymQuery inner2 = + new SynonymQuery.Builder(FIELD) + .addTerm(new Term(FIELD, "国")) + .addTerm(new Term(FIELD, "國")) + .build(); expectedB.add(inner2, BooleanClause.Occur.SHOULD); Query expected = expectedB.build(); QueryParser qp = new QueryParser(FIELD, new MockCJKSynonymAnalyzer()); @@ -436,15 +442,16 @@ public class TestQueryParser extends QueryParserTestBase { expected = new BoostQuery(expected, 2f); assertEquals(expected, qp.parse("中国国^2")); } - + /** synonyms with default AND operator */ public 
void testCJKSynonymsAND() throws Exception { BooleanQuery.Builder expectedB = new BooleanQuery.Builder(); expectedB.add(new TermQuery(new Term(FIELD, "中")), BooleanClause.Occur.MUST); - Query inner = new SynonymQuery.Builder(FIELD) - .addTerm(new Term(FIELD, "国")) - .addTerm(new Term(FIELD, "國")) - .build(); + Query inner = + new SynonymQuery.Builder(FIELD) + .addTerm(new Term(FIELD, "国")) + .addTerm(new Term(FIELD, "國")) + .build(); expectedB.add(inner, BooleanClause.Occur.MUST); Query expected = expectedB.build(); QueryParser qp = new QueryParser(FIELD, new MockCJKSynonymAnalyzer()); @@ -453,20 +460,22 @@ public class TestQueryParser extends QueryParserTestBase { expected = new BoostQuery(expected, 2f); assertEquals(expected, qp.parse("中国^2")); } - + /** more complex synonyms with default AND operator */ public void testCJKSynonymsAND2() throws Exception { BooleanQuery.Builder expectedB = new BooleanQuery.Builder(); expectedB.add(new TermQuery(new Term(FIELD, "中")), BooleanClause.Occur.MUST); - Query inner = new SynonymQuery.Builder(FIELD) - .addTerm(new Term(FIELD, "国")) - .addTerm(new Term(FIELD, "國")) - .build(); + Query inner = + new SynonymQuery.Builder(FIELD) + .addTerm(new Term(FIELD, "国")) + .addTerm(new Term(FIELD, "國")) + .build(); expectedB.add(inner, BooleanClause.Occur.MUST); - Query inner2 = new SynonymQuery.Builder(FIELD) - .addTerm(new Term(FIELD, "国")) - .addTerm(new Term(FIELD, "國")) - .build(); + Query inner2 = + new SynonymQuery.Builder(FIELD) + .addTerm(new Term(FIELD, "国")) + .addTerm(new Term(FIELD, "國")) + .build(); expectedB.add(inner2, BooleanClause.Occur.MUST); Query expected = expectedB.build(); QueryParser qp = new QueryParser(FIELD, new MockCJKSynonymAnalyzer()); @@ -475,12 +484,12 @@ public class TestQueryParser extends QueryParserTestBase { expected = new BoostQuery(expected, 2f); assertEquals(expected, qp.parse("中国国^2")); } - + /** forms multiphrase query */ public void testCJKSynonymsPhrase() throws Exception { MultiPhraseQuery.Builder expectedQBuilder = new MultiPhraseQuery.Builder(); expectedQBuilder.add(new Term(FIELD, "中")); - expectedQBuilder.add(new Term[] { new Term(FIELD, "国"), new Term(FIELD, "國")}); + expectedQBuilder.add(new Term[] {new Term(FIELD, "国"), new Term(FIELD, "國")}); QueryParser qp = new QueryParser(FIELD, new MockCJKSynonymAnalyzer()); qp.setDefaultOperator(Operator.AND); assertEquals(expectedQBuilder.build(), qp.parse("\"中国\"")); @@ -495,12 +504,15 @@ public class TestQueryParser extends QueryParserTestBase { public void testWildcardMaxDeterminizedStates() throws Exception { QueryParser qp = new QueryParser(FIELD, new MockAnalyzer(random())); qp.setMaxDeterminizedStates(10); - expectThrows(TooComplexToDeterminizeException.class, () -> { - qp.parse("a*aaaaaaa"); - }); + expectThrows( + TooComplexToDeterminizeException.class, + () -> { + qp.parse("a*aaaaaaa"); + }); } - // TODO: Remove this specialization once the flexible standard parser gets multi-word synonym support + // TODO: Remove this specialization once the flexible standard parser gets multi-word synonym + // support @Override public void testQPA() throws Exception { boolean oldSplitOnWhitespace = splitOnWhitespace; @@ -524,60 +536,75 @@ public class TestQueryParser extends QueryParserTestBase { TermQuery pig = new TermQuery(new Term("field", "pig")); TermQuery cavy = new TermQuery(new Term("field", "cavy")); - // A multi-word synonym source will form a graph query for synonyms that formed the graph token stream + // A multi-word synonym source will form a graph query for 
synonyms that formed the graph token + // stream BooleanQuery.Builder synonym = new BooleanQuery.Builder(); synonym.add(guinea, BooleanClause.Occur.MUST); synonym.add(pig, BooleanClause.Occur.MUST); BooleanQuery guineaPig = synonym.build(); - PhraseQuery phraseGuineaPig = new PhraseQuery.Builder() - .add(new Term("field", "guinea")) - .add(new Term("field", "pig")) - .build(); + PhraseQuery phraseGuineaPig = + new PhraseQuery.Builder() + .add(new Term("field", "guinea")) + .add(new Term("field", "pig")) + .build(); - BooleanQuery graphQuery = new BooleanQuery.Builder() - .add(new BooleanQuery.Builder() - .add(guineaPig, BooleanClause.Occur.SHOULD) - .add(cavy, BooleanClause.Occur.SHOULD) - .build(), BooleanClause.Occur.SHOULD) - .build(); + BooleanQuery graphQuery = + new BooleanQuery.Builder() + .add( + new BooleanQuery.Builder() + .add(guineaPig, BooleanClause.Occur.SHOULD) + .add(cavy, BooleanClause.Occur.SHOULD) + .build(), + BooleanClause.Occur.SHOULD) + .build(); assertEquals(graphQuery, dumb.parse("guinea pig")); - Query synonyms = new BooleanQuery.Builder() - .add(new PhraseQuery("field", "guinea", "pig"), BooleanClause.Occur.SHOULD) - .add(new TermQuery(new Term("field", "cavy")), BooleanClause.Occur.SHOULD) - .build(); + Query synonyms = + new BooleanQuery.Builder() + .add(new PhraseQuery("field", "guinea", "pig"), BooleanClause.Occur.SHOULD) + .add(new TermQuery(new Term("field", "cavy")), BooleanClause.Occur.SHOULD) + .build(); assertEquals(synonyms, dumb.parse("\"guinea pig\"")); // custom behavior, the synonyms are expanded, unless you use quote operator QueryParser smart = new SmartQueryParser(); smart.setSplitOnWhitespace(false); - graphQuery = new BooleanQuery.Builder() - .add(new BooleanQuery.Builder() - .add(guineaPig, BooleanClause.Occur.SHOULD) - .add(cavy, BooleanClause.Occur.SHOULD) - .build(), BooleanClause.Occur.SHOULD) - .build(); + graphQuery = + new BooleanQuery.Builder() + .add( + new BooleanQuery.Builder() + .add(guineaPig, BooleanClause.Occur.SHOULD) + .add(cavy, BooleanClause.Occur.SHOULD) + .build(), + BooleanClause.Occur.SHOULD) + .build(); assertEquals(graphQuery, smart.parse("guinea pig")); assertEquals(phraseGuineaPig, smart.parse("\"guinea pig\"")); // with the AND operator dumb.setDefaultOperator(Operator.AND); - BooleanQuery graphAndQuery = new BooleanQuery.Builder() - .add(new BooleanQuery.Builder() - .add(guineaPig, BooleanClause.Occur.SHOULD) - .add(cavy, BooleanClause.Occur.SHOULD) - .build(), BooleanClause.Occur.MUST) - .build(); + BooleanQuery graphAndQuery = + new BooleanQuery.Builder() + .add( + new BooleanQuery.Builder() + .add(guineaPig, BooleanClause.Occur.SHOULD) + .add(cavy, BooleanClause.Occur.SHOULD) + .build(), + BooleanClause.Occur.MUST) + .build(); assertEquals(graphAndQuery, dumb.parse("guinea pig")); - graphAndQuery = new BooleanQuery.Builder() - .add(new BooleanQuery.Builder() - .add(guineaPig, BooleanClause.Occur.SHOULD) - .add(cavy, BooleanClause.Occur.SHOULD) - .build(), BooleanClause.Occur.MUST) - .add(cavy, BooleanClause.Occur.MUST) - .build(); + graphAndQuery = + new BooleanQuery.Builder() + .add( + new BooleanQuery.Builder() + .add(guineaPig, BooleanClause.Occur.SHOULD) + .add(cavy, BooleanClause.Occur.SHOULD) + .build(), + BooleanClause.Occur.MUST) + .add(cavy, BooleanClause.Occur.MUST) + .build(); assertEquals(graphAndQuery, dumb.parse("guinea pig cavy")); } @@ -585,14 +612,15 @@ public class TestQueryParser extends QueryParserTestBase { QueryParser dumb = new QueryParser("field", new Analyzer1()); 
dumb.setSplitOnWhitespace(false); dumb.setEnableGraphQueries(false); - + TermQuery pig = new TermQuery(new Term("field", "pig")); // A multi-word synonym source will just form a boolean query when graph queries are disabled: - Query inner = new SynonymQuery.Builder("field") - .addTerm(new Term("field", "cavy")) - .addTerm(new Term("field", "guinea")) - .build(); + Query inner = + new SynonymQuery.Builder("field") + .addTerm(new Term("field", "cavy")) + .addTerm(new Term("field", "guinea")) + .build(); BooleanQuery.Builder b = new BooleanQuery.Builder(); b.add(inner, BooleanClause.Occur.SHOULD); b.add(pig, BooleanClause.Occur.SHOULD); @@ -771,18 +799,22 @@ public class TestQueryParser extends QueryParserTestBase { TermQuery pig = new TermQuery(new Term("field", "pig")); TermQuery cavy = new TermQuery(new Term("field", "cavy")); - // A multi-word synonym source will form a graph query for synonyms that formed the graph token stream + // A multi-word synonym source will form a graph query for synonyms that formed the graph token + // stream BooleanQuery.Builder synonym = new BooleanQuery.Builder(); synonym.add(guinea, BooleanClause.Occur.MUST); synonym.add(pig, BooleanClause.Occur.MUST); BooleanQuery guineaPig = synonym.build(); - BooleanQuery graphQuery = new BooleanQuery.Builder() - .add(new BooleanQuery.Builder() - .add(guineaPig, BooleanClause.Occur.SHOULD) - .add(cavy, BooleanClause.Occur.SHOULD) - .build(), BooleanClause.Occur.SHOULD) - .build(); + BooleanQuery graphQuery = + new BooleanQuery.Builder() + .add( + new BooleanQuery.Builder() + .add(guineaPig, BooleanClause.Occur.SHOULD) + .add(cavy, BooleanClause.Occur.SHOULD) + .build(), + BooleanClause.Occur.SHOULD) + .build(); assertEquals(graphQuery, parser.parse("guinea pig")); boolean oldSplitOnWhitespace = splitOnWhitespace; @@ -790,14 +822,16 @@ public class TestQueryParser extends QueryParserTestBase { assertQueryEquals("guinea pig", new MockSynonymAnalyzer(), "((+guinea +pig) cavy)"); splitOnWhitespace = oldSplitOnWhitespace; } - + public void testWildcardAlone() throws ParseException { - //seems like crazy edge case, but can be useful in concordance + // seems like crazy edge case, but can be useful in concordance QueryParser parser = new QueryParser(FIELD, new ASCIIAnalyzer()); parser.setAllowLeadingWildcard(false); - expectThrows(ParseException.class, () -> { - parser.parse("*"); - }); + expectThrows( + ParseException.class, + () -> { + parser.parse("*"); + }); QueryParser parser2 = new QueryParser("*", new ASCIIAnalyzer()); parser2.setAllowLeadingWildcard(false); @@ -837,11 +871,13 @@ public class TestQueryParser extends QueryParserTestBase { parser.setAllowLeadingWildcard(true); assertEquals("*bersetzung uber*ung", parser.parse("*bersetzung über*ung").toString(FIELD)); parser.setAllowLeadingWildcard(false); - assertEquals("motley crue motl?* cru?", parser.parse("Mötley Cr\u00fce Mötl?* Crü?").toString(FIELD)); - assertEquals("renee zellweger ren?? zellw?ger", parser.parse("Renée Zellweger Ren?? Zellw?ger").toString(FIELD)); + assertEquals( + "motley crue motl?* cru?", parser.parse("Mötley Cr\u00fce Mötl?* Crü?").toString(FIELD)); + assertEquals( + "renee zellweger ren?? zellw?ger", + parser.parse("Renée Zellweger Ren?? 
Zellw?ger").toString(FIELD)); } - public void testPrefixQuery() throws ParseException { Analyzer a = new ASCIIAnalyzer(); QueryParser parser = new QueryParser(FIELD, a); @@ -860,12 +896,17 @@ public class TestQueryParser extends QueryParserTestBase { public void testFuzzyQuery() throws ParseException { Analyzer a = new ASCIIAnalyzer(); QueryParser parser = new QueryParser(FIELD, a); - assertEquals("ubersetzung ubersetzung~1", parser.parse("Übersetzung Übersetzung~0.9").toString(FIELD)); - assertEquals("motley crue motley~1 crue~2", parser.parse("Mötley Crüe Mötley~0.75 Crüe~0.5").toString(FIELD)); - assertEquals("renee zellweger renee~0 zellweger~2", parser.parse("Renée Zellweger Renée~0.9 Zellweger~").toString(FIELD)); + assertEquals( + "ubersetzung ubersetzung~1", parser.parse("Übersetzung Übersetzung~0.9").toString(FIELD)); + assertEquals( + "motley crue motley~1 crue~2", + parser.parse("Mötley Crüe Mötley~0.75 Crüe~0.5").toString(FIELD)); + assertEquals( + "renee zellweger renee~0 zellweger~2", + parser.parse("Renée Zellweger Renée~0.9 Zellweger~").toString(FIELD)); } - final static class FoldingFilter extends TokenFilter { + static final class FoldingFilter extends TokenFilter { final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); public FoldingFilter(TokenStream input) { @@ -877,18 +918,18 @@ public class TestQueryParser extends QueryParserTestBase { if (input.incrementToken()) { char term[] = termAtt.buffer(); for (int i = 0; i < term.length; i++) - switch(term[i]) { + switch (term[i]) { case 'ü': - term[i] = 'u'; + term[i] = 'u'; break; - case 'ö': - term[i] = 'o'; + case 'ö': + term[i] = 'o'; break; - case 'é': - term[i] = 'e'; + case 'é': + term[i] = 'e'; break; - case 'ï': - term[i] = 'i'; + case 'ï': + term[i] = 'i'; break; } return true; @@ -898,12 +939,13 @@ public class TestQueryParser extends QueryParserTestBase { } } - final static class ASCIIAnalyzer extends Analyzer { + static final class ASCIIAnalyzer extends Analyzer { @Override public TokenStreamComponents createComponents(String fieldName) { Tokenizer result = new MockTokenizer(MockTokenizer.WHITESPACE, true); return new TokenStreamComponents(result, new FoldingFilter(result)); } + @Override protected TokenStream normalize(String fieldName, TokenStream in) { return new FoldingFilter(new MockLowerCaseFilter(in)); @@ -922,23 +964,27 @@ public class TestQueryParser extends QueryParserTestBase { assertTrue(isAHit(qp.parse("เ*"), s, analyzer)); assertTrue(isAHit(qp.parse("เ??"), s, analyzer)); } - + // LUCENE-7533 public void test_splitOnWhitespace_with_autoGeneratePhraseQueries() { final QueryParser qp = new QueryParser(FIELD, new MockAnalyzer(random())); - expectThrows(IllegalArgumentException.class, () -> { - qp.setSplitOnWhitespace(false); - qp.setAutoGeneratePhraseQueries(true); - }); + expectThrows( + IllegalArgumentException.class, + () -> { + qp.setSplitOnWhitespace(false); + qp.setAutoGeneratePhraseQueries(true); + }); final QueryParser qp2 = new QueryParser(FIELD, new MockAnalyzer(random())); - expectThrows(IllegalArgumentException.class, () -> { - qp2.setSplitOnWhitespace(true); - qp2.setAutoGeneratePhraseQueries(true); - qp2.setSplitOnWhitespace(false); - }); + expectThrows( + IllegalArgumentException.class, + () -> { + qp2.setSplitOnWhitespace(true); + qp2.setAutoGeneratePhraseQueries(true); + qp2.setSplitOnWhitespace(false); + }); } - - private boolean isAHit(Query q, String content, Analyzer analyzer) throws IOException{ + + private boolean isAHit(Query q, String content, Analyzer 
analyzer) throws IOException { Directory ramDir = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), ramDir, analyzer); Document doc = new Document(); @@ -952,15 +998,14 @@ public class TestQueryParser extends QueryParserTestBase { writer.close(); DirectoryReader ir = DirectoryReader.open(ramDir); IndexSearcher is = new IndexSearcher(ir); - + long hits = is.count(q); ir.close(); ramDir.close(); - if (hits == 1){ + if (hits == 1) { return true; } else { return false; } - } } diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java index 68b3e395c68..1f3904ade50 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java @@ -17,7 +17,6 @@ package org.apache.lucene.queryparser.complexPhrase; import java.util.HashSet; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockSynonymAnalyzer; @@ -37,14 +36,14 @@ public class TestComplexPhraseQuery extends LuceneTestCase { Directory rd; Analyzer analyzer; DocData docsContent[] = { - new DocData("john smith", "1", "developer"), - new DocData("johathon smith", "2", "developer"), - new DocData("john percival smith", "3", "designer"), - new DocData("jackson waits tom", "4", "project manager"), - new DocData("johny perkins", "5", "orders pizza"), - new DocData("hapax neverson", "6", "never matches"), - new DocData("dog cigar", "7", "just for synonyms"), - new DocData("dogs don't smoke cigarettes", "8", "just for synonyms"), + new DocData("john smith", "1", "developer"), + new DocData("johathon smith", "2", "developer"), + new DocData("john percival smith", "3", "designer"), + new DocData("jackson waits tom", "4", "project manager"), + new DocData("johny perkins", "5", "orders pizza"), + new DocData("hapax neverson", "6", "never matches"), + new DocData("dog cigar", "7", "just for synonyms"), + new DocData("dogs don't smoke cigarettes", "8", "just for synonyms"), }; private IndexSearcher searcher; @@ -80,30 +79,29 @@ public class TestComplexPhraseQuery extends LuceneTestCase { } public void testSingleTermPhrase() throws Exception { - checkMatches("\"joh*\"","1,2,3,5"); - checkMatches("\"joh~\"","1,3,5"); - checkMatches("\"joh*\" \"tom\"", "1,2,3,4,5"); - checkMatches("+\"j*\" +\"tom\"", "4"); + checkMatches("\"joh*\"", "1,2,3,5"); + checkMatches("\"joh~\"", "1,3,5"); + checkMatches("\"joh*\" \"tom\"", "1,2,3,4,5"); + checkMatches("+\"j*\" +\"tom\"", "4"); checkMatches("\"jo*\" \"[sma TO smZ]\" ", "1,2,3,5,8"); - checkMatches("+\"j*hn\" +\"sm*h\"", "1,3"); + checkMatches("+\"j*hn\" +\"sm*h\"", "1,3"); } public void testSynonyms() throws Exception { - checkMatches("\"dogs\"","8"); + checkMatches("\"dogs\"", "8"); MockSynonymAnalyzer synonym = new MockSynonymAnalyzer(); - checkMatches("\"dogs\"","7,8",synonym); - // synonym is unidirectional - checkMatches("\"dog\"","7",synonym); - checkMatches("\"dogs cigar*\"",""); - checkMatches("\"dog cigar*\"","7"); - checkMatches("\"dogs cigar*\"","7", synonym); - checkMatches("\"dog cigar*\"","7", synonym); - checkMatches("\"dogs cigar*\"~2","7,8", synonym); + checkMatches("\"dogs\"", "7,8", synonym); // synonym is unidirectional - checkMatches("\"dog cigar*\"~2","7", synonym); - + checkMatches("\"dog\"", 
"7", synonym); + checkMatches("\"dogs cigar*\"", ""); + checkMatches("\"dog cigar*\"", "7"); + checkMatches("\"dogs cigar*\"", "7", synonym); + checkMatches("\"dog cigar*\"", "7", synonym); + checkMatches("\"dogs cigar*\"~2", "7,8", synonym); + // synonym is unidirectional + checkMatches("\"dog cigar*\"~2", "7", synonym); } - + public void testUnOrderedProximitySearches() throws Exception { inOrder = true; @@ -111,19 +109,19 @@ public class TestComplexPhraseQuery extends LuceneTestCase { inOrder = false; checkMatches("\"smith jo*\"~2", "1,2,3"); // un-ordered proximity - } private void checkBadQuery(String qString) { ComplexPhraseQueryParser qp = new ComplexPhraseQueryParser(defaultFieldName, analyzer); qp.setInOrder(inOrder); - expectThrows(Throwable.class, () -> { - qp.parse(qString); - }); + expectThrows( + Throwable.class, + () -> { + qp.parse(qString); + }); } - private void checkMatches(String qString, String expectedVals) - throws Exception { + private void checkMatches(String qString, String expectedVals) throws Exception { checkMatches(qString, expectedVals, analyzer); } @@ -138,8 +136,7 @@ public class TestComplexPhraseQuery extends LuceneTestCase { HashSet expecteds = new HashSet<>(); String[] vals = expectedVals.split(","); for (int i = 0; i < vals.length; i++) { - if (vals[i].length() > 0) - expecteds.add(vals[i]); + if (vals[i].length() > 0) expecteds.add(vals[i]); } TopDocs td = searcher.search(q, 10); @@ -147,15 +144,13 @@ public class TestComplexPhraseQuery extends LuceneTestCase { for (int i = 0; i < sd.length; i++) { Document doc = searcher.doc(sd[i].doc); String id = doc.get("id"); - assertTrue(qString + "matched doc#" + id + " not expected", expecteds - .contains(id)); + assertTrue(qString + "matched doc#" + id + " not expected", expecteds.contains(id)); expecteds.remove(id); } assertEquals(qString + " missing some matches ", 0, expecteds.size()); - } - + public void testFieldedQuery() throws Exception { checkMatches("name:\"john smith\"", "1"); checkMatches("name:\"j* smyth~\"", "1,2"); @@ -200,18 +195,20 @@ public class TestComplexPhraseQuery extends LuceneTestCase { q2 = qp.parse(qString); - // although the general contract of hashCode can't guarantee different values, if we only change one thing - // about a single query, it normally should result in a different value (and will with the current + // although the general contract of hashCode can't guarantee different values, if we only change + // one thing + // about a single query, it normally should result in a different value (and will with the + // current // implementation in ComplexPhraseQuery) assertTrue(q.hashCode() != q2.hashCode()); assertTrue(!q.equals(q2)); assertTrue(!q2.equals(q)); } - + @Override public void setUp() throws Exception { super.setUp(); - + analyzer = new MockAnalyzer(random()); rd = newDirectory(); IndexWriter w = new IndexWriter(rd, newIndexWriterConfig(analyzer)); @@ -238,7 +235,7 @@ public class TestComplexPhraseQuery extends LuceneTestCase { String name; String id; - + String role; public DocData(String name, String id, String role) { @@ -248,5 +245,4 @@ public class TestComplexPhraseQuery extends LuceneTestCase { this.role = role; } } - } diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/ext/ExtensionStub.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/ext/ExtensionStub.java index 2897b0fc729..3a0b80a6b37 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/ext/ExtensionStub.java +++ 
b/lucene/queryparser/src/test/org/apache/lucene/queryparser/ext/ExtensionStub.java @@ -25,8 +25,6 @@ class ExtensionStub extends ParserExtension { @Override public Query parse(ExtensionQuery components) throws ParseException { - return new TermQuery(new Term(components.getField(), components - .getRawQueryString())); + return new TermQuery(new Term(components.getField(), components.getRawQueryString())); } - } diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/ext/TestExtendableQueryParser.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/ext/TestExtendableQueryParser.java index 934a4dac254..bb131bc9a30 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/ext/TestExtendableQueryParser.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/ext/TestExtendableQueryParser.java @@ -17,7 +17,6 @@ package org.apache.lucene.queryparser.ext; import java.util.Locale; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; @@ -30,25 +29,22 @@ import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; -/** - * Testcase for the class {@link ExtendableQueryParser} - */ +/** Testcase for the class {@link ExtendableQueryParser} */ public class TestExtendableQueryParser extends TestQueryParser { - private static char[] DELIMITERS = new char[] { - Extensions.DEFAULT_EXTENSION_FIELD_DELIMITER, '-', '|' }; + private static char[] DELIMITERS = + new char[] {Extensions.DEFAULT_EXTENSION_FIELD_DELIMITER, '-', '|'}; @Override public QueryParser getParser(Analyzer a) throws Exception { return getParser(a, null); } - public QueryParser getParser(Analyzer a, Extensions extensions) - throws Exception { - if (a == null) - a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true); - QueryParser qp = extensions == null ? new ExtendableQueryParser( - getDefaultField(), a) : new ExtendableQueryParser( - getDefaultField(), a, extensions); + public QueryParser getParser(Analyzer a, Extensions extensions) throws Exception { + if (a == null) a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true); + QueryParser qp = + extensions == null + ? 
new ExtendableQueryParser(getDefaultField(), a) + : new ExtendableQueryParser(getDefaultField(), a, extensions); qp.setDefaultOperator(QueryParserBase.OR_OPERATOR); qp.setSplitOnWhitespace(splitOnWhitespace); return qp; @@ -58,37 +54,38 @@ public class TestExtendableQueryParser extends TestQueryParser { Extensions ext = newExtensions(':'); ext.add("testExt", new ExtensionStub()); ExtendableQueryParser parser = (ExtendableQueryParser) getParser(null, ext); - expectThrows(ParseException.class, () -> { - parser.parse("aField:testExt:\"foo \\& bar\""); - }); + expectThrows( + ParseException.class, + () -> { + parser.parse("aField:testExt:\"foo \\& bar\""); + }); } public void testExtFieldUnqoted() throws Exception { for (int i = 0; i < DELIMITERS.length; i++) { Extensions ext = newExtensions(DELIMITERS[i]); ext.add("testExt", new ExtensionStub()); - ExtendableQueryParser parser = (ExtendableQueryParser) getParser(null, - ext); + ExtendableQueryParser parser = (ExtendableQueryParser) getParser(null, ext); String field = ext.buildExtensionField("testExt", "aField"); Query query = parser.parse(String.format(Locale.ROOT, "%s:foo bar", field)); - assertTrue("expected instance of BooleanQuery but was " - + query.getClass(), query instanceof BooleanQuery); + assertTrue( + "expected instance of BooleanQuery but was " + query.getClass(), + query instanceof BooleanQuery); BooleanQuery bquery = (BooleanQuery) query; BooleanClause[] clauses = bquery.clauses().toArray(new BooleanClause[0]); assertEquals(2, clauses.length); BooleanClause booleanClause = clauses[0]; query = booleanClause.getQuery(); - assertTrue("expected instance of TermQuery but was " + query.getClass(), - query instanceof TermQuery); + assertTrue( + "expected instance of TermQuery but was " + query.getClass(), query instanceof TermQuery); TermQuery tquery = (TermQuery) query; - assertEquals("aField", tquery.getTerm() - .field()); + assertEquals("aField", tquery.getTerm().field()); assertEquals("foo", tquery.getTerm().text()); booleanClause = clauses[1]; query = booleanClause.getQuery(); - assertTrue("expected instance of TermQuery but was " + query.getClass(), - query instanceof TermQuery); + assertTrue( + "expected instance of TermQuery but was " + query.getClass(), query instanceof TermQuery); tquery = (TermQuery) query; assertEquals(getDefaultField(), tquery.getTerm().field()); assertEquals("bar", tquery.getTerm().text()); @@ -99,12 +96,11 @@ public class TestExtendableQueryParser extends TestQueryParser { for (int i = 0; i < DELIMITERS.length; i++) { Extensions ext = newExtensions(DELIMITERS[i]); ext.add("testExt", new ExtensionStub()); - ExtendableQueryParser parser = (ExtendableQueryParser) getParser(null, - ext); + ExtendableQueryParser parser = (ExtendableQueryParser) getParser(null, ext); String field = ext.buildExtensionField("testExt"); Query parse = parser.parse(String.format(Locale.ROOT, "%s:\"foo \\& bar\"", field)); - assertTrue("expected instance of TermQuery but was " + parse.getClass(), - parse instanceof TermQuery); + assertTrue( + "expected instance of TermQuery but was " + parse.getClass(), parse instanceof TermQuery); TermQuery tquery = (TermQuery) parse; assertEquals(getDefaultField(), tquery.getTerm().field()); assertEquals("foo & bar", tquery.getTerm().text()); @@ -119,16 +115,14 @@ public class TestExtendableQueryParser extends TestQueryParser { for (int i = 0; i < DELIMITERS.length; i++) { Extensions ext = newExtensions(DELIMITERS[i]); ext.add("testExt", new ExtensionStub()); - ExtendableQueryParser parser = 
(ExtendableQueryParser) getParser(null, - ext); + ExtendableQueryParser parser = (ExtendableQueryParser) getParser(null, ext); String field = ext.buildExtensionField("testExt", "afield"); Query parse = parser.parse(String.format(Locale.ROOT, "%s:\"foo \\& bar\"", field)); - assertTrue("expected instance of TermQuery but was " + parse.getClass(), - parse instanceof TermQuery); + assertTrue( + "expected instance of TermQuery but was " + parse.getClass(), parse instanceof TermQuery); TermQuery tquery = (TermQuery) parse; assertEquals("afield", tquery.getTerm().field()); assertEquals("foo & bar", tquery.getTerm().text()); } } - } diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/ext/TestExtensions.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/ext/TestExtensions.java index 7899d501cb9..de1ed3c9117 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/ext/TestExtensions.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/ext/TestExtensions.java @@ -18,9 +18,7 @@ package org.apache.lucene.queryparser.ext; import org.apache.lucene.util.LuceneTestCase; -/** - * Testcase for the {@link Extensions} class - */ +/** Testcase for the {@link Extensions} class */ public class TestExtensions extends LuceneTestCase { private Extensions ext; @@ -59,18 +57,21 @@ public class TestExtensions extends LuceneTestCase { } public void testGetExtDelimiter() { - assertEquals(Extensions.DEFAULT_EXTENSION_FIELD_DELIMITER, this.ext - .getExtensionFieldDelimiter()); + assertEquals( + Extensions.DEFAULT_EXTENSION_FIELD_DELIMITER, this.ext.getExtensionFieldDelimiter()); ext = new Extensions('?'); assertEquals('?', this.ext.getExtensionFieldDelimiter()); } public void testEscapeExtension() { - assertEquals("abc\\:\\?\\{\\}\\[\\]\\\\\\(\\)\\+\\-\\!\\~", ext - .escapeExtensionField("abc:?{}[]\\()+-!~")); + assertEquals( + "abc\\:\\?\\{\\}\\[\\]\\\\\\(\\)\\+\\-\\!\\~", + ext.escapeExtensionField("abc:?{}[]\\()+-!~")); // should throw NPE - escape string is null - expectThrows(NullPointerException.class, () -> { - ext.escapeExtensionField(null); - }); + expectThrows( + NullPointerException.class, + () -> { + ext.escapeExtensionField(null); + }); } } diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/core/builders/TestQueryTreeBuilder.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/core/builders/TestQueryTreeBuilder.java index b8b5610f41d..8f13c6470a4 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/core/builders/TestQueryTreeBuilder.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/core/builders/TestQueryTreeBuilder.java @@ -26,45 +26,39 @@ import org.apache.lucene.util.LuceneTestCase; import org.junit.Test; public class TestQueryTreeBuilder extends LuceneTestCase { - + @Test public void testSetFieldBuilder() throws QueryNodeException { QueryTreeBuilder qtb = new QueryTreeBuilder(); qtb.setBuilder("field", new DummyBuilder()); Object result = qtb.build(new FieldQueryNode(new UnescapedCharSequence("field"), "foo", 0, 0)); assertEquals("OK", result); - + // LUCENE-4890 qtb = new QueryTreeBuilder(); qtb.setBuilder(DummyQueryNodeInterface.class, new DummyBuilder()); result = qtb.build(new DummyQueryNode()); assertEquals("OK", result); } - - private static interface DummyQueryNodeInterface extends QueryNode { - - } - - private static abstract class AbstractDummyQueryNode extends QueryNodeImpl implements DummyQueryNodeInterface { - - } - + + private 
static interface DummyQueryNodeInterface extends QueryNode {} + + private abstract static class AbstractDummyQueryNode extends QueryNodeImpl + implements DummyQueryNodeInterface {} + private static class DummyQueryNode extends AbstractDummyQueryNode { @Override public CharSequence toQueryString(EscapeQuerySyntax escapeSyntaxParser) { return "DummyQueryNode"; } - } - + private static class DummyBuilder implements QueryBuilder { @Override public Object build(QueryNode queryNode) throws QueryNodeException { return "OK"; } - } - } diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/core/nodes/TestQueryNode.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/core/nodes/TestQueryNode.java index b10d15f5c89..59edc6ca8fd 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/core/nodes/TestQueryNode.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/core/nodes/TestQueryNode.java @@ -18,33 +18,29 @@ package org.apache.lucene.queryparser.flexible.core.nodes; import java.util.Arrays; import java.util.Collections; - import org.apache.lucene.util.LuceneTestCase; public class TestQueryNode extends LuceneTestCase { - + /* LUCENE-2227 bug in QueryNodeImpl.add() */ public void testAddChildren() throws Exception { QueryNode nodeA = new FieldQueryNode("foo", "A", 0, 1); QueryNode nodeB = new FieldQueryNode("foo", "B", 1, 2); - BooleanQueryNode bq = new BooleanQueryNode( - Arrays.asList(nodeA)); + BooleanQueryNode bq = new BooleanQueryNode(Arrays.asList(nodeA)); bq.add(Arrays.asList(nodeB)); assertEquals(2, bq.getChildren().size()); } - + /* LUCENE-3045 bug in QueryNodeImpl.containsTag(String key)*/ public void testTags() throws Exception { QueryNode node = new FieldQueryNode("foo", "A", 0, 1); - + node.setTag("TaG", new Object()); assertTrue(node.getTagMap().size() > 0); assertTrue(node.containsTag("tAg")); assertTrue(node.getTag("tAg") != null); - } - /* LUCENE-5099 - QueryNodeProcessorImpl should set parent to null before returning on processing */ public void testRemoveFromParent() throws Exception { BooleanQueryNode booleanNode = new BooleanQueryNode(Collections.emptyList()); @@ -66,7 +62,7 @@ public class TestQueryNode extends LuceneTestCase { assertNull(fieldNode.getParent()); } - public void testRemoveChildren() throws Exception{ + public void testRemoveChildren() throws Exception { BooleanQueryNode booleanNode = new BooleanQueryNode(Collections.emptyList()); FieldQueryNode fieldNode = new FieldQueryNode("foo", "A", 0, 1); @@ -74,8 +70,7 @@ public class TestQueryNode extends LuceneTestCase { assertTrue(booleanNode.getChildren().size() == 1); booleanNode.removeChildren(fieldNode); - assertTrue(booleanNode.getChildren().size()==0); + assertTrue(booleanNode.getChildren().size() == 0); assertNull(fieldNode.getParent()); } - } diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/messages/TestNLS.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/messages/TestNLS.java index 611e917ea02..4cf2256291c 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/messages/TestNLS.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/messages/TestNLS.java @@ -17,16 +17,13 @@ package org.apache.lucene.queryparser.flexible.messages; import java.util.Locale; - import org.apache.lucene.util.LuceneTestCase; -/** - */ +/** */ public class TestNLS extends LuceneTestCase { public void testMessageLoading() { - Message 
invalidSyntax = new MessageImpl( - MessagesTestBundle.Q0001E_INVALID_SYNTAX, "XXX"); - /* + Message invalidSyntax = new MessageImpl(MessagesTestBundle.Q0001E_INVALID_SYNTAX, "XXX"); + /* * if the default locale is ja, you get ja as a fallback: * see ResourceBundle.html#getBundle(java.lang.String, java.util.Locale, java.lang.ClassLoader) */ @@ -35,71 +32,70 @@ public class TestNLS extends LuceneTestCase { } public void testMessageLoading_ja() { - Message invalidSyntax = new MessageImpl( - MessagesTestBundle.Q0001E_INVALID_SYNTAX, "XXX"); - assertEquals("構文エラー: XXX", invalidSyntax - .getLocalizedMessage(Locale.JAPANESE)); + Message invalidSyntax = new MessageImpl(MessagesTestBundle.Q0001E_INVALID_SYNTAX, "XXX"); + assertEquals("構文エラー: XXX", invalidSyntax.getLocalizedMessage(Locale.JAPANESE)); } public void testNLSLoading() { - String message = NLS - .getLocalizedMessage(MessagesTestBundle.Q0004E_INVALID_SYNTAX_ESCAPE_UNICODE_TRUNCATION, Locale.ENGLISH); - /* + String message = + NLS.getLocalizedMessage( + MessagesTestBundle.Q0004E_INVALID_SYNTAX_ESCAPE_UNICODE_TRUNCATION, Locale.ENGLISH); + /* * if the default locale is ja, you get ja as a fallback: * see ResourceBundle.html#getBundle(java.lang.String, java.util.Locale, java.lang.ClassLoader) */ if (!Locale.getDefault().getLanguage().equals("ja")) assertEquals("Truncated unicode escape sequence.", message); - message = NLS.getLocalizedMessage(MessagesTestBundle.Q0001E_INVALID_SYNTAX, Locale.ENGLISH, - "XXX"); - /* + message = + NLS.getLocalizedMessage(MessagesTestBundle.Q0001E_INVALID_SYNTAX, Locale.ENGLISH, "XXX"); + /* * if the default locale is ja, you get ja as a fallback: * see ResourceBundle.html#getBundle(java.lang.String, java.util.Locale, java.lang.ClassLoader) */ - if (!Locale.getDefault().getLanguage().equals("ja")) - assertEquals("Syntax Error: XXX", message); + if (!Locale.getDefault().getLanguage().equals("ja")) assertEquals("Syntax Error: XXX", message); } public void testNLSLoading_ja() { - String message = NLS.getLocalizedMessage( - MessagesTestBundle.Q0004E_INVALID_SYNTAX_ESCAPE_UNICODE_TRUNCATION, - Locale.JAPANESE); + String message = + NLS.getLocalizedMessage( + MessagesTestBundle.Q0004E_INVALID_SYNTAX_ESCAPE_UNICODE_TRUNCATION, Locale.JAPANESE); assertEquals("切り捨てられたユニコード・エスケープ・シーケンス。", message); - message = NLS.getLocalizedMessage(MessagesTestBundle.Q0001E_INVALID_SYNTAX, - Locale.JAPANESE, "XXX"); + message = + NLS.getLocalizedMessage(MessagesTestBundle.Q0001E_INVALID_SYNTAX, Locale.JAPANESE, "XXX"); assertEquals("構文エラー: XXX", message); } public void testNLSLoading_xx_XX() { Locale locale = new Locale("xx", "XX", ""); - String message = NLS.getLocalizedMessage( - MessagesTestBundle.Q0004E_INVALID_SYNTAX_ESCAPE_UNICODE_TRUNCATION, - locale); - /* + String message = + NLS.getLocalizedMessage( + MessagesTestBundle.Q0004E_INVALID_SYNTAX_ESCAPE_UNICODE_TRUNCATION, locale); + /* * if the default locale is ja, you get ja as a fallback: * see ResourceBundle.html#getBundle(java.lang.String, java.util.Locale, java.lang.ClassLoader) */ if (!Locale.getDefault().getLanguage().equals("ja")) assertEquals("Truncated unicode escape sequence.", message); - message = NLS.getLocalizedMessage(MessagesTestBundle.Q0001E_INVALID_SYNTAX, - locale, "XXX"); - /* + message = NLS.getLocalizedMessage(MessagesTestBundle.Q0001E_INVALID_SYNTAX, locale, "XXX"); + /* * if the default locale is ja, you get ja as a fallback: * see ResourceBundle.html#getBundle(java.lang.String, java.util.Locale, java.lang.ClassLoader) */ - if 
(!Locale.getDefault().getLanguage().equals("ja")) - assertEquals("Syntax Error: XXX", message); + if (!Locale.getDefault().getLanguage().equals("ja")) assertEquals("Syntax Error: XXX", message); } public void testMissingMessage() { Locale locale = Locale.ENGLISH; - String message = NLS.getLocalizedMessage( - MessagesTestBundle.Q0005E_MESSAGE_NOT_IN_BUNDLE, locale); + String message = + NLS.getLocalizedMessage(MessagesTestBundle.Q0005E_MESSAGE_NOT_IN_BUNDLE, locale); - assertEquals("Message with key:Q0005E_MESSAGE_NOT_IN_BUNDLE and locale: " - + locale.toLanguageTag() + " not found.", message); + assertEquals( + "Message with key:Q0005E_MESSAGE_NOT_IN_BUNDLE and locale: " + + locale.toLanguageTag() + + " not found.", + message); } } diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/precedence/TestPrecedenceQueryParser.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/precedence/TestPrecedenceQueryParser.java index f7b9bbc4818..7c5c1238847 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/precedence/TestPrecedenceQueryParser.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/precedence/TestPrecedenceQueryParser.java @@ -25,7 +25,6 @@ import java.util.HashMap; import java.util.Locale; import java.util.Map; import java.util.TimeZone; - import org.apache.lucene.analysis.*; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; @@ -53,18 +52,15 @@ import org.junit.AfterClass; import org.junit.BeforeClass; /** - *
<p> * This test case tests {@link PrecedenceQueryParser}. - * </p> - * <p> - * It contains all tests from {@link QueryParserTestBase} - * with some adjusted to fit the precedence requirement, plus some precedence test cases. - * </p> - * + * + * <p>
    It contains all tests from {@link QueryParserTestBase} with some adjusted to fit the + * precedence requirement, plus some precedence test cases. + * * @see QueryParserTestBase */ -//TODO: refactor this to actually extend that class (QueryParserTestBase), overriding the tests -//that it adjusts to fit the precedence requirement, adding its extra tests. +// TODO: refactor this to actually extend that class (QueryParserTestBase), overriding the tests +// that it adjusts to fit the precedence requirement, adding its extra tests. public class TestPrecedenceQueryParser extends LuceneTestCase { public static Analyzer qpAnalyzer; @@ -81,8 +77,8 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { public static final class QPTestFilter extends TokenFilter { /** - * Filter which discards the token 'stop' and which expands the token - * 'phrase' into 'phrase1 phrase2' + * Filter which discards the token 'stop' and which expands the token 'phrase' into 'phrase1 + * phrase2' */ public QPTestFilter(TokenStream in) { super(in); @@ -113,8 +109,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { termAtt.setEmpty().append("phrase1"); offsetAtt.setOffset(savedStart, savedEnd); return true; - } else if (!termAtt.toString().equals("stop")) - return true; + } else if (!termAtt.toString().equals("stop")) return true; return false; } @@ -132,7 +127,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { /** Filters MockTokenizer with StopFilter. */ @Override public final TokenStreamComponents createComponents(String fieldName) { - Tokenizer tokenizer = new MockTokenizer( MockTokenizer.SIMPLE, true); + Tokenizer tokenizer = new MockTokenizer(MockTokenizer.SIMPLE, true); return new TokenStreamComponents(tokenizer, new QPTestFilter(tokenizer)); } } @@ -146,8 +141,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { } public PrecedenceQueryParser getParser(Analyzer a) throws Exception { - if (a == null) - a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true); + if (a == null) a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true); PrecedenceQueryParser qp = new PrecedenceQueryParser(); qp.setAnalyzer(a); qp.setDefaultOperator(StandardQueryConfigHandler.Operator.OR); @@ -158,13 +152,11 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { return getParser(a).parse(query, "field"); } - public void assertQueryEquals(String query, Analyzer a, String result) - throws Exception { + public void assertQueryEquals(String query, Analyzer a, String result) throws Exception { Query q = getQuery(query, a); String s = q.toString("field"); if (!s.equals(result)) { - fail("Query /" + query + "/ yielded /" + s + "/, expecting /" + result - + "/"); + fail("Query /" + query + "/ yielded /" + s + "/, expecting /" + result + "/"); } } @@ -187,27 +179,23 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { Query q = qp.parse(query, "field"); String s = q.toString("field"); if (!s.equals(result)) { - fail("WildcardQuery /" + query + "/ yielded /" + s + "/, expecting /" - + result + "/"); + fail("WildcardQuery /" + query + "/ yielded /" + s + "/, expecting /" + result + "/"); } } public Query getQueryDOA(String query, Analyzer a) throws Exception { - if (a == null) - a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true); + if (a == null) a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true); PrecedenceQueryParser qp = new PrecedenceQueryParser(); qp.setAnalyzer(a); qp.setDefaultOperator(StandardQueryConfigHandler.Operator.AND); 
return qp.parse(query, "field"); } - public void assertQueryEqualsDOA(String query, Analyzer a, String result) - throws Exception { + public void assertQueryEqualsDOA(String query, Analyzer a, String result) throws Exception { Query q = getQueryDOA(query, a); String s = q.toString("field"); if (!s.equals(result)) { - fail("Query /" + query + "/ yielded /" + s + "/, expecting /" + result - + "/"); + fail("Query /" + query + "/ yielded /" + s + "/, expecting /" + result + "/"); } } @@ -229,10 +217,8 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { assertQueryEquals("a || b", null, "a b"); assertQueryEquals("+term -term term", null, "+term -term term"); - assertQueryEquals("foo:term AND field:anotherTerm", null, - "+foo:term +anotherterm"); - assertQueryEquals("term AND \"phrase phrase\"", null, - "+term +\"phrase phrase\""); + assertQueryEquals("foo:term AND field:anotherTerm", null, "+foo:term +anotherterm"); + assertQueryEquals("term AND \"phrase phrase\"", null, "+term +\"phrase phrase\""); assertQueryEquals("\"hello there\"", null, "\"hello there\""); assertTrue(getQuery("a AND b", null) instanceof BooleanQuery); assertTrue(getQuery("hello", null) instanceof TermQuery); @@ -246,12 +232,13 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { assertQueryEquals("\"germ term\"^2.0", null, "(\"germ term\")^2.0"); assertQueryEquals("\"term germ\"^2", null, "(\"term germ\")^2.0"); - assertQueryEquals("(foo OR bar) AND (baz OR boo)", null, - "+(foo bar) +(baz boo)"); + assertQueryEquals("(foo OR bar) AND (baz OR boo)", null, "+(foo bar) +(baz boo)"); assertQueryEquals("((a OR b) AND NOT c) OR d", null, "(+(a b) -c) d"); - assertQueryEquals("+(apple \"steve jobs\") -(foo bar baz)", null, - "+(apple \"steve jobs\") -(foo bar baz)"); - assertQueryEquals("+title:(dog OR cat) -author:\"bob dole\"", null, + assertQueryEquals( + "+(apple \"steve jobs\") -(foo bar baz)", null, "+(apple \"steve jobs\") -(foo bar baz)"); + assertQueryEquals( + "+title:(dog OR cat) -author:\"bob dole\"", + null, "+(title:dog title:cat) -author:\"bob dole\""); PrecedenceQueryParser qp = new PrecedenceQueryParser(); @@ -315,9 +302,11 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { fq = (FuzzyQuery) getQuery("term~", null); assertEquals(2, fq.getMaxEdits()); assertEquals(FuzzyQuery.defaultPrefixLength, fq.getPrefixLength()); - expectThrows(ParseException.class, () -> { - getQuery("term~1.1", null); // value > 1, throws exception - }); + expectThrows( + ParseException.class, + () -> { + getQuery("term~1.1", null); // value > 1, throws exception + }); assertTrue(getQuery("term*germ", null) instanceof WildcardQuery); /* @@ -350,12 +339,10 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { assertQueryEquals("term +stop term", qpAnalyzer, "term term"); assertQueryEquals("term -stop term", qpAnalyzer, "term term"); assertQueryEquals("drop AND stop AND roll", qpAnalyzer, "+drop +roll"); - assertQueryEquals("term phrase term", qpAnalyzer, - "term (phrase1 phrase2) term"); + assertQueryEquals("term phrase term", qpAnalyzer, "term (phrase1 phrase2) term"); // note the parens in this next assertion differ from the original // QueryParser behavior - assertQueryEquals("term AND NOT phrase term", qpAnalyzer, - "(+term -(phrase1 phrase2)) term"); + assertQueryEquals("term AND NOT phrase term", qpAnalyzer, "(+term -(phrase1 phrase2)) term"); assertMatchNoDocsQuery("stop", qpAnalyzer); assertMatchNoDocsQuery("stop OR stop AND stop", qpAnalyzer); assertTrue(getQuery("term term 
term", qpAnalyzer) instanceof BooleanQuery); @@ -372,8 +359,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { assertQueryEquals("[ a TO z] OR bar", null, "[a TO z] bar"); assertQueryEquals("[ a TO z] AND bar", null, "+[a TO z] +bar"); assertQueryEquals("( bar blar { a TO z}) ", null, "bar blar {a TO z}"); - assertQueryEquals("gack ( bar blar { a TO z}) ", null, - "gack (bar blar {a TO z})"); + assertQueryEquals("gack ( bar blar { a TO z}) ", null, "gack (bar blar {a TO z})"); } private String escapeDateString(String s) { @@ -390,8 +376,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { return DateTools.dateToString(df.parse(s), DateTools.Resolution.DAY); } - private String getLocalizedDate(int year, int month, int day, - boolean extendLastDate) { + private String getLocalizedDate(int year, int month, int day, boolean extendLastDate) { // we use the default Locale/TZ since LuceneTestCase randomizes it DateFormat df = DateFormat.getDateInstance(DateFormat.SHORT, Locale.getDefault()); Calendar calendar = new GregorianCalendar(TimeZone.getDefault(), Locale.getDefault()); @@ -431,15 +416,20 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { // for this field no field specific date resolution has been set, // so verify if the default resolution is used - assertDateRangeQueryEquals(qp, defaultField, startDate, endDate, - endDateExpected.getTime(), DateTools.Resolution.MILLISECOND); + assertDateRangeQueryEquals( + qp, + defaultField, + startDate, + endDate, + endDateExpected.getTime(), + DateTools.Resolution.MILLISECOND); // verify if field specific date resolutions are used for these two fields - assertDateRangeQueryEquals(qp, monthField, startDate, endDate, - endDateExpected.getTime(), DateTools.Resolution.MONTH); + assertDateRangeQueryEquals( + qp, monthField, startDate, endDate, endDateExpected.getTime(), DateTools.Resolution.MONTH); - assertDateRangeQueryEquals(qp, hourField, startDate, endDate, - endDateExpected.getTime(), DateTools.Resolution.HOUR); + assertDateRangeQueryEquals( + qp, hourField, startDate, endDate, endDateExpected.getTime(), DateTools.Resolution.HOUR); } /** for testing DateTools support */ @@ -454,27 +444,37 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { return DateTools.dateToString(d, resolution); } - public void assertQueryEquals(PrecedenceQueryParser qp, String field, String query, - String result) throws Exception { + public void assertQueryEquals(PrecedenceQueryParser qp, String field, String query, String result) + throws Exception { Query q = qp.parse(query, field); String s = q.toString(field); if (!s.equals(result)) { - fail("Query /" + query + "/ yielded /" + s + "/, expecting /" + result - + "/"); + fail("Query /" + query + "/ yielded /" + s + "/, expecting /" + result + "/"); } } - public void assertDateRangeQueryEquals(PrecedenceQueryParser qp, String field, - String startDate, String endDate, Date endDateInclusive, - DateTools.Resolution resolution) throws Exception { - assertQueryEquals(qp, field, field + ":[" + escapeDateString(startDate) - + " TO " + escapeDateString(endDate) + "]", "[" - + getDate(startDate, resolution) + " TO " - + getDate(endDateInclusive, resolution) + "]"); - assertQueryEquals(qp, field, field + ":{" + escapeDateString(startDate) - + " TO " + escapeDateString(endDate) + "}", "{" - + getDate(startDate, resolution) + " TO " - + getDate(endDate, resolution) + "}"); + public void assertDateRangeQueryEquals( + PrecedenceQueryParser qp, + String field, + String 
startDate, + String endDate, + Date endDateInclusive, + DateTools.Resolution resolution) + throws Exception { + assertQueryEquals( + qp, + field, + field + ":[" + escapeDateString(startDate) + " TO " + escapeDateString(endDate) + "]", + "[" + + getDate(startDate, resolution) + + " TO " + + getDate(endDateInclusive, resolution) + + "]"); + assertQueryEquals( + qp, + field, + field + ":{" + escapeDateString(startDate) + " TO " + escapeDateString(endDate) + "}", + "{" + getDate(startDate, resolution) + " TO " + getDate(endDate, resolution) + "}"); } public void testEscaped() throws Exception { @@ -526,8 +526,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { assertQueryEqualsDOA("+weltbank\r\n+worlbank", null, "+weltbank +worlbank"); assertQueryEqualsDOA("weltbank \r\n+worlbank", null, "+weltbank +worlbank"); assertQueryEqualsDOA("weltbank \r\n +worlbank", null, "+weltbank +worlbank"); - assertQueryEqualsDOA("weltbank \r \n +worlbank", null, - "+weltbank +worlbank"); + assertQueryEqualsDOA("weltbank \r \n +worlbank", null, "+weltbank +worlbank"); assertQueryEqualsDOA("+weltbank\t+worlbank", null, "+weltbank +worlbank"); assertQueryEqualsDOA("weltbank \t+worlbank", null, "+weltbank +worlbank"); @@ -559,25 +558,33 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { q = qp.parse("\"on\"^1.0", "field"); assertNotNull(q); - q = getParser(new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET)).parse("the^3", - "field"); + q = + getParser( + new MockAnalyzer( + random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET)) + .parse("the^3", "field"); assertNotNull(q); } public void testException() throws Exception { - expectThrows(QueryNodeParseException.class, () -> { - assertQueryEquals("\"some phrase", null, "abc"); - }); + expectThrows( + QueryNodeParseException.class, + () -> { + assertQueryEquals("\"some phrase", null, "abc"); + }); } // ParseException expected due to too many boolean clauses public void testBooleanQuery() throws Exception { IndexSearcher.setMaxClauseCount(2); - expectThrows(QueryNodeException.class, () -> { - getParser(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)).parse("one two three", "field"); - }); + expectThrows( + QueryNodeException.class, + () -> { + getParser(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)) + .parse("one two three", "field"); + }); } - + // LUCENE-792 public void testNOT() throws Exception { Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); @@ -585,11 +592,12 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { } /** - * This test differs from the original QueryParser, showing how the precedence - * issue has been corrected. + * This test differs from the original QueryParser, showing how the precedence issue has been + * corrected. 
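The corrected behavior is easier to read outside the diff. Below is a minimal, self-contained sketch (not part of this patch) of how PrecedenceQueryParser resolves mixed AND/OR input; the WhitespaceAnalyzer choice is an illustrative assumption, any analyzer works:

    import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
    import org.apache.lucene.queryparser.flexible.precedence.PrecedenceQueryParser;
    import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler;
    import org.apache.lucene.search.Query;

    public class PrecedenceSketch {
      public static void main(String[] args) throws Exception {
        PrecedenceQueryParser parser = new PrecedenceQueryParser();
        parser.setAnalyzer(new WhitespaceAnalyzer());
        parser.setDefaultOperator(StandardQueryConfigHandler.Operator.OR);
        // AND binds tighter than OR, so this is equivalent to (A AND B) OR (C AND D),
        // which is exactly what the testPrecedence() method below asserts.
        Query q = parser.parse("A AND B OR C AND D", "field");
        System.out.println(q.toString("field")); // prints something like: (+A +B) (+C +D)
      }
    }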
*/ public void testPrecedence() throws Exception { - PrecedenceQueryParser parser = getParser(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); + PrecedenceQueryParser parser = + getParser(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); Query query1 = parser.parse("A AND B OR C AND D", "field"); Query query2 = parser.parse("(A AND B) OR (C AND D)", "field"); assertEquals(query1, query2); @@ -613,7 +621,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { query1 = parser.parse("A OR NOT B AND C", "field"); query2 = parser.parse("A (-B +C)", "field"); assertEquals(query1, query2); - + parser.setDefaultOperator(StandardQueryConfigHandler.Operator.AND); query1 = parser.parse("A AND B OR C AND D", "field"); query2 = parser.parse("(A AND B) OR (C AND D)", "field"); @@ -634,7 +642,6 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { query1 = parser.parse("A AND NOT B OR C", "field"); query2 = parser.parse("(+A -B) OR C", "field"); assertEquals(query1, query2); - } @Override @@ -642,5 +649,4 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { IndexSearcher.setMaxClauseCount(originalMaxClauses); super.tearDown(); } - } diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/SpanOrQueryNodeBuilder.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/SpanOrQueryNodeBuilder.java index aa21a274784..c214c921656 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/SpanOrQueryNodeBuilder.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/SpanOrQueryNodeBuilder.java @@ -17,7 +17,6 @@ package org.apache.lucene.queryparser.flexible.spans; import java.util.List; - import org.apache.lucene.queryparser.flexible.core.QueryNodeException; import org.apache.lucene.queryparser.flexible.core.builders.QueryTreeBuilder; import org.apache.lucene.queryparser.flexible.core.nodes.BooleanQueryNode; @@ -29,7 +28,6 @@ import org.apache.lucene.search.spans.SpanQuery; /** * This builder creates {@link SpanOrQuery}s from a {@link BooleanQueryNode}.
    *
    - * * It assumes that the {@link BooleanQueryNode} instance has at least one child. */ public class SpanOrQueryNodeBuilder implements StandardQueryBuilder { @@ -45,12 +43,9 @@ public class SpanOrQueryNodeBuilder implements StandardQueryBuilder { int i = 0; for (QueryNode child : children) { - spanQueries[i++] = (SpanQuery) child - .getTag(QueryTreeBuilder.QUERY_TREE_BUILDER_TAGID); + spanQueries[i++] = (SpanQuery) child.getTag(QueryTreeBuilder.QUERY_TREE_BUILDER_TAGID); } return new SpanOrQuery(spanQueries); - } - } diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/SpanTermQueryNodeBuilder.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/SpanTermQueryNodeBuilder.java index d498308dd8e..d85deeee751 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/SpanTermQueryNodeBuilder.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/SpanTermQueryNodeBuilder.java @@ -23,19 +23,14 @@ import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; import org.apache.lucene.queryparser.flexible.standard.builders.StandardQueryBuilder; import org.apache.lucene.search.spans.SpanTermQuery; -/** - * This builder creates {@link SpanTermQuery}s from a {@link FieldQueryNode} - * object. - */ +/** This builder creates {@link SpanTermQuery}s from a {@link FieldQueryNode} object. */ public class SpanTermQueryNodeBuilder implements StandardQueryBuilder { @Override public SpanTermQuery build(QueryNode node) throws QueryNodeException { FieldQueryNode fieldQueryNode = (FieldQueryNode) node; - return new SpanTermQuery(new Term(fieldQueryNode.getFieldAsString(), - fieldQueryNode.getTextAsString())); - + return new SpanTermQuery( + new Term(fieldQueryNode.getFieldAsString(), fieldQueryNode.getTextAsString())); } - } diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/SpansQueryConfigHandler.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/SpansQueryConfigHandler.java index 0d0a2ce2bc5..3827750dfd7 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/SpansQueryConfigHandler.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/SpansQueryConfigHandler.java @@ -23,13 +23,12 @@ import org.apache.lucene.queryparser.flexible.core.config.QueryConfigHandler; /** * This query config handler only adds the {@link UniqueFieldAttribute} to it.
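The ConfigurationKey idiom used by this handler recurs throughout the flexible query parser, so a short sketch may help (not part of this patch; the class and key names are illustrative). Declaring the key with a type parameter, which the test code here skips, makes get() and set() compile-time checked:

    import org.apache.lucene.queryparser.flexible.core.config.ConfigurationKey;
    import org.apache.lucene.queryparser.flexible.core.config.QueryConfigHandler;

    public class MyQueryConfigHandler extends QueryConfigHandler {
      // Typed key: the compiler enforces that only Strings are stored under it.
      public static final ConfigurationKey<String> MY_UNIQUE_FIELD = ConfigurationKey.newInstance();
    }

    // usage:
    //   MyQueryConfigHandler config = new MyQueryConfigHandler();
    //   config.set(MyQueryConfigHandler.MY_UNIQUE_FIELD, "contents");
    //   String field = config.get(MyQueryConfigHandler.MY_UNIQUE_FIELD);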
    *
    - * * It does not return any configuration for a field in specific. */ public class SpansQueryConfigHandler extends QueryConfigHandler { - - final public static ConfigurationKey UNIQUE_FIELD = ConfigurationKey.newInstance(); - + + public static final ConfigurationKey UNIQUE_FIELD = ConfigurationKey.newInstance(); + public SpansQueryConfigHandler() { // empty constructor } @@ -39,7 +38,5 @@ public class SpansQueryConfigHandler extends QueryConfigHandler { // there is no field configuration, always return null return null; - } - } diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/SpansQueryTreeBuilder.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/SpansQueryTreeBuilder.java index 7ad2c0df32f..c91cb42ec91 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/SpansQueryTreeBuilder.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/SpansQueryTreeBuilder.java @@ -25,27 +25,21 @@ import org.apache.lucene.queryparser.flexible.standard.builders.StandardQueryBui import org.apache.lucene.search.spans.SpanQuery; /** - * Sets up a query tree builder to build a span query tree from a query node - * tree.
    + * Sets up a query tree builder to build a span query tree from a query node tree.
    *
    - * * The defined map is:
    * - every BooleanQueryNode instance is delegated to the SpanOrQueryNodeBuilder
    * - every FieldQueryNode instance is delegated to the SpanTermQueryNodeBuilder
    - * */ -public class SpansQueryTreeBuilder extends QueryTreeBuilder implements - StandardQueryBuilder { +public class SpansQueryTreeBuilder extends QueryTreeBuilder implements StandardQueryBuilder { public SpansQueryTreeBuilder() { setBuilder(BooleanQueryNode.class, new SpanOrQueryNodeBuilder()); setBuilder(FieldQueryNode.class, new SpanTermQueryNodeBuilder()); - } @Override public SpanQuery build(QueryNode queryTree) throws QueryNodeException { return (SpanQuery) super.build(queryTree); } - } diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/SpansValidatorQueryNodeProcessor.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/SpansValidatorQueryNodeProcessor.java index bc67dadf82d..884aeed4396 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/SpansValidatorQueryNodeProcessor.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/SpansValidatorQueryNodeProcessor.java @@ -17,8 +17,6 @@ package org.apache.lucene.queryparser.flexible.spans; import java.util.List; - -import org.apache.lucene.queryparser.flexible.messages.MessageImpl; import org.apache.lucene.queryparser.flexible.core.QueryNodeException; import org.apache.lucene.queryparser.flexible.core.messages.QueryParserMessages; import org.apache.lucene.queryparser.flexible.core.nodes.AndQueryNode; @@ -27,16 +25,15 @@ import org.apache.lucene.queryparser.flexible.core.nodes.FieldQueryNode; import org.apache.lucene.queryparser.flexible.core.nodes.OrQueryNode; import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; import org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessorImpl; +import org.apache.lucene.queryparser.flexible.messages.MessageImpl; /** - * Validates every query node in a query node tree. This processor will pass - * fine if the query nodes are only {@link BooleanQueryNode}s, - * {@link OrQueryNode}s or {@link FieldQueryNode}s, otherwise an exception will - * be thrown.
+ * Validates every query node in a query node tree. This processor succeeds only if the query
+ * nodes are {@link BooleanQueryNode}s, {@link OrQueryNode}s, or {@link FieldQueryNode}s;
+ * otherwise an exception is thrown.
    *
    - * - * If they are {@link AndQueryNode} or an instance of anything else that - * implements {@link FieldQueryNode} the exception will also be thrown. + * If they are {@link AndQueryNode} or an instance of anything else that implements {@link + * FieldQueryNode} the exception will also be thrown. */ public class SpansValidatorQueryNodeProcessor extends QueryNodeProcessorImpl { @@ -44,28 +41,22 @@ public class SpansValidatorQueryNodeProcessor extends QueryNodeProcessorImpl { protected QueryNode postProcessNode(QueryNode node) throws QueryNodeException { return node; - } @Override protected QueryNode preProcessNode(QueryNode node) throws QueryNodeException { - if (!((node instanceof BooleanQueryNode && !(node instanceof AndQueryNode)) || node - .getClass() == FieldQueryNode.class)) { - throw new QueryNodeException(new MessageImpl( - QueryParserMessages.NODE_ACTION_NOT_SUPPORTED)); + if (!((node instanceof BooleanQueryNode && !(node instanceof AndQueryNode)) + || node.getClass() == FieldQueryNode.class)) { + throw new QueryNodeException(new MessageImpl(QueryParserMessages.NODE_ACTION_NOT_SUPPORTED)); } return node; - } @Override - protected List setChildrenOrder(List children) - throws QueryNodeException { + protected List setChildrenOrder(List children) throws QueryNodeException { return children; - } - } diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/TestSpanQueryParser.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/TestSpanQueryParser.java index 0bcfedf067c..df850683031 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/TestSpanQueryParser.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/TestSpanQueryParser.java @@ -23,64 +23,55 @@ import org.apache.lucene.queryparser.flexible.core.parser.SyntaxParser; import org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessorPipeline; import org.apache.lucene.queryparser.flexible.standard.parser.StandardSyntaxParser; import org.apache.lucene.queryparser.flexible.standard.processors.WildcardQueryNodeProcessor; +import org.apache.lucene.search.Query; import org.apache.lucene.search.spans.SpanOrQuery; import org.apache.lucene.search.spans.SpanQuery; import org.apache.lucene.search.spans.SpanTermQuery; -import org.apache.lucene.search.Query; import org.apache.lucene.util.LuceneTestCase; /** * This test case demonstrates how the new query parser can be used.
    *
- *
- * It tests queries likes "term", "field:term" "term1 term2" "term1 OR term2",
- * which are all already supported by the current syntax parser (
- * {@link StandardSyntaxParser}).
+ * It tests queries like "term", "field:term", "term1 term2", and "term1 OR term2", which are all
+ * already supported by the current syntax parser ({@link StandardSyntaxParser}).
    *
- *
- * The goals is to create a new query parser that supports only the pair
- * "field:term" or a list of pairs separated or not by an OR operator, and from
- * this query generate {@link SpanQuery} objects instead of the regular
- * {@link Query} objects. Basically, every pair will be converted to a
- * {@link SpanTermQuery} object and if there are more than one pair they will be
- * grouped by an {@link OrQueryNode}.
+ * The goal is to create a new query parser that supports only the pair "field:term", or a list of
+ * pairs optionally separated by an OR operator, and from this query to generate {@link SpanQuery}
+ * objects instead of the regular {@link Query} objects. Basically, every pair will be converted to
+ * a {@link SpanTermQuery} object, and if there is more than one pair they will be grouped by an
+ * {@link OrQueryNode}.
    *
- *
- * Another functionality that will be added is the ability to convert every
- * field defined in the query to an unique specific field.
+ * Another feature that will be added is the ability to convert every field defined in the query
+ * to a unique, specific field.
    *
- *
- * The query generation is divided in three different steps: parsing (syntax),
- * processing (semantic) and building.
+ * The query generation is divided into three different steps: parsing (syntax), processing
+ * (semantics), and building.
    *
- *
- * The parsing phase, as already mentioned will be performed by the current
- * query parser: {@link StandardSyntaxParser}.
+ * The parsing phase, as already mentioned, will be performed by the current query parser: {@link
+ * StandardSyntaxParser}.
    *
- *
- * The processing phase will be performed by a processor pipeline which is
- * compound by 2 processors: {@link SpansValidatorQueryNodeProcessor} and
- * {@link UniqueFieldQueryNodeProcessor}.
- *
+ * The processing phase will be performed by a processor pipeline composed of two processors:
+ * {@link SpansValidatorQueryNodeProcessor} and {@link UniqueFieldQueryNodeProcessor}.
+ *
 *

    - * 
    - *   {@link SpansValidatorQueryNodeProcessor}: as it's going to use the current 
    + *
    + *   {@link SpansValidatorQueryNodeProcessor}: as it's going to use the current
      *   query parser to parse the syntax, it will support more features than we want,
      *   this processor basically validates the query node tree generated by the parser
    - *   and just let got through the elements we want, all the other elements as 
+ *   and just lets through the elements we want; all the other elements, such as
      *   wildcards, range queries, etc...if found, an exception is thrown.
    - *   
    + *
      *   {@link UniqueFieldQueryNodeProcessor}: this processor will take care of reading
      *   what is the "unique field" from the configuration and convert every field defined
      *   in every pair to this "unique field". For that, a {@link SpansQueryConfigHandler} is
      *   used, which has the {@link UniqueFieldAttribute} defined in it.
      * 
- *
- * The building phase is performed by the {@link SpansQueryTreeBuilder}, which
- * basically contains a map that defines which builder will be used to generate
- * {@link SpanQuery} objects from {@link QueryNode} objects.
+ *
+ * The building phase is performed by the {@link SpansQueryTreeBuilder}, which basically contains a
+ * map that defines which builder will be used to generate {@link SpanQuery} objects from {@link
+ * QueryNode} objects.
    *
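Read together, the three phases amount to only a few lines of wiring. A minimal sketch (not part of this patch), assuming the helper classes defined in this test package (SpansQueryConfigHandler, SpansValidatorQueryNodeProcessor, UniqueFieldQueryNodeProcessor, SpansQueryTreeBuilder) are on the classpath:

    import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
    import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
    import org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessorPipeline;
    import org.apache.lucene.queryparser.flexible.standard.parser.StandardSyntaxParser;
    import org.apache.lucene.search.spans.SpanQuery;

    public class SpanParsingSketch {
      public static SpanQuery parseToSpanQuery(String input, String uniqueField)
          throws QueryNodeException {
        // configuration: the single field every "field:term" pair is rewritten to
        SpansQueryConfigHandler config = new SpansQueryConfigHandler();
        config.set(SpansQueryConfigHandler.UNIQUE_FIELD, uniqueField);

        // parsing (syntax): reuse the standard syntax parser
        QueryNode tree = new StandardSyntaxParser().parse(input, "defaultField");

        // processing (semantics): validate the node tree, then rewrite field names
        QueryNodeProcessorPipeline pipeline = new QueryNodeProcessorPipeline(config);
        pipeline.add(new SpansValidatorQueryNodeProcessor());
        pipeline.add(new UniqueFieldQueryNodeProcessor());
        tree = pipeline.process(tree);

        // building: map each node type to a builder that emits a SpanQuery
        return new SpansQueryTreeBuilder().build(tree);
      }
    }

With the unique field set to "field", parseToSpanQuery("term1 OR term2", "field") should produce roughly spanOr([field:term1, field:term2]), in line with the assertions in this test.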
    - * + * * @see SpansQueryConfigHandler * @see SpansQueryTreeBuilder * @see SpansValidatorQueryNodeProcessor @@ -113,29 +104,25 @@ public class TestSpanQueryParser extends LuceneTestCase { this.spansQueryTreeBuilder = new SpansQueryTreeBuilder(); // set up the processor pipeline - this.spanProcessorPipeline - .setQueryConfigHandler(this.spanQueryConfigHandler); + this.spanProcessorPipeline.setQueryConfigHandler(this.spanQueryConfigHandler); this.spanProcessorPipeline.add(new WildcardQueryNodeProcessor()); this.spanProcessorPipeline.add(new SpansValidatorQueryNodeProcessor()); this.spanProcessorPipeline.add(new UniqueFieldQueryNodeProcessor()); - } public SpanQuery getSpanQuery(CharSequence query) throws QueryNodeException { return getSpanQuery("", query); } - public SpanQuery getSpanQuery(String uniqueField, CharSequence query) - throws QueryNodeException { - + public SpanQuery getSpanQuery(String uniqueField, CharSequence query) throws QueryNodeException { + this.spanQueryConfigHandler.set(SpansQueryConfigHandler.UNIQUE_FIELD, uniqueField); QueryNode queryTree = this.queryParser.parse(query, "defaultField"); queryTree = this.spanProcessorPipeline.process(queryTree); return this.spansQueryTreeBuilder.build(queryTree); - } public void testTermSpans() throws Exception { @@ -144,57 +131,64 @@ public class TestSpanQueryParser extends LuceneTestCase { assertTrue(getSpanQuery("field:term") instanceof SpanTermQuery); assertTrue(getSpanQuery("term") instanceof SpanTermQuery); - } public void testUniqueField() throws Exception { assertEquals(getSpanQuery("field", "term").toString(), "field:term"); assertEquals(getSpanQuery("field", "field:term").toString(), "field:term"); - assertEquals(getSpanQuery("field", "anotherField:term").toString(), - "field:term"); - + assertEquals(getSpanQuery("field", "anotherField:term").toString(), "field:term"); } public void testOrSpans() throws Exception { - assertEquals(getSpanQuery("term1 term2").toString(), - "spanOr([term1, term2])"); - assertEquals(getSpanQuery("term1 OR term2").toString(), - "spanOr([term1, term2])"); + assertEquals(getSpanQuery("term1 term2").toString(), "spanOr([term1, term2])"); + assertEquals(getSpanQuery("term1 OR term2").toString(), "spanOr([term1, term2])"); assertTrue(getSpanQuery("term1 term2") instanceof SpanOrQuery); assertTrue(getSpanQuery("term1 term2") instanceof SpanOrQuery); - } public void testQueryValidator() throws QueryNodeException { - expectThrows(QueryNodeException.class, () -> { - getSpanQuery("term*"); // wildcard queries should not be supported - }); + expectThrows( + QueryNodeException.class, + () -> { + getSpanQuery("term*"); // wildcard queries should not be supported + }); - expectThrows(QueryNodeException.class, () -> { - getSpanQuery("[a TO z]"); // range queries should not be supported - }); + expectThrows( + QueryNodeException.class, + () -> { + getSpanQuery("[a TO z]"); // range queries should not be supported + }); - expectThrows(QueryNodeException.class, () -> { - getSpanQuery("a~0.5"); // boost queries should not be supported - }); + expectThrows( + QueryNodeException.class, + () -> { + getSpanQuery("a~0.5"); // boost queries should not be supported + }); - expectThrows(QueryNodeException.class, () -> { - getSpanQuery("a^0.5"); // fuzzy queries should not be supported - }); + expectThrows( + QueryNodeException.class, + () -> { + getSpanQuery("a^0.5"); // fuzzy queries should not be supported + }); - expectThrows(QueryNodeException.class, () -> { - getSpanQuery("\"a b\""); // quoted queries should not be 
supported - }); + expectThrows( + QueryNodeException.class, + () -> { + getSpanQuery("\"a b\""); // quoted queries should not be supported + }); - expectThrows(QueryNodeException.class, () -> { - getSpanQuery("(a b)"); // parenthesized queries should not be supported - }); + expectThrows( + QueryNodeException.class, + () -> { + getSpanQuery("(a b)"); // parenthesized queries should not be supported + }); - expectThrows(QueryNodeException.class, () -> { - getSpanQuery("a AND b"); // AND queries should not be supported - }); + expectThrows( + QueryNodeException.class, + () -> { + getSpanQuery("a AND b"); // AND queries should not be supported + }); } - } diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/TestSpanQueryParserSimpleSample.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/TestSpanQueryParserSimpleSample.java index 65bb72ba2b9..ea0d759b027 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/TestSpanQueryParserSimpleSample.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/TestSpanQueryParserSimpleSample.java @@ -30,57 +30,47 @@ import org.apache.lucene.util.LuceneTestCase; /** * This test case demonstrates how the new query parser can be used.
    *
- *
- * It tests queries likes "term", "field:term" "term1 term2" "term1 OR term2",
- * which are all already supported by the current syntax parser (
- * {@link StandardSyntaxParser}).
+ * It tests queries like "term", "field:term", "term1 term2", and "term1 OR term2", which are all
+ * already supported by the current syntax parser ({@link StandardSyntaxParser}).
    *
- *
- * The goals is to create a new query parser that supports only the pair
- * "field:term" or a list of pairs separated or not by an OR operator, and from
- * this query generate {@link SpanQuery} objects instead of the regular
- * {@link Query} objects. Basically, every pair will be converted to a
- * {@link SpanTermQuery} object and if there are more than one pair they will be
- * grouped by an {@link OrQueryNode}.
+ * The goal is to create a new query parser that supports only the pair "field:term", or a list of
+ * pairs optionally separated by an OR operator, and from this query to generate {@link SpanQuery}
+ * objects instead of the regular {@link Query} objects. Basically, every pair will be converted to
+ * a {@link SpanTermQuery} object, and if there is more than one pair they will be grouped by an
+ * {@link OrQueryNode}.
    *
- *
- * Another functionality that will be added is the ability to convert every
- * field defined in the query to an unique specific field.
+ * Another feature that will be added is the ability to convert every field defined in the query
+ * to a unique, specific field.
    *
- *
- * The query generation is divided in three different steps: parsing (syntax),
- * processing (semantic) and building.
+ * The query generation is divided into three different steps: parsing (syntax), processing
+ * (semantics), and building.
    *
- *
- * The parsing phase, as already mentioned will be performed by the current
- * query parser: {@link StandardSyntaxParser}.
+ * The parsing phase, as already mentioned, will be performed by the current query parser: {@link
+ * StandardSyntaxParser}.
    *
- *
- * The processing phase will be performed by a processor pipeline which is
- * compound by 2 processors: {@link SpansValidatorQueryNodeProcessor} and
- * {@link UniqueFieldQueryNodeProcessor}.
- *
+ * The processing phase will be performed by a processor pipeline composed of two processors:
+ * {@link SpansValidatorQueryNodeProcessor} and {@link UniqueFieldQueryNodeProcessor}.
+ *
 *
    - * 
    - *   {@link SpansValidatorQueryNodeProcessor}: as it's going to use the current 
    + *
    + *   {@link SpansValidatorQueryNodeProcessor}: as it's going to use the current
      *   query parser to parse the syntax, it will support more features than we want,
      *   this processor basically validates the query node tree generated by the parser
    - *   and just let got through the elements we want, all the other elements as 
+ *   and just lets through the elements we want; all the other elements, such as
      *   wildcards, range queries, etc...if found, an exception is thrown.
    - *   
    + *
      *   {@link UniqueFieldQueryNodeProcessor}: this processor will take care of reading
      *   what is the "unique field" from the configuration and convert every field defined
      *   in every pair to this "unique field". For that, a {@link SpansQueryConfigHandler} is
      *   used, which has the {@link UniqueFieldAttribute} defined in it.
      * 
- *
- * The building phase is performed by the {@link SpansQueryTreeBuilder}, which
- * basically contains a map that defines which builder will be used to generate
- * {@link SpanQuery} objects from {@link QueryNode} objects.
+ *
+ * The building phase is performed by the {@link SpansQueryTreeBuilder}, which basically contains a
+ * map that defines which builder will be used to generate {@link SpanQuery} objects from {@link
+ * QueryNode} objects.
    *
    - * + * * @see TestSpanQueryParser for a more advanced example - * * @see SpansQueryConfigHandler * @see SpansQueryTreeBuilder * @see SpansValidatorQueryNodeProcessor @@ -89,7 +79,6 @@ import org.apache.lucene.util.LuceneTestCase; * @see StandardSyntaxParser * @see UniqueFieldQueryNodeProcessor * @see UniqueFieldAttribute - * */ public class TestSpanQueryParserSimpleSample extends LuceneTestCase { @@ -106,8 +95,8 @@ public class TestSpanQueryParserSimpleSample extends LuceneTestCase { // set up the processor pipeline with the ConfigHandler // and create the pipeline for this simple demo - QueryNodeProcessorPipeline spanProcessorPipeline = new QueryNodeProcessorPipeline( - spanQueryConfigHandler); + QueryNodeProcessorPipeline spanProcessorPipeline = + new QueryNodeProcessorPipeline(spanQueryConfigHandler); // @see SpansValidatorQueryNodeProcessor spanProcessorPipeline.add(new SpansValidatorQueryNodeProcessor()); // @see UniqueFieldQueryNodeProcessor @@ -130,7 +119,5 @@ public class TestSpanQueryParserSimpleSample extends LuceneTestCase { assertTrue(spanquery instanceof SpanTermQuery); assertEquals(spanquery.toString(), "index:text"); - } - } diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/UniqueFieldAttribute.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/UniqueFieldAttribute.java index 79824b2baa9..b7c454fc5c6 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/UniqueFieldAttribute.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/UniqueFieldAttribute.java @@ -20,10 +20,9 @@ import org.apache.lucene.queryparser.flexible.core.nodes.FieldableNode; import org.apache.lucene.util.Attribute; /** - * This attribute is used by the {@link UniqueFieldQueryNodeProcessor} - * processor. It holds a value that defines which is the unique field name that - * should be set in every {@link FieldableNode}. - * + * This attribute is used by the {@link UniqueFieldQueryNodeProcessor} processor. It holds a value + * that defines which is the unique field name that should be set in every {@link FieldableNode}. + * * @see UniqueFieldQueryNodeProcessor */ public interface UniqueFieldAttribute extends Attribute { diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/UniqueFieldAttributeImpl.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/UniqueFieldAttributeImpl.java index 5530858195c..058a8c89450 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/UniqueFieldAttributeImpl.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/UniqueFieldAttributeImpl.java @@ -21,14 +21,12 @@ import org.apache.lucene.util.AttributeImpl; import org.apache.lucene.util.AttributeReflector; /** - * This attribute is used by the {@link UniqueFieldQueryNodeProcessor} - * processor. It holds a value that defines which is the unique field name that - * should be set in every {@link FieldableNode}. - * + * This attribute is used by the {@link UniqueFieldQueryNodeProcessor} processor. It holds a value + * that defines which is the unique field name that should be set in every {@link FieldableNode}. 
+ * * @see UniqueFieldQueryNodeProcessor */ -public class UniqueFieldAttributeImpl extends AttributeImpl implements - UniqueFieldAttribute { +public class UniqueFieldAttributeImpl extends AttributeImpl implements UniqueFieldAttribute { private CharSequence uniqueField; @@ -62,7 +60,6 @@ public class UniqueFieldAttributeImpl extends AttributeImpl implements UniqueFieldAttributeImpl uniqueFieldAttr = (UniqueFieldAttributeImpl) target; uniqueFieldAttr.uniqueField = uniqueField.toString(); - } @Override @@ -70,13 +67,10 @@ public class UniqueFieldAttributeImpl extends AttributeImpl implements if (other instanceof UniqueFieldAttributeImpl) { - return ((UniqueFieldAttributeImpl) other).uniqueField - .equals(this.uniqueField); - + return ((UniqueFieldAttributeImpl) other).uniqueField.equals(this.uniqueField); } return false; - } @Override @@ -88,5 +82,4 @@ public class UniqueFieldAttributeImpl extends AttributeImpl implements public void reflectWith(AttributeReflector reflector) { reflector.reflect(UniqueFieldAttribute.class, "uniqueField", uniqueField); } - } diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/UniqueFieldQueryNodeProcessor.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/UniqueFieldQueryNodeProcessor.java index e8de240ecec..e7a4a73370a 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/UniqueFieldQueryNodeProcessor.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/UniqueFieldQueryNodeProcessor.java @@ -17,7 +17,6 @@ package org.apache.lucene.queryparser.flexible.spans; import java.util.List; - import org.apache.lucene.queryparser.flexible.core.QueryNodeException; import org.apache.lucene.queryparser.flexible.core.config.QueryConfigHandler; import org.apache.lucene.queryparser.flexible.core.nodes.FieldableNode; @@ -25,12 +24,11 @@ import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; import org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessorImpl; /** - * This processor changes every field name of each {@link FieldableNode} query - * node contained in the query tree to the field name defined in the - * {@link UniqueFieldAttribute}. So, the {@link UniqueFieldAttribute} must be - * defined in the {@link QueryConfigHandler} object set in this processor, - * otherwise it throws an exception. - * + * This processor changes every field name of each {@link FieldableNode} query node contained in the + * query tree to the field name defined in the {@link UniqueFieldAttribute}. So, the {@link + * UniqueFieldAttribute} must be defined in the {@link QueryConfigHandler} object set in this + * processor, otherwise it throws an exception. 
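Concretely, borrowing the expected values from testUniqueField in TestSpanQueryParser above: once the unique field key is set, every pair collapses onto the configured field, regardless of the field name the user typed:

    // config.set(SpansQueryConfigHandler.UNIQUE_FIELD, "field");
    //   "term"               is rewritten to   field:term
    //   "anotherField:term"  is rewritten to   field:term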
+ * * @see UniqueFieldAttribute */ public class UniqueFieldQueryNodeProcessor extends QueryNodeProcessorImpl { @@ -39,7 +37,6 @@ public class UniqueFieldQueryNodeProcessor extends QueryNodeProcessorImpl { protected QueryNode postProcessNode(QueryNode node) throws QueryNodeException { return node; - } @Override @@ -62,19 +59,14 @@ public class UniqueFieldQueryNodeProcessor extends QueryNodeProcessorImpl { String uniqueField = queryConfig.get(SpansQueryConfigHandler.UNIQUE_FIELD); fieldNode.setField(uniqueField); - } return node; - } @Override - protected List setChildrenOrder(List children) - throws QueryNodeException { + protected List setChildrenOrder(List children) throws QueryNodeException { return children; - } - } diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiAnalyzerQPHelper.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiAnalyzerQPHelper.java index 220ce021bb8..6df50644edc 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiAnalyzerQPHelper.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiAnalyzerQPHelper.java @@ -27,11 +27,11 @@ import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfi import org.apache.lucene.util.LuceneTestCase; /** - * This test case is a copy of the core Lucene query parser test, it was adapted - * to use new QueryParserHelper instead of the old query parser. - * - * Test QueryParser's ability to deal with Analyzers that return more than one - * token per position or that return tokens with a position increment > 1. + * This test case is a copy of the core Lucene query parser test, it was adapted to use new + * QueryParserHelper instead of the old query parser. + * + *

    Test QueryParser's ability to deal with Analyzers that return more than one token per position + * or that return tokens with a position increment > 1. */ public class TestMultiAnalyzerQPHelper extends LuceneTestCase { @@ -47,59 +47,54 @@ public class TestMultiAnalyzerQPHelper extends LuceneTestCase { assertEquals("foo", qp.parse("\"foo\"", "").toString()); assertEquals("foo foobar", qp.parse("foo foobar", "").toString()); assertEquals("\"foo foobar\"", qp.parse("\"foo foobar\"", "").toString()); - assertEquals("\"foo foobar blah\"", qp.parse("\"foo foobar blah\"", "") - .toString()); + assertEquals("\"foo foobar blah\"", qp.parse("\"foo foobar blah\"", "").toString()); // two tokens at the same position: assertEquals("(multi multi2) foo", qp.parse("multi foo", "").toString()); assertEquals("foo (multi multi2)", qp.parse("foo multi", "").toString()); - assertEquals("(multi multi2) (multi multi2)", qp.parse("multi multi", "") - .toString()); - assertEquals("+(foo (multi multi2)) +(bar (multi multi2))", qp.parse( - "+(foo multi) +(bar multi)", "").toString()); - assertEquals("+(foo (multi multi2)) field:\"bar (multi multi2)\"", qp - .parse("+(foo multi) field:\"bar multi\"", "").toString()); + assertEquals("(multi multi2) (multi multi2)", qp.parse("multi multi", "").toString()); + assertEquals( + "+(foo (multi multi2)) +(bar (multi multi2))", + qp.parse("+(foo multi) +(bar multi)", "").toString()); + assertEquals( + "+(foo (multi multi2)) field:\"bar (multi multi2)\"", + qp.parse("+(foo multi) field:\"bar multi\"", "").toString()); // phrases: - assertEquals("\"(multi multi2) foo\"", qp.parse("\"multi foo\"", "") - .toString()); - assertEquals("\"foo (multi multi2)\"", qp.parse("\"foo multi\"", "") - .toString()); - assertEquals("\"foo (multi multi2) foobar (multi multi2)\"", qp.parse( - "\"foo multi foobar multi\"", "").toString()); + assertEquals("\"(multi multi2) foo\"", qp.parse("\"multi foo\"", "").toString()); + assertEquals("\"foo (multi multi2)\"", qp.parse("\"foo multi\"", "").toString()); + assertEquals( + "\"foo (multi multi2) foobar (multi multi2)\"", + qp.parse("\"foo multi foobar multi\"", "").toString()); // fields: - assertEquals("(field:multi field:multi2) field:foo", qp.parse( - "field:multi field:foo", "").toString()); - assertEquals("field:\"(multi multi2) foo\"", qp.parse( - "field:\"multi foo\"", "").toString()); + assertEquals( + "(field:multi field:multi2) field:foo", qp.parse("field:multi field:foo", "").toString()); + assertEquals("field:\"(multi multi2) foo\"", qp.parse("field:\"multi foo\"", "").toString()); // three tokens at one position: - assertEquals("triplemulti multi3 multi2", qp.parse("triplemulti", "") - .toString()); - assertEquals("foo (triplemulti multi3 multi2) foobar", qp.parse( - "foo triplemulti foobar", "").toString()); + assertEquals("triplemulti multi3 multi2", qp.parse("triplemulti", "").toString()); + assertEquals( + "foo (triplemulti multi3 multi2) foobar", + qp.parse("foo triplemulti foobar", "").toString()); // phrase with non-default slop: - assertEquals("\"(multi multi2) foo\"~10", qp.parse("\"multi foo\"~10", "") - .toString()); + assertEquals("\"(multi multi2) foo\"~10", qp.parse("\"multi foo\"~10", "").toString()); // phrase with non-default boost: - assertEquals("(\"(multi multi2) foo\")^2.0", qp.parse("\"multi foo\"^2", "") - .toString()); + assertEquals("(\"(multi multi2) foo\")^2.0", qp.parse("\"multi foo\"^2", "").toString()); // phrase after changing default slop qp.setPhraseSlop(99); - assertEquals("\"(multi multi2) foo\"~99 
bar", qp.parse("\"multi foo\" bar", - "").toString()); - assertEquals("\"(multi multi2) foo\"~99 \"foo bar\"~2", qp.parse( - "\"multi foo\" \"foo bar\"~2", "").toString()); + assertEquals("\"(multi multi2) foo\"~99 bar", qp.parse("\"multi foo\" bar", "").toString()); + assertEquals( + "\"(multi multi2) foo\"~99 \"foo bar\"~2", + qp.parse("\"multi foo\" \"foo bar\"~2", "").toString()); qp.setPhraseSlop(0); // non-default operator: qp.setDefaultOperator(StandardQueryConfigHandler.Operator.AND); assertEquals("+(multi multi2) +foo", qp.parse("multi foo", "").toString()); - } // public void testMultiAnalyzerWithSubclassOfQueryParser() throws @@ -115,11 +110,11 @@ public class TestMultiAnalyzerQPHelper extends LuceneTestCase { // assertEquals("\"(multi multi2) bar\"~99", // qp.getSuperFieldQuery("","multi bar").toString()); // - // + // // // ask sublcass to parse phrase with modified default slop // assertEquals("\"(multi multi2) foo\"~99 bar", // qp.parse("\"multi foo\" bar").toString()); - // + // // } public void testPosIncrementAnalyzer() throws QueryNodeException { @@ -127,17 +122,14 @@ public class TestMultiAnalyzerQPHelper extends LuceneTestCase { qp.setAnalyzer(new PosIncrementAnalyzer()); assertEquals("quick brown", qp.parse("the quick brown", "").toString()); - assertEquals("\"? quick brown\"", qp.parse("\"the quick brown\"", "") - .toString()); - assertEquals("quick brown fox", qp.parse("the quick brown fox", "") - .toString()); - assertEquals("\"? quick brown fox\"", qp.parse("\"the quick brown fox\"", "") - .toString()); + assertEquals("\"? quick brown\"", qp.parse("\"the quick brown\"", "").toString()); + assertEquals("quick brown fox", qp.parse("the quick brown fox", "").toString()); + assertEquals("\"? quick brown fox\"", qp.parse("\"the quick brown fox\"", "").toString()); } /** - * Expands "multi" to "multi" and "multi2", both at the same position, and - * expands "triplemulti" to "triplemulti", "multi3", and "multi2". + * Expands "multi" to "multi" and "multi2", both at the same position, and expands "triplemulti" + * to "triplemulti", "multi3", and "multi2". */ private static class MultiAnalyzer extends Analyzer { @@ -155,7 +147,8 @@ public class TestMultiAnalyzerQPHelper extends LuceneTestCase { private int prevEndOffset; private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); - private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class); + private final PositionIncrementAttribute posIncrAtt = + addAttribute(PositionIncrementAttribute.class); private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class); private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class); @@ -203,8 +196,8 @@ public class TestMultiAnalyzerQPHelper extends LuceneTestCase { } /** - * Analyzes "the quick brown" as: quick(incr=2) brown(incr=1). Does not work - * correctly for input other than "the quick brown ...". + * Analyzes "the quick brown" as: quick(incr=2) brown(incr=1). Does not work correctly for input + * other than "the quick brown ...". 
*/ private static class PosIncrementAnalyzer extends Analyzer { @@ -218,7 +211,8 @@ public class TestMultiAnalyzerQPHelper extends LuceneTestCase { private static class TestPosIncrementFilter extends TokenFilter { private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); - private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class); + private final PositionIncrementAttribute posIncrAtt = + addAttribute(PositionIncrementAttribute.class); public TestPosIncrementFilter(TokenStream in) { super(in); @@ -239,7 +233,5 @@ public class TestMultiAnalyzerQPHelper extends LuceneTestCase { } return false; } - } - } diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java index 95e299af848..d36234045fb 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java @@ -20,7 +20,6 @@ import java.io.Reader; import java.io.StringReader; import java.util.HashMap; import java.util.Map; - import org.apache.lucene.analysis.*; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; @@ -29,8 +28,8 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.queryparser.flexible.core.QueryNodeException; import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler; -import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; @@ -40,16 +39,16 @@ import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; /** - * This test case is a copy of the core Lucene query parser test, it was adapted - * to use new QueryParserHelper instead of the old query parser. - * - * Tests QueryParser. + * This test case is a copy of the core Lucene query parser test, it was adapted to use new + * QueryParserHelper instead of the old query parser. + * + *

    Tests QueryParser. */ public class TestMultiFieldQPHelper extends LuceneTestCase { /** - * test stop words parsing for both the non static form, and for the - * corresponding static form (qtxt, fields[]). + * test stop words parsing for both the non static form, and for the corresponding static form + * (qtxt, fields[]). */ public void testStopwordsParsing() throws Exception { assertStopQueryEquals("one", "b:one t:one"); @@ -63,8 +62,8 @@ public class TestMultiFieldQPHelper extends LuceneTestCase { // verify parsing of query using a stopping analyzer private void assertStopQueryIsMatchNoDocsQuery(String qtxt) throws Exception { - String[] fields = { "b", "t" }; - Occur occur[] = { Occur.SHOULD, Occur.SHOULD }; + String[] fields = {"b", "t"}; + Occur occur[] = {Occur.SHOULD, Occur.SHOULD}; TestQPHelper.QPTestAnalyzer a = new TestQPHelper.QPTestAnalyzer(); StandardQueryParser mfqp = new StandardQueryParser(); mfqp.setMultiFields(fields); @@ -75,10 +74,9 @@ public class TestMultiFieldQPHelper extends LuceneTestCase { } // verify parsing of query using a stopping analyzer - private void assertStopQueryEquals(String qtxt, String expectedRes) - throws Exception { - String[] fields = { "b", "t" }; - Occur occur[] = { Occur.SHOULD, Occur.SHOULD }; + private void assertStopQueryEquals(String qtxt, String expectedRes) throws Exception { + String[] fields = {"b", "t"}; + Occur occur[] = {Occur.SHOULD, Occur.SHOULD}; TestQPHelper.QPTestAnalyzer a = new TestQPHelper.QPTestAnalyzer(); StandardQueryParser mfqp = new StandardQueryParser(); mfqp.setMultiFields(fields); @@ -92,7 +90,7 @@ public class TestMultiFieldQPHelper extends LuceneTestCase { } public void testSimple() throws Exception { - String[] fields = { "b", "t" }; + String[] fields = {"b", "t"}; StandardQueryParser mfqp = new StandardQueryParser(); mfqp.setMultiFields(fields); mfqp.setAnalyzer(new MockAnalyzer(random())); @@ -107,8 +105,7 @@ public class TestMultiFieldQPHelper extends LuceneTestCase { assertEquals("+(b:one t:one) +(b:two t:two)", q.toString()); q = mfqp.parse("+one -two -three", null); - assertEquals("+(b:one t:one) -(b:two t:two) -(b:three t:three)", q - .toString()); + assertEquals("+(b:one t:one) -(b:two t:two) -(b:three t:three)", q.toString()); q = mfqp.parse("one^2 two", null); assertEquals("(b:one t:one)^2.0 (b:two t:two)", q.toString()); @@ -132,8 +129,7 @@ public class TestMultiFieldQPHelper extends LuceneTestCase { assertEquals("b:\"foo bar\" t:\"foo bar\"", q.toString()); q = mfqp.parse("\"aa bb cc\" \"dd ee\"", null); - assertEquals("(b:\"aa bb cc\" t:\"aa bb cc\") (b:\"dd ee\" t:\"dd ee\")", q - .toString()); + assertEquals("(b:\"aa bb cc\" t:\"aa bb cc\") (b:\"dd ee\" t:\"dd ee\")", q.toString()); q = mfqp.parse("\"foo bar\"~4", null); assertEquals("b:\"foo bar\"~4 t:\"foo bar\"~4", q.toString()); @@ -152,16 +148,14 @@ public class TestMultiFieldQPHelper extends LuceneTestCase { q = mfqp.parse("one two", null); assertEquals("+(b:one t:one) +(b:two t:two)", q.toString()); q = mfqp.parse("\"aa bb cc\" \"dd ee\"", null); - assertEquals("+(b:\"aa bb cc\" t:\"aa bb cc\") +(b:\"dd ee\" t:\"dd ee\")", - q.toString()); - + assertEquals("+(b:\"aa bb cc\" t:\"aa bb cc\") +(b:\"dd ee\" t:\"dd ee\")", q.toString()); } public void testBoostsSimple() throws Exception { - Map boosts = new HashMap<>(); + Map boosts = new HashMap<>(); boosts.put("b", Float.valueOf(5)); boosts.put("t", Float.valueOf(10)); - String[] fields = { "b", "t" }; + String[] fields = {"b", "t"}; StandardQueryParser mfqp = new StandardQueryParser(); 
mfqp.setMultiFields(fields); mfqp.setFieldsBoost(boosts); @@ -173,8 +167,7 @@ public class TestMultiFieldQPHelper extends LuceneTestCase { // Check for AND q = mfqp.parse("one AND two", null); - assertEquals("+((b:one)^5.0 (t:one)^10.0) +((b:two)^5.0 (t:two)^10.0)", q - .toString()); + assertEquals("+((b:one)^5.0 (t:one)^10.0) +((b:two)^5.0 (t:two)^10.0)", q.toString()); // Check for OR q = mfqp.parse("one OR two", null); @@ -182,79 +175,79 @@ public class TestMultiFieldQPHelper extends LuceneTestCase { // Check for AND and a field q = mfqp.parse("one AND two AND foo:test", null); - assertEquals("+((b:one)^5.0 (t:one)^10.0) +((b:two)^5.0 (t:two)^10.0) +foo:test", q - .toString()); + assertEquals("+((b:one)^5.0 (t:one)^10.0) +((b:two)^5.0 (t:two)^10.0) +foo:test", q.toString()); q = mfqp.parse("one^3 AND two^4", null); - assertEquals("+((b:one)^5.0 (t:one)^10.0)^3.0 +((b:two)^5.0 (t:two)^10.0)^4.0", - q.toString()); + assertEquals("+((b:one)^5.0 (t:one)^10.0)^3.0 +((b:two)^5.0 (t:two)^10.0)^4.0", q.toString()); } public void testStaticMethod1() throws QueryNodeException { - String[] fields = { "b", "t" }; - String[] queries = { "one", "two" }; + String[] fields = {"b", "t"}; + String[] queries = {"one", "two"}; Query q = QueryParserUtil.parse(queries, fields, new MockAnalyzer(random())); assertEquals("b:one t:two", q.toString()); - String[] queries2 = { "+one", "+two" }; + String[] queries2 = {"+one", "+two"}; q = QueryParserUtil.parse(queries2, fields, new MockAnalyzer(random())); assertEquals("b:one t:two", q.toString()); - String[] queries3 = { "one", "+two" }; + String[] queries3 = {"one", "+two"}; q = QueryParserUtil.parse(queries3, fields, new MockAnalyzer(random())); assertEquals("b:one t:two", q.toString()); - String[] queries4 = { "one +more", "+two" }; + String[] queries4 = {"one +more", "+two"}; q = QueryParserUtil.parse(queries4, fields, new MockAnalyzer(random())); assertEquals("(b:one +b:more) t:two", q.toString()); - String[] queries5 = { "blah" }; + String[] queries5 = {"blah"}; // expected exception, array length differs - expectThrows(IllegalArgumentException.class, () -> { - QueryParserUtil.parse(queries5, fields, new MockAnalyzer(random())); - }); + expectThrows( + IllegalArgumentException.class, + () -> { + QueryParserUtil.parse(queries5, fields, new MockAnalyzer(random())); + }); // check also with stop words for this static form (qtxts[], fields[]). 
TestQPHelper.QPTestAnalyzer stopA = new TestQPHelper.QPTestAnalyzer(); - String[] queries6 = { "((+stop))", "+((stop))" }; + String[] queries6 = {"((+stop))", "+((stop))"}; q = QueryParserUtil.parse(queries6, fields, stopA); assertEquals("MatchNoDocsQuery(\"\") MatchNoDocsQuery(\"\")", q.toString()); - //assertEquals(" ", q.toString()); + // assertEquals(" ", q.toString()); - String[] queries7 = { "one ((+stop)) +more", "+((stop)) +two" }; + String[] queries7 = {"one ((+stop)) +more", "+((stop)) +two"}; q = QueryParserUtil.parse(queries7, fields, stopA); assertEquals("(b:one +b:more) (+t:two)", q.toString()); } public void testStaticMethod2() throws QueryNodeException { - String[] fields = { "b", "t" }; - BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST, - BooleanClause.Occur.MUST_NOT }; - Query q = QueryParserUtil.parse("one", fields, flags, - new MockAnalyzer(random())); + String[] fields = {"b", "t"}; + BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT}; + Query q = QueryParserUtil.parse("one", fields, flags, new MockAnalyzer(random())); assertEquals("+b:one -t:one", q.toString()); q = QueryParserUtil.parse("one two", fields, flags, new MockAnalyzer(random())); assertEquals("+(b:one b:two) -(t:one t:two)", q.toString()); // expected exception, array length differs - expectThrows(IllegalArgumentException.class, () -> { - BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST }; - QueryParserUtil.parse("blah", fields, flags2, new MockAnalyzer(random())); - }); + expectThrows( + IllegalArgumentException.class, + () -> { + BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST}; + QueryParserUtil.parse("blah", fields, flags2, new MockAnalyzer(random())); + }); } public void testStaticMethod2Old() throws QueryNodeException { - String[] fields = { "b", "t" }; - BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST, - BooleanClause.Occur.MUST_NOT }; + String[] fields = {"b", "t"}; + BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT}; StandardQueryParser parser = new StandardQueryParser(); parser.setMultiFields(fields); parser.setAnalyzer(new MockAnalyzer(random())); - Query q = QueryParserUtil.parse("one", fields, flags, - new MockAnalyzer(random()));// , fields, flags, new + Query q = + QueryParserUtil.parse( + "one", fields, flags, new MockAnalyzer(random())); // , fields, flags, new // MockAnalyzer()); assertEquals("+b:one -t:one", q.toString()); @@ -262,48 +255,50 @@ public class TestMultiFieldQPHelper extends LuceneTestCase { assertEquals("+(b:one b:two) -(t:one t:two)", q.toString()); // expected exception, array length differs - expectThrows(IllegalArgumentException.class, () -> { - BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST }; - QueryParserUtil.parse("blah", fields, flags2, new MockAnalyzer(random())); - }); + expectThrows( + IllegalArgumentException.class, + () -> { + BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST}; + QueryParserUtil.parse("blah", fields, flags2, new MockAnalyzer(random())); + }); } public void testStaticMethod3() throws QueryNodeException { - String[] queries = { "one", "two", "three" }; - String[] fields = { "f1", "f2", "f3" }; - BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST, - BooleanClause.Occur.MUST_NOT, BooleanClause.Occur.SHOULD }; - Query q = QueryParserUtil.parse(queries, fields, flags, - new MockAnalyzer(random())); + String[] queries = {"one", "two", "three"}; + String[] fields = {"f1", "f2", "f3"}; + BooleanClause.Occur[] flags = { + 
BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT, BooleanClause.Occur.SHOULD + }; + Query q = QueryParserUtil.parse(queries, fields, flags, new MockAnalyzer(random())); assertEquals("+f1:one -f2:two f3:three", q.toString()); // expected exception, array length differs - expectThrows(IllegalArgumentException.class, () -> { - BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST }; - QueryParserUtil - .parse(queries, fields, flags2, new MockAnalyzer(random())); - }); + expectThrows( + IllegalArgumentException.class, + () -> { + BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST}; + QueryParserUtil.parse(queries, fields, flags2, new MockAnalyzer(random())); + }); } public void testStaticMethod3Old() throws QueryNodeException { - String[] queries = { "one", "two" }; - String[] fields = { "b", "t" }; - BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST, - BooleanClause.Occur.MUST_NOT }; - Query q = QueryParserUtil.parse(queries, fields, flags, - new MockAnalyzer(random())); + String[] queries = {"one", "two"}; + String[] fields = {"b", "t"}; + BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT}; + Query q = QueryParserUtil.parse(queries, fields, flags, new MockAnalyzer(random())); assertEquals("+b:one -t:two", q.toString()); // expected exception, array length differs - expectThrows(IllegalArgumentException.class, () -> { - BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST }; - QueryParserUtil - .parse(queries, fields, flags2, new MockAnalyzer(random())); - }); + expectThrows( + IllegalArgumentException.class, + () -> { + BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST}; + QueryParserUtil.parse(queries, fields, flags2, new MockAnalyzer(random())); + }); } public void testAnalyzerReturningNull() throws QueryNodeException { - String[] fields = new String[] { "f1", "f2", "f3" }; + String[] fields = new String[] {"f1", "f2", "f3"}; StandardQueryParser parser = new StandardQueryParser(); parser.setMultiFields(fields); parser.setAnalyzer(new AnalyzerReturningNull()); @@ -331,7 +326,7 @@ public class TestMultiFieldQPHelper extends LuceneTestCase { StandardQueryParser mfqp = new StandardQueryParser(); - mfqp.setMultiFields(new String[] { "body" }); + mfqp.setMultiFields(new String[] {"body"}); mfqp.setAnalyzer(analyzer); mfqp.setDefaultOperator(StandardQueryConfigHandler.Operator.AND); Query q = mfqp.parse("the footest", null); @@ -343,9 +338,7 @@ public class TestMultiFieldQPHelper extends LuceneTestCase { ramDir.close(); } - /** - * Return no tokens for field "f1". - */ + /** Return no tokens for field "f1". 
*/ private static class AnalyzerReturningNull extends Analyzer { MockAnalyzer stdAnalyzer = new MockAnalyzer(random()); @@ -370,5 +363,4 @@ public class TestMultiFieldQPHelper extends LuceneTestCase { return stdAnalyzer.createComponents(fieldName); } } - } diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestPointQueryParser.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestPointQueryParser.java index a301de0a7f1..e4010b774e0 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestPointQueryParser.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestPointQueryParser.java @@ -20,7 +20,6 @@ import java.text.NumberFormat; import java.util.HashMap; import java.util.Locale; import java.util.Map; - import org.apache.lucene.document.DoublePoint; import org.apache.lucene.document.FloatPoint; import org.apache.lucene.document.IntPoint; @@ -30,53 +29,57 @@ import org.apache.lucene.util.LuceneTestCase; /** Simple test for point field integration into the flexible QP */ public class TestPointQueryParser extends LuceneTestCase { - + public void testIntegers() throws Exception { StandardQueryParser parser = new StandardQueryParser(); - Map<String,PointsConfig> pointsConfig = new HashMap<>(); - pointsConfig.put("intField", new PointsConfig(NumberFormat.getIntegerInstance(Locale.ROOT), Integer.class)); + Map<String, PointsConfig> pointsConfig = new HashMap<>(); + pointsConfig.put( + "intField", new PointsConfig(NumberFormat.getIntegerInstance(Locale.ROOT), Integer.class)); parser.setPointsConfigMap(pointsConfig); - - assertEquals(IntPoint.newRangeQuery("intField", 1, 3), - parser.parse("intField:[1 TO 3]", "body")); - assertEquals(IntPoint.newRangeQuery("intField", 1, 1), - parser.parse("intField:1", "body")); - } - - public void testLongs() throws Exception { - StandardQueryParser parser = new StandardQueryParser(); - Map<String,PointsConfig> pointsConfig = new HashMap<>(); - pointsConfig.put("longField", new PointsConfig(NumberFormat.getIntegerInstance(Locale.ROOT), Long.class)); - parser.setPointsConfigMap(pointsConfig); - - assertEquals(LongPoint.newRangeQuery("longField", 1, 3), - parser.parse("longField:[1 TO 3]", "body")); - assertEquals(LongPoint.newRangeQuery("longField", 1, 1), - parser.parse("longField:1", "body")); - } - - public void testFloats() throws Exception { - StandardQueryParser parser = new StandardQueryParser(); - Map<String,PointsConfig> pointsConfig = new HashMap<>(); - pointsConfig.put("floatField", new PointsConfig(NumberFormat.getNumberInstance(Locale.ROOT), Float.class)); - parser.setPointsConfigMap(pointsConfig); - - assertEquals(FloatPoint.newRangeQuery("floatField", 1.5F, 3.6F), - parser.parse("floatField:[1.5 TO 3.6]", "body")); - assertEquals(FloatPoint.newRangeQuery("floatField", 1.5F, 1.5F), - parser.parse("floatField:1.5", "body")); - } - - public void testDoubles() throws Exception { - StandardQueryParser parser = new StandardQueryParser(); - Map<String,PointsConfig> pointsConfig = new HashMap<>(); - pointsConfig.put("doubleField", new PointsConfig(NumberFormat.getNumberInstance(Locale.ROOT), Double.class)); - parser.setPointsConfigMap(pointsConfig); - - assertEquals(DoublePoint.newRangeQuery("doubleField", 1.5D, 3.6D), - parser.parse("doubleField:[1.5 TO 3.6]", "body")); - assertEquals(DoublePoint.newRangeQuery("doubleField", 1.5D, 1.5D), - parser.parse("doubleField:1.5", "body")); + + assertEquals( + IntPoint.newRangeQuery("intField", 1, 3), parser.parse("intField:[1 TO 3]", "body")); +
assertEquals(IntPoint.newRangeQuery("intField", 1, 1), parser.parse("intField:1", "body")); } + public void testLongs() throws Exception { + StandardQueryParser parser = new StandardQueryParser(); + Map<String, PointsConfig> pointsConfig = new HashMap<>(); + pointsConfig.put( + "longField", new PointsConfig(NumberFormat.getIntegerInstance(Locale.ROOT), Long.class)); + parser.setPointsConfigMap(pointsConfig); + + assertEquals( + LongPoint.newRangeQuery("longField", 1, 3), parser.parse("longField:[1 TO 3]", "body")); + assertEquals(LongPoint.newRangeQuery("longField", 1, 1), parser.parse("longField:1", "body")); + } + + public void testFloats() throws Exception { + StandardQueryParser parser = new StandardQueryParser(); + Map<String, PointsConfig> pointsConfig = new HashMap<>(); + pointsConfig.put( + "floatField", new PointsConfig(NumberFormat.getNumberInstance(Locale.ROOT), Float.class)); + parser.setPointsConfigMap(pointsConfig); + + assertEquals( + FloatPoint.newRangeQuery("floatField", 1.5F, 3.6F), + parser.parse("floatField:[1.5 TO 3.6]", "body")); + assertEquals( + FloatPoint.newRangeQuery("floatField", 1.5F, 1.5F), parser.parse("floatField:1.5", "body")); + } + + public void testDoubles() throws Exception { + StandardQueryParser parser = new StandardQueryParser(); + Map<String, PointsConfig> pointsConfig = new HashMap<>(); + pointsConfig.put( + "doubleField", new PointsConfig(NumberFormat.getNumberInstance(Locale.ROOT), Double.class)); + parser.setPointsConfigMap(pointsConfig); + + assertEquals( + DoublePoint.newRangeQuery("doubleField", 1.5D, 3.6D), + parser.parse("doubleField:[1.5 TO 3.6]", "body")); + assertEquals( + DoublePoint.newRangeQuery("doubleField", 1.5D, 1.5D), + parser.parse("doubleField:1.5", "body")); + } } diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java index bfef03b1910..787c336e069 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java @@ -27,7 +27,6 @@ import java.util.List; import java.util.Locale; import java.util.Map; import java.util.TimeZone; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenFilter; @@ -57,8 +56,8 @@ import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfi import org.apache.lucene.queryparser.flexible.standard.nodes.WildcardQueryNode; import org.apache.lucene.queryparser.flexible.standard.parser.ParseException; import org.apache.lucene.queryparser.flexible.standard.parser.StandardSyntaxParser; -import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.FuzzyQuery; @@ -85,10 +84,10 @@ import org.junit.BeforeClass; import org.junit.Ignore; /** - * This test case is a copy of the core Lucene query parser test, it was adapted - * to use new QueryParserHelper instead of the old query parser. - * - * Tests QueryParser. + * This test case is a copy of the core Lucene query parser test, it was adapted to use new + * QueryParserHelper instead of the old query parser. + * + *

<p>Tests QueryParser. */ // TODO: really this should extend QueryParserTestBase too! public class TestQPHelper extends LuceneTestCase { @@ -110,8 +109,8 @@ public class TestQPHelper extends LuceneTestCase { private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class); /** - * Filter which discards the token 'stop' and which expands the token - * 'phrase' into 'phrase1 phrase2' + * Filter which discards the token 'stop' and which expands the token 'phrase' into 'phrase1 + * phrase2' */ public QPTestFilter(TokenStream in) { super(in); @@ -138,8 +137,7 @@ public class TestQPHelper extends LuceneTestCase { termAtt.setEmpty().append("phrase1"); offsetAtt.setOffset(savedStart, savedEnd); return true; - } else if (!termAtt.toString().equals("stop")) - return true; + } else if (!termAtt.toString().equals("stop")) return true; } return false; } @@ -165,36 +163,28 @@ public class TestQPHelper extends LuceneTestCase { public static class QPTestParser extends StandardQueryParser { public QPTestParser(Analyzer a) { - ((QueryNodeProcessorPipeline)getQueryNodeProcessor()) + ((QueryNodeProcessorPipeline) getQueryNodeProcessor()) .add(new QPTestParserQueryNodeProcessor()); this.setAnalyzer(a); - } - private static class QPTestParserQueryNodeProcessor extends - QueryNodeProcessorImpl { + private static class QPTestParserQueryNodeProcessor extends QueryNodeProcessorImpl { @Override - protected QueryNode postProcessNode(QueryNode node) - throws QueryNodeException { + protected QueryNode postProcessNode(QueryNode node) throws QueryNodeException { return node; - } @Override - protected QueryNode preProcessNode(QueryNode node) - throws QueryNodeException { + protected QueryNode preProcessNode(QueryNode node) throws QueryNodeException { if (node instanceof WildcardQueryNode || node instanceof FuzzyQueryNode) { - throw new QueryNodeException(new MessageImpl( - QueryParserMessages.EMPTY_MESSAGE)); - + throw new QueryNodeException(new MessageImpl(QueryParserMessages.EMPTY_MESSAGE)); } return node; - } @Override @@ -202,11 +192,8 @@ public class TestQPHelper extends LuceneTestCase { throws QueryNodeException { return children; - } - } - } private int originalMaxClauses; @@ -218,15 +205,13 @@ public class TestQPHelper extends LuceneTestCase { } public StandardQueryParser getParser(Analyzer a) throws Exception { - if (a == null) - a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true); + if (a == null) a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true); StandardQueryParser qp = new StandardQueryParser(); qp.setAnalyzer(a); qp.setDefaultOperator(StandardQueryConfigHandler.Operator.OR); return qp; - } public Query getQuery(String query, Analyzer a) throws Exception { @@ -239,13 +224,11 @@ public class TestQPHelper extends LuceneTestCase { return parser.parse(query, "field"); } - public void assertQueryEquals(String query, Analyzer a, String result) - throws Exception { + public void assertQueryEquals(String query, Analyzer a, String result) throws Exception { Query q = getQuery(query, a); String s = q.toString("field"); if (!s.equals(result)) { - fail("Query /" + query + "/ yielded /" + s + "/, expecting /" + result - + "/"); + fail("Query /" + query + "/ yielded /" + s + "/, expecting /" + result + "/"); } } @@ -268,70 +251,61 @@ public class TestQPHelper extends LuceneTestCase { Query q = getQueryAllowLeadingWildcard(query, a); String s = q.toString("field"); if (!s.equals(result)) { - fail("Query /" + query + "/ yielded /" + s + "/, expecting /" + result - + "/"); + fail("Query /" + query
+ "/ yielded /" + s + "/, expecting /" + result + "/"); } } - public void assertQueryEquals(StandardQueryParser qp, String field, - String query, String result) throws Exception { + public void assertQueryEquals(StandardQueryParser qp, String field, String query, String result) + throws Exception { Query q = qp.parse(query, field); String s = q.toString(field); if (!s.equals(result)) { - fail("Query /" + query + "/ yielded /" + s + "/, expecting /" + result - + "/"); + fail("Query /" + query + "/ yielded /" + s + "/, expecting /" + result + "/"); } } - public void assertEscapedQueryEquals(String query, Analyzer a, String result) - throws Exception { + public void assertEscapedQueryEquals(String query, Analyzer a, String result) throws Exception { String escapedQuery = QueryParserUtil.escape(query); if (!escapedQuery.equals(result)) { - fail("Query /" + query + "/ yielded /" + escapedQuery + "/, expecting /" - + result + "/"); + fail("Query /" + query + "/ yielded /" + escapedQuery + "/, expecting /" + result + "/"); } } - public void assertWildcardQueryEquals(String query, - String result, boolean allowLeadingWildcard) throws Exception { + public void assertWildcardQueryEquals(String query, String result, boolean allowLeadingWildcard) + throws Exception { StandardQueryParser qp = getParser(null); qp.setAllowLeadingWildcard(allowLeadingWildcard); Query q = qp.parse(query, "field"); String s = q.toString("field"); if (!s.equals(result)) { - fail("WildcardQuery /" + query + "/ yielded /" + s + "/, expecting /" - + result + "/"); + fail("WildcardQuery /" + query + "/ yielded /" + s + "/, expecting /" + result + "/"); } } - public void assertWildcardQueryEquals(String query, - String result) throws Exception { + public void assertWildcardQueryEquals(String query, String result) throws Exception { assertWildcardQueryEquals(query, result, false); } public Query getQueryDOA(String query, Analyzer a) throws Exception { - if (a == null) - a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true); + if (a == null) a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true); StandardQueryParser qp = new StandardQueryParser(); qp.setAnalyzer(a); qp.setDefaultOperator(StandardQueryConfigHandler.Operator.AND); return qp.parse(query, "field"); - } - public void assertQueryEqualsDOA(String query, Analyzer a, String result) - throws Exception { + public void assertQueryEqualsDOA(String query, Analyzer a, String result) throws Exception { Query q = getQueryDOA(query, a); String s = q.toString("field"); if (!s.equals(result)) { - fail("Query /" + query + "/ yielded /" + s + "/, expecting /" + result - + "/"); + fail("Query /" + query + "/ yielded /" + s + "/, expecting /" + result + "/"); } } public void testConstantScoreAutoRewrite() throws Exception { - StandardQueryParser qp = new StandardQueryParser(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); + StandardQueryParser qp = + new StandardQueryParser(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); Query q = qp.parse("foo*bar", "field"); assertTrue(q instanceof WildcardQuery); assertEquals(MultiTermQuery.CONSTANT_SCORE_REWRITE, ((MultiTermQuery) q).getRewriteMethod()); @@ -348,12 +322,11 @@ public class TestQPHelper extends LuceneTestCase { public void testCJK() throws Exception { // Test Ideographic Space - As wide as a CJK character cell (fullwidth) // used google to translate the word "term" to japanese -> 用語
- assertQueryEquals("term\u3000term\u3000term", null, - "term\u0020term\u0020term"); + assertQueryEquals("term\u3000term\u3000term", null, "term\u0020term\u0020term"); assertQueryEqualsAllowLeadingWildcard("用語\u3000用語\u3000用語", null, "用語\u0020用語\u0020用語"); } - //individual CJK chars as terms, like StandardAnalyzer + // individual CJK chars as terms, like StandardAnalyzer private static class SimpleCJKTokenizer extends Tokenizer { private CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); @@ -364,8 +337,7 @@ public class TestQPHelper extends LuceneTestCase { @Override public boolean incrementToken() throws IOException { int ch = input.read(); - if (ch < 0) - return false; + if (ch < 0) return false; clearAttributes(); termAtt.setEmpty().append((char) ch); return true; @@ -378,16 +350,16 @@ public class TestQPHelper extends LuceneTestCase { return new TokenStreamComponents(new SimpleCJKTokenizer()); } } - + public void testCJKTerm() throws Exception { // individual CJK chars as terms SimpleCJKAnalyzer analyzer = new SimpleCJKAnalyzer(); - + BooleanQuery.Builder expected = new BooleanQuery.Builder(); expected.add(new TermQuery(new Term("field", "中")), BooleanClause.Occur.SHOULD); expected.add(new TermQuery(new Term("field", "国")), BooleanClause.Occur.SHOULD); assertEquals(expected.build(), getQuery("中国", analyzer)); - + expected = new BooleanQuery.Builder(); expected.add(new TermQuery(new Term("field", "中")), BooleanClause.Occur.MUST); BooleanQuery.Builder inner = new BooleanQuery.Builder(); @@ -395,13 +367,12 @@ public class TestQPHelper extends LuceneTestCase { inner.add(new TermQuery(new Term("field", "国")), BooleanClause.Occur.SHOULD); expected.add(inner.build(), BooleanClause.Occur.MUST); assertEquals(expected.build(), getQuery("中 AND 中国", new SimpleCJKAnalyzer())); - } - + public void testCJKBoostedTerm() throws Exception { // individual CJK chars as terms SimpleCJKAnalyzer analyzer = new SimpleCJKAnalyzer(); - + BooleanQuery.Builder expectedB = new BooleanQuery.Builder(); expectedB.add(new TermQuery(new Term("field", "中")), BooleanClause.Occur.SHOULD); expectedB.add(new TermQuery(new Term("field", "国")), BooleanClause.Occur.SHOULD); @@ -409,32 +380,32 @@ public class TestQPHelper extends LuceneTestCase { expected = new BoostQuery(expected, 0.5f); assertEquals(expected, getQuery("中国^0.5", analyzer)); } - + public void testCJKPhrase() throws Exception { // individual CJK chars as terms SimpleCJKAnalyzer analyzer = new SimpleCJKAnalyzer(); - + PhraseQuery expected = new PhraseQuery("field", "中", "国"); - + assertEquals(expected, getQuery("\"中国\"", analyzer)); } - + public void testCJKBoostedPhrase() throws Exception { // individual CJK chars as terms SimpleCJKAnalyzer analyzer = new SimpleCJKAnalyzer(); - + Query expected = new PhraseQuery("field", "中", "国"); expected = new BoostQuery(expected, 0.5f); - + assertEquals(expected, getQuery("\"中国\"^0.5", analyzer)); } - + public void testCJKSloppyPhrase() throws Exception { // individual CJK chars as terms - SimpleCJKAnalyzer analyzer = new SimpleCJKAnalyzer(); - + SimpleCJKAnalyzer analyzer = new SimpleCJKAnalyzer(); + PhraseQuery expected = new PhraseQuery(3, "field", "中", "国"); - + assertEquals(expected, getQuery("\"中国\"~3", analyzer)); } @@ -442,13 +413,16 @@ public class TestQPHelper extends LuceneTestCase { assertQueryEquals("field=a", null, "a"); assertQueryEquals("\"term germ\"~2", null, "\"term germ\"~2"); assertQueryEquals("term term term", null, "term term term"); - assertQueryEquals("türm term term", new
MockAnalyzer(random(), MockTokenizer.WHITESPACE, false), + assertQueryEquals( + "türm term term", + new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false), "türm term term"); - assertQueryEquals("ümlaut", new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false), "ümlaut"); + assertQueryEquals( + "ümlaut", new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false), "ümlaut"); // FIXME: change MockAnalyzer to not extend CharTokenizer for this test - //assertQueryEquals("\"\"", new KeywordAnalyzer(), ""); - //assertQueryEquals("foo:\"\"", new KeywordAnalyzer(), "foo:"); + // assertQueryEquals("\"\"", new KeywordAnalyzer(), ""); + // assertQueryEquals("foo:\"\"", new KeywordAnalyzer(), "foo:"); assertQueryEquals("a AND b", null, "+a +b"); assertQueryEquals("(a AND b)", null, "+a +b"); @@ -474,10 +448,8 @@ public class TestQPHelper extends LuceneTestCase { assertQueryEquals("a OR -b", null, "a -b"); assertQueryEquals("+term -term term", null, "+term -term term"); - assertQueryEquals("foo:term AND field:anotherTerm", null, - "+foo:term +anotherterm"); - assertQueryEquals("term AND \"phrase phrase\"", null, - "+term +\"phrase phrase\""); + assertQueryEquals("foo:term AND field:anotherTerm", null, "+foo:term +anotherterm"); + assertQueryEquals("term AND \"phrase phrase\"", null, "+term +\"phrase phrase\""); assertQueryEquals("\"hello there\"", null, "\"hello there\""); assertTrue(getQuery("a AND b", null) instanceof BooleanQuery); assertTrue(getQuery("hello", null) instanceof TermQuery); @@ -491,12 +463,13 @@ public class TestQPHelper extends LuceneTestCase { assertQueryEquals("\"germ term\"^2.0", null, "(\"germ term\")^2.0"); assertQueryEquals("\"term germ\"^2", null, "(\"term germ\")^2.0"); - assertQueryEquals("(foo OR bar) AND (baz OR boo)", null, - "+(foo bar) +(baz boo)"); + assertQueryEquals("(foo OR bar) AND (baz OR boo)", null, "+(foo bar) +(baz boo)"); assertQueryEquals("((a OR b) AND NOT c) OR d", null, "(+(a b) -c) d"); - assertQueryEquals("+(apple \"steve jobs\") -(foo bar baz)", null, - "+(apple \"steve jobs\") -(foo bar baz)"); - assertQueryEquals("+title:(dog OR cat) -author:\"bob dole\"", null, + assertQueryEquals( + "+(apple \"steve jobs\") -(foo bar baz)", null, "+(apple \"steve jobs\") -(foo bar baz)"); + assertQueryEquals( + "+title:(dog OR cat) -author:\"bob dole\"", + null, "+(title:dog title:cat) -author:\"bob dole\""); } @@ -548,18 +521,18 @@ public class TestQPHelper extends LuceneTestCase { assertQueryEquals("!term", null, "-term"); assertQueryEquals("NOT term", null, "-term"); } - + public void testNegationInParentheses() throws Exception { - assertQueryEquals("(-a)", null, "-a"); - assertQueryEquals("(!a)", null, "-a"); - assertQueryEquals("(NOT a)", null, "-a"); - assertQueryEquals("a (!b)", null, "a (-b)"); - assertQueryEquals("+a +(!b)", null, "+a +(-b)"); - assertQueryEquals("a AND (!b)", null, "+a +(-b)"); - assertQueryEquals("a (NOT b)", null, "a (-b)"); - assertQueryEquals("a AND (NOT b)", null, "+a +(-b)"); + assertQueryEquals("(-a)", null, "-a"); + assertQueryEquals("(!a)", null, "-a"); + assertQueryEquals("(NOT a)", null, "-a"); + assertQueryEquals("a (!b)", null, "a (-b)"); + assertQueryEquals("+a +(!b)", null, "+a +(-b)"); + assertQueryEquals("a AND (!b)", null, "+a +(-b)"); + assertQueryEquals("a (NOT b)", null, "a (-b)"); + assertQueryEquals("a AND (NOT b)", null, "+a +(-b)"); } - + public void testWildcard() throws Exception { assertQueryEquals("term*", null, "term*"); assertQueryEquals("term*^2", null, "(term*)^2.0"); @@ -616,13 +589,17 @@ 
public class TestQPHelper extends LuceneTestCase { // : "" assertWildcardQueryEquals("[A TO C]", "[a TO c]"); // Test suffix queries: first disallow - expectThrows(QueryNodeException.class, () -> { - assertWildcardQueryEquals("*Term", "*term"); - }); + expectThrows( + QueryNodeException.class, + () -> { + assertWildcardQueryEquals("*Term", "*term"); + }); - expectThrows(QueryNodeException.class, () -> { - assertWildcardQueryEquals("?Term", "?term"); - }); + expectThrows( + QueryNodeException.class, + () -> { + assertWildcardQueryEquals("?Term", "?term"); + }); // Test suffix queries: then allow assertWildcardQueryEquals("*Term", "*term", true); @@ -650,11 +627,9 @@ public class TestQPHelper extends LuceneTestCase { assertQueryEquals("term -(stop) term", qpAnalyzer, "term term"); assertQueryEquals("drop AND stop AND roll", qpAnalyzer, "+drop +roll"); - assertQueryEquals("term phrase term", qpAnalyzer, - "term (phrase1 phrase2) term"); + assertQueryEquals("term phrase term", qpAnalyzer, "term (phrase1 phrase2) term"); - assertQueryEquals("term AND NOT phrase term", qpAnalyzer, - "+term -(phrase1 phrase2) term"); + assertQueryEquals("term AND NOT phrase term", qpAnalyzer, "+term -(phrase1 phrase2) term"); assertMatchNoDocsQuery("stop^3", qpAnalyzer); assertMatchNoDocsQuery("stop", qpAnalyzer); @@ -670,29 +645,33 @@ public class TestQPHelper extends LuceneTestCase { public void testRange() throws Exception { assertQueryEquals("[ a TO z]", null, "[a TO z]"); - assertEquals(MultiTermQuery.CONSTANT_SCORE_REWRITE, ((TermRangeQuery)getQuery("[ a TO z]", null)).getRewriteMethod()); + assertEquals( + MultiTermQuery.CONSTANT_SCORE_REWRITE, + ((TermRangeQuery) getQuery("[ a TO z]", null)).getRewriteMethod()); StandardQueryParser qp = new StandardQueryParser(); - + qp.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_REWRITE); - assertEquals(MultiTermQuery.SCORING_BOOLEAN_REWRITE,((TermRangeQuery)qp.parse("[ a TO z]", "field")).getRewriteMethod()); + assertEquals( + MultiTermQuery.SCORING_BOOLEAN_REWRITE, + ((TermRangeQuery) qp.parse("[ a TO z]", "field")).getRewriteMethod()); // test open ranges assertQueryEquals("[ a TO * ]", null, "[a TO *]"); assertQueryEquals("[ * TO z ]", null, "[* TO z]"); assertQueryEquals("[ * TO * ]", null, "[* TO *]"); - + assertQueryEquals("field>=a", null, "[a TO *]"); assertQueryEquals("field>a", null, "{a TO *]"); assertQueryEquals("field<=a", null, "[* TO a]"); assertQueryEquals("field<a", null, "[* TO a}"); - Map<CharSequence,DateTools.Resolution> dateRes = new HashMap<>(); - - // set a field specific date resolution + Map<CharSequence, DateTools.Resolution> dateRes = new HashMap<>(); + + // set a field specific date resolution dateRes.put(monthField, DateTools.Resolution.MONTH); qp.setDateResolutionMap(dateRes); @@ -766,27 +743,45 @@ public class TestQPHelper extends LuceneTestCase { // for this field no field specific date resolution has been set, // so verify if the default resolution is used - assertDateRangeQueryEquals(qp, defaultField, startDate, endDate, - endDateExpected.getTime(), DateTools.Resolution.MILLISECOND); + assertDateRangeQueryEquals( + qp, + defaultField, + startDate, + endDate, + endDateExpected.getTime(), + DateTools.Resolution.MILLISECOND); // verify if field specific date resolutions are used for these two // fields - assertDateRangeQueryEquals(qp, monthField, startDate, endDate, - endDateExpected.getTime(), DateTools.Resolution.MONTH); + assertDateRangeQueryEquals( + qp, monthField, startDate, endDate, endDateExpected.getTime(), DateTools.Resolution.MONTH); - assertDateRangeQueryEquals(qp, hourField, startDate, endDate, -
endDateExpected.getTime(), DateTools.Resolution.HOUR); + assertDateRangeQueryEquals( + qp, hourField, startDate, endDate, endDateExpected.getTime(), DateTools.Resolution.HOUR); } - public void assertDateRangeQueryEquals(StandardQueryParser qp, - String field, String startDate, String endDate, Date endDateInclusive, - DateTools.Resolution resolution) throws Exception { - assertQueryEquals(qp, field, field + ":[" + escapeDateString(startDate) + " TO " + escapeDateString(endDate) - + "]", "[" + getDate(startDate, resolution) + " TO " - + getDate(endDateInclusive, resolution) + "]"); - assertQueryEquals(qp, field, field + ":{" + escapeDateString(startDate) + " TO " + escapeDateString(endDate) - + "}", "{" + getDate(startDate, resolution) + " TO " - + getDate(endDate, resolution) + "}"); + public void assertDateRangeQueryEquals( + StandardQueryParser qp, + String field, + String startDate, + String endDate, + Date endDateInclusive, + DateTools.Resolution resolution) + throws Exception { + assertQueryEquals( + qp, + field, + field + ":[" + escapeDateString(startDate) + " TO " + escapeDateString(endDate) + "]", + "[" + + getDate(startDate, resolution) + + " TO " + + getDate(endDateInclusive, resolution) + + "]"); + assertQueryEquals( + qp, + field, + field + ":{" + escapeDateString(startDate) + " TO " + escapeDateString(endDate) + "}", + "{" + getDate(startDate, resolution) + " TO " + getDate(endDate, resolution) + "}"); } public void testEscaped() throws Exception { @@ -797,7 +792,7 @@ public class TestQPHelper extends LuceneTestCase { * assertQueryEquals("\\[brackets", null, "brackets"); * assertQueryEquals("\\\\", a, "\\\\"); assertQueryEquals("\\+blah", a, * "\\+blah"); assertQueryEquals("\\(blah", a, "\\(blah"); - * + * * assertQueryEquals("\\-blah", a, "\\-blah"); assertQueryEquals("\\!blah", * a, "\\!blah"); assertQueryEquals("\\{blah", a, "\\{blah"); * assertQueryEquals("\\}blah", a, "\\}blah"); assertQueryEquals("\\:blah", @@ -814,7 +809,7 @@ public class TestQPHelper extends LuceneTestCase { */ assertQueryEquals("\\*", a, "*"); - + assertQueryEquals("\\a", a, "a"); assertQueryEquals("a\\-b:c", a, "a-b:c"); @@ -851,7 +846,8 @@ public class TestQPHelper extends LuceneTestCase { assertQueryEquals( "[\"c\\:\\\\temp\\\\\\~foo0.txt\" TO \"c\\:\\\\temp\\\\\\~foo9.txt\"]", - a, "[c:\\temp\\~foo0.txt TO c:\\temp\\~foo9.txt]"); + a, + "[c:\\temp\\~foo0.txt TO c:\\temp\\~foo9.txt]"); assertQueryEquals("a\\\\\\+b", a, "a\\+b"); @@ -950,8 +946,7 @@ public class TestQPHelper extends LuceneTestCase { assertQueryEqualsDOA("+weltbank\r\n+worlbank", null, "+weltbank +worlbank"); assertQueryEqualsDOA("weltbank \r\n+worlbank", null, "+weltbank +worlbank"); assertQueryEqualsDOA("weltbank \r\n +worlbank", null, "+weltbank +worlbank"); - assertQueryEqualsDOA("weltbank \r \n +worlbank", null, - "+weltbank +worlbank"); + assertQueryEqualsDOA("weltbank \r \n +worlbank", null, "+weltbank +worlbank"); assertQueryEqualsDOA("+weltbank\t+worlbank", null, "+weltbank +worlbank"); assertQueryEqualsDOA("weltbank \t+worlbank", null, "+weltbank +worlbank"); @@ -984,7 +979,8 @@ public class TestQPHelper extends LuceneTestCase { assertNotNull(q); StandardQueryParser qp2 = new StandardQueryParser(); - qp2.setAnalyzer(new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET)); + qp2.setAnalyzer( + new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET)); q = qp2.parse("the^3", "field"); // "the" is a stop word so the result is an empty query: @@ -994,9 +990,11 @@ 
public class TestQPHelper extends LuceneTestCase { } public void assertQueryNodeException(String queryString) throws Exception { - expectThrows(QueryNodeException.class, () -> { - getQuery(queryString, null); - }); + expectThrows( + QueryNodeException.class, + () -> { + getQuery(queryString, null); + }); } public void testException() throws Exception { @@ -1006,37 +1004,43 @@ public class TestQPHelper extends LuceneTestCase { assertQueryNodeException("foo bar))"); assertQueryNodeException("field:term:with:colon some more terms"); assertQueryNodeException("(sub query)^5.0^2.0 plus more"); - assertQueryNodeException("secret AND illegal) AND access:confidential"); + assertQueryNodeException("secret AND illegal) AND access:confidential"); } // Wildcard queries should not be allowed public void testCustomQueryParserWildcard() { - expectThrows(QueryNodeException.class, () -> { - new QPTestParser(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)).parse("a?t", "contents"); - }); + expectThrows( + QueryNodeException.class, + () -> { + new QPTestParser(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)) + .parse("a?t", "contents"); + }); } // Fuzzy queries should not be allowed" public void testCustomQueryParserFuzzy() throws Exception { - expectThrows(QueryNodeException.class, () -> { - new QPTestParser(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)).parse("xunit~", "contents"); - }); + expectThrows( + QueryNodeException.class, + () -> { + new QPTestParser(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)) + .parse("xunit~", "contents"); + }); } // too many boolean clauses, so ParseException is expected public void testBooleanQuery() throws Exception { IndexSearcher.setMaxClauseCount(2); - expectThrows(QueryNodeException.class, () -> { - StandardQueryParser qp = new StandardQueryParser(); - qp.setAnalyzer(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); + expectThrows( + QueryNodeException.class, + () -> { + StandardQueryParser qp = new StandardQueryParser(); + qp.setAnalyzer(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); - qp.parse("one two three", "field"); - }); + qp.parse("one two three", "field"); + }); } - /** - * This test differs from TestPrecedenceQueryParser - */ + /** This test differs from TestPrecedenceQueryParser */ public void testPrecedence() throws Exception { StandardQueryParser qp = new StandardQueryParser(); qp.setAnalyzer(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); @@ -1047,35 +1051,35 @@ public class TestQPHelper extends LuceneTestCase { assertEquals(query1, query2); } -// Todo: Convert from DateField to DateUtil -// public void testLocalDateFormat() throws IOException, QueryNodeException { -// Directory ramDir = newDirectory(); -// IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))); -// addDateDoc("a", 2005, 12, 2, 10, 15, 33, iw); -// addDateDoc("b", 2005, 12, 4, 22, 15, 00, iw); -// iw.close(); -// IndexSearcher is = new IndexSearcher(ramDir, true); -// assertHits(1, "[12/1/2005 TO 12/3/2005]", is); -// assertHits(2, "[12/1/2005 TO 12/4/2005]", is); -// assertHits(1, "[12/3/2005 TO 12/4/2005]", is); -// assertHits(1, "{12/1/2005 TO 12/3/2005}", is); -// assertHits(1, "{12/1/2005 TO 12/4/2005}", is); -// assertHits(0, "{12/3/2005 TO 12/4/2005}", is); -// is.close(); -// ramDir.close(); -// } -// -// private void addDateDoc(String content, int year, int month, int day, -// int hour, int minute, int second, IndexWriter 
iw) throws IOException { -// Document d = new Document(); -// d.add(newField("f", content, Field.Store.YES, Field.Index.ANALYZED)); -// Calendar cal = Calendar.getInstance(Locale.ENGLISH); -// cal.set(year, month - 1, day, hour, minute, second); -// d.add(newField("date", DateField.dateToString(cal.getTime()), -// Field.Store.YES, Field.Index.NOT_ANALYZED)); -// iw.addDocument(d); -// } - + // Todo: Convert from DateField to DateUtil + // public void testLocalDateFormat() throws IOException, QueryNodeException { + // Directory ramDir = newDirectory(); + // IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(new MockAnalyzer(random, + // MockTokenizer.WHITESPACE, false))); + // addDateDoc("a", 2005, 12, 2, 10, 15, 33, iw); + // addDateDoc("b", 2005, 12, 4, 22, 15, 00, iw); + // iw.close(); + // IndexSearcher is = new IndexSearcher(ramDir, true); + // assertHits(1, "[12/1/2005 TO 12/3/2005]", is); + // assertHits(2, "[12/1/2005 TO 12/4/2005]", is); + // assertHits(1, "[12/3/2005 TO 12/4/2005]", is); + // assertHits(1, "{12/1/2005 TO 12/3/2005}", is); + // assertHits(1, "{12/1/2005 TO 12/4/2005}", is); + // assertHits(0, "{12/3/2005 TO 12/4/2005}", is); + // is.close(); + // ramDir.close(); + // } + // + // private void addDateDoc(String content, int year, int month, int day, + // int hour, int minute, int second, IndexWriter iw) throws IOException { + // Document d = new Document(); + // d.add(newField("f", content, Field.Store.YES, Field.Index.ANALYZED)); + // Calendar cal = Calendar.getInstance(Locale.ENGLISH); + // cal.set(year, month - 1, day, hour, minute, second); + // d.add(newField("date", DateField.dateToString(cal.getTime()), + // Field.Store.YES, Field.Index.NOT_ANALYZED)); + // iw.addDocument(d); + // } public void testStarParsing() throws Exception { // final int[] type = new int[1]; @@ -1139,11 +1143,11 @@ public class TestQPHelper extends LuceneTestCase { // assertEquals(1,type[0]); } - + public void testRegexps() throws Exception { StandardQueryParser qp = new StandardQueryParser(); qp.setAnalyzer(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true)); - final String df = "field" ; + final String df = "field"; RegexpQuery q = new RegexpQuery(new Term("field", "[a-z][123]")); assertEquals(q, qp.parse("/[a-z][123]/", df)); assertEquals(q, qp.parse("/[A-Z][123]/", df)); @@ -1151,38 +1155,44 @@ public class TestQPHelper extends LuceneTestCase { qp.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_REWRITE); q.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_REWRITE); assertEquals(new BoostQuery(q, 0.5f), qp.parse("/[A-Z][123]/^0.5", df)); - assertEquals(MultiTermQuery.SCORING_BOOLEAN_REWRITE, ((RegexpQuery) (((BoostQuery) qp.parse("/[A-Z][123]/^0.5", df)).getQuery())).getRewriteMethod()); + assertEquals( + MultiTermQuery.SCORING_BOOLEAN_REWRITE, + ((RegexpQuery) (((BoostQuery) qp.parse("/[A-Z][123]/^0.5", df)).getQuery())) + .getRewriteMethod()); qp.setMultiTermRewriteMethod(MultiTermQuery.CONSTANT_SCORE_REWRITE); - + Query escaped = new RegexpQuery(new Term("field", "[a-z]\\/[123]")); assertEquals(escaped, qp.parse("/[a-z]\\/[123]/", df)); Query escaped2 = new RegexpQuery(new Term("field", "[a-z]\\*[123]")); assertEquals(escaped2, qp.parse("/[a-z]\\*[123]/", df)); - + BooleanQuery.Builder complex = new BooleanQuery.Builder(); complex.add(new RegexpQuery(new Term("field", "[a-z]\\/[123]")), Occur.MUST); complex.add(new TermQuery(new Term("path", "/etc/init.d/")), Occur.MUST); complex.add(new TermQuery(new Term("field", "/etc/init[.]d/lucene/")), Occur.SHOULD); 
- assertEquals(complex.build(), qp.parse("/[a-z]\\/[123]/ AND path:\"/etc/init.d/\" OR \"/etc\\/init\\[.\\]d/lucene/\" ", df)); - + assertEquals( + complex.build(), + qp.parse( + "/[a-z]\\/[123]/ AND path:\"/etc/init.d/\" OR \"/etc\\/init\\[.\\]d/lucene/\" ", df)); + Query re = new RegexpQuery(new Term("field", "http.*")); assertEquals(re, qp.parse("field:/http.*/", df)); assertEquals(re, qp.parse("/http.*/", df)); - + re = new RegexpQuery(new Term("field", "http~0.5")); assertEquals(re, qp.parse("field:/http~0.5/", df)); assertEquals(re, qp.parse("/http~0.5/", df)); // fuzzy op doesn't apply to regexps. assertQueryNodeException("/http/~2"); - + re = new RegexpQuery(new Term("field", "boo")); assertEquals(re, qp.parse("field:/boo/", df)); assertEquals(re, qp.parse("/boo/", df)); - + assertEquals(new TermQuery(new Term("field", "/boo/")), qp.parse("\"/boo/\"", df)); assertEquals(new TermQuery(new Term("field", "/boo/")), qp.parse("\\/boo\\/", df)); - + BooleanQuery.Builder two = new BooleanQuery.Builder(); two.add(new RegexpQuery(new Term("field", "foo")), Occur.SHOULD); two.add(new RegexpQuery(new Term("field", "bar")), Occur.SHOULD); @@ -1201,13 +1211,12 @@ public class TestQPHelper extends LuceneTestCase { result = qp.parse("a:woo OR a:the", "a"); assertNotNull("result is null and it shouldn't be", result); assertTrue("result is not a TermQuery", result instanceof TermQuery); - result = qp.parse( - "(fieldX:xxxxx OR fieldy:xxxxxxxx)^2 AND (fieldx:the OR fieldy:foo)", - "a"); - Query expected = new BooleanQuery.Builder() - .add(new TermQuery(new Term("fieldX", "xxxxx")), Occur.SHOULD) - .add(new TermQuery(new Term("fieldy", "xxxxxxxx")), Occur.SHOULD) - .build(); + result = qp.parse("(fieldX:xxxxx OR fieldy:xxxxxxxx)^2 AND (fieldx:the OR fieldy:foo)", "a"); + Query expected = + new BooleanQuery.Builder() + .add(new TermQuery(new Term("fieldX", "xxxxx")), Occur.SHOULD) + .add(new TermQuery(new Term("fieldy", "xxxxxxxx")), Occur.SHOULD) + .build(); expected = new BoostQuery(expected, 2f); assertEquals(expected, result); } @@ -1221,7 +1230,7 @@ public class TestQPHelper extends LuceneTestCase { String qtxt = "\"the words in poisitions pos02578 are stopped in this phrasequery\""; // 0 2 5 7 8 - int expectedPositions[] = { 1, 3, 4, 6, 9 }; + int expectedPositions[] = {1, 3, 4, 6, 9}; PhraseQuery pq = (PhraseQuery) qp.parse(qtxt, "a"); // System.out.println("Query text: "+qtxt); // System.out.println("Result: "+pq); @@ -1229,8 +1238,8 @@ public class TestQPHelper extends LuceneTestCase { int pos[] = pq.getPositions(); for (int i = 0; i < t.length; i++) { // System.out.println(i+". 
"+t[i]+" pos: "+pos[i]); - assertEquals("term " + i + " = " + t[i] + " has wrong term-position!", - expectedPositions[i], pos[i]); + assertEquals( + "term " + i + " = " + t[i] + " has wrong term-position!", expectedPositions[i], pos[i]); } } @@ -1265,7 +1274,8 @@ public class TestQPHelper extends LuceneTestCase { private static class CannedTokenizer extends Tokenizer { private int upto = 0; - private final PositionIncrementAttribute posIncr = addAttribute(PositionIncrementAttribute.class); + private final PositionIncrementAttribute posIncr = + addAttribute(PositionIncrementAttribute.class); private final CharTermAttribute term = addAttribute(CharTermAttribute.class); public CannedTokenizer() { @@ -1317,7 +1327,7 @@ public class TestQPHelper extends LuceneTestCase { w.addDocument(doc); IndexReader r = DirectoryReader.open(w); IndexSearcher s = newSearcher(r); - + Query q = new StandardQueryParser(new CannedAnalyzer()).parse("\"a\"", "field"); assertTrue(q instanceof MultiPhraseQuery); assertEquals(1, s.search(q, 10).totalHits.value); @@ -1335,15 +1345,19 @@ public class TestQPHelper extends LuceneTestCase { parser.setAnalyzer(new MockAnalyzer(random())); BooleanQuery.Builder exp = new BooleanQuery.Builder(); - exp.add(new BooleanClause(new RegexpQuery(new Term("b", "ab.+")), BooleanClause.Occur.SHOULD));//TODO spezification? was "MUST" - exp.add(new BooleanClause(new RegexpQuery(new Term("t", "ab.+")), BooleanClause.Occur.SHOULD));//TODO spezification? was "MUST" + exp.add( + new BooleanClause( + new RegexpQuery(new Term("b", "ab.+")), + BooleanClause.Occur.SHOULD)); // TODO spezification? was "MUST" + exp.add( + new BooleanClause( + new RegexpQuery(new Term("t", "ab.+")), + BooleanClause.Occur.SHOULD)); // TODO spezification? was "MUST" assertEquals(exp.build(), parser.parse("/ab.+/", null)); RegexpQuery regexpQueryexp = new RegexpQuery(new Term("test", "[abc]?[0-9]")); assertEquals(regexpQueryexp, parser.parse("test:/[abc]?[0-9]/", null)); - } - } diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestStandardQP.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestStandardQP.java index 7e50eeb6631..ed1eed169b9 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestStandardQP.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestStandardQP.java @@ -31,11 +31,9 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.WildcardQuery; -/** - * Tests QueryParser. - */ +/** Tests QueryParser. 
*/ public class TestStandardQP extends QueryParserTestBase { - + public StandardQueryParser getParser(Analyzer a) throws Exception { if (a == null) a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true); StandardQueryParser qp = new StandardQueryParser(a); @@ -43,31 +41,30 @@ public class TestStandardQP extends QueryParserTestBase { return qp; } - + public Query parse(String query, StandardQueryParser qp) throws Exception { return qp.parse(query, getDefaultField()); } - + @Override - public CommonQueryParserConfiguration getParserConfig(Analyzer a) - throws Exception { + public CommonQueryParserConfiguration getParserConfig(Analyzer a) throws Exception { return getParser(a); } - + @Override - public Query getQuery(String query, CommonQueryParserConfiguration cqpC) - throws Exception { + public Query getQuery(String query, CommonQueryParserConfiguration cqpC) throws Exception { assert cqpC != null : "Parameter must not be null"; - assert (cqpC instanceof StandardQueryParser) : "Parameter must be instance of StandardQueryParser"; + assert (cqpC instanceof StandardQueryParser) + : "Parameter must be instance of StandardQueryParser"; StandardQueryParser qp = (StandardQueryParser) cqpC; return parse(query, qp); } - + @Override public Query getQuery(String query, Analyzer a) throws Exception { return parse(query, getParser(a)); } - + @Override public boolean isQueryParserException(Exception exception) { return exception instanceof QueryNodeException; @@ -79,45 +76,43 @@ public class TestStandardQP extends QueryParserTestBase { StandardQueryParser qp = (StandardQueryParser) cqpC; qp.setDefaultOperator(Operator.OR); } - + @Override public void setDefaultOperatorAND(CommonQueryParserConfiguration cqpC) { assert (cqpC instanceof StandardQueryParser); StandardQueryParser qp = (StandardQueryParser) cqpC; qp.setDefaultOperator(Operator.AND); } - + @Override - public void setAutoGeneratePhraseQueries(CommonQueryParserConfiguration cqpC, - boolean value) { + public void setAutoGeneratePhraseQueries(CommonQueryParserConfiguration cqpC, boolean value) { throw new UnsupportedOperationException(); } - + @Override - public void setDateResolution(CommonQueryParserConfiguration cqpC, - CharSequence field, Resolution value) { + public void setDateResolution( + CommonQueryParserConfiguration cqpC, CharSequence field, Resolution value) { assert (cqpC instanceof StandardQueryParser); StandardQueryParser qp = (StandardQueryParser) cqpC; qp.getDateResolutionMap().put(field, value); } - - @Override public void testOperatorVsWhitespace() throws Exception { // LUCENE-2566 is not implemented for StandardQueryParser // TODO implement LUCENE-2566 and remove this (override)method - Analyzer a = new Analyzer() { - @Override - public TokenStreamComponents createComponents(String fieldName) { - return new TokenStreamComponents(new MockTokenizer(MockTokenizer.WHITESPACE, false)); - } - }; + Analyzer a = + new Analyzer() { + @Override + public TokenStreamComponents createComponents(String fieldName) { + return new TokenStreamComponents(new MockTokenizer(MockTokenizer.WHITESPACE, false)); + } + }; assertQueryEquals("a - b", a, "a -b"); assertQueryEquals("a + b", a, "a +b"); assertQueryEquals("a ! b", a, "a -b"); } - + @Override public void testRangeWithPhrase() throws Exception { // StandardSyntaxParser does not differentiate between a term and a @@ -126,36 +121,38 @@ public class TestStandardQP extends QueryParserTestBase { // wasEscaped=true ? 
assertQueryEquals("[\\* TO \"*\"]", null, "[\\* TO *]"); } - + @Override public void testEscapedVsQuestionMarkAsWildcard() throws Exception { Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); assertQueryEquals("a:b\\-?c", a, "a:b-?c"); assertQueryEquals("a:b\\+?c", a, "a:b+?c"); assertQueryEquals("a:b\\:?c", a, "a:b:?c"); - + assertQueryEquals("a:b\\\\?c", a, "a:b\\?c"); } - + @Override public void testEscapedWildcard() throws Exception { - CommonQueryParserConfiguration qp = getParserConfig( new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); - WildcardQuery q = new WildcardQuery(new Term("field", "foo?ba?r"));//TODO not correct!! + CommonQueryParserConfiguration qp = + getParserConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); + WildcardQuery q = new WildcardQuery(new Term("field", "foo?ba?r")); // TODO not correct!! assertEquals(q, getQuery("foo\\?ba?r", qp)); } - + @Override public void testAutoGeneratePhraseQueriesOn() throws Exception { - expectThrows(UnsupportedOperationException.class, () -> { - setAutoGeneratePhraseQueries(getParser(null), true); - super.testAutoGeneratePhraseQueriesOn(); - }); + expectThrows( + UnsupportedOperationException.class, + () -> { + setAutoGeneratePhraseQueries(getParser(null), true); + super.testAutoGeneratePhraseQueriesOn(); + }); } - + @Override - public void testStarParsing() throws Exception { - } - + public void testStarParsing() throws Exception {} + @Override public void testDefaultOperator() throws Exception { StandardQueryParser qp = getParser(new MockAnalyzer(random())); @@ -166,28 +163,24 @@ public class TestStandardQP extends QueryParserTestBase { setDefaultOperatorOR(qp); assertEquals(StandardQueryConfigHandler.Operator.OR, qp.getDefaultOperator()); } - - + @Override public void testNewFieldQuery() throws Exception { /** ordinary behavior, synonyms form uncoordinated boolean query */ StandardQueryParser dumb = getParser(new Analyzer1()); BooleanQuery.Builder expanded = new BooleanQuery.Builder(); - expanded.add(new TermQuery(new Term("field", "dogs")), - BooleanClause.Occur.SHOULD); - expanded.add(new TermQuery(new Term("field", "dog")), - BooleanClause.Occur.SHOULD); - assertEquals(expanded.build(), dumb.parse("\"dogs\"","field")); + expanded.add(new TermQuery(new Term("field", "dogs")), BooleanClause.Occur.SHOULD); + expanded.add(new TermQuery(new Term("field", "dog")), BooleanClause.Occur.SHOULD); + assertEquals(expanded.build(), dumb.parse("\"dogs\"", "field")); /** even with the phrase operator the behavior is the same */ - assertEquals(expanded.build(), dumb.parse("dogs","field")); - - /** - * custom behavior, the synonyms are expanded, unless you use quote operator - */ - //TODO test something like "SmartQueryParser()" + assertEquals(expanded.build(), dumb.parse("dogs", "field")); + + /** custom behavior, the synonyms are expanded, unless you use quote operator */ + // TODO test something like "SmartQueryParser()" } - // TODO: Remove this specialization once the flexible standard parser gets multi-word synonym support + // TODO: Remove this specialization once the flexible standard parser gets multi-word synonym + // support @Override public void testQPA() throws Exception { super.testQPA(); diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/simple/TestSimpleQueryParser.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/simple/TestSimpleQueryParser.java index 169caa272ea..11185a5fa40 100644 --- 
a/lucene/queryparser/src/test/org/apache/lucene/queryparser/simple/TestSimpleQueryParser.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/simple/TestSimpleQueryParser.java @@ -16,10 +16,20 @@ */ package org.apache.lucene.queryparser.simple; +import static org.apache.lucene.queryparser.simple.SimpleQueryParser.AND_OPERATOR; +import static org.apache.lucene.queryparser.simple.SimpleQueryParser.ESCAPE_OPERATOR; +import static org.apache.lucene.queryparser.simple.SimpleQueryParser.FUZZY_OPERATOR; +import static org.apache.lucene.queryparser.simple.SimpleQueryParser.NEAR_OPERATOR; +import static org.apache.lucene.queryparser.simple.SimpleQueryParser.NOT_OPERATOR; +import static org.apache.lucene.queryparser.simple.SimpleQueryParser.OR_OPERATOR; +import static org.apache.lucene.queryparser.simple.SimpleQueryParser.PHRASE_OPERATOR; +import static org.apache.lucene.queryparser.simple.SimpleQueryParser.PRECEDENCE_OPERATORS; +import static org.apache.lucene.queryparser.simple.SimpleQueryParser.PREFIX_OPERATOR; +import static org.apache.lucene.queryparser.simple.SimpleQueryParser.WHITESPACE_OPERATOR; + import java.util.Collections; import java.util.LinkedHashMap; import java.util.Map; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; @@ -38,23 +48,12 @@ import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.automaton.LevenshteinAutomata; -import static org.apache.lucene.queryparser.simple.SimpleQueryParser.AND_OPERATOR; -import static org.apache.lucene.queryparser.simple.SimpleQueryParser.ESCAPE_OPERATOR; -import static org.apache.lucene.queryparser.simple.SimpleQueryParser.FUZZY_OPERATOR; -import static org.apache.lucene.queryparser.simple.SimpleQueryParser.NOT_OPERATOR; -import static org.apache.lucene.queryparser.simple.SimpleQueryParser.OR_OPERATOR; -import static org.apache.lucene.queryparser.simple.SimpleQueryParser.PHRASE_OPERATOR; -import static org.apache.lucene.queryparser.simple.SimpleQueryParser.PRECEDENCE_OPERATORS; -import static org.apache.lucene.queryparser.simple.SimpleQueryParser.PREFIX_OPERATOR; -import static org.apache.lucene.queryparser.simple.SimpleQueryParser.NEAR_OPERATOR; -import static org.apache.lucene.queryparser.simple.SimpleQueryParser.WHITESPACE_OPERATOR; - /** Tests for {@link SimpleQueryParser} */ public class TestSimpleQueryParser extends LuceneTestCase { /** - * helper to parse a query with whitespace+lowercase analyzer across "field", - * with default operator of MUST + * helper to parse a query with whitespace+lowercase analyzer across "field", with default + * operator of MUST */ private Query parse(String text) { Analyzer analyzer = new MockAnalyzer(random()); @@ -64,13 +63,13 @@ public class TestSimpleQueryParser extends LuceneTestCase { } /** - * helper to parse a query with whitespace+lowercase analyzer across "field", - * with default operator of MUST + * helper to parse a query with whitespace+lowercase analyzer across "field", with default + * operator of MUST */ private Query parse(String text, int flags) { Analyzer analyzer = new MockAnalyzer(random()); - SimpleQueryParser parser = new SimpleQueryParser(analyzer, - Collections.singletonMap("field", 1f), flags); + SimpleQueryParser parser = + new SimpleQueryParser(analyzer, Collections.singletonMap("field", 1f), flags); parser.setDefaultOperator(Occur.MUST); return parser.parse(text); } @@ -93,11 +92,13 @@ public class 
TestSimpleQueryParser extends LuceneTestCase { assertEquals(regular, parse("foobar~1a")); BooleanQuery.Builder bool = new BooleanQuery.Builder(); - FuzzyQuery fuzzy = new FuzzyQuery(new Term("field", "foo"), LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE); + FuzzyQuery fuzzy = + new FuzzyQuery(new Term("field", "foo"), LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE); bool.add(fuzzy, Occur.MUST); bool.add(new TermQuery(new Term("field", "bar")), Occur.MUST); - assertEquals(bool.build(), parse("foo~" + LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE + 1 + " bar")); + assertEquals( + bool.build(), parse("foo~" + LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE + 1 + " bar")); } /** test a simple phrase */ @@ -413,7 +414,8 @@ public class TestSimpleQueryParser extends LuceneTestCase { assertEquals(expected.build(), parse("(star wars) | empire | (strikes back)")); assertEquals(expected.build(), parse("(star + wars) |empire | (strikes + back)")); - assertEquals(expected.build(), parse("(star + | wars |) | ----empire | + --(strikes + | --back) \\")); + assertEquals( + expected.build(), parse("(star + | wars |) | ----empire | + --(strikes + | --back) \\")); } public void testComplex05() throws Exception { @@ -436,13 +438,15 @@ public class TestSimpleQueryParser extends LuceneTestCase { inner3.add(inner4.build(), Occur.MUST); inner2.add(inner3.build(), Occur.SHOULD); - + expected.add(inner1.build(), Occur.SHOULD); expected.add(inner2.build(), Occur.SHOULD); - + assertEquals(expected.build(), parse("(star wars) | (empire | (strikes back -jarjar))")); assertEquals(expected.build(), parse("(star + wars) |(empire | (strikes + back -jarjar) () )")); - assertEquals(expected.build(), parse("(star + | wars |) | --(--empire | + --(strikes + | --back + -jarjar) \"\" ) \"")); + assertEquals( + expected.build(), + parse("(star + | wars |) | --(--empire | + --(strikes + | --back + -jarjar) \"\" ) \"")); } public void testComplex06() throws Exception { @@ -466,13 +470,16 @@ public class TestSimpleQueryParser extends LuceneTestCase { expected.add(inner1.build(), Occur.MUST); assertEquals(expected.build(), parse("star (wars | (empire | strikes back jar\\+\\|jar))")); - assertEquals(expected.build(), parse("star + (wars |(empire | strikes + back jar\\+\\|jar) () )")); + assertEquals( + expected.build(), parse("star + (wars |(empire | strikes + back jar\\+\\|jar) () )")); + assertEquals( + expected.build(), + parse("star + (| wars | | --(--empire | + --strikes + | --back + jar\\+\\|jar) \"\" ) \"")); } /** test a term with field weights */ public void testWeightedTerm() throws Exception { - Map<String,Float> weights = new LinkedHashMap<>(); + Map<String, Float> weights = new LinkedHashMap<>(); weights.put("field0", 5f); weights.put("field1", 10f); @@ -491,7 +498,7 @@ public class TestSimpleQueryParser extends LuceneTestCase { /** test a more complex query with field weights */ public void testWeightedOR() throws Exception { - Map<String,Float> weights = new LinkedHashMap<>(); + Map<String, Float> weights = new LinkedHashMap<>(); weights.put("field0", 5f); weights.put("field1", 10f); @@ -522,9 +529,8 @@ public class TestSimpleQueryParser extends LuceneTestCase { /** helper to parse a query with keyword analyzer across "field" */ private Query parseKeyword(String text, int flags) { Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.KEYWORD, false); - SimpleQueryParser parser = new SimpleQueryParser(analyzer, - Collections.singletonMap("field",
1f), - flags); + SimpleQueryParser parser = + new SimpleQueryParser(analyzer, Collections.singletonMap("field", 1f), flags); return parser.parse(text); } @@ -611,7 +617,7 @@ public class TestSimpleQueryParser extends LuceneTestCase { } public void testRandomQueries2() throws Exception { - char chars[] = new char[] { 'a', '1', '|', '&', ' ', '(', ')', '"', '-', '~'}; + char chars[] = new char[] {'a', '1', '|', '&', ' ', '(', ')', '"', '-', '~'}; StringBuilder sb = new StringBuilder(); for (int i = 0; i < 1000; i++) { sb.setLength(0); diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/BooleanQueryTst.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/BooleanQueryTst.java index ddb2570e73b..6690f5f93d3 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/BooleanQueryTst.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/BooleanQueryTst.java @@ -17,7 +17,6 @@ package org.apache.lucene.queryparser.surround.query; import java.io.IOException; - import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; @@ -52,8 +51,10 @@ public class BooleanQueryTst { this.testCase = testCase; this.qf = qf; } - - public void setVerbose(boolean verbose) {this.verbose = verbose;} + + public void setVerbose(boolean verbose) { + this.verbose = verbose; + } class TestCollector extends SimpleCollector { // FIXME: use check hits from Lucene tests int totalMatched; @@ -75,7 +76,7 @@ public class BooleanQueryTst { protected void doSetNextReader(LeafReaderContext context) throws IOException { docBase = context.docBase; } - + @Override public void collect(int docNr) throws IOException { float score = scorer.score(); @@ -85,7 +86,7 @@ public class BooleanQueryTst { Assert.assertTrue(queryText + ": too many hits", totalMatched < expectedDocNrs.length); int i; for (i = 0; i < expectedDocNrs.length; i++) { - if ((! 
encountered[i]) && (expectedDocNrs[i] == docNr)) { + if ((!encountered[i]) && (expectedDocNrs[i] == docNr)) { encountered[i] = true; break; } @@ -95,7 +96,7 @@ public class BooleanQueryTst { } totalMatched++; } - + @Override public ScoreMode scoreMode() { return ScoreMode.COMPLETE; @@ -108,15 +109,15 @@ public class BooleanQueryTst { public void doTest() throws Exception { - if (verbose) { - System.out.println(""); - System.out.println("Query: " + queryText); + if (verbose) { + System.out.println(""); + System.out.println("Query: " + queryText); } - + SrndQuery lq = QueryParser.parse(queryText); - + /* if (verbose) System.out.println("Srnd: " + lq.toString()); */ - + Query query = lq.makeLuceneQueryField(fieldName, qf); /* if (verbose) System.out.println("Lucene: " + query.toString()); */ @@ -131,5 +132,3 @@ public class BooleanQueryTst { tc.checkNrHits(); } } - - diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/ExceptionQueryTst.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/ExceptionQueryTst.java index b54f68d29a1..dc35b09e3ca 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/ExceptionQueryTst.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/ExceptionQueryTst.java @@ -19,11 +19,10 @@ package org.apache.lucene.queryparser.surround.query; import org.apache.lucene.queryparser.surround.parser.ParseException; import org.apache.lucene.queryparser.surround.parser.QueryParser; - public class ExceptionQueryTst { private String queryText; private boolean verbose; - + public ExceptionQueryTst(String queryText, boolean verbose) { this.queryText = queryText; this.verbose = verbose; @@ -39,27 +38,23 @@ public class ExceptionQueryTst { } } catch (ParseException e) { if (verbose) { - System.out.println("Parse exception for query:\n" - + queryText + "\n" - + e.getMessage()); + System.out.println("Parse exception for query:\n" + queryText + "\n" + e.getMessage()); } pass = true; } - if (! 
pass) { + if (!pass) { failQueries.append(queryText); failQueries.append("\nParsed as: "); failQueries.append(lq.toString()); failQueries.append("\n"); } } - + public static String getFailQueries(String[] exceptionQueries, boolean verbose) { StringBuilder failQueries = new StringBuilder(); - for (int i = 0; i < exceptionQueries.length; i++ ) { - new ExceptionQueryTst( exceptionQueries[i], verbose).doTest(failQueries); + for (int i = 0; i < exceptionQueries.length; i++) { + new ExceptionQueryTst(exceptionQueries[i], verbose).doTest(failQueries); } return failQueries.toString(); } } - - diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/SingleFieldTestDb.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/SingleFieldTestDb.java index 88614e8dfb4..fde5f7a1f8f 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/SingleFieldTestDb.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/SingleFieldTestDb.java @@ -17,22 +17,21 @@ package org.apache.lucene.queryparser.surround.query; import java.util.Random; - -import org.apache.lucene.document.Field; -import org.apache.lucene.store.ByteBuffersDirectory; -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.store.ByteBuffersDirectory; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.MockDirectoryWrapper; public class SingleFieldTestDb { private Directory db; private String[] docs; private String fieldName; - + public SingleFieldTestDb(Random random, String[] documents, String fName) { try { db = new MockDirectoryWrapper(random, new ByteBuffersDirectory()); @@ -49,10 +48,16 @@ public class SingleFieldTestDb { throw new Error(ioe); } } - - Directory getDb() {return db;} - String[] getDocs() {return docs;} - String getFieldname() {return fieldName;} + + Directory getDb() { + return db; + } + + String[] getDocs() { + return docs; + } + + String getFieldname() { + return fieldName; + } } - - diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/SrndQueryTest.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/SrndQueryTest.java index b1b7ef93bc0..e0a24fa89c5 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/SrndQueryTest.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/SrndQueryTest.java @@ -22,12 +22,9 @@ import org.apache.lucene.search.QueryUtils; import org.apache.lucene.util.LuceneTestCase; import org.junit.Test; -/** - * - * - **/ +/** */ public class SrndQueryTest extends LuceneTestCase { - + void checkEqualParsings(String s1, String s2) throws Exception { String fieldName = "foo"; BasicQueryFactory qf = new BasicQueryFactory(16); @@ -39,8 +36,8 @@ public class SrndQueryTest extends LuceneTestCase { @Test public void testHashEquals() throws Exception { - //grab some sample queries from Test02Boolean and Test03Distance and - //check there hashes and equals + // grab some sample queries from Test02Boolean and Test03Distance and + // check their hashes and equals checkEqualParsings("word1 w word2", " word1 w word2 ");
checkEqualParsings("2N(w1,w2,w3)", " 2N(w1, w2 , w3)"); checkEqualParsings("abc?", " abc? "); diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test01Exceptions.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test01Exceptions.java index f61ebd220df..f7cda89f77d 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test01Exceptions.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test01Exceptions.java @@ -18,7 +18,6 @@ package org.apache.lucene.queryparser.surround.query; import junit.framework.TestSuite; import junit.textui.TestRunner; - import org.apache.lucene.util.LuceneTestCase; public class Test01Exceptions extends LuceneTestCase { @@ -64,6 +63,3 @@ public class Test01Exceptions extends LuceneTestCase { } } } - - - diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test02Boolean.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test02Boolean.java index 298dbdc72d0..fcb731e7e86 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test02Boolean.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test02Boolean.java @@ -18,7 +18,6 @@ package org.apache.lucene.queryparser.surround.query; import junit.framework.TestSuite; import junit.textui.TestRunner; - import org.apache.lucene.util.LuceneTestCase; public class Test02Boolean extends LuceneTestCase { @@ -43,85 +42,129 @@ public class Test02Boolean extends LuceneTestCase { super.setUp(); db1 = new SingleFieldTestDb(random(), docs1, fieldName); } - + SingleFieldTestDb db1; public void normalTest1(String query, int[] expdnrs) throws Exception { - BooleanQueryTst bqt = new BooleanQueryTst( query, expdnrs, db1, fieldName, this, - new BasicQueryFactory(maxBasicQueries)); + BooleanQueryTst bqt = + new BooleanQueryTst( + query, expdnrs, db1, fieldName, this, new BasicQueryFactory(maxBasicQueries)); bqt.setVerbose(verbose); bqt.doTest(); } public void test02Terms01() throws Exception { - int[] expdnrs = {0}; normalTest1( "word1", expdnrs); + int[] expdnrs = {0}; + normalTest1("word1", expdnrs); } + public void test02Terms02() throws Exception { - int[] expdnrs = {0, 1, 3}; normalTest1( "word*", expdnrs); + int[] expdnrs = {0, 1, 3}; + normalTest1("word*", expdnrs); } + public void test02Terms03() throws Exception { - int[] expdnrs = {2}; normalTest1( "ord2", expdnrs); + int[] expdnrs = {2}; + normalTest1("ord2", expdnrs); } + public void test02Terms04() throws Exception { - int[] expdnrs = {}; normalTest1( "kxork*", expdnrs); + int[] expdnrs = {}; + normalTest1("kxork*", expdnrs); } + public void test02Terms05() throws Exception { - int[] expdnrs = {0, 1, 3}; normalTest1( "wor*", expdnrs); + int[] expdnrs = {0, 1, 3}; + normalTest1("wor*", expdnrs); } + public void test02Terms06() throws Exception { - int[] expdnrs = {}; normalTest1( "ab", expdnrs); + int[] expdnrs = {}; + normalTest1("ab", expdnrs); } - + public void test02Terms10() throws Exception { - int[] expdnrs = {}; normalTest1( "abc?", expdnrs); + int[] expdnrs = {}; + normalTest1("abc?", expdnrs); } + public void test02Terms13() throws Exception { - int[] expdnrs = {0,1,3}; normalTest1( "word?", expdnrs); + int[] expdnrs = {0, 1, 3}; + normalTest1("word?", expdnrs); } + public void test02Terms14() throws Exception { - int[] expdnrs = {0,1,3}; normalTest1( "w?rd?", expdnrs); + int[] expdnrs = {0, 1, 3}; + normalTest1("w?rd?", 
expdnrs); } + public void test02Terms20() throws Exception { - int[] expdnrs = {0,1,3}; normalTest1( "w*rd?", expdnrs); + int[] expdnrs = {0, 1, 3}; + normalTest1("w*rd?", expdnrs); } + public void test02Terms21() throws Exception { - int[] expdnrs = {3}; normalTest1( "w*rd??", expdnrs); + int[] expdnrs = {3}; + normalTest1("w*rd??", expdnrs); } + public void test02Terms22() throws Exception { - int[] expdnrs = {3}; normalTest1( "w*?da?", expdnrs); + int[] expdnrs = {3}; + normalTest1("w*?da?", expdnrs); } + public void test02Terms23() throws Exception { - int[] expdnrs = {}; normalTest1( "w?da?", expdnrs); + int[] expdnrs = {}; + normalTest1("w?da?", expdnrs); } - + public void test03And01() throws Exception { - int[] expdnrs = {0}; normalTest1( "word1 AND word2", expdnrs); + int[] expdnrs = {0}; + normalTest1("word1 AND word2", expdnrs); } + public void test03And02() throws Exception { - int[] expdnrs = {3}; normalTest1( "word* and ord*", expdnrs); + int[] expdnrs = {3}; + normalTest1("word* and ord*", expdnrs); } + public void test03And03() throws Exception { - int[] expdnrs = {0}; normalTest1( "and(word1,word2)", expdnrs); + int[] expdnrs = {0}; + normalTest1("and(word1,word2)", expdnrs); } + public void test04Or01() throws Exception { - int[] expdnrs = {0, 3}; normalTest1( "word1 or word2", expdnrs); + int[] expdnrs = {0, 3}; + normalTest1("word1 or word2", expdnrs); } + public void test04Or02() throws Exception { - int[] expdnrs = {0, 1, 2, 3}; normalTest1( "word* OR ord*", expdnrs); + int[] expdnrs = {0, 1, 2, 3}; + normalTest1("word* OR ord*", expdnrs); } + public void test04Or03() throws Exception { - int[] expdnrs = {0, 3}; normalTest1( "OR (word1, word2)", expdnrs); + int[] expdnrs = {0, 3}; + normalTest1("OR (word1, word2)", expdnrs); } + public void test05Not01() throws Exception { - int[] expdnrs = {3}; normalTest1( "word2 NOT word1", expdnrs); + int[] expdnrs = {3}; + normalTest1("word2 NOT word1", expdnrs); } + public void test05Not02() throws Exception { - int[] expdnrs = {0}; normalTest1( "word2* not ord*", expdnrs); + int[] expdnrs = {0}; + normalTest1("word2* not ord*", expdnrs); } + public void test06AndOr01() throws Exception { - int[] expdnrs = {0}; normalTest1( "(word1 or ab)and or(word2,xyz, defg)", expdnrs); + int[] expdnrs = {0}; + normalTest1("(word1 or ab)and or(word2,xyz, defg)", expdnrs); } + public void test07AndOrNot02() throws Exception { - int[] expdnrs = {0}; normalTest1( "or( word2* not ord*, and(xyz,def))", expdnrs); + int[] expdnrs = {0}; + normalTest1("or( word2* not ord*, and(xyz,def))", expdnrs); } } diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test03Distance.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test03Distance.java index 9dca811b438..9edbc7e7be6 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test03Distance.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test03Distance.java @@ -18,7 +18,6 @@ package org.apache.lucene.queryparser.surround.query; import junit.framework.TestSuite; import junit.textui.TestRunner; - import org.apache.lucene.util.LuceneTestCase; public class Test03Distance extends LuceneTestCase { @@ -28,8 +27,8 @@ public class Test03Distance extends LuceneTestCase { boolean verbose = false; int maxBasicQueries = 16; - - String [] exceptionQueries = { + + String[] exceptionQueries = { "(aa and bb) w cc", "(aa or bb) w (cc and dd)", "(aa opt bb) w cc", @@ -40,7 +39,7 @@ public class 
Test03Distance extends LuceneTestCase { "(aa or (bi:bb)) w cc", "(aa or (bb and dd)) w cc" }; - + public void test00Exceptions() throws Exception { String m = ExceptionQueryTst.getFailQueries(exceptionQueries, verbose); if (m.length() > 0) { @@ -67,10 +66,11 @@ public class Test03Distance extends LuceneTestCase { db2 = new SingleFieldTestDb(random(), docs2, fieldName); db3 = new SingleFieldTestDb(random(), docs3, fieldName); } - + private void distanceTst(String query, int[] expdnrs, SingleFieldTestDb db) throws Exception { - BooleanQueryTst bqt = new BooleanQueryTst( query, expdnrs, db, fieldName, this, - new BasicQueryFactory(maxBasicQueries)); + BooleanQueryTst bqt = + new BooleanQueryTst( + query, expdnrs, db, fieldName, this, new BasicQueryFactory(maxBasicQueries)); bqt.setVerbose(verbose); bqt.doTest(); } @@ -78,156 +78,216 @@ public class Test03Distance extends LuceneTestCase { public void distanceTest1(String query, int[] expdnrs) throws Exception { distanceTst(query, expdnrs, db1); } - + public void test0W01() throws Exception { - int[] expdnrs = {0}; distanceTest1( "word1 w word2", expdnrs); + int[] expdnrs = {0}; + distanceTest1("word1 w word2", expdnrs); } + public void test0N01() throws Exception { - int[] expdnrs = {0}; distanceTest1( "word1 n word2", expdnrs); + int[] expdnrs = {0}; + distanceTest1("word1 n word2", expdnrs); } - public void test0N01r() throws Exception { /* r reverse */ - int[] expdnrs = {0}; distanceTest1( "word2 n word1", expdnrs); + + public void test0N01r() throws Exception { + /* r reverse */ + int[] expdnrs = {0}; + distanceTest1("word2 n word1", expdnrs); } - + public void test0W02() throws Exception { - int[] expdnrs = {}; distanceTest1( "word2 w word1", expdnrs); + int[] expdnrs = {}; + distanceTest1("word2 w word1", expdnrs); } - + public void test0W03() throws Exception { - int[] expdnrs = {}; distanceTest1( "word2 2W word1", expdnrs); + int[] expdnrs = {}; + distanceTest1("word2 2W word1", expdnrs); } + public void test0N03() throws Exception { - int[] expdnrs = {0}; distanceTest1( "word2 2N word1", expdnrs); + int[] expdnrs = {0}; + distanceTest1("word2 2N word1", expdnrs); } + public void test0N03r() throws Exception { - int[] expdnrs = {0}; distanceTest1( "word1 2N word2", expdnrs); + int[] expdnrs = {0}; + distanceTest1("word1 2N word2", expdnrs); } - + public void test0W04() throws Exception { - int[] expdnrs = {}; distanceTest1( "word2 3w word1", expdnrs); + int[] expdnrs = {}; + distanceTest1("word2 3w word1", expdnrs); } public void test0N04() throws Exception { - int[] expdnrs = {0}; distanceTest1( "word2 3n word1", expdnrs); + int[] expdnrs = {0}; + distanceTest1("word2 3n word1", expdnrs); } + public void test0N04r() throws Exception { - int[] expdnrs = {0}; distanceTest1( "word1 3n word2", expdnrs); + int[] expdnrs = {0}; + distanceTest1("word1 3n word2", expdnrs); } public void test0W05() throws Exception { - int[] expdnrs = {}; distanceTest1( "orda1 w orda3", expdnrs); + int[] expdnrs = {}; + distanceTest1("orda1 w orda3", expdnrs); } + public void test0W06() throws Exception { - int[] expdnrs = {3}; distanceTest1( "orda1 2w orda3", expdnrs); + int[] expdnrs = {3}; + distanceTest1("orda1 2w orda3", expdnrs); } - + public void test1Wtrunc01() throws Exception { - int[] expdnrs = {0}; distanceTest1( "word1* w word2", expdnrs); + int[] expdnrs = {0}; + distanceTest1("word1* w word2", expdnrs); } + public void test1Wtrunc02() throws Exception { - int[] expdnrs = {0}; distanceTest1( "word* w word2", expdnrs); + int[] expdnrs = {0}; + 
distanceTest1("word* w word2", expdnrs); } + public void test1Wtrunc02r() throws Exception { - int[] expdnrs = {0,3}; distanceTest1( "word2 w word*", expdnrs); + int[] expdnrs = {0, 3}; + distanceTest1("word2 w word*", expdnrs); } + public void test1Ntrunc02() throws Exception { - int[] expdnrs = {0,3}; distanceTest1( "word* n word2", expdnrs); + int[] expdnrs = {0, 3}; + distanceTest1("word* n word2", expdnrs); } + public void test1Ntrunc02r() throws Exception { - int[] expdnrs = {0,3}; distanceTest1( "word2 n word*", expdnrs); + int[] expdnrs = {0, 3}; + distanceTest1("word2 n word*", expdnrs); } public void test1Wtrunc03() throws Exception { - int[] expdnrs = {0}; distanceTest1( "word1* w word2*", expdnrs); + int[] expdnrs = {0}; + distanceTest1("word1* w word2*", expdnrs); } + public void test1Ntrunc03() throws Exception { - int[] expdnrs = {0}; distanceTest1( "word1* N word2*", expdnrs); + int[] expdnrs = {0}; + distanceTest1("word1* N word2*", expdnrs); } - + public void test1Wtrunc04() throws Exception { - int[] expdnrs = {}; distanceTest1( "kxork* w kxor*", expdnrs); + int[] expdnrs = {}; + distanceTest1("kxork* w kxor*", expdnrs); } + public void test1Ntrunc04() throws Exception { - int[] expdnrs = {}; distanceTest1( "kxork* 99n kxor*", expdnrs); + int[] expdnrs = {}; + distanceTest1("kxork* 99n kxor*", expdnrs); } public void test1Wtrunc05() throws Exception { - int[] expdnrs = {}; distanceTest1( "word2* 2W word1*", expdnrs); + int[] expdnrs = {}; + distanceTest1("word2* 2W word1*", expdnrs); } + public void test1Ntrunc05() throws Exception { - int[] expdnrs = {0}; distanceTest1( "word2* 2N word1*", expdnrs); + int[] expdnrs = {0}; + distanceTest1("word2* 2N word1*", expdnrs); } public void test1Wtrunc06() throws Exception { - int[] expdnrs = {3}; distanceTest1( "ord* W word*", expdnrs); - } - public void test1Ntrunc06() throws Exception { - int[] expdnrs = {3}; distanceTest1( "ord* N word*", expdnrs); - } - public void test1Ntrunc06r() throws Exception { - int[] expdnrs = {3}; distanceTest1( "word* N ord*", expdnrs); - } - - public void test1Wtrunc07() throws Exception { - int[] expdnrs = {3}; distanceTest1( "(orda2 OR orda3) W word*", expdnrs); - } - public void test1Wtrunc08() throws Exception { - int[] expdnrs = {3}; distanceTest1( "(orda2 OR orda3) W (word2 OR worda3)", expdnrs); - } - public void test1Wtrunc09() throws Exception { - int[] expdnrs = {3}; distanceTest1( "(orda2 OR orda3) 2W (word2 OR worda3)", expdnrs); - } - public void test1Ntrunc09() throws Exception { - int[] expdnrs = {3}; distanceTest1( "(orda2 OR orda3) 2N (word2 OR worda3)", expdnrs); + int[] expdnrs = {3}; + distanceTest1("ord* W word*", expdnrs); } - String[] docs2 = { - "w1 w2 w3 w4 w5", - "w1 w3 w2 w3", - "" - }; + public void test1Ntrunc06() throws Exception { + int[] expdnrs = {3}; + distanceTest1("ord* N word*", expdnrs); + } + + public void test1Ntrunc06r() throws Exception { + int[] expdnrs = {3}; + distanceTest1("word* N ord*", expdnrs); + } + + public void test1Wtrunc07() throws Exception { + int[] expdnrs = {3}; + distanceTest1("(orda2 OR orda3) W word*", expdnrs); + } + + public void test1Wtrunc08() throws Exception { + int[] expdnrs = {3}; + distanceTest1("(orda2 OR orda3) W (word2 OR worda3)", expdnrs); + } + + public void test1Wtrunc09() throws Exception { + int[] expdnrs = {3}; + distanceTest1("(orda2 OR orda3) 2W (word2 OR worda3)", expdnrs); + } + + public void test1Ntrunc09() throws Exception { + int[] expdnrs = {3}; + distanceTest1("(orda2 OR orda3) 2N (word2 OR worda3)", 
expdnrs); + } + + String[] docs2 = {"w1 w2 w3 w4 w5", "w1 w3 w2 w3", ""}; SingleFieldTestDb db2; - + public void distanceTest2(String query, int[] expdnrs) throws Exception { distanceTst(query, expdnrs, db2); } - + public void test2Wprefix01() throws Exception { - int[] expdnrs = {0}; distanceTest2( "W (w1, w2, w3)", expdnrs); + int[] expdnrs = {0}; + distanceTest2("W (w1, w2, w3)", expdnrs); } + public void test2Nprefix01a() throws Exception { - int[] expdnrs = {0,1}; distanceTest2( "N(w1, w2, w3)", expdnrs); + int[] expdnrs = {0, 1}; + distanceTest2("N(w1, w2, w3)", expdnrs); } + public void test2Nprefix01b() throws Exception { - int[] expdnrs = {0,1}; distanceTest2( "N(w3, w1, w2)", expdnrs); + int[] expdnrs = {0, 1}; + distanceTest2("N(w3, w1, w2)", expdnrs); } - + public void test2Wprefix02() throws Exception { - int[] expdnrs = {0,1}; distanceTest2( "2W(w1,w2,w3)", expdnrs); + int[] expdnrs = {0, 1}; + distanceTest2("2W(w1,w2,w3)", expdnrs); } public void test2Nprefix02a() throws Exception { - int[] expdnrs = {0,1}; distanceTest2( "2N(w1,w2,w3)", expdnrs); + int[] expdnrs = {0, 1}; + distanceTest2("2N(w1,w2,w3)", expdnrs); } + public void test2Nprefix02b() throws Exception { - int[] expdnrs = {0,1}; distanceTest2( "2N(w2,w3,w1)", expdnrs); + int[] expdnrs = {0, 1}; + distanceTest2("2N(w2,w3,w1)", expdnrs); } public void test2Wnested01() throws Exception { - int[] expdnrs = {0}; distanceTest2( "w1 W w2 W w3", expdnrs); + int[] expdnrs = {0}; + distanceTest2("w1 W w2 W w3", expdnrs); } + public void test2Nnested01() throws Exception { - int[] expdnrs = {0}; distanceTest2( "w1 N w2 N w3", expdnrs); + int[] expdnrs = {0}; + distanceTest2("w1 N w2 N w3", expdnrs); } - + public void test2Wnested02() throws Exception { - int[] expdnrs = {0,1}; distanceTest2( "w1 2W w2 2W w3", expdnrs); + int[] expdnrs = {0, 1}; + distanceTest2("w1 2W w2 2W w3", expdnrs); } + public void test2Nnested02() throws Exception { - int[] expdnrs = {0,1}; distanceTest2( "w1 2N w2 2N w3", expdnrs); + int[] expdnrs = {0, 1}; + distanceTest2("w1 2N w2 2N w3", expdnrs); } - + String[] docs3 = { "low pressure temperature inversion and rain", "when the temperature has a negative height above a depression no precipitation gradient is expected", @@ -242,10 +302,13 @@ public class Test03Distance extends LuceneTestCase { } public void test3Example01() throws Exception { - int[] expdnrs = {0,2}; // query does not match doc 1 because "gradient" is in wrong place there. - distanceTest3("50n((low w pressure*) or depression*," - + "5n(temperat*, (invers* or (negativ* 3n gradient*)))," - + "rain* or precipitat*)", - expdnrs); + int[] expdnrs = { + 0, 2 + }; // query does not match doc 1 because "gradient" is in wrong place there. 
+ distanceTest3( + "50n((low w pressure*) or depression*," + + "5n(temperat*, (invers* or (negativ* 3n gradient*)))," + + "rain* or precipitat*)", + expdnrs); } } diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java index 4b4e061ea21..789b8aaa00d 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java @@ -23,7 +23,6 @@ import java.util.Date; import java.util.GregorianCalendar; import java.util.Locale; import java.util.TimeZone; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.LowerCaseFilter; import org.apache.lucene.analysis.MockAnalyzer; @@ -71,13 +70,11 @@ import org.apache.lucene.util.automaton.RegExp; import org.junit.AfterClass; import org.junit.BeforeClass; -/** - * Base Test class for QueryParser subclasses - */ +/** Base Test class for QueryParser subclasses */ // TODO: it would be better to refactor the parts that are specific really // to the core QP and subclass/use the parts that are not in the flexible QP public abstract class QueryParserTestBase extends LuceneTestCase { - + public static Analyzer qpAnalyzer; @BeforeClass @@ -89,14 +86,14 @@ public abstract class QueryParserTestBase extends LuceneTestCase { public static void afterClass() { qpAnalyzer = null; } - + public static final class QPTestFilter extends TokenFilter { CharTermAttribute termAtt; OffsetAttribute offsetAtt; - + /** - * Filter which discards the token 'stop' and which expands the - * token 'phrase' into 'phrase1 phrase2' + * Filter which discards the token 'stop' and which expands the token 'phrase' into 'phrase1 + * phrase2' */ public QPTestFilter(TokenStream in) { super(in); @@ -124,8 +121,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { termAtt.setEmpty().append("phrase1"); offsetAtt.setOffset(savedStart, savedEnd); return true; - } else if (!termAtt.toString().equals("stop")) - return true; + } else if (!termAtt.toString().equals("stop")) return true; } return false; } @@ -142,14 +138,14 @@ public abstract class QueryParserTestBase extends LuceneTestCase { } private int originalMaxClauses; - + private String defaultField = "field"; - - protected String getDefaultField(){ + + protected String getDefaultField() { return defaultField; } - protected void setDefaultField(String defaultField){ + protected void setDefaultField(String defaultField) { this.defaultField = defaultField; } @@ -165,26 +161,28 @@ public abstract class QueryParserTestBase extends LuceneTestCase { public abstract void setDefaultOperatorAND(CommonQueryParserConfiguration cqpC); - public abstract void setAutoGeneratePhraseQueries(CommonQueryParserConfiguration cqpC, boolean value); + public abstract void setAutoGeneratePhraseQueries( + CommonQueryParserConfiguration cqpC, boolean value); - public abstract void setDateResolution(CommonQueryParserConfiguration cqpC, CharSequence field, DateTools.Resolution value); + public abstract void setDateResolution( + CommonQueryParserConfiguration cqpC, CharSequence field, DateTools.Resolution value); - public abstract Query getQuery(String query, CommonQueryParserConfiguration cqpC) throws Exception; + public abstract Query getQuery(String query, CommonQueryParserConfiguration cqpC) + throws Exception; public abstract Query getQuery(String query, Analyzer a) throws 
Exception; - + public abstract boolean isQueryParserException(Exception exception); public Query getQuery(String query) throws Exception { - return getQuery(query, (Analyzer)null); + return getQuery(query, (Analyzer) null); } public void assertQueryEquals(String query, Analyzer a, String result) throws Exception { Query q = getQuery(query, a); String s = q.toString("field"); if (!s.equals(result)) { - fail("Query /" + query + "/ yielded /" + s - + "/, expecting /" + result + "/"); + fail("Query /" + query + "/ yielded /" + s + "/, expecting /" + result + "/"); } } @@ -202,69 +200,61 @@ public abstract class QueryParserTestBase extends LuceneTestCase { } } - public void assertQueryEquals(CommonQueryParserConfiguration cqpC, String field, String query, String result) - throws Exception { + public void assertQueryEquals( + CommonQueryParserConfiguration cqpC, String field, String query, String result) + throws Exception { Query q = getQuery(query, cqpC); String s = q.toString(field); if (!s.equals(result)) { - fail("Query /" + query + "/ yielded /" + s - + "/, expecting /" + result + "/"); + fail("Query /" + query + "/ yielded /" + s + "/, expecting /" + result + "/"); } } - - public void assertEscapedQueryEquals(String query, Analyzer a, String result) - throws Exception { + + public void assertEscapedQueryEquals(String query, Analyzer a, String result) throws Exception { String escapedQuery = QueryParserBase.escape(query); if (!escapedQuery.equals(result)) { - fail("Query /" + query + "/ yielded /" + escapedQuery - + "/, expecting /" + result + "/"); + fail("Query /" + query + "/ yielded /" + escapedQuery + "/, expecting /" + result + "/"); } } public void assertWildcardQueryEquals(String query, String result, boolean allowLeadingWildcard) - throws Exception { + throws Exception { CommonQueryParserConfiguration cqpC = getParserConfig(null); cqpC.setAllowLeadingWildcard(allowLeadingWildcard); Query q = getQuery(query, cqpC); String s = q.toString("field"); if (!s.equals(result)) { - fail("WildcardQuery /" + query + "/ yielded /" + s - + "/, expecting /" + result + "/"); + fail("WildcardQuery /" + query + "/ yielded /" + s + "/, expecting /" + result + "/"); } } - public void assertWildcardQueryEquals(String query, String result) - throws Exception { + public void assertWildcardQueryEquals(String query, String result) throws Exception { assertWildcardQueryEquals(query, result, false); } - public Query getQueryDOA(String query, Analyzer a) - throws Exception { - if (a == null) - a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true); + public Query getQueryDOA(String query, Analyzer a) throws Exception { + if (a == null) a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true); CommonQueryParserConfiguration qp = getParserConfig(a); setDefaultOperatorAND(qp); return getQuery(query, qp); } - public void assertQueryEqualsDOA(String query, Analyzer a, String result) - throws Exception { + public void assertQueryEqualsDOA(String query, Analyzer a, String result) throws Exception { Query q = getQueryDOA(query, a); String s = q.toString("field"); if (!s.equals(result)) { - fail("Query /" + query + "/ yielded /" + s - + "/, expecting /" + result + "/"); + fail("Query /" + query + "/ yielded /" + s + "/, expecting /" + result + "/"); } } public void testCJK() throws Exception { - // Test Ideographic Space - As wide as a CJK character cell (fullwidth) - // used google to translate the word "term" to japanese -> 用語 - assertQueryEquals("term\u3000term\u3000term", null, "term\u0020term\u0020term"); 
- assertQueryEquals("用語\u3000用語\u3000用語", null, "用語\u0020用語\u0020用語"); + // Test Ideographic Space - As wide as a CJK character cell (fullwidth) + // used google to translate the word "term" to japanese -> 用語 + assertQueryEquals("term\u3000term\u3000term", null, "term\u0020term\u0020term"); + assertQueryEquals("用語\u3000用語\u3000用語", null, "用語\u0020用語\u0020用語"); } - //individual CJK chars as terms, like StandardAnalyzer + // individual CJK chars as terms, like StandardAnalyzer protected static class SimpleCJKTokenizer extends Tokenizer { private CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); @@ -275,8 +265,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { @Override public final boolean incrementToken() throws IOException { int ch = input.read(); - if (ch < 0) - return false; + if (ch < 0) return false; clearAttributes(); termAtt.setEmpty().append((char) ch); return true; @@ -292,19 +281,19 @@ public abstract class QueryParserTestBase extends LuceneTestCase { public void testCJKTerm() throws Exception { // individual CJK chars as terms - SimpleCJKAnalyzer analyzer = new SimpleCJKAnalyzer(); - + SimpleCJKAnalyzer analyzer = new SimpleCJKAnalyzer(); + BooleanQuery.Builder expected = new BooleanQuery.Builder(); expected.add(new TermQuery(new Term("field", "中")), BooleanClause.Occur.SHOULD); expected.add(new TermQuery(new Term("field", "国")), BooleanClause.Occur.SHOULD); - + assertEquals(expected.build(), getQuery("中国", analyzer)); } - + public void testCJKBoostedTerm() throws Exception { // individual CJK chars as terms SimpleCJKAnalyzer analyzer = new SimpleCJKAnalyzer(); - + BooleanQuery.Builder expectedB = new BooleanQuery.Builder(); expectedB.add(new TermQuery(new Term("field", "中")), BooleanClause.Occur.SHOULD); expectedB.add(new TermQuery(new Term("field", "国")), BooleanClause.Occur.SHOULD); @@ -313,46 +302,46 @@ public abstract class QueryParserTestBase extends LuceneTestCase { assertEquals(expected, getQuery("中国^0.5", analyzer)); } - + public void testCJKPhrase() throws Exception { // individual CJK chars as terms SimpleCJKAnalyzer analyzer = new SimpleCJKAnalyzer(); - + PhraseQuery expected = new PhraseQuery("field", "中", "国"); - + assertEquals(expected, getQuery("\"中国\"", analyzer)); } - + public void testCJKBoostedPhrase() throws Exception { // individual CJK chars as terms SimpleCJKAnalyzer analyzer = new SimpleCJKAnalyzer(); - + Query expected = new PhraseQuery("field", "中", "国"); expected = new BoostQuery(expected, 0.5f); - + assertEquals(expected, getQuery("\"中国\"^0.5", analyzer)); } - + public void testCJKSloppyPhrase() throws Exception { // individual CJK chars as terms SimpleCJKAnalyzer analyzer = new SimpleCJKAnalyzer(); - + PhraseQuery expected = new PhraseQuery(3, "field", "中", "国"); - + assertEquals(expected, getQuery("\"中国\"~3", analyzer)); } - + public void testAutoGeneratePhraseQueriesOn() throws Exception { // individual CJK chars as terms - SimpleCJKAnalyzer analyzer = new SimpleCJKAnalyzer(); - + SimpleCJKAnalyzer analyzer = new SimpleCJKAnalyzer(); + PhraseQuery expected = new PhraseQuery("field", "中", "国"); CommonQueryParserConfiguration qp = getParserConfig(analyzer); if (qp instanceof QueryParser) { // Always true, since TestStandardQP overrides this method - ((QueryParser)qp).setSplitOnWhitespace(true); // LUCENE-7533 + ((QueryParser) qp).setSplitOnWhitespace(true); // LUCENE-7533 } setAutoGeneratePhraseQueries(qp, true); - assertEquals(expected, getQuery("中国",qp)); + assertEquals(expected, getQuery("中国", qp)); } public 
void testSimple() throws Exception { @@ -362,8 +351,8 @@ public abstract class QueryParserTestBase extends LuceneTestCase { // FIXME: enhance MockAnalyzer to be able to support this // it must no longer extend CharTokenizer - //assertQueryEquals("\"\"", new KeywordAnalyzer(), ""); - //assertQueryEquals("foo:\"\"", new KeywordAnalyzer(), "foo:"); + // assertQueryEquals("\"\"", new KeywordAnalyzer(), ""); + // assertQueryEquals("foo:\"\"", new KeywordAnalyzer(), "foo:"); assertQueryEquals("a AND b", null, "+a +b"); assertQueryEquals("(a AND b)", null, "+a +b"); @@ -372,19 +361,17 @@ public abstract class QueryParserTestBase extends LuceneTestCase { assertQueryEquals("a AND -b", null, "+a -b"); assertQueryEquals("a AND !b", null, "+a -b"); assertQueryEquals("a && b", null, "+a +b"); -// assertQueryEquals("a && ! b", null, "+a -b"); + // assertQueryEquals("a && ! b", null, "+a -b"); assertQueryEquals("a OR b", null, "a b"); assertQueryEquals("a || b", null, "a b"); assertQueryEquals("a OR !b", null, "a -b"); -// assertQueryEquals("a OR ! b", null, "a -b"); + // assertQueryEquals("a OR ! b", null, "a -b"); assertQueryEquals("a OR -b", null, "a -b"); assertQueryEquals("+term -term term", null, "+term -term term"); - assertQueryEquals("foo:term AND field:anotherTerm", null, - "+foo:term +anotherterm"); - assertQueryEquals("term AND \"phrase phrase\"", null, - "+term +\"phrase phrase\""); + assertQueryEquals("foo:term AND field:anotherTerm", null, "+foo:term +anotherterm"); + assertQueryEquals("term AND \"phrase phrase\"", null, "+term +\"phrase phrase\""); assertQueryEquals("\"hello there\"", null, "\"hello there\""); assertTrue(getQuery("a AND b") instanceof BooleanQuery); assertTrue(getQuery("hello") instanceof TermQuery); @@ -398,33 +385,33 @@ public abstract class QueryParserTestBase extends LuceneTestCase { assertQueryEquals("\"germ term\"^2.0", null, "(\"germ term\")^2.0"); assertQueryEquals("\"term germ\"^2", null, "(\"term germ\")^2.0"); - assertQueryEquals("(foo OR bar) AND (baz OR boo)", null, - "+(foo bar) +(baz boo)"); - assertQueryEquals("((a OR b) AND NOT c) OR d", null, - "(+(a b) -c) d"); - assertQueryEquals("+(apple \"steve jobs\") -(foo bar baz)", null, - "+(apple \"steve jobs\") -(foo bar baz)"); - assertQueryEquals("+title:(dog OR cat) -author:\"bob dole\"", null, - "+(title:dog title:cat) -author:\"bob dole\""); - + assertQueryEquals("(foo OR bar) AND (baz OR boo)", null, "+(foo bar) +(baz boo)"); + assertQueryEquals("((a OR b) AND NOT c) OR d", null, "(+(a b) -c) d"); + assertQueryEquals( + "+(apple \"steve jobs\") -(foo bar baz)", null, "+(apple \"steve jobs\") -(foo bar baz)"); + assertQueryEquals( + "+title:(dog OR cat) -author:\"bob dole\"", + null, + "+(title:dog title:cat) -author:\"bob dole\""); } public abstract void testDefaultOperator() throws Exception; - - - public void testOperatorVsWhitespace() throws Exception { //LUCENE-2566 - // +,-,! should be directly adjacent to operand (i.e. not separated by whitespace) to be treated as an operator - Analyzer a = new Analyzer() { - @Override - public TokenStreamComponents createComponents(String fieldName) { - return new TokenStreamComponents(new MockTokenizer(MockTokenizer.WHITESPACE, false)); - } - }; + + public void testOperatorVsWhitespace() throws Exception { // LUCENE-2566 + // +,-,! should be directly adjacent to operand (i.e. 
not separated by whitespace) to be treated + // as an operator + Analyzer a = + new Analyzer() { + @Override + public TokenStreamComponents createComponents(String fieldName) { + return new TokenStreamComponents(new MockTokenizer(MockTokenizer.WHITESPACE, false)); + } + }; assertQueryEquals("a - b", a, "a - b"); assertQueryEquals("a + b", a, "a + b"); - assertQueryEquals("a ! b", a, "a ! b"); + assertQueryEquals("a ! b", a, "a ! b"); } - + public void testPunct() throws Exception { Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); assertQueryEquals("a&b", a, "a&b"); @@ -441,7 +428,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { } public void testNumber() throws Exception { -// The numbers go away because SimpleAnalzyer ignores them + // The numbers go away because SimpleAnalyzer ignores them assertMatchNoDocsQuery("3", null); assertQueryEquals("term 1.0 1 2", null, "term"); assertQueryEquals("term term1 term2", null, "term term term"); @@ -467,28 +454,28 @@ public abstract class QueryParserTestBase extends LuceneTestCase { assertTrue(((BoostQuery) getQuery("term*^2")).getQuery() instanceof PrefixQuery); assertTrue(getQuery("term~") instanceof FuzzyQuery); assertTrue(getQuery("term~0.7") instanceof FuzzyQuery); - FuzzyQuery fq = (FuzzyQuery)getQuery("term~0.7"); + FuzzyQuery fq = (FuzzyQuery) getQuery("term~0.7"); assertEquals(1, fq.getMaxEdits()); assertEquals(FuzzyQuery.defaultPrefixLength, fq.getPrefixLength()); - fq = (FuzzyQuery)getQuery("term~"); + fq = (FuzzyQuery) getQuery("term~"); assertEquals(2, fq.getMaxEdits()); assertEquals(FuzzyQuery.defaultPrefixLength, fq.getPrefixLength()); - + assertParseException("term~1.1"); // value > 1, throws exception assertTrue(getQuery("term*germ") instanceof WildcardQuery); -/* Tests to see that wild card terms are (or are not) properly - * lower-cased with propery parser configuration - */ -// First prefix queries: + /* Tests to see that wild card terms are (or are not) properly + * lower-cased with proper parser configuration + */ + // First prefix queries: // by default, convert to lowercase: assertWildcardQueryEquals("Term*", "term*"); // explicitly set lowercase: assertWildcardQueryEquals("term*", "term*"); assertWildcardQueryEquals("Term*", "term*"); assertWildcardQueryEquals("TERM*", "term*"); -// Then 'full' wildcard queries: + // Then 'full' wildcard queries: // by default, convert to lowercase: assertWildcardQueryEquals("Te?m", "te?m"); // explicitly set lowercase: @@ -496,38 +483,44 @@ public abstract class QueryParserTestBase extends LuceneTestCase { assertWildcardQueryEquals("Te?m", "te?m"); assertWildcardQueryEquals("TE?M", "te?m"); assertWildcardQueryEquals("Te?m*gerM", "te?m*germ"); -// Fuzzy queries: + // Fuzzy queries: assertWildcardQueryEquals("Term~", "term~2"); -// Range queries: + // Range queries: assertWildcardQueryEquals("[A TO C]", "[a TO c]"); // Test suffix queries: first disallow - Exception ex = expectThrows(Exception.class, () -> { - assertWildcardQueryEquals("*Term", "*term", false); - }); + Exception ex = + expectThrows( + Exception.class, + () -> { + assertWildcardQueryEquals("*Term", "*term", false); + }); assertTrue(isQueryParserException(ex)); - ex = expectThrows(Exception.class, () -> { - assertWildcardQueryEquals("?Term", "?term"); - }); + ex = + expectThrows( + Exception.class, + () -> { + assertWildcardQueryEquals("?Term", "?term"); + }); assertTrue(isQueryParserException(ex)); // Test suffix queries: then allow assertWildcardQueryEquals("*Term", "*term",
true); assertWildcardQueryEquals("?Term", "?term", true); } - + public void testLeadingWildcardType() throws Exception { CommonQueryParserConfiguration cqpC = getParserConfig(null); cqpC.setAllowLeadingWildcard(true); - assertEquals(WildcardQuery.class, getQuery("t*erm*",cqpC).getClass()); - assertEquals(WildcardQuery.class, getQuery("?term*",cqpC).getClass()); - assertEquals(WildcardQuery.class, getQuery("*term*",cqpC).getClass()); + assertEquals(WildcardQuery.class, getQuery("t*erm*", cqpC).getClass()); + assertEquals(WildcardQuery.class, getQuery("?term*", cqpC).getClass()); + assertEquals(WildcardQuery.class, getQuery("*term*", cqpC).getClass()); } public void testQPA() throws Exception { assertQueryEquals("term term^3.0 term", qpAnalyzer, "term (term)^3.0 term"); assertQueryEquals("term stop^3.0 term", qpAnalyzer, "term term"); - + assertQueryEquals("term term term", qpAnalyzer, "term term term"); assertQueryEquals("term +stop term", qpAnalyzer, "term term"); assertQueryEquals("term -stop term", qpAnalyzer, "term term"); @@ -535,14 +528,13 @@ public abstract class QueryParserTestBase extends LuceneTestCase { assertQueryEquals("drop AND (stop) AND roll", qpAnalyzer, "+drop +roll"); assertQueryEquals("term +(stop) term", qpAnalyzer, "term term"); assertQueryEquals("term -(stop) term", qpAnalyzer, "term term"); - + assertQueryEquals("drop AND stop AND roll", qpAnalyzer, "+drop +roll"); -// TODO: Re-enable once flexible standard parser gets multi-word synonym support -// assertQueryEquals("term phrase term", qpAnalyzer, -// "term phrase1 phrase2 term"); - assertQueryEquals("term AND NOT phrase term", qpAnalyzer, - "+term -(phrase1 phrase2) term"); + // TODO: Re-enable once flexible standard parser gets multi-word synonym support + // assertQueryEquals("term phrase term", qpAnalyzer, + // "term phrase1 phrase2 term"); + assertQueryEquals("term AND NOT phrase term", qpAnalyzer, "+term -(phrase1 phrase2) term"); assertMatchNoDocsQuery("stop^3", qpAnalyzer); assertMatchNoDocsQuery("stop", qpAnalyzer); assertMatchNoDocsQuery("(stop)^3", qpAnalyzer); @@ -553,39 +545,43 @@ public abstract class QueryParserTestBase extends LuceneTestCase { assertMatchNoDocsQuery("((stop))", qpAnalyzer); assertTrue(getQuery("term term term", qpAnalyzer) instanceof BooleanQuery); assertTrue(getQuery("term +stop", qpAnalyzer) instanceof TermQuery); - + CommonQueryParserConfiguration cqpc = getParserConfig(qpAnalyzer); setDefaultOperatorAND(cqpc); -// TODO: Re-enable once flexible standard parser gets multi-word synonym support -// assertQueryEquals(cqpc, "field", "term phrase term", -// "+term +phrase1 +phrase2 +term"); - assertQueryEquals(cqpc, "field", "phrase", - "+phrase1 +phrase2"); + // TODO: Re-enable once flexible standard parser gets multi-word synonym support + // assertQueryEquals(cqpc, "field", "term phrase term", + // "+term +phrase1 +phrase2 +term"); + assertQueryEquals(cqpc, "field", "phrase", "+phrase1 +phrase2"); } public void testRange() throws Exception { assertQueryEquals("[ a TO z]", null, "[a TO z]"); assertQueryEquals("[ a TO z}", null, "[a TO z}"); - assertQueryEquals("{ a TO z]", null, "{a TO z]"); + assertQueryEquals("{ a TO z]", null, "{a TO z]"); - assertEquals(MultiTermQuery.CONSTANT_SCORE_REWRITE, ((TermRangeQuery)getQuery("[ a TO z]")).getRewriteMethod()); + assertEquals( + MultiTermQuery.CONSTANT_SCORE_REWRITE, + ((TermRangeQuery) getQuery("[ a TO z]")).getRewriteMethod()); + + CommonQueryParserConfiguration qp = + getParserConfig(new MockAnalyzer(random(), MockTokenizer.SIMPLE, 
true)); - CommonQueryParserConfiguration qp = getParserConfig( new MockAnalyzer(random(), MockTokenizer.SIMPLE, true)); - qp.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_REWRITE); - assertEquals(MultiTermQuery.SCORING_BOOLEAN_REWRITE,((TermRangeQuery)getQuery("[ a TO z]", qp)).getRewriteMethod()); - + assertEquals( + MultiTermQuery.SCORING_BOOLEAN_REWRITE, + ((TermRangeQuery) getQuery("[ a TO z]", qp)).getRewriteMethod()); + // test open ranges assertQueryEquals("[ a TO * ]", null, "[a TO *]"); assertQueryEquals("[ * TO z ]", null, "[* TO z]"); assertQueryEquals("[ * TO * ]", null, "[* TO *]"); - + // mixing exclude and include bounds assertQueryEquals("{ a TO z ]", null, "{a TO z]"); assertQueryEquals("[ a TO z }", null, "[a TO z}"); assertQueryEquals("{ a TO * ]", null, "{a TO *]"); assertQueryEquals("[ * TO z }", null, "[* TO z}"); - + assertQueryEquals("[ a TO z ]", null, "[a TO z]"); assertQueryEquals("{ a TO z}", null, "{a TO z}"); assertQueryEquals("{ a TO z }", null, "{a TO z}"); @@ -595,14 +591,14 @@ public abstract class QueryParserTestBase extends LuceneTestCase { assertQueryEquals("( bar blar { a TO z}) ", null, "bar blar {a TO z}"); assertQueryEquals("gack ( bar blar { a TO z}) ", null, "gack (bar blar {a TO z})"); - assertQueryEquals("[* TO Z]",null,"[* TO z]"); - assertQueryEquals("[A TO *]",null,"[a TO *]"); - assertQueryEquals("[* TO *]",null,"[* TO *]"); - } + assertQueryEquals("[* TO Z]", null, "[* TO z]"); + assertQueryEquals("[A TO *]", null, "[a TO *]"); + assertQueryEquals("[* TO *]", null, "[* TO *]"); + } public void testRangeWithPhrase() throws Exception { - assertQueryEquals("[\\* TO \"*\"]",null,"[\\* TO \\*]"); - assertQueryEquals("[\"*\" TO *]",null,"[\\* TO *]"); + assertQueryEquals("[\\* TO \"*\"]", null, "[\\* TO \\*]"); + assertQueryEquals("[\"*\" TO *]", null, "[\\* TO *]"); } public void testRangeQueryEndpointTO() throws Exception { @@ -635,10 +631,11 @@ public abstract class QueryParserTestBase extends LuceneTestCase { // " TO " is required between range endpoints - Class exceptionClass = this instanceof TestQueryParser - ? org.apache.lucene.queryparser.classic.ParseException.class - : org.apache.lucene.queryparser.flexible.standard.parser.ParseException.class; - + Class exceptionClass = + this instanceof TestQueryParser + ? 
org.apache.lucene.queryparser.classic.ParseException.class + : org.apache.lucene.queryparser.flexible.standard.parser.ParseException.class; + expectThrows(exceptionClass, () -> getQuery("{A B}")); expectThrows(exceptionClass, () -> getQuery("[A B}")); expectThrows(exceptionClass, () -> getQuery("{A B]")); @@ -662,19 +659,19 @@ public abstract class QueryParserTestBase extends LuceneTestCase { return s; } } - + /** for testing DateTools support */ private String getDate(String s, DateTools.Resolution resolution) throws Exception { // we use the default Locale since LuceneTestCase randomizes it DateFormat df = DateFormat.getDateInstance(DateFormat.SHORT, Locale.getDefault()); - return getDate(df.parse(s), resolution); + return getDate(df.parse(s), resolution); } - + /** for testing DateTools support */ private String getDate(Date d, DateTools.Resolution resolution) { - return DateTools.dateToString(d, resolution); + return DateTools.dateToString(d, resolution); } - + private String getLocalizedDate(int year, int month, int day) { // we use the default Locale/TZ since LuceneTestCase randomizes it DateFormat df = DateFormat.getDateInstance(DateFormat.SHORT, Locale.getDefault()); @@ -701,40 +698,61 @@ public abstract class QueryParserTestBase extends LuceneTestCase { final String hourField = "hour"; Analyzer a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true); CommonQueryParserConfiguration qp = getParserConfig(a); - + // set a field specific date resolution setDateResolution(qp, monthField, DateTools.Resolution.MONTH); - + // set default date resolution to MILLISECOND qp.setDateResolution(DateTools.Resolution.MILLISECOND); - - // set second field specific date resolution + + // set second field specific date resolution setDateResolution(qp, hourField, DateTools.Resolution.HOUR); // for this field no field specific date resolution has been set, // so verify if the default resolution is used - assertDateRangeQueryEquals(qp, defaultField, startDate, endDate, - endDateExpected.getTime(), DateTools.Resolution.MILLISECOND); + assertDateRangeQueryEquals( + qp, + defaultField, + startDate, + endDate, + endDateExpected.getTime(), + DateTools.Resolution.MILLISECOND); // verify if field specific date resolutions are used for these two fields - assertDateRangeQueryEquals(qp, monthField, startDate, endDate, - endDateExpected.getTime(), DateTools.Resolution.MONTH); + assertDateRangeQueryEquals( + qp, monthField, startDate, endDate, endDateExpected.getTime(), DateTools.Resolution.MONTH); - assertDateRangeQueryEquals(qp, hourField, startDate, endDate, - endDateExpected.getTime(), DateTools.Resolution.HOUR); + assertDateRangeQueryEquals( + qp, hourField, startDate, endDate, endDateExpected.getTime(), DateTools.Resolution.HOUR); } - - public void assertDateRangeQueryEquals(CommonQueryParserConfiguration cqpC, String field, String startDate, String endDate, - Date endDateInclusive, DateTools.Resolution resolution) throws Exception { - assertQueryEquals(cqpC, field, field + ":[" + escapeDateString(startDate) + " TO " + escapeDateString(endDate) + "]", - "[" + getDate(startDate, resolution) + " TO " + getDate(endDateInclusive, resolution) + "]"); - assertQueryEquals(cqpC, field, field + ":{" + escapeDateString(startDate) + " TO " + escapeDateString(endDate) + "}", - "{" + getDate(startDate, resolution) + " TO " + getDate(endDate, resolution) + "}"); + + public void assertDateRangeQueryEquals( + CommonQueryParserConfiguration cqpC, + String field, + String startDate, + String endDate, + Date 
endDateInclusive, + DateTools.Resolution resolution) + throws Exception { + assertQueryEquals( + cqpC, + field, + field + ":[" + escapeDateString(startDate) + " TO " + escapeDateString(endDate) + "]", + "[" + + getDate(startDate, resolution) + + " TO " + + getDate(endDateInclusive, resolution) + + "]"); + assertQueryEquals( + cqpC, + field, + field + ":{" + escapeDateString(startDate) + " TO " + escapeDateString(endDate) + "}", + "{" + getDate(startDate, resolution) + " TO " + getDate(endDate, resolution) + "}"); } public void testEscaped() throws Exception { Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); - + /*assertQueryEquals("\\[brackets", a, "\\[brackets"); assertQueryEquals("\\[brackets", null, "brackets"); assertQueryEquals("\\\\", a, "\\\\"); @@ -760,7 +778,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { //assertQueryEquals("foo \\AND bar", a, "foo \\AND bar");*/ assertQueryEquals("\\a", a, "a"); - + assertQueryEquals("a\\-b:c", a, "a-b:c"); assertQueryEquals("a\\+b:c", a, "a+b:c"); assertQueryEquals("a\\:b:c", a, "a:b:c"); @@ -786,40 +804,42 @@ assertQueryEquals("[ a\\: TO a\\~ ]", null, "[a: TO a~]"); assertQueryEquals("[ a\\\\ TO a\\* ]", null, "[a\\ TO a*]"); - assertQueryEquals("[\"c\\:\\\\temp\\\\\\~foo0.txt\" TO \"c\\:\\\\temp\\\\\\~foo9.txt\"]", a, - "[c:\\temp\\~foo0.txt TO c:\\temp\\~foo9.txt]"); - + assertQueryEquals( + "[\"c\\:\\\\temp\\\\\\~foo0.txt\" TO \"c\\:\\\\temp\\\\\\~foo9.txt\"]", + a, + "[c:\\temp\\~foo0.txt TO c:\\temp\\~foo9.txt]"); + assertQueryEquals("a\\\\\\+b", a, "a\\+b"); - + assertQueryEquals("a \\\"b c\\\" d", a, "a \"b c\" d"); assertQueryEquals("\"a \\\"b c\\\" d\"", a, "\"a \"b c\" d\""); assertQueryEquals("\"a \\+b c d\"", a, "\"a +b c d\""); - + assertQueryEquals("c\\:\\\\temp\\\\\\~foo.txt", a, "c:\\temp\\~foo.txt"); - + assertParseException("XY\\"); // there must be a character after the escape char - + // test unicode escaping assertQueryEquals("a\\u0062c", a, "abc"); assertQueryEquals("XY\\u005a", a, "XYZ"); assertQueryEquals("XY\\u005A", a, "XYZ"); assertQueryEquals("\"a \\\\\\u0028\\u0062\\\" c\"", a, "\"a \\(b\" c\""); - - assertParseException("XY\\u005G"); // test non-hex character in escaped unicode sequence - assertParseException("XY\\u005"); // test incomplete escaped unicode sequence - + + assertParseException("XY\\u005G"); // test non-hex character in escaped unicode sequence + assertParseException("XY\\u005"); // test incomplete escaped unicode sequence + // Tests bug LUCENE-800 assertQueryEquals("(item:\\\\ item:ABCD\\\\)", a, "item:\\ item:ABCD\\"); - assertParseException("(item:\\\\ item:ABCD\\\\))"); // unmatched closing paranthesis + assertParseException("(item:\\\\ item:ABCD\\\\))"); // unmatched closing parenthesis assertQueryEquals("\\*", a, "*"); - assertQueryEquals("\\\\", a, "\\"); // escaped backslash - + assertQueryEquals("\\\\", a, "\\"); // escaped backslash + assertParseException("\\"); // a backslash must always be escaped - + // LUCENE-1189 - assertQueryEquals("(\"a\\\\\") or (\"b\")", a ,"a\\ or b"); + assertQueryEquals("(\"a\\\\\") or (\"b\")", a, "a\\ or b"); } - + public void testEscapedVsQuestionMarkAsWildcard() throws Exception { Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); assertQueryEquals("a:b\\-?c", a, "a:b\\-?c"); @@ -828,7 +848,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { assertQueryEquals("a:b\\\\?c", a, "a:b\\\\?c"); } - + public
void testQueryStringEscaping() throws Exception { Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); @@ -862,50 +882,34 @@ public abstract class QueryParserTestBase extends LuceneTestCase { assertEscapedQueryEquals("[ a - TO a+ ]", null, "\\[ a \\- TO a\\+ \\]"); assertEscapedQueryEquals("[ a : TO a~ ]", null, "\\[ a \\: TO a\\~ \\]"); assertEscapedQueryEquals("[ a\\ TO a* ]", null, "\\[ a\\\\ TO a\\* \\]"); - + // LUCENE-881 assertEscapedQueryEquals("|| abc ||", a, "\\|\\| abc \\|\\|"); assertEscapedQueryEquals("&& abc &&", a, "\\&\\& abc \\&\\&"); } - - public void testTabNewlineCarriageReturn() - throws Exception { - assertQueryEqualsDOA("+weltbank +worlbank", null, - "+weltbank +worlbank"); - assertQueryEqualsDOA("+weltbank\n+worlbank", null, - "+weltbank +worlbank"); - assertQueryEqualsDOA("weltbank \n+worlbank", null, - "+weltbank +worlbank"); - assertQueryEqualsDOA("weltbank \n +worlbank", null, - "+weltbank +worlbank"); + public void testTabNewlineCarriageReturn() throws Exception { + assertQueryEqualsDOA("+weltbank +worlbank", null, "+weltbank +worlbank"); - assertQueryEqualsDOA("+weltbank\r+worlbank", null, - "+weltbank +worlbank"); - assertQueryEqualsDOA("weltbank \r+worlbank", null, - "+weltbank +worlbank"); - assertQueryEqualsDOA("weltbank \r +worlbank", null, - "+weltbank +worlbank"); + assertQueryEqualsDOA("+weltbank\n+worlbank", null, "+weltbank +worlbank"); + assertQueryEqualsDOA("weltbank \n+worlbank", null, "+weltbank +worlbank"); + assertQueryEqualsDOA("weltbank \n +worlbank", null, "+weltbank +worlbank"); - assertQueryEqualsDOA("+weltbank\r\n+worlbank", null, - "+weltbank +worlbank"); - assertQueryEqualsDOA("weltbank \r\n+worlbank", null, - "+weltbank +worlbank"); - assertQueryEqualsDOA("weltbank \r\n +worlbank", null, - "+weltbank +worlbank"); - assertQueryEqualsDOA("weltbank \r \n +worlbank", null, - "+weltbank +worlbank"); + assertQueryEqualsDOA("+weltbank\r+worlbank", null, "+weltbank +worlbank"); + assertQueryEqualsDOA("weltbank \r+worlbank", null, "+weltbank +worlbank"); + assertQueryEqualsDOA("weltbank \r +worlbank", null, "+weltbank +worlbank"); - assertQueryEqualsDOA("+weltbank\t+worlbank", null, - "+weltbank +worlbank"); - assertQueryEqualsDOA("weltbank \t+worlbank", null, - "+weltbank +worlbank"); - assertQueryEqualsDOA("weltbank \t +worlbank", null, - "+weltbank +worlbank"); + assertQueryEqualsDOA("+weltbank\r\n+worlbank", null, "+weltbank +worlbank"); + assertQueryEqualsDOA("weltbank \r\n+worlbank", null, "+weltbank +worlbank"); + assertQueryEqualsDOA("weltbank \r\n +worlbank", null, "+weltbank +worlbank"); + assertQueryEqualsDOA("weltbank \r \n +worlbank", null, "+weltbank +worlbank"); + + assertQueryEqualsDOA("+weltbank\t+worlbank", null, "+weltbank +worlbank"); + assertQueryEqualsDOA("weltbank \t+worlbank", null, "+weltbank +worlbank"); + assertQueryEqualsDOA("weltbank \t +worlbank", null, "+weltbank +worlbank"); } - public void testSimpleDAO() - throws Exception { + public void testSimpleDAO() throws Exception { assertQueryEqualsDOA("term term term", null, "+term +term +term"); assertQueryEqualsDOA("term +term term", null, "+term +term +term"); assertQueryEqualsDOA("term term +term", null, "+term +term +term"); @@ -913,23 +917,23 @@ public abstract class QueryParserTestBase extends LuceneTestCase { assertQueryEqualsDOA("-term term term", null, "-term +term +term"); } - public void testBoost() - throws Exception { + public void testBoost() throws Exception { CharacterRunAutomaton stopWords = new 
CharacterRunAutomaton(Automata.makeString("on")); Analyzer oneStopAnalyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, stopWords); CommonQueryParserConfiguration qp = getParserConfig(oneStopAnalyzer); - Query q = getQuery("on^1.0",qp); + Query q = getQuery("on^1.0", qp); assertNotNull(q); - q = getQuery("\"hello\"^2.0",qp); + q = getQuery("\"hello\"^2.0", qp); assertNotNull(q); assertEquals(((BoostQuery) q).getBoost(), (float) 2.0, (float) 0.5); - q = getQuery("hello^2.0",qp); + q = getQuery("hello^2.0", qp); assertNotNull(q); assertEquals(((BoostQuery) q).getBoost(), (float) 2.0, (float) 0.5); - q = getQuery("\"on\"^1.0",qp); + q = getQuery("\"on\"^1.0", qp); assertNotNull(q); - Analyzer a2 = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET); + Analyzer a2 = + new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET); CommonQueryParserConfiguration qp2 = getParserConfig(a2); q = getQuery("the^3", qp2); // "the" is a stop word so the result is an empty query: @@ -942,7 +946,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { try { getQuery(queryString); } catch (Exception expected) { - if(isQueryParserException(expected)){ + if (isQueryParserException(expected)) { return; } } @@ -953,7 +957,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { try { getQuery(queryString, a); } catch (Exception expected) { - if(isQueryParserException(expected)){ + if (isQueryParserException(expected)) { return; } } @@ -975,158 +979,179 @@ public abstract class QueryParserTestBase extends LuceneTestCase { assertParseException("one two three", purWhitespaceAnalyzer); } - /** - * This test differs from TestPrecedenceQueryParser - */ + /** This test differs from TestPrecedenceQueryParser */ public void testPrecedence() throws Exception { - CommonQueryParserConfiguration qp = getParserConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); + CommonQueryParserConfiguration qp = + getParserConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); Query query1 = getQuery("A AND B OR C AND D", qp); Query query2 = getQuery("+A +B +C +D", qp); assertEquals(query1, query2); } -// Todo: convert this from DateField to DateUtil -// public void testLocalDateFormat() throws IOException, ParseException { -// Directory ramDir = newDirectory(); -// IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))); -// addDateDoc("a", 2005, 12, 2, 10, 15, 33, iw); -// addDateDoc("b", 2005, 12, 4, 22, 15, 00, iw); -// iw.close(); -// IndexSearcher is = new IndexSearcher(ramDir, true); -// assertHits(1, "[12/1/2005 TO 12/3/2005]", is); -// assertHits(2, "[12/1/2005 TO 12/4/2005]", is); -// assertHits(1, "[12/3/2005 TO 12/4/2005]", is); -// assertHits(1, "{12/1/2005 TO 12/3/2005}", is); -// assertHits(1, "{12/1/2005 TO 12/4/2005}", is); -// assertHits(0, "{12/3/2005 TO 12/4/2005}", is); -// is.close(); -// ramDir.close(); -// } -// -// private void addDateDoc(String content, int year, int month, -// int day, int hour, int minute, int second, IndexWriter iw) throws IOException { -// Document d = new Document(); -// d.add(newField("f", content, Field.Store.YES, Field.Index.ANALYZED)); -// Calendar cal = Calendar.getInstance(Locale.ENGLISH); -// cal.set(year, month - 1, day, hour, minute, second); -// d.add(newField("date", DateField.dateToString(cal.getTime()), Field.Store.YES, Field.Index.NOT_ANALYZED)); -// iw.addDocument(d); -// } + 
// Todo: convert this from DateField to DateUtil + // public void testLocalDateFormat() throws IOException, ParseException { + // Directory ramDir = newDirectory(); + // IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(new MockAnalyzer(random, + // MockTokenizer.WHITESPACE, false))); + // addDateDoc("a", 2005, 12, 2, 10, 15, 33, iw); + // addDateDoc("b", 2005, 12, 4, 22, 15, 00, iw); + // iw.close(); + // IndexSearcher is = new IndexSearcher(ramDir, true); + // assertHits(1, "[12/1/2005 TO 12/3/2005]", is); + // assertHits(2, "[12/1/2005 TO 12/4/2005]", is); + // assertHits(1, "[12/3/2005 TO 12/4/2005]", is); + // assertHits(1, "{12/1/2005 TO 12/3/2005}", is); + // assertHits(1, "{12/1/2005 TO 12/4/2005}", is); + // assertHits(0, "{12/3/2005 TO 12/4/2005}", is); + // is.close(); + // ramDir.close(); + // } + // + // private void addDateDoc(String content, int year, int month, + // int day, int hour, int minute, int second, IndexWriter iw) throws + // IOException { + // Document d = new Document(); + // d.add(newField("f", content, Field.Store.YES, Field.Index.ANALYZED)); + // Calendar cal = Calendar.getInstance(Locale.ENGLISH); + // cal.set(year, month - 1, day, hour, minute, second); + // d.add(newField("date", DateField.dateToString(cal.getTime()), Field.Store.YES, + // Field.Index.NOT_ANALYZED)); + // iw.addDocument(d); + // } public abstract void testStarParsing() throws Exception; public void testEscapedWildcard() throws Exception { - CommonQueryParserConfiguration qp = getParserConfig( new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); + CommonQueryParserConfiguration qp = + getParserConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); WildcardQuery q = new WildcardQuery(new Term("field", "foo\\?ba?r")); assertEquals(q, getQuery("foo\\?ba?r", qp)); } - + public void testRegexps() throws Exception { - CommonQueryParserConfiguration qp = getParserConfig( new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true)); + CommonQueryParserConfiguration qp = + getParserConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true)); RegexpQuery q = new RegexpQuery(new Term("field", "[a-z][123]")); - assertEquals(q, getQuery("/[a-z][123]/",qp)); - assertEquals(q, getQuery("/[A-Z][123]/",qp)); - assertEquals(new BoostQuery(q, 0.5f), getQuery("/[A-Z][123]/^0.5",qp)); + assertEquals(q, getQuery("/[a-z][123]/", qp)); + assertEquals(q, getQuery("/[A-Z][123]/", qp)); + assertEquals(new BoostQuery(q, 0.5f), getQuery("/[A-Z][123]/^0.5", qp)); qp.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_REWRITE); q.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_REWRITE); - assertTrue(getQuery("/[A-Z][123]/^0.5",qp) instanceof BoostQuery); - assertTrue(((BoostQuery) getQuery("/[A-Z][123]/^0.5",qp)).getQuery() instanceof RegexpQuery); - assertEquals(MultiTermQuery.SCORING_BOOLEAN_REWRITE, ((RegexpQuery) ((BoostQuery) getQuery("/[A-Z][123]/^0.5",qp)).getQuery()).getRewriteMethod()); - assertEquals(new BoostQuery(q, 0.5f), getQuery("/[A-Z][123]/^0.5",qp)); + assertTrue(getQuery("/[A-Z][123]/^0.5", qp) instanceof BoostQuery); + assertTrue(((BoostQuery) getQuery("/[A-Z][123]/^0.5", qp)).getQuery() instanceof RegexpQuery); + assertEquals( + MultiTermQuery.SCORING_BOOLEAN_REWRITE, + ((RegexpQuery) ((BoostQuery) getQuery("/[A-Z][123]/^0.5", qp)).getQuery()) + .getRewriteMethod()); + assertEquals(new BoostQuery(q, 0.5f), getQuery("/[A-Z][123]/^0.5", qp)); qp.setMultiTermRewriteMethod(MultiTermQuery.CONSTANT_SCORE_REWRITE); - + Query escaped = new RegexpQuery(new 
Term("field", "[a-z]\\/[123]")); - assertEquals(escaped, getQuery("/[a-z]\\/[123]/",qp)); + assertEquals(escaped, getQuery("/[a-z]\\/[123]/", qp)); Query escaped2 = new RegexpQuery(new Term("field", "[a-z]\\*[123]")); - assertEquals(escaped2, getQuery("/[a-z]\\*[123]/",qp)); - + assertEquals(escaped2, getQuery("/[a-z]\\*[123]/", qp)); + BooleanQuery.Builder complex = new BooleanQuery.Builder(); complex.add(new RegexpQuery(new Term("field", "[a-z]\\/[123]")), Occur.MUST); complex.add(new TermQuery(new Term("path", "/etc/init.d/")), Occur.MUST); complex.add(new TermQuery(new Term("field", "/etc/init[.]d/lucene/")), Occur.SHOULD); - assertEquals(complex.build(), getQuery("/[a-z]\\/[123]/ AND path:\"/etc/init.d/\" OR \"/etc\\/init\\[.\\]d/lucene/\" ",qp)); - + assertEquals( + complex.build(), + getQuery( + "/[a-z]\\/[123]/ AND path:\"/etc/init.d/\" OR \"/etc\\/init\\[.\\]d/lucene/\" ", qp)); + Query re = new RegexpQuery(new Term("field", "http.*")); - assertEquals(re, getQuery("field:/http.*/",qp)); - assertEquals(re, getQuery("/http.*/",qp)); - + assertEquals(re, getQuery("field:/http.*/", qp)); + assertEquals(re, getQuery("/http.*/", qp)); + re = new RegexpQuery(new Term("field", "http~0.5")); - assertEquals(re, getQuery("field:/http~0.5/",qp)); - assertEquals(re, getQuery("/http~0.5/",qp)); - + assertEquals(re, getQuery("field:/http~0.5/", qp)); + assertEquals(re, getQuery("/http~0.5/", qp)); + re = new RegexpQuery(new Term("field", "boo")); - assertEquals(re, getQuery("field:/boo/",qp)); - assertEquals(re, getQuery("/boo/",qp)); - - assertEquals(new TermQuery(new Term("field", "/boo/")), getQuery("\"/boo/\"",qp)); - assertEquals(new TermQuery(new Term("field", "/boo/")), getQuery("\\/boo\\/",qp)); - + assertEquals(re, getQuery("field:/boo/", qp)); + assertEquals(re, getQuery("/boo/", qp)); + + assertEquals(new TermQuery(new Term("field", "/boo/")), getQuery("\"/boo/\"", qp)); + assertEquals(new TermQuery(new Term("field", "/boo/")), getQuery("\\/boo\\/", qp)); + BooleanQuery.Builder two = new BooleanQuery.Builder(); two.add(new RegexpQuery(new Term("field", "foo")), Occur.SHOULD); two.add(new RegexpQuery(new Term("field", "bar")), Occur.SHOULD); - assertEquals(two.build(), getQuery("field:/foo/ field:/bar/",qp)); - assertEquals(two.build(), getQuery("/foo/ /bar/",qp)); + assertEquals(two.build(), getQuery("field:/foo/ field:/bar/", qp)); + assertEquals(two.build(), getQuery("/foo/ /bar/", qp)); } - + public void testStopwords() throws Exception { CharacterRunAutomaton stopSet = new CharacterRunAutomaton(new RegExp("the|foo").toAutomaton()); - CommonQueryParserConfiguration qp = getParserConfig(new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, stopSet)); - Query result = getQuery("field:the OR field:foo",qp); + CommonQueryParserConfiguration qp = + getParserConfig(new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, stopSet)); + Query result = getQuery("field:the OR field:foo", qp); assertNotNull("result is null and it shouldn't be", result); - assertTrue("result is not a BooleanQuery", result instanceof BooleanQuery || result instanceof MatchNoDocsQuery); + assertTrue( + "result is not a BooleanQuery", + result instanceof BooleanQuery || result instanceof MatchNoDocsQuery); if (result instanceof BooleanQuery) { assertEquals(0, ((BooleanQuery) result).clauses().size()); } - result = getQuery("field:woo OR field:the",qp); + result = getQuery("field:woo OR field:the", qp); assertNotNull("result is null and it shouldn't be", result); assertTrue("result is not a TermQuery", result 
instanceof TermQuery); - result = getQuery("(fieldX:xxxxx OR fieldy:xxxxxxxx)^2 AND (fieldx:the OR fieldy:foo)",qp); + result = getQuery("(fieldX:xxxxx OR fieldy:xxxxxxxx)^2 AND (fieldx:the OR fieldy:foo)", qp); assertNotNull("result is null and it shouldn't be", result); assertTrue("result is not a BoostQuery", result instanceof BoostQuery); result = ((BoostQuery) result).getQuery(); assertTrue("result is not a BooleanQuery", result instanceof BooleanQuery); if (VERBOSE) System.out.println("Result: " + result); - assertTrue(((BooleanQuery) result).clauses().size() + " does not equal: " + 2, ((BooleanQuery) result).clauses().size() == 2); + assertTrue( + ((BooleanQuery) result).clauses().size() + " does not equal: " + 2, + ((BooleanQuery) result).clauses().size() == 2); } public void testPositionIncrement() throws Exception { - CommonQueryParserConfiguration qp = getParserConfig( new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET)); + CommonQueryParserConfiguration qp = + getParserConfig( + new MockAnalyzer( + random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET)); qp.setEnablePositionIncrements(true); String qtxt = "\"the words in poisitions pos02578 are stopped in this phrasequery\""; // 0 2 5 7 8 - int expectedPositions[] = {1,3,4,6,9}; - PhraseQuery pq = (PhraseQuery) getQuery(qtxt,qp); - //System.out.println("Query text: "+qtxt); - //System.out.println("Result: "+pq); + int expectedPositions[] = {1, 3, 4, 6, 9}; + PhraseQuery pq = (PhraseQuery) getQuery(qtxt, qp); + // System.out.println("Query text: "+qtxt); + // System.out.println("Result: "+pq); Term t[] = pq.getTerms(); int pos[] = pq.getPositions(); for (int i = 0; i < t.length; i++) { - //System.out.println(i+". "+t[i]+" pos: "+pos[i]); - assertEquals("term "+i+" = "+t[i]+" has wrong term-position!",expectedPositions[i],pos[i]); + // System.out.println(i+". 
"+t[i]+" pos: "+pos[i]); + assertEquals( + "term " + i + " = " + t[i] + " has wrong term-position!", expectedPositions[i], pos[i]); } } public void testMatchAllDocs() throws Exception { - CommonQueryParserConfiguration qp = getParserConfig( new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); - assertEquals(new MatchAllDocsQuery(), getQuery("*:*",qp)); - assertEquals(new MatchAllDocsQuery(), getQuery("(*:*)",qp)); - BooleanQuery bq = (BooleanQuery)getQuery("+*:* -*:*",qp); + CommonQueryParserConfiguration qp = + getParserConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); + assertEquals(new MatchAllDocsQuery(), getQuery("*:*", qp)); + assertEquals(new MatchAllDocsQuery(), getQuery("(*:*)", qp)); + BooleanQuery bq = (BooleanQuery) getQuery("+*:* -*:*", qp); assertEquals(2, bq.clauses().size()); for (BooleanClause clause : bq) { assertTrue(clause.getQuery() instanceof MatchAllDocsQuery); } } - + @SuppressWarnings("unused") private void assertHits(int expected, String query, IndexSearcher is) throws Exception { String oldDefaultField = getDefaultField(); setDefaultField("date"); - CommonQueryParserConfiguration qp = getParserConfig( new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); + CommonQueryParserConfiguration qp = + getParserConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); qp.setLocale(Locale.ENGLISH); - Query q = getQuery(query,qp); + Query q = getQuery(query, qp); ScoreDoc[] hits = is.search(q, 1000).scoreDocs; assertEquals(expected, hits.length); - setDefaultField( oldDefaultField ); + setDefaultField(oldDefaultField); } @Override @@ -1140,7 +1165,8 @@ public abstract class QueryParserTestBase extends LuceneTestCase { // "match" public void testPositionIncrements() throws Exception { Directory dir = newDirectory(); - Analyzer a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET); + Analyzer a = + new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(a)); Document doc = new Document(); doc.add(newTextField("field", "the wizard of ozzy", Field.Store.NO)); @@ -1148,8 +1174,8 @@ public abstract class QueryParserTestBase extends LuceneTestCase { IndexReader r = DirectoryReader.open(w); w.close(); IndexSearcher s = newSearcher(r); - - Query q = getQuery("\"wizard of ozzy\"",a); + + Query q = getQuery("\"wizard of ozzy\"", a); assertEquals(1, s.search(q, 1).totalHits.value); r.close(); dir.close(); @@ -1157,32 +1183,32 @@ public abstract class QueryParserTestBase extends LuceneTestCase { /** whitespace+lowercase analyzer with synonyms */ protected static class Analyzer1 extends Analyzer { - public Analyzer1(){ + public Analyzer1() { super(); } + @Override public TokenStreamComponents createComponents(String fieldName) { - Tokenizer tokenizer = new MockTokenizer( MockTokenizer.WHITESPACE, true); + Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, true); return new TokenStreamComponents(tokenizer, new MockSynonymFilter(tokenizer)); } } - + /** whitespace+lowercase analyzer without synonyms */ protected static class Analyzer2 extends Analyzer { - public Analyzer2(){ + public Analyzer2() { super(); } + @Override public TokenStreamComponents createComponents(String fieldName) { return new TokenStreamComponents(new MockTokenizer(MockTokenizer.WHITESPACE, true)); } } - + public abstract void testNewFieldQuery() throws Exception; - - /** - * Mock collation analyzer: indexes terms as "collated" + 
term - */ + + /** Mock collation analyzer: indexes terms as "collated" + term */ private static class MockCollationFilter extends TokenFilter { private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); @@ -1200,37 +1226,40 @@ public abstract class QueryParserTestBase extends LuceneTestCase { return false; } } - } + private static class MockCollationAnalyzer extends Analyzer { @Override public TokenStreamComponents createComponents(String fieldName) { Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, true); return new TokenStreamComponents(tokenizer, new MockCollationFilter(tokenizer)); } + @Override protected TokenStream normalize(String fieldName, TokenStream in) { return new MockCollationFilter(new LowerCaseFilter(in)); } } - + public void testCollatedRange() throws Exception { CommonQueryParserConfiguration qp = getParserConfig(new MockCollationAnalyzer()); - Query expected = TermRangeQuery.newStringRange(getDefaultField(), "collatedabc", "collateddef", true, true); + Query expected = + TermRangeQuery.newStringRange(getDefaultField(), "collatedabc", "collateddef", true, true); Query actual = getQuery("[abc TO def]", qp); assertEquals(expected, actual); } public void testDistanceAsEditsParsing() throws Exception { - FuzzyQuery q = (FuzzyQuery) getQuery("foobar~2",new MockAnalyzer(random())); + FuzzyQuery q = (FuzzyQuery) getQuery("foobar~2", new MockAnalyzer(random())); assertEquals(2, q.getMaxEdits()); } public void testPhraseQueryToString() throws Exception { - Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET); + Analyzer analyzer = + new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET); CommonQueryParserConfiguration qp = getParserConfig(analyzer); qp.setEnablePositionIncrements(true); - PhraseQuery q = (PhraseQuery)getQuery("\"this hi this is a test is\"", qp); + PhraseQuery q = (PhraseQuery) getQuery("\"this hi this is a test is\"", qp); assertEquals("field:\"? hi ? ? ? 
test\"", q.toString()); } @@ -1242,22 +1271,24 @@ public abstract class QueryParserTestBase extends LuceneTestCase { qp.setAllowLeadingWildcard(true); String prefixQueries[][] = { - {"a*", "ab*", "abc*",}, - {"h*", "hi*", "hij*", "\\\\7*"}, - {"o*", "op*", "opq*", "\\\\\\\\*"}, + { + "a*", "ab*", "abc*", + }, + {"h*", "hi*", "hij*", "\\\\7*"}, + {"o*", "op*", "opq*", "\\\\\\\\*"}, }; String wildcardQueries[][] = { - {"*a*", "*ab*", "*abc**", "ab*e*", "*g?", "*f?1", "abc**"}, - {"*h*", "*hi*", "*hij**", "hi*k*", "*n?", "*m?1", "hij**"}, - {"*o*", "*op*", "*opq**", "op*q*", "*u?", "*t?1", "opq**"}, + {"*a*", "*ab*", "*abc**", "ab*e*", "*g?", "*f?1", "abc**"}, + {"*h*", "*hi*", "*hij**", "hi*k*", "*n?", "*m?1", "hij**"}, + {"*o*", "*op*", "*opq**", "op*q*", "*u?", "*t?1", "opq**"}, }; - // test queries that must be prefix queries + // test queries that must be prefix queries for (int i = 0; i < prefixQueries.length; i++) { for (int j = 0; j < prefixQueries[i].length; j++) { String queryString = prefixQueries[i][j]; - Query q = getQuery(queryString,qp); + Query q = getQuery(queryString, qp); assertEquals(PrefixQuery.class, q.getClass()); } } @@ -1266,7 +1297,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { for (int i = 0; i < wildcardQueries.length; i++) { for (int j = 0; j < wildcardQueries[i].length; j++) { String qtxt = wildcardQueries[i][j]; - Query q = getQuery(qtxt,qp); + Query q = getQuery(qtxt, qp); assertEquals(WildcardQuery.class, q.getClass()); } } @@ -1275,29 +1306,29 @@ public abstract class QueryParserTestBase extends LuceneTestCase { public void testPhraseQueryPositionIncrements() throws Exception { CharacterRunAutomaton stopStopList = - new CharacterRunAutomaton(new RegExp("[sS][tT][oO][pP]").toAutomaton()); + new CharacterRunAutomaton(new RegExp("[sS][tT][oO][pP]").toAutomaton()); - CommonQueryParserConfiguration qp - = getParserConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false, stopStopList)); + CommonQueryParserConfiguration qp = + getParserConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false, stopStopList)); qp.setEnablePositionIncrements(true); PhraseQuery.Builder phraseQuery = new PhraseQuery.Builder(); phraseQuery.add(new Term("field", "1")); phraseQuery.add(new Term("field", "2"), 2); - assertEquals(phraseQuery.build(), getQuery("\"1 stop 2\"",qp)); + assertEquals(phraseQuery.build(), getQuery("\"1 stop 2\"", qp)); } public void testMatchAllQueryParsing() throws Exception { // test simple parsing of MatchAllDocsQuery String oldDefaultField = getDefaultField(); setDefaultField("key"); - CommonQueryParserConfiguration qp = getParserConfig( new MockAnalyzer(random())); - assertEquals(new MatchAllDocsQuery(), getQuery(new MatchAllDocsQuery().toString(),qp)); + CommonQueryParserConfiguration qp = getParserConfig(new MockAnalyzer(random())); + assertEquals(new MatchAllDocsQuery(), getQuery(new MatchAllDocsQuery().toString(), qp)); // test parsing with non-default boost Query query = new MatchAllDocsQuery(); query = new BoostQuery(query, 2.3f); - assertEquals(query, getQuery(query.toString(),qp)); + assertEquals(query, getQuery(query.toString(), qp)); setDefaultField(oldDefaultField); } @@ -1305,8 +1336,10 @@ public abstract class QueryParserTestBase extends LuceneTestCase { String query = "(field1:[1 TO *] AND field1:[* TO 2]) AND field2:(z)"; BooleanQuery.Builder q = new BooleanQuery.Builder(); BooleanQuery.Builder bq = new BooleanQuery.Builder(); - bq.add(TermRangeQuery.newStringRange("field1", "1", null, true, true), 
BooleanClause.Occur.MUST); - bq.add(TermRangeQuery.newStringRange("field1", null, "2", true, true), BooleanClause.Occur.MUST); + bq.add( + TermRangeQuery.newStringRange("field1", "1", null, true, true), BooleanClause.Occur.MUST); + bq.add( + TermRangeQuery.newStringRange("field1", null, "2", true, true), BooleanClause.Occur.MUST); q.add(bq.build(), BooleanClause.Occur.MUST); q.add(new TermQuery(new Term("field2", "z")), BooleanClause.Occur.MUST); assertEquals(q.build(), getQuery(query, new MockAnalyzer(random()))); diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/CoreParserTestIndexData.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/CoreParserTestIndexData.java index 475688875bc..9911f9aa8be 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/CoreParserTestIndexData.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/CoreParserTestIndexData.java @@ -16,6 +16,11 @@ */ package org.apache.lucene.queryparser.xml; +import java.io.BufferedReader; +import java.io.Closeable; +import java.io.IOException; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; @@ -27,12 +32,6 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; -import java.io.BufferedReader; -import java.io.Closeable; -import java.io.IOException; -import java.io.InputStreamReader; -import java.nio.charset.StandardCharsets; - class CoreParserTestIndexData implements Closeable { final Directory dir; @@ -40,8 +39,11 @@ class CoreParserTestIndexData implements Closeable { final IndexSearcher searcher; CoreParserTestIndexData(Analyzer analyzer) throws Exception { - BufferedReader d = new BufferedReader(new InputStreamReader( - TestCoreParser.class.getResourceAsStream("reuters21578.txt"), StandardCharsets.US_ASCII)); + BufferedReader d = + new BufferedReader( + new InputStreamReader( + TestCoreParser.class.getResourceAsStream("reuters21578.txt"), + StandardCharsets.US_ASCII)); dir = LuceneTestCase.newDirectory(); IndexWriter writer = new IndexWriter(dir, LuceneTestCase.newIndexWriterConfig(analyzer)); String line = d.readLine(); @@ -67,6 +69,4 @@ class CoreParserTestIndexData implements Closeable { reader.close(); dir.close(); } - } - diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/TestCoreParser.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/TestCoreParser.java index 4faf6e84b1f..06c05bab6de 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/TestCoreParser.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/TestCoreParser.java @@ -16,6 +16,8 @@ */ package org.apache.lucene.queryparser.xml; +import java.io.IOException; +import java.io.InputStream; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenFilter; @@ -32,12 +34,9 @@ import org.apache.lucene.util.LuceneTestCase; import org.junit.AfterClass; import org.xml.sax.SAXException; -import java.io.IOException; -import java.io.InputStream; - public class TestCoreParser extends LuceneTestCase { - final private static String defaultField = "contents"; + private static final String defaultField = "contents"; private static Analyzer analyzer; private static CoreParser coreParser; @@ -45,8 +44,10 @@ 
public class TestCoreParser extends LuceneTestCase { private static CoreParserTestIndexData indexData; protected Analyzer newAnalyzer() { - // TODO: rewrite test (this needs to set QueryParser.enablePositionIncrements, too, for work with CURRENT): - return new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET); + // TODO: rewrite test (this needs to set QueryParser.enablePositionIncrements, too, for work + // with CURRENT): + return new MockAnalyzer( + random(), MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET); } protected CoreParser newCoreParser(String defaultField, Analyzer analyzer) { @@ -69,20 +70,21 @@ public class TestCoreParser extends LuceneTestCase { } public void test_DOCTYPE_TermQueryXML() throws ParserException, IOException { - SAXException saxe = LuceneTestCase.expectThrows(ParserException.class, SAXException.class, - () -> parse("DOCTYPE_TermQuery.xml")); + SAXException saxe = + LuceneTestCase.expectThrows( + ParserException.class, SAXException.class, () -> parse("DOCTYPE_TermQuery.xml")); assertTrue(saxe.getMessage().startsWith("External Entity resolving unsupported:")); } public void test_ENTITY_TermQueryXML() throws ParserException, IOException { - SAXException saxe = LuceneTestCase.expectThrows(ParserException.class, SAXException.class, - () -> parse("ENTITY_TermQuery.xml")); + SAXException saxe = + LuceneTestCase.expectThrows( + ParserException.class, SAXException.class, () -> parse("ENTITY_TermQuery.xml")); assertTrue(saxe.getMessage().startsWith("External Entity resolving unsupported:")); } public void testTermQueryEmptyXML() throws ParserException, IOException { - parseShouldFail("TermQueryEmpty.xml", - "TermQuery has no text"); + parseShouldFail("TermQueryEmpty.xml", "TermQuery has no text"); } public void testTermsQueryXML() throws ParserException, IOException { @@ -94,11 +96,11 @@ public class TestCoreParser extends LuceneTestCase { Query q = parse("BooleanQuery.xml"); dumpResults("BooleanQuery", q, 5); } - + public void testDisjunctionMaxQueryXML() throws ParserException, IOException { Query q = parse("DisjunctionMaxQuery.xml"); assertTrue(q instanceof DisjunctionMaxQuery); - DisjunctionMaxQuery d = (DisjunctionMaxQuery)q; + DisjunctionMaxQuery d = (DisjunctionMaxQuery) q; assertEquals(0.0f, d.getTieBreakerMultiplier(), 0.0001f); assertEquals(2, d.getDisjuncts().size()); DisjunctionMaxQuery ndq = (DisjunctionMaxQuery) d.getDisjuncts().get(1); @@ -148,13 +150,13 @@ public class TestCoreParser extends LuceneTestCase { Exception expectedException = new NumberFormatException("For input string: \"\""); try { Query q = parse("SpanNearQueryWithoutSlop.xml"); - fail("got query "+q+" instead of expected exception "+expectedException); + fail("got query " + q + " instead of expected exception " + expectedException); } catch (Exception e) { assertEquals(expectedException.toString(), e.toString()); } try { SpanQuery sq = parseAsSpan("SpanNearQueryWithoutSlop.xml"); - fail("got span query "+sq+" instead of expected exception "+expectedException); + fail("got span query " + sq + " instead of expected exception " + expectedException); } catch (Exception e) { assertEquals(expectedException.toString(), e.toString()); } @@ -174,7 +176,7 @@ public class TestCoreParser extends LuceneTestCase { Query q = parse("NestedBooleanQuery.xml"); dumpResults("Nested Boolean query", q, 5); } - + public void testPointRangeQuery() throws ParserException, IOException { Query q = parse("PointRangeQuery.xml"); dumpResults("PointRangeQuery", q, 5); 
@@ -195,7 +197,7 @@ public class TestCoreParser extends LuceneTestCase { dumpResults("PointRangeQueryWithoutRange", q, 5); } - //================= Helper methods =================================== + // ================= Helper methods =================================== protected String defaultField() { return defaultField; @@ -220,7 +222,7 @@ public class TestCoreParser extends LuceneTestCase { try { indexData = new CoreParserTestIndexData(analyzer()); } catch (Exception e) { - fail("caught Exception "+e); + fail("caught Exception " + e); } } return indexData; @@ -234,7 +236,8 @@ public class TestCoreParser extends LuceneTestCase { return indexData().searcher; } - protected void parseShouldFail(String xmlFileName, String expectedParserExceptionMessage) throws IOException { + protected void parseShouldFail(String xmlFileName, String expectedParserExceptionMessage) + throws IOException { Query q = null; ParserException pe = null; try { @@ -242,10 +245,12 @@ public class TestCoreParser extends LuceneTestCase { } catch (ParserException e) { pe = e; } - assertNull("for "+xmlFileName+" unexpectedly got "+q, q); - assertNotNull("expected a ParserException for "+xmlFileName, pe); - assertEquals("expected different ParserException for "+xmlFileName, - expectedParserExceptionMessage, pe.getMessage()); + assertNull("for " + xmlFileName + " unexpectedly got " + q, q); + assertNotNull("expected a ParserException for " + xmlFileName, pe); + assertEquals( + "expected different ParserException for " + xmlFileName, + expectedParserExceptionMessage, + pe.getMessage()); } protected Query parse(String xmlFileName) throws ParserException, IOException { @@ -253,7 +258,7 @@ public class TestCoreParser extends LuceneTestCase { } protected SpanQuery parseAsSpan(String xmlFileName) throws ParserException, IOException { - return (SpanQuery)implParse(xmlFileName, true); + return (SpanQuery) implParse(xmlFileName, true); } private Query implParse(String xmlFileName, boolean span) throws ParserException, IOException { @@ -273,13 +278,29 @@ public class TestCoreParser extends LuceneTestCase { protected void dumpResults(String qType, Query q, int numDocs) throws IOException { if (VERBOSE) { - System.out.println("TEST: qType=" + qType + " numDocs=" + numDocs + " " + q.getClass().getCanonicalName() + " query=" + q); + System.out.println( + "TEST: qType=" + + qType + + " numDocs=" + + numDocs + + " " + + q.getClass().getCanonicalName() + + " query=" + + q); } final IndexSearcher searcher = searcher(); TopDocs hits = searcher.search(q, numDocs); final boolean producedResults = (hits.totalHits.value > 0); if (!producedResults) { - System.out.println("TEST: qType=" + qType + " numDocs=" + numDocs + " " + q.getClass().getCanonicalName() + " query=" + q); + System.out.println( + "TEST: qType=" + + qType + + " numDocs=" + + numDocs + + " " + + q.getClass().getCanonicalName() + + " query=" + + q); } if (VERBOSE) { ScoreDoc[] scoreDocs = hits.scoreDocs; diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/TestCorePlusExtensionsParser.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/TestCorePlusExtensionsParser.java index 8b096170625..46262b27815 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/TestCorePlusExtensionsParser.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/TestCorePlusExtensionsParser.java @@ -28,11 +28,10 @@ public class TestCorePlusExtensionsParser extends TestCorePlusQueriesParser { public void testFuzzyLikeThisQueryXML() 
throws Exception { Query q = parse("FuzzyLikeThisQuery.xml"); - //show rewritten fuzzyLikeThisQuery - see what is being matched on + // show rewritten fuzzyLikeThisQuery - see what is being matched on if (VERBOSE) { System.out.println(rewrite(q)); } dumpResults("FuzzyLikeThis", q, 5); } - } diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/TestCorePlusQueriesParser.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/TestCorePlusQueriesParser.java index 0288d2cb083..b70a7e6b05c 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/TestCorePlusQueriesParser.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/TestCorePlusQueriesParser.java @@ -30,5 +30,4 @@ public class TestCorePlusQueriesParser extends TestCoreParser { Query q = parse("LikeThisQuery.xml"); dumpResults("like this", q, 5); } - } diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/IndexAndTaxonomyReplicationHandler.java b/lucene/replicator/src/java/org/apache/lucene/replicator/IndexAndTaxonomyReplicationHandler.java index 11ea15db6c2..27bb1d43a3c 100644 --- a/lucene/replicator/src/java/org/apache/lucene/replicator/IndexAndTaxonomyReplicationHandler.java +++ b/lucene/replicator/src/java/org/apache/lucene/replicator/IndexAndTaxonomyReplicationHandler.java @@ -21,7 +21,6 @@ import java.util.Collections; import java.util.List; import java.util.Map; import java.util.concurrent.Callable; - import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.replicator.ReplicationClient.ReplicationHandler; @@ -30,43 +29,41 @@ import org.apache.lucene.store.IOContext; import org.apache.lucene.util.InfoStream; /** - * A {@link ReplicationHandler} for replication of an index and taxonomy pair. - * See {@link IndexReplicationHandler} for more detail. This handler ensures - * that the search and taxonomy indexes are replicated in a consistent way. - *
<p>
    - * NOTE: if you intend to recreate a taxonomy index, you should make sure - * to reopen an IndexSearcher and TaxonomyReader pair via the provided callback, - * to guarantee that both indexes are in sync. This handler does not prevent - * replicating such index and taxonomy pairs, and if they are reopened by a - * different thread, unexpected errors can occur, as well as inconsistency - * between the taxonomy and index readers. - * + * A {@link ReplicationHandler} for replication of an index and taxonomy pair. See {@link + * IndexReplicationHandler} for more detail. This handler ensures that the search and taxonomy + * indexes are replicated in a consistent way. + * + *
<p>
NOTE: if you intend to recreate a taxonomy index, you should make sure to reopen an + * IndexSearcher and TaxonomyReader pair via the provided callback, to guarantee that both indexes + * are in sync. This handler does not prevent replicating such index and taxonomy pairs, and if they + * are reopened by a different thread, unexpected errors can occur, as well as inconsistency between + * the taxonomy and index readers. + * * @see IndexReplicationHandler - * * @lucene.experimental */ public class IndexAndTaxonomyReplicationHandler implements ReplicationHandler { - + /** - * The component used to log messages to the {@link InfoStream#getDefault() - * default} {@link InfoStream}. + * The component used to log messages to the {@link InfoStream#getDefault() default} {@link + * InfoStream}. */ public static final String INFO_STREAM_COMPONENT = "IndexAndTaxonomyReplicationHandler"; private final Directory indexDir; private final Directory taxoDir; private final Callable<Boolean> callback; - - private volatile Map<String, List<RevisionFile>> currentRevisionFiles; + + private volatile Map<String, List<RevisionFile>> currentRevisionFiles; private volatile String currentVersion; private volatile InfoStream infoStream = InfoStream.getDefault(); /** - * Constructor with the given index directory and callback to notify when the - * indexes were updated. + * Constructor with the given index directory and callback to notify when the indexes were + * updated. */ - public IndexAndTaxonomyReplicationHandler(Directory indexDir, Directory taxoDir, Callable<Boolean> callback) - throws IOException { + public IndexAndTaxonomyReplicationHandler( + Directory indexDir, Directory taxoDir, Callable<Boolean> callback) throws IOException { this.callback = callback; this.indexDir = indexDir; this.taxoDir = taxoDir; @@ -75,8 +72,11 @@ public class IndexAndTaxonomyReplicationHandler implements ReplicationHandler { final boolean indexExists = DirectoryReader.indexExists(indexDir); final boolean taxoExists = DirectoryReader.indexExists(taxoDir); if (indexExists != taxoExists) { - throw new IllegalStateException("search and taxonomy indexes must either both exist or not: index=" + indexExists - + " taxo=" + taxoExists); + throw new IllegalStateException( + "search and taxonomy indexes must either both exist or not: index=" + + indexExists + + " taxo=" + + taxoExists); } if (indexExists) { // both indexes exist final IndexCommit indexCommit = IndexReplicationHandler.getLastCommit(indexDir); @@ -85,27 +85,36 @@ public class IndexAndTaxonomyReplicationHandler implements ReplicationHandler { currentVersion = IndexAndTaxonomyRevision.revisionVersion(indexCommit, taxoCommit); final InfoStream infoStream = InfoStream.getDefault(); if (infoStream.isEnabled(INFO_STREAM_COMPONENT)) { - infoStream.message(INFO_STREAM_COMPONENT, "constructor(): currentVersion=" + currentVersion - + " currentRevisionFiles=" + currentRevisionFiles); - infoStream.message(INFO_STREAM_COMPONENT, "constructor(): indexCommit=" + indexCommit - + " taxoCommit=" + taxoCommit); + infoStream.message( + INFO_STREAM_COMPONENT, + "constructor(): currentVersion=" + + currentVersion + + " currentRevisionFiles=" + + currentRevisionFiles); + infoStream.message( + INFO_STREAM_COMPONENT, + "constructor(): indexCommit=" + indexCommit + " taxoCommit=" + taxoCommit); } } } - + @Override public String currentVersion() { return currentVersion; } - + @Override - public Map<String, List<RevisionFile>> currentRevisionFiles() { + public Map<String, List<RevisionFile>> currentRevisionFiles() { return currentRevisionFiles; } - + @Override - public void revisionReady(String version, Map<String, List<RevisionFile>> revisionFiles, - Map<String, List<String>>
copiedFiles, Map<String, Directory> sourceDirectory) throws IOException { + public void revisionReady( + String version, + Map<String, List<RevisionFile>> revisionFiles, + Map<String, List<String>> copiedFiles, + Map<String, Directory> sourceDirectory) + throws IOException { Directory taxoClientDir = sourceDirectory.get(IndexAndTaxonomyRevision.TAXONOMY_SOURCE); Directory indexClientDir = sourceDirectory.get(IndexAndTaxonomyRevision.INDEX_SOURCE); List<String> taxoFiles = copiedFiles.get(IndexAndTaxonomyRevision.TAXONOMY_SOURCE); @@ -114,7 +123,7 @@ public class IndexAndTaxonomyReplicationHandler implements ReplicationHandler { String indexSegmentsFile = IndexReplicationHandler.getSegmentsFile(indexFiles, false); String taxoPendingFile = taxoSegmentsFile == null ? null : "pending_" + taxoSegmentsFile; String indexPendingFile = "pending_" + indexSegmentsFile; - + boolean success = false; try { // copy taxonomy files before index files @@ -126,28 +135,28 @@ public class IndexAndTaxonomyReplicationHandler implements ReplicationHandler { taxoDir.sync(taxoFiles); } indexDir.sync(indexFiles); - + // now copy, fsync, and rename segmentsFile, taxonomy first because it is ok if a // reader sees a more advanced taxonomy than the index. - + if (taxoSegmentsFile != null) { taxoDir.copyFrom(taxoClientDir, taxoSegmentsFile, taxoPendingFile, IOContext.READONCE); } indexDir.copyFrom(indexClientDir, indexSegmentsFile, indexPendingFile, IOContext.READONCE); - + if (taxoSegmentsFile != null) { taxoDir.sync(Collections.singletonList(taxoPendingFile)); } indexDir.sync(Collections.singletonList(indexPendingFile)); - + if (taxoSegmentsFile != null) { taxoDir.rename(taxoPendingFile, taxoSegmentsFile); taxoDir.syncMetaData(); } - + indexDir.rename(indexPendingFile, indexSegmentsFile); indexDir.syncMetaData(); - + success = true; } finally { if (!success) { @@ -165,12 +174,16 @@ public class IndexAndTaxonomyReplicationHandler implements ReplicationHandler { // all files have been successfully copied + sync'd. update the handler's state currentRevisionFiles = revisionFiles; currentVersion = version; - + if (infoStream.isEnabled(INFO_STREAM_COMPONENT)) { - infoStream.message(INFO_STREAM_COMPONENT, "revisionReady(): currentVersion=" + currentVersion - + " currentRevisionFiles=" + currentRevisionFiles); + infoStream.message( + INFO_STREAM_COMPONENT, + "revisionReady(): currentVersion=" + + currentVersion + + " currentRevisionFiles=" + + currentRevisionFiles); } - + // Cleanup the index directory from old and unused index files. // NOTE: we don't use IndexWriter.deleteUnusedFiles here since it may have // side-effects, e.g.
if it hits sudden IO errors while opening the index @@ -197,5 +210,4 @@ public class IndexAndTaxonomyReplicationHandler implements ReplicationHandler { } this.infoStream = infoStream; } - } diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/IndexAndTaxonomyRevision.java b/lucene/replicator/src/java/org/apache/lucene/replicator/IndexAndTaxonomyRevision.java index 39df369017f..14eff99d5a6 100644 --- a/lucene/replicator/src/java/org/apache/lucene/replicator/IndexAndTaxonomyRevision.java +++ b/lucene/replicator/src/java/org/apache/lucene/replicator/IndexAndTaxonomyRevision.java @@ -21,7 +21,6 @@ import java.io.InputStream; import java.util.HashMap; import java.util.List; import java.util.Map; - import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter; import org.apache.lucene.facet.taxonomy.writercache.TaxonomyWriterCache; import org.apache.lucene.index.IndexCommit; @@ -34,46 +33,46 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; /** - * A {@link Revision} of a single index and taxonomy index files which comprises - * the list of files from both indexes. This revision should be used whenever a - * pair of search and taxonomy indexes need to be replicated together to - * guarantee consistency of both on the replicating (client) side. - * + * A {@link Revision} of a single index and taxonomy index files which comprises the list of files + * from both indexes. This revision should be used whenever a pair of search and taxonomy indexes + * need to be replicated together to guarantee consistency of both on the replicating (client) side. + * * @see IndexRevision - * * @lucene.experimental */ public class IndexAndTaxonomyRevision implements Revision { - + /** - * A {@link DirectoryTaxonomyWriter} which sets the underlying - * {@link IndexWriter}'s {@link IndexDeletionPolicy} to - * {@link SnapshotDeletionPolicy}. + * A {@link DirectoryTaxonomyWriter} which sets the underlying {@link IndexWriter}'s {@link + * IndexDeletionPolicy} to {@link SnapshotDeletionPolicy}. 
*/ public static final class SnapshotDirectoryTaxonomyWriter extends DirectoryTaxonomyWriter { - + private SnapshotDeletionPolicy sdp; private IndexWriter writer; - + /** - * @see DirectoryTaxonomyWriter#DirectoryTaxonomyWriter(Directory, - * IndexWriterConfig.OpenMode, TaxonomyWriterCache) + * @see DirectoryTaxonomyWriter#DirectoryTaxonomyWriter(Directory, IndexWriterConfig.OpenMode, + * TaxonomyWriterCache) */ - public SnapshotDirectoryTaxonomyWriter(Directory directory, OpenMode openMode, TaxonomyWriterCache cache) - throws IOException { + public SnapshotDirectoryTaxonomyWriter( + Directory directory, OpenMode openMode, TaxonomyWriterCache cache) throws IOException { super(directory, openMode, cache); } - - /** @see DirectoryTaxonomyWriter#DirectoryTaxonomyWriter(Directory, IndexWriterConfig.OpenMode) */ - public SnapshotDirectoryTaxonomyWriter(Directory directory, OpenMode openMode) throws IOException { + + /** + * @see DirectoryTaxonomyWriter#DirectoryTaxonomyWriter(Directory, IndexWriterConfig.OpenMode) + */ + public SnapshotDirectoryTaxonomyWriter(Directory directory, OpenMode openMode) + throws IOException { super(directory, openMode); } - + /** @see DirectoryTaxonomyWriter#DirectoryTaxonomyWriter(Directory) */ public SnapshotDirectoryTaxonomyWriter(Directory d) throws IOException { super(d); } - + @Override protected IndexWriterConfig createIndexWriterConfig(OpenMode openMode) { IndexWriterConfig conf = super.createIndexWriterConfig(openMode); @@ -81,61 +80,62 @@ public class IndexAndTaxonomyRevision implements Revision { conf.setIndexDeletionPolicy(sdp); return conf; } - + @Override - protected IndexWriter openIndexWriter(Directory directory, IndexWriterConfig config) throws IOException { + protected IndexWriter openIndexWriter(Directory directory, IndexWriterConfig config) + throws IOException { writer = super.openIndexWriter(directory, config); return writer; } - + /** Returns the {@link SnapshotDeletionPolicy} used by the underlying {@link IndexWriter}. */ public SnapshotDeletionPolicy getDeletionPolicy() { return sdp; } - + /** Returns the {@link IndexWriter} used by this {@link DirectoryTaxonomyWriter}. */ public IndexWriter getIndexWriter() { return writer; } - } - + private static final int RADIX = 16; - + public static final String INDEX_SOURCE = "index"; public static final String TAXONOMY_SOURCE = "taxo"; - + private final IndexWriter indexWriter; private final SnapshotDirectoryTaxonomyWriter taxoWriter; private final IndexCommit indexCommit, taxoCommit; private final SnapshotDeletionPolicy indexSDP, taxoSDP; private final String version; - private final Map<String, List<RevisionFile>> sourceFiles; - + private final Map<String, List<RevisionFile>> sourceFiles; + /** Returns a singleton map of the revision files from the given {@link IndexCommit}. */ - public static Map<String, List<RevisionFile>> revisionFiles(IndexCommit indexCommit, IndexCommit taxoCommit) - throws IOException { - HashMap<String, List<RevisionFile>> files = new HashMap<>(); + public static Map<String, List<RevisionFile>> revisionFiles( + IndexCommit indexCommit, IndexCommit taxoCommit) throws IOException { + HashMap<String, List<RevisionFile>> files = new HashMap<>(); files.put(INDEX_SOURCE, IndexRevision.revisionFiles(indexCommit).values().iterator().next()); files.put(TAXONOMY_SOURCE, IndexRevision.revisionFiles(taxoCommit).values().iterator().next()); return files; } - + /** - * Returns a String representation of a revision's version from the given - * {@link IndexCommit}s of the search and taxonomy indexes. + * Returns a String representation of a revision's version from the given {@link IndexCommit}s of + * the search and taxonomy indexes.
*/ public static String revisionVersion(IndexCommit indexCommit, IndexCommit taxoCommit) { - return Long.toString(indexCommit.getGeneration(), RADIX) + ":" + Long.toString(taxoCommit.getGeneration(), RADIX); + return Long.toString(indexCommit.getGeneration(), RADIX) + + ":" + + Long.toString(taxoCommit.getGeneration(), RADIX); } - + /** - * Constructor over the given {@link IndexWriter}. Uses the last - * {@link IndexCommit} found in the {@link Directory} managed by the given - * writer. + * Constructor over the given {@link IndexWriter}. Uses the last {@link IndexCommit} found in the + * {@link Directory} managed by the given writer. */ - public IndexAndTaxonomyRevision(IndexWriter indexWriter, SnapshotDirectoryTaxonomyWriter taxoWriter) - throws IOException { + public IndexAndTaxonomyRevision( + IndexWriter indexWriter, SnapshotDirectoryTaxonomyWriter taxoWriter) throws IOException { IndexDeletionPolicy delPolicy = indexWriter.getConfig().getIndexDeletionPolicy(); if (!(delPolicy instanceof SnapshotDeletionPolicy)) { throw new IllegalArgumentException("IndexWriter must be created with SnapshotDeletionPolicy"); @@ -149,7 +149,7 @@ public class IndexAndTaxonomyRevision implements Revision { this.version = revisionVersion(indexCommit, taxoCommit); this.sourceFiles = revisionFiles(indexCommit, taxoCommit); } - + @Override public int compareTo(String version) { final String[] parts = version.split(":"); @@ -157,7 +157,7 @@ public class IndexAndTaxonomyRevision implements Revision { final long taxoGen = Long.parseLong(parts[1], RADIX); final long indexCommitGen = indexCommit.getGeneration(); final long taxoCommitGen = taxoCommit.getGeneration(); - + // if the index generation is not the same as this commit's generation, // compare by it. Otherwise, compare by the taxonomy generation. if (indexCommitGen < indexGen) { @@ -168,32 +168,37 @@ public class IndexAndTaxonomyRevision implements Revision { return taxoCommitGen < taxoGen ? -1 : (taxoCommitGen > taxoGen ? 1 : 0); } } - + @Override public int compareTo(Revision o) { IndexAndTaxonomyRevision other = (IndexAndTaxonomyRevision) o; int cmp = indexCommit.compareTo(other.indexCommit); return cmp != 0 ? cmp : taxoCommit.compareTo(other.taxoCommit); } - + @Override public String getVersion() { return version; } - + @Override - public Map<String, List<RevisionFile>> getSourceFiles() { + public Map<String, List<RevisionFile>> getSourceFiles() { return sourceFiles; } - + @Override public InputStream open(String source, String fileName) throws IOException { - assert source.equals(INDEX_SOURCE) || source.equals(TAXONOMY_SOURCE) : "invalid source; expected=(" + INDEX_SOURCE - + " or " + TAXONOMY_SOURCE + ") got=" + source; + assert source.equals(INDEX_SOURCE) || source.equals(TAXONOMY_SOURCE) + : "invalid source; expected=(" + + INDEX_SOURCE + + " or " + + TAXONOMY_SOURCE + + ") got=" + + source; IndexCommit ic = source.equals(INDEX_SOURCE) ?
indexCommit : taxoCommit; return new IndexInputInputStream(ic.getDirectory().openInput(fileName, IOContext.READONCE)); } - + @Override public void release() throws IOException { try { @@ -201,17 +206,16 @@ public class IndexAndTaxonomyRevision implements Revision { } finally { taxoSDP.release(taxoCommit); } - + try { indexWriter.deleteUnusedFiles(); } finally { taxoWriter.getIndexWriter().deleteUnusedFiles(); } } - + @Override public String toString() { return "IndexAndTaxonomyRevision version=" + version + " files=" + sourceFiles; } - } diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/IndexInputInputStream.java b/lucene/replicator/src/java/org/apache/lucene/replicator/IndexInputInputStream.java index 6b6a8a99f22..841e242bb9c 100644 --- a/lucene/replicator/src/java/org/apache/lucene/replicator/IndexInputInputStream.java +++ b/lucene/replicator/src/java/org/apache/lucene/replicator/IndexInputInputStream.java @@ -18,25 +18,24 @@ package org.apache.lucene.replicator; import java.io.IOException; import java.io.InputStream; - import org.apache.lucene.store.IndexInput; -/** +/** * An {@link InputStream} which wraps an {@link IndexInput}. - * + * * @lucene.experimental */ public final class IndexInputInputStream extends InputStream { - + private final IndexInput in; - + private long remaining; - + public IndexInputInputStream(IndexInput in) { this.in = in; remaining = in.length(); } - + @Override public int read() throws IOException { if (remaining == 0) { @@ -46,22 +45,22 @@ public final class IndexInputInputStream extends InputStream { return in.readByte(); } } - + @Override public int available() throws IOException { return (int) in.length(); } - + @Override public void close() throws IOException { in.close(); } - + @Override public int read(byte[] b) throws IOException { return read(b, 0, b.length); } - + @Override public int read(byte[] b, int off, int len) throws IOException { if (remaining == 0) { @@ -74,7 +73,7 @@ public final class IndexInputInputStream extends InputStream { remaining -= len; return len; } - + @Override public long skip(long n) throws IOException { if (remaining == 0) { @@ -87,5 +86,4 @@ public final class IndexInputInputStream extends InputStream { remaining -= n; return n; } - -} \ No newline at end of file +} diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/IndexReplicationHandler.java b/lucene/replicator/src/java/org/apache/lucene/replicator/IndexReplicationHandler.java index 69840747a87..469564c9912 100644 --- a/lucene/replicator/src/java/org/apache/lucene/replicator/IndexReplicationHandler.java +++ b/lucene/replicator/src/java/org/apache/lucene/replicator/IndexReplicationHandler.java @@ -24,7 +24,6 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.Callable; import java.util.regex.Matcher; - import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexFileNames; @@ -37,42 +36,40 @@ import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.InfoStream; /** - * A {@link ReplicationHandler} for replication of an index. Implements - * {@link #revisionReady} by copying the files pointed by the client resolver to - * the index {@link Directory} and then touches the index with - * {@link IndexWriter} to make sure any unused files are deleted. - *
<p>
    - * NOTE: this handler assumes that {@link IndexWriter} is not opened by - * another process on the index directory. In fact, opening an - * {@link IndexWriter} on the same directory to which files are copied can lead - * to undefined behavior, where some or all the files will be deleted, override - * other files or simply create a mess. When you replicate an index, it is best - * if the index is never modified by {@link IndexWriter}, except the one that is - * open on the source index, from which you replicate. - *
<p>
    - * This handler notifies the application via a provided {@link Callable} when an - * updated index commit was made available for it. - * + * A {@link ReplicationHandler} for replication of an index. Implements {@link #revisionReady} by + * copying the files pointed by the client resolver to the index {@link Directory} and then touches + * the index with {@link IndexWriter} to make sure any unused files are deleted. + * + *
<p>
    NOTE: this handler assumes that {@link IndexWriter} is not opened by another process on + * the index directory. In fact, opening an {@link IndexWriter} on the same directory to which files + * are copied can lead to undefined behavior, where some or all the files will be deleted, override + * other files or simply create a mess. When you replicate an index, it is best if the index is + * never modified by {@link IndexWriter}, except the one that is open on the source index, from + * which you replicate. + * + *
<p>
This handler notifies the application via a provided {@link Callable} when an updated index + * commit was made available for it. + * * @lucene.experimental */ public class IndexReplicationHandler implements ReplicationHandler { - + /** - * The component used to log messages to the {@link InfoStream#getDefault() - * default} {@link InfoStream}. + * The component used to log messages to the {@link InfoStream#getDefault() default} {@link + * InfoStream}. */ public static final String INFO_STREAM_COMPONENT = "IndexReplicationHandler"; - + private final Directory indexDir; private final Callable<Boolean> callback; - - private volatile Map<String, List<RevisionFile>> currentRevisionFiles; + + private volatile Map<String, List<RevisionFile>> currentRevisionFiles; private volatile String currentVersion; private volatile InfoStream infoStream = InfoStream.getDefault(); - + /** - * Returns the last {@link IndexCommit} found in the {@link Directory}, or - * {@code null} if there are no commits. + * Returns the last {@link IndexCommit} found in the {@link Directory}, or {@code null} if there + * are no commits. */ public static IndexCommit getLastCommit(Directory dir) throws IOException { try { @@ -87,16 +84,15 @@ public class IndexReplicationHandler implements ReplicationHandler { } return null; } - + /** - * Verifies that the last file is segments_N and fails otherwise. It also - * removes and returns the file from the list, because it needs to be handled - * last, after all files. This is important in order to guarantee that if a - * reader sees the new segments_N, all other segment files are already on - * stable storage. - *
<p>
    - * The reason why the code fails instead of putting segments_N file last is - * that this indicates an error in the Revision implementation. + * Verifies that the last file is segments_N and fails otherwise. It also removes and returns the + * file from the list, because it needs to be handled last, after all files. This is important in + * order to guarantee that if a reader sees the new segments_N, all other segment files are + * already on stable storage. + * + *
<p>
The reason why the code fails instead of putting segments_N file last is that this indicates + * an error in the Revision implementation. */ public static String getSegmentsFile(List<String> files, boolean allowEmpty) { if (files.isEmpty()) { @@ -106,18 +102,19 @@ public class IndexReplicationHandler implements ReplicationHandler { throw new IllegalStateException("empty list of files not allowed"); } } - + String segmentsFile = files.remove(files.size() - 1); if (!segmentsFile.startsWith(IndexFileNames.SEGMENTS)) { - throw new IllegalStateException("last file to copy+sync must be segments_N but got " + segmentsFile - + "; check your Revision implementation!"); + throw new IllegalStateException( + "last file to copy+sync must be segments_N but got " + + segmentsFile + + "; check your Revision implementation!"); } return segmentsFile; } /** - * Cleanup the index directory by deleting all given files. Called when file - * copy or sync failed. + * Cleanup the index directory by deleting all given files. Called when file copy or sync failed. */ public static void cleanupFilesOnFailure(Directory dir, List<String> files) { for (String file : files) { @@ -126,18 +123,17 @@ public class IndexReplicationHandler implements ReplicationHandler { IOUtils.deleteFilesIgnoringExceptions(dir, file); } } - + /** - * Cleans up the index directory from old index files. This method uses the - * last commit found by {@link #getLastCommit(Directory)}. If it matches the - * expected segmentsFile, then all files not referenced by this commit point - * are deleted. - *

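The ordering contract above is easiest to see end to end. Below is a minimal sketch of the copy-then-commit sequence that revisionReady further down performs, using only calls that appear in this file; the wrapper class, the method name copyAndCommit, and the directory names are illustrative, and the file list is assumed to still end with segments_N:

    import java.io.IOException;
    import java.util.Collections;
    import java.util.List;
    import org.apache.lucene.replicator.IndexReplicationHandler;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.IOContext;

    class CopyAndCommitSketch {
      static void copyAndCommit(Directory clientDir, Directory indexDir, List<String> files)
          throws IOException {
        // segments_N is removed from the list so it can be handled last
        String segmentsFile = IndexReplicationHandler.getSegmentsFile(files, false);
        // 1. copy and fsync all segment files first
        IndexReplicationHandler.copyFiles(clientDir, indexDir, files);
        indexDir.sync(files);
        // 2. copy segments_N under a pending name, fsync it, then rename;
        //    only after the rename can a reader observe the new commit
        String pending = "pending_" + segmentsFile;
        indexDir.copyFrom(clientDir, segmentsFile, pending, IOContext.READONCE);
        indexDir.sync(Collections.singletonList(pending));
        indexDir.rename(pending, segmentsFile);
        indexDir.syncMetaData();
      }
    }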
    - * NOTE: this method does a best effort attempt to clean the index - * directory. It suppresses any exceptions that occur, as this can be retried - * the next time. + * Cleans up the index directory from old index files. This method uses the last commit found by + * {@link #getLastCommit(Directory)}. If it matches the expected segmentsFile, then all files not + * referenced by this commit point are deleted. + * + *

    NOTE: this method does a best effort attempt to clean the index directory. It + * suppresses any exceptions that occur, as this can be retried the next time. */ - public static void cleanupOldIndexFiles(Directory dir, String segmentsFile, InfoStream infoStream) { + public static void cleanupOldIndexFiles( + Directory dir, String segmentsFile, InfoStream infoStream) { try { IndexCommit commit = getLastCommit(dir); // commit == null means weird IO errors occurred, ignore them @@ -159,16 +155,15 @@ public class IndexReplicationHandler implements ReplicationHandler { // cleanup will have a chance to succeed the next time we get a new // revision. if (infoStream.isEnabled(INFO_STREAM_COMPONENT)) { - infoStream.message(INFO_STREAM_COMPONENT, "cleanupOldIndexFiles(): failed on error " + t.getMessage()); + infoStream.message( + INFO_STREAM_COMPONENT, "cleanupOldIndexFiles(): failed on error " + t.getMessage()); } } } - - /** - * Copies the files from the source directory to the target one, if they are - * not the same. - */ - public static void copyFiles(Directory source, Directory target, List files) throws IOException { + + /** Copies the files from the source directory to the target one, if they are not the same. */ + public static void copyFiles(Directory source, Directory target, List files) + throws IOException { if (!source.equals(target)) { for (String file : files) { target.copyFrom(source, file, file, IOContext.READONCE); @@ -177,10 +172,11 @@ public class IndexReplicationHandler implements ReplicationHandler { } /** - * Constructor with the given index directory and callback to notify when the - * indexes were updated. + * Constructor with the given index directory and callback to notify when the indexes were + * updated. */ - public IndexReplicationHandler(Directory indexDir, Callable callback) throws IOException { + public IndexReplicationHandler(Directory indexDir, Callable callback) + throws IOException { this.callback = callback; this.indexDir = indexDir; currentRevisionFiles = null; @@ -192,49 +188,58 @@ public class IndexReplicationHandler implements ReplicationHandler { currentVersion = IndexRevision.revisionVersion(commit); final InfoStream infoStream = InfoStream.getDefault(); if (infoStream.isEnabled(INFO_STREAM_COMPONENT)) { - infoStream.message(INFO_STREAM_COMPONENT, "constructor(): currentVersion=" + currentVersion - + " currentRevisionFiles=" + currentRevisionFiles); + infoStream.message( + INFO_STREAM_COMPONENT, + "constructor(): currentVersion=" + + currentVersion + + " currentRevisionFiles=" + + currentRevisionFiles); infoStream.message(INFO_STREAM_COMPONENT, "constructor(): commit=" + commit); } } } - + @Override public String currentVersion() { return currentVersion; } - + @Override - public Map> currentRevisionFiles() { + public Map> currentRevisionFiles() { return currentRevisionFiles; } - + @Override - public void revisionReady(String version, Map> revisionFiles, - Map> copiedFiles, Map sourceDirectory) throws IOException { + public void revisionReady( + String version, + Map> revisionFiles, + Map> copiedFiles, + Map sourceDirectory) + throws IOException { if (revisionFiles.size() > 1) { - throw new IllegalArgumentException("this handler handles only a single source; got " + revisionFiles.keySet()); + throw new IllegalArgumentException( + "this handler handles only a single source; got " + revisionFiles.keySet()); } - + Directory clientDir = sourceDirectory.values().iterator().next(); List files = copiedFiles.values().iterator().next(); String segmentsFile = 
getSegmentsFile(files, false); String pendingSegmentsFile = "pending_" + segmentsFile; - + boolean success = false; try { // copy files from the client to index directory copyFiles(clientDir, indexDir, files); - + // fsync all copied files (except segmentsFile) indexDir.sync(files); - + // now copy and fsync segmentsFile as pending, then rename (simulating lucene commit) indexDir.copyFrom(clientDir, segmentsFile, pendingSegmentsFile, IOContext.READONCE); indexDir.sync(Collections.singletonList(pendingSegmentsFile)); indexDir.rename(pendingSegmentsFile, segmentsFile); indexDir.syncMetaData(); - + success = true; } finally { if (!success) { @@ -247,12 +252,16 @@ public class IndexReplicationHandler implements ReplicationHandler { // all files have been successfully copied + sync'd. update the handler's state currentRevisionFiles = revisionFiles; currentVersion = version; - + if (infoStream.isEnabled(INFO_STREAM_COMPONENT)) { - infoStream.message(INFO_STREAM_COMPONENT, "revisionReady(): currentVersion=" + currentVersion - + " currentRevisionFiles=" + currentRevisionFiles); + infoStream.message( + INFO_STREAM_COMPONENT, + "revisionReady(): currentVersion=" + + currentVersion + + " currentRevisionFiles=" + + currentRevisionFiles); } - + // Cleanup the index directory from old and unused index files. // NOTE: we don't use IndexWriter.deleteUnusedFiles here since it may have // side-effects, e.g. if it hits sudden IO errors while opening the index @@ -278,5 +287,4 @@ public class IndexReplicationHandler implements ReplicationHandler { } this.infoStream = infoStream; } - } diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/IndexRevision.java b/lucene/replicator/src/java/org/apache/lucene/replicator/IndexRevision.java index 14f9b5db469..c649e4aa7a5 100644 --- a/lucene/replicator/src/java/org/apache/lucene/replicator/IndexRevision.java +++ b/lucene/replicator/src/java/org/apache/lucene/replicator/IndexRevision.java @@ -23,7 +23,6 @@ import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; - import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexDeletionPolicy; import org.apache.lucene.index.IndexWriter; @@ -33,45 +32,44 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; /** - * A {@link Revision} of a single index files which comprises the list of files - * that are part of the current {@link IndexCommit}. To ensure the files are not - * deleted by {@link IndexWriter} for as long as this revision stays alive (i.e. - * until {@link #release()}), the current commit point is snapshotted, using - * {@link SnapshotDeletionPolicy} (this means that the given writer's - * {@link IndexWriterConfig#getIndexDeletionPolicy() config} should return - * {@link SnapshotDeletionPolicy}). - *

- * When this revision is {@link #release() released}, it releases the obtained - * snapshot as well as calls {@link IndexWriter#deleteUnusedFiles()} so that the - * snapshotted files are deleted (if they are no longer needed). - * + * A {@link Revision} of a single index, which comprises the list of files that are part of the + * current {@link IndexCommit}. To ensure the files are not deleted by {@link IndexWriter} for as + * long as this revision stays alive (i.e. until {@link #release()}), the current commit point is + * snapshotted, using {@link SnapshotDeletionPolicy} (this means that the given writer's {@link + * IndexWriterConfig#getIndexDeletionPolicy() config} should return {@link SnapshotDeletionPolicy}). + * + *

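Since IndexRevision insists that the writer's configured deletion policy is a SnapshotDeletionPolicy, the writer has to be set up accordingly before any revision can be taken. A minimal sketch, where the index path and the analyzer choice are assumptions:

    import java.nio.file.Paths;
    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
    import org.apache.lucene.index.SnapshotDeletionPolicy;
    import org.apache.lucene.replicator.IndexRevision;
    import org.apache.lucene.replicator.Revision;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;

    class RevisionSketch {
      static Revision snapshotCurrentCommit() throws Exception {
        Directory dir = FSDirectory.open(Paths.get("/path/to/index")); // illustrative path
        IndexWriterConfig config = new IndexWriterConfig(new StandardAnalyzer());
        config.setIndexDeletionPolicy(
            new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()));
        IndexWriter writer = new IndexWriter(dir, config);
        writer.commit(); // IndexRevision uses the last commit, so at least one must exist
        return new IndexRevision(writer); // snapshots that commit until release()
      }
    }

Calling release() on the returned revision drops the snapshot and lets the writer delete the files if nothing else needs them, as the javadoc above describes.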
    When this revision is {@link #release() released}, it releases the obtained snapshot as well + * as calls {@link IndexWriter#deleteUnusedFiles()} so that the snapshotted files are deleted (if + * they are no longer needed). + * * @lucene.experimental */ public class IndexRevision implements Revision { - + private static final int RADIX = 16; private static final String SOURCE = "index"; - + private final IndexWriter writer; private final IndexCommit commit; private final SnapshotDeletionPolicy sdp; private final String version; - private final Map> sourceFiles; - + private final Map> sourceFiles; + // returns a RevisionFile with some metadata private static RevisionFile newRevisionFile(String file, Directory dir) throws IOException { RevisionFile revFile = new RevisionFile(file); revFile.size = dir.fileLength(file); return revFile; } - + /** Returns a singleton map of the revision files from the given {@link IndexCommit}. */ - public static Map> revisionFiles(IndexCommit commit) throws IOException { + public static Map> revisionFiles(IndexCommit commit) + throws IOException { Collection commitFiles = commit.getFileNames(); List revisionFiles = new ArrayList<>(commitFiles.size()); String segmentsFile = commit.getSegmentsFileName(); Directory dir = commit.getDirectory(); - + for (String file : commitFiles) { if (!file.equals(segmentsFile)) { revisionFiles.add(newRevisionFile(file, dir)); @@ -80,19 +78,15 @@ public class IndexRevision implements Revision { revisionFiles.add(newRevisionFile(segmentsFile, dir)); // segments_N must be last return Collections.singletonMap(SOURCE, revisionFiles); } - - /** - * Returns a String representation of a revision's version from the given - * {@link IndexCommit}. - */ + + /** Returns a String representation of a revision's version from the given {@link IndexCommit}. */ public static String revisionVersion(IndexCommit commit) { return Long.toString(commit.getGeneration(), RADIX); } - + /** - * Constructor over the given {@link IndexWriter}. Uses the last - * {@link IndexCommit} found in the {@link Directory} managed by the given - * writer. + * Constructor over the given {@link IndexWriter}. Uses the last {@link IndexCommit} found in the + * {@link Directory} managed by the given writer. */ public IndexRevision(IndexWriter writer) throws IOException { IndexDeletionPolicy delPolicy = writer.getConfig().getIndexDeletionPolicy(); @@ -105,45 +99,44 @@ public class IndexRevision implements Revision { this.version = revisionVersion(commit); this.sourceFiles = revisionFiles(commit); } - + @Override public int compareTo(String version) { long gen = Long.parseLong(version, RADIX); long commitGen = commit.getGeneration(); return commitGen < gen ? -1 : (commitGen > gen ? 
1 : 0); } - + @Override public int compareTo(Revision o) { IndexRevision other = (IndexRevision) o; return commit.compareTo(other.commit); } - + @Override public String getVersion() { return version; } - + @Override - public Map> getSourceFiles() { + public Map> getSourceFiles() { return sourceFiles; } - + @Override public InputStream open(String source, String fileName) throws IOException { assert source.equals(SOURCE) : "invalid source; expected=" + SOURCE + " got=" + source; return new IndexInputInputStream(commit.getDirectory().openInput(fileName, IOContext.READONCE)); } - + @Override public void release() throws IOException { sdp.release(commit); writer.deleteUnusedFiles(); } - + @Override public String toString() { return "IndexRevision version=" + version + " files=" + sourceFiles; } - } diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/LocalReplicator.java b/lucene/replicator/src/java/org/apache/lucene/replicator/LocalReplicator.java index bab5083a8f4..fe519d07113 100644 --- a/lucene/replicator/src/java/org/apache/lucene/replicator/LocalReplicator.java +++ b/lucene/replicator/src/java/org/apache/lucene/replicator/LocalReplicator.java @@ -23,39 +23,35 @@ import java.util.HashMap; import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.lucene.store.AlreadyClosedException; /** - * A {@link Replicator} implementation for use by the side that publishes - * {@link Revision}s, as well for clients to {@link #checkForUpdate(String) - * check for updates}. When a client needs to be updated, it is returned a - * {@link SessionToken} through which it can - * {@link #obtainFile(String, String, String) obtain} the files of that - * revision. As long as a revision is being replicated, this replicator - * guarantees that it will not be {@link Revision#release() released}. - *

- * Replication sessions expire by default after - * {@link #DEFAULT_SESSION_EXPIRATION_THRESHOLD}, and the threshold can be - * configured through {@link #setExpirationThreshold(long)}. - * + * A {@link Replicator} implementation for use by the side that publishes {@link Revision}s, as well + * as for clients to {@link #checkForUpdate(String) check for updates}. When a client needs to be + * updated, it is returned a {@link SessionToken} through which it can {@link #obtainFile(String, + * String, String) obtain} the files of that revision. As long as a revision is being replicated, + * this replicator guarantees that it will not be {@link Revision#release() released}. + * + *

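On the publishing side, the usual pattern is one publish call per commit; LocalReplicator then hands sessions to clients and releases the previous revision once nothing references it. A sketch, assuming the writer was configured with a SnapshotDeletionPolicy as sketched earlier; the five-minute threshold is an arbitrary example:

    import java.io.IOException;
    import java.util.concurrent.TimeUnit;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.replicator.IndexRevision;
    import org.apache.lucene.replicator.LocalReplicator;

    class PublishSketch {
      static void publishCommit(LocalReplicator replicator, IndexWriter writer) throws IOException {
        // optional: expire idle replication sessions faster than the 30-minute default
        replicator.setExpirationThreshold(TimeUnit.MINUTES.toMillis(5));
        writer.commit();
        // the replicator takes ownership of the revision and releases the
        // previous one once no session still references it
        replicator.publish(new IndexRevision(writer));
      }
    }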
    Replication sessions expire by default after {@link #DEFAULT_SESSION_EXPIRATION_THRESHOLD}, + * and the threshold can be configured through {@link #setExpirationThreshold(long)}. + * * @lucene.experimental */ public class LocalReplicator implements Replicator { - + private static class RefCountedRevision { private final AtomicInteger refCount = new AtomicInteger(1); public final Revision revision; - + public RefCountedRevision(Revision revision) { this.revision = revision; } - + public void decRef() throws IOException { if (refCount.get() <= 0) { throw new IllegalStateException("this revision is already released"); } - + final int rc = refCount.decrementAndGet(); if (rc == 0) { boolean success = false; @@ -69,48 +65,49 @@ public class LocalReplicator implements Replicator { } } } else if (rc < 0) { - throw new IllegalStateException("too many decRef calls: refCount is " + rc + " after decrement"); + throw new IllegalStateException( + "too many decRef calls: refCount is " + rc + " after decrement"); } } - + public void incRef() { refCount.incrementAndGet(); } - } - + private static class ReplicationSession { public final SessionToken session; public final RefCountedRevision revision; private volatile long lastAccessTime; - + ReplicationSession(SessionToken session, RefCountedRevision revision) { this.session = session; this.revision = revision; lastAccessTime = TimeUnit.MILLISECONDS.convert(System.nanoTime(), TimeUnit.NANOSECONDS); } - + boolean isExpired(long expirationThreshold) { - return lastAccessTime < (TimeUnit.MILLISECONDS.convert(System.nanoTime(), - TimeUnit.NANOSECONDS) - expirationThreshold); + return lastAccessTime + < (TimeUnit.MILLISECONDS.convert(System.nanoTime(), TimeUnit.NANOSECONDS) + - expirationThreshold); } - + void markAccessed() { lastAccessTime = TimeUnit.MILLISECONDS.convert(System.nanoTime(), TimeUnit.NANOSECONDS); } } - + /** Threshold for expiring inactive sessions. Defaults to 30 minutes. */ public static final long DEFAULT_SESSION_EXPIRATION_THRESHOLD = 1000 * 60 * 30; - + private long expirationThresholdMilllis = LocalReplicator.DEFAULT_SESSION_EXPIRATION_THRESHOLD; - + private volatile RefCountedRevision currentRevision; private volatile boolean closed = false; - + private final AtomicInteger sessionToken = new AtomicInteger(0); private final Map sessions = new HashMap<>(); - + private void checkExpiredSessions() throws IOException { // make a "to-delete" list so we don't risk deleting from the map while iterating it final ArrayList toExpire = new ArrayList<>(); @@ -123,7 +120,7 @@ public class LocalReplicator implements Replicator { releaseSession(token.session.id); } } - + private void releaseSession(String sessionID) throws IOException { ReplicationSession session = sessions.remove(sessionID); // if we're called concurrently by close() and release(), could be that one @@ -132,35 +129,36 @@ public class LocalReplicator implements Replicator { session.revision.decRef(); } } - + /** Ensure that replicator is still open, or throw {@link AlreadyClosedException} otherwise. 
*/ protected final synchronized void ensureOpen() { if (closed) { throw new AlreadyClosedException("This replicator has already been closed"); } } - + @Override public synchronized SessionToken checkForUpdate(String currentVersion) { ensureOpen(); if (currentRevision == null) { // no published revisions yet return null; } - + if (currentVersion != null && currentRevision.revision.compareTo(currentVersion) <= 0) { // currentVersion is newer or equal to latest published revision return null; } - + // currentVersion is either null or older than latest published revision currentRevision.incRef(); final String sessionID = Integer.toString(sessionToken.incrementAndGet()); final SessionToken sessionToken = new SessionToken(sessionID, currentRevision.revision); - final ReplicationSession timedSessionToken = new ReplicationSession(sessionToken, currentRevision); + final ReplicationSession timedSessionToken = + new ReplicationSession(sessionToken, currentRevision); sessions.put(sessionID, timedSessionToken); return sessionToken; } - + @Override public synchronized void close() throws IOException { if (!closed) { @@ -172,18 +170,19 @@ public class LocalReplicator implements Replicator { closed = true; } } - + /** * Returns the expiration threshold. - * + * * @see #setExpirationThreshold(long) */ public long getExpirationThreshold() { return expirationThresholdMilllis; } - + @Override - public synchronized InputStream obtainFile(String sessionID, String source, String fileName) throws IOException { + public synchronized InputStream obtainFile(String sessionID, String source, String fileName) + throws IOException { ensureOpen(); ReplicationSession session = sessions.get(sessionID); if (session != null && session.isExpired(expirationThresholdMilllis)) { @@ -192,13 +191,18 @@ public class LocalReplicator implements Replicator { } // session either previously expired, or we just expired it if (session == null) { - throw new SessionExpiredException("session (" + sessionID + ") expired while obtaining file: source=" + source - + " file=" + fileName); + throw new SessionExpiredException( + "session (" + + sessionID + + ") expired while obtaining file: source=" + + source + + " file=" + + fileName); } sessions.get(sessionID).markAccessed(); return session.revision.revision.open(source, fileName); } - + @Override public synchronized void publish(Revision revision) throws IOException { ensureOpen(); @@ -209,40 +213,39 @@ public class LocalReplicator implements Replicator { revision.release(); return; } - + if (compare < 0) { revision.release(); - throw new IllegalArgumentException("Cannot publish an older revision: rev=" + revision + " current=" - + currentRevision); - } + throw new IllegalArgumentException( + "Cannot publish an older revision: rev=" + revision + " current=" + currentRevision); + } } - + // swap revisions final RefCountedRevision oldRevision = currentRevision; currentRevision = new RefCountedRevision(revision); if (oldRevision != null) { oldRevision.decRef(); } - + // check for expired sessions checkExpiredSessions(); } - + @Override public synchronized void release(String sessionID) throws IOException { ensureOpen(); releaseSession(sessionID); } - + /** - * Modify session expiration time - if a replication session is inactive that - * long it is automatically expired, and further attempts to operate within - * this session will throw a {@link SessionExpiredException}. 
+ * Modify session expiration time - if a replication session is inactive that long it is + * automatically expired, and further attempts to operate within this session will throw a {@link + * SessionExpiredException}. */ public synchronized void setExpirationThreshold(long expirationThreshold) throws IOException { ensureOpen(); this.expirationThresholdMilllis = expirationThreshold; checkExpiredSessions(); } - } diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/PerSessionDirectoryFactory.java b/lucene/replicator/src/java/org/apache/lucene/replicator/PerSessionDirectoryFactory.java index 2cdc1ea3286..adb351ff901 100644 --- a/lucene/replicator/src/java/org/apache/lucene/replicator/PerSessionDirectoryFactory.java +++ b/lucene/replicator/src/java/org/apache/lucene/replicator/PerSessionDirectoryFactory.java @@ -19,28 +19,26 @@ package org.apache.lucene.replicator; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; - import org.apache.lucene.replicator.ReplicationClient.SourceDirectoryFactory; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FSDirectory; import org.apache.lucene.util.IOUtils; /** - * A {@link SourceDirectoryFactory} which returns {@link FSDirectory} under a - * dedicated session directory. When a session is over, the entire directory is - * deleted. - * + * A {@link SourceDirectoryFactory} which returns {@link FSDirectory} under a dedicated session + * directory. When a session is over, the entire directory is deleted. + * * @lucene.experimental */ public class PerSessionDirectoryFactory implements SourceDirectoryFactory { - + private final Path workDir; - + /** Constructor with the given sources mapping. */ public PerSessionDirectoryFactory(Path workDir) { this.workDir = workDir; } - + @Override public Directory getDirectory(String sessionID, String source) throws IOException { Path sessionDir = workDir.resolve(sessionID); @@ -49,7 +47,7 @@ public class PerSessionDirectoryFactory implements SourceDirectoryFactory { Files.createDirectories(sourceDir); return FSDirectory.open(sourceDir); } - + @Override public void cleanupSession(String sessionID) throws IOException { if (sessionID.isEmpty()) { // protect against deleting workDir entirely! @@ -57,5 +55,4 @@ public class PerSessionDirectoryFactory implements SourceDirectoryFactory { } IOUtils.rm(workDir.resolve(sessionID)); } - } diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/ReplicationClient.java b/lucene/replicator/src/java/org/apache/lucene/replicator/ReplicationClient.java index 2773eb120c1..54626051a6f 100644 --- a/lucene/replicator/src/java/org/apache/lucene/replicator/ReplicationClient.java +++ b/lucene/replicator/src/java/org/apache/lucene/replicator/ReplicationClient.java @@ -32,7 +32,6 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; - import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; @@ -42,29 +41,28 @@ import org.apache.lucene.util.InfoStream; import org.apache.lucene.util.ThreadInterruptedException; /** - * A client which monitors and obtains new revisions from a {@link Replicator}. - * It can be used to either periodically check for updates by invoking - * {@link #startUpdateThread}, or manually by calling {@link #updateNow()}. - *

    - * Whenever a new revision is available, the {@link #requiredFiles(Map)} are - * copied to the {@link Directory} specified by {@link PerSessionDirectoryFactory} and - * a handler is notified. - * + * A client which monitors and obtains new revisions from a {@link Replicator}. It can be used to + * either periodically check for updates by invoking {@link #startUpdateThread}, or manually by + * calling {@link #updateNow()}. + * + *

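Wiring the consuming side together typically looks like the sketch below; the paths, the poll interval, and the body of the reopen callback are assumptions (the callback is what IndexReplicationHandler invokes once a new commit is in place):

    import java.nio.file.Paths;
    import java.util.concurrent.Callable;
    import org.apache.lucene.replicator.IndexReplicationHandler;
    import org.apache.lucene.replicator.PerSessionDirectoryFactory;
    import org.apache.lucene.replicator.ReplicationClient;
    import org.apache.lucene.replicator.Replicator;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;

    class ClientSketch {
      static ReplicationClient startClient(Replicator replicator) throws Exception {
        Directory indexDir = FSDirectory.open(Paths.get("/path/to/replica")); // illustrative
        Callable<Boolean> reopenCallback =
            () -> {
              // e.g. refresh a SearcherManager over indexDir here
              return true;
            };
        ReplicationClient client =
            new ReplicationClient(
                replicator, // a LocalReplicator or HttpReplicator
                new IndexReplicationHandler(indexDir, reopenCallback),
                new PerSessionDirectoryFactory(Paths.get("/path/to/work"))); // illustrative
        client.startUpdateThread(60_000, "replication-update"); // poll every minute
        return client;
      }
    }

Shutdown is the reverse: stopUpdateThread() followed by close(); updateNow() runs a single round without the update thread.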
    Whenever a new revision is available, the {@link #requiredFiles(Map)} are copied to the {@link + * Directory} specified by {@link PerSessionDirectoryFactory} and a handler is notified. + * * @lucene.experimental */ public class ReplicationClient implements Closeable { - + private class ReplicationThread extends Thread { - + private final long interval; - + // client uses this to stop us final CountDownLatch stop = new CountDownLatch(1); - + public ReplicationThread(long interval) { this.interval = interval; } - + @SuppressWarnings("synthetic-access") @Override public void run() { @@ -79,7 +77,7 @@ public class ReplicationClient implements Closeable { updateLock.unlock(); } time = System.currentTimeMillis() - time; - + // adjust timeout to compensate the time spent doing the replication. final long timeout = interval - time; if (timeout > 0) { @@ -99,112 +97,112 @@ public class ReplicationClient implements Closeable { } } } - } - + /** Handler for revisions obtained by the client. */ public static interface ReplicationHandler { - + /** Returns the current revision files held by the handler. */ - public Map> currentRevisionFiles(); - + public Map> currentRevisionFiles(); + /** Returns the current revision version held by the handler. */ public String currentVersion(); - + /** - * Called when a new revision was obtained and is available (i.e. all needed - * files were successfully copied). - * - * @param version - * the version of the {@link Revision} that was copied - * @param revisionFiles - * the files contained by this {@link Revision} - * @param copiedFiles - * the files that were actually copied - * @param sourceDirectory - * a mapping from a source of files to the {@link Directory} they - * were copied into + * Called when a new revision was obtained and is available (i.e. all needed files were + * successfully copied). + * + * @param version the version of the {@link Revision} that was copied + * @param revisionFiles the files contained by this {@link Revision} + * @param copiedFiles the files that were actually copied + * @param sourceDirectory a mapping from a source of files to the {@link Directory} they were + * copied into */ - public void revisionReady(String version, Map> revisionFiles, - Map> copiedFiles, Map sourceDirectory) throws IOException; + public void revisionReady( + String version, + Map> revisionFiles, + Map> copiedFiles, + Map sourceDirectory) + throws IOException; } - + /** - * Resolves a session and source into a {@link Directory} to use for copying - * the session files to. + * Resolves a session and source into a {@link Directory} to use for copying the session files to. */ public static interface SourceDirectoryFactory { - + /** - * Called to denote that the replication actions for this session were finished and the directory is no longer needed. + * Called to denote that the replication actions for this session were finished and the + * directory is no longer needed. */ public void cleanupSession(String sessionID) throws IOException; - + /** - * Returns the {@link Directory} to use for the given session and source. - * Implementations may e.g. return different directories for different - * sessions, or the same directory for all sessions. In that case, it is - * advised to clean the directory before it is used for a new session. - * + * Returns the {@link Directory} to use for the given session and source. Implementations may + * e.g. return different directories for different sessions, or the same directory for all + * sessions. 
In that case, it is advised to clean the directory before it is used for a new + * session. + * * @see #cleanupSession(String) */ public Directory getDirectory(String sessionID, String source) throws IOException; - } - + /** The component name to use with {@link InfoStream#isEnabled(String)}. */ public static final String INFO_STREAM_COMPONENT = "ReplicationThread"; - + private final Replicator replicator; private final ReplicationHandler handler; private final SourceDirectoryFactory factory; private final byte[] copyBuffer = new byte[16384]; private final Lock updateLock = new ReentrantLock(); - + private volatile ReplicationThread updateThread; private volatile boolean closed = false; private volatile InfoStream infoStream = InfoStream.getDefault(); - + /** * Constructor. - * + * * @param replicator the {@link Replicator} used for checking for updates * @param handler notified when new revisions are ready - * @param factory returns a {@link Directory} for a given source and session + * @param factory returns a {@link Directory} for a given source and session */ - public ReplicationClient(Replicator replicator, ReplicationHandler handler, SourceDirectoryFactory factory) { + public ReplicationClient( + Replicator replicator, ReplicationHandler handler, SourceDirectoryFactory factory) { this.replicator = replicator; this.handler = handler; this.factory = factory; } - + private void copyBytes(IndexOutput out, InputStream in) throws IOException { int numBytes; while ((numBytes = in.read(copyBuffer)) > 0) { out.writeBytes(copyBuffer, 0, numBytes); } } - + private void doUpdate() throws IOException { SessionToken session = null; - final Map sourceDirectory = new HashMap<>(); - final Map> copiedFiles = new HashMap<>(); + final Map sourceDirectory = new HashMap<>(); + final Map> copiedFiles = new HashMap<>(); boolean notify = false; try { final String version = handler.currentVersion(); session = replicator.checkForUpdate(version); if (infoStream.isEnabled(INFO_STREAM_COMPONENT)) { - infoStream.message(INFO_STREAM_COMPONENT, "doUpdate(): handlerVersion=" + version + " session=" + session); + infoStream.message( + INFO_STREAM_COMPONENT, "doUpdate(): handlerVersion=" + version + " session=" + session); } if (session == null) { // already up to date return; } - Map> requiredFiles = requiredFiles(session.sourceFiles); + Map> requiredFiles = requiredFiles(session.sourceFiles); if (infoStream.isEnabled(INFO_STREAM_COMPONENT)) { infoStream.message(INFO_STREAM_COMPONENT, "doUpdate(): requiredFiles=" + requiredFiles); } - for (Entry> e : requiredFiles.entrySet()) { + for (Entry> e : requiredFiles.entrySet()) { String source = e.getKey(); Directory dir = factory.getDirectory(session.id, source); sourceDirectory.put(source, dir); @@ -214,7 +212,9 @@ public class ReplicationClient implements Closeable { if (closed) { // if we're closed, abort file copy if (infoStream.isEnabled(INFO_STREAM_COMPONENT)) { - infoStream.message(INFO_STREAM_COMPONENT, "doUpdate(): detected client was closed); abort file copy"); + infoStream.message( + INFO_STREAM_COMPONENT, + "doUpdate(): detected client was closed); abort file copy"); } return; } @@ -245,12 +245,12 @@ public class ReplicationClient implements Closeable { } } } - + // notify outside the try-finally above, so the session is released sooner. // the handler may take time to finish acting on the copied files, but the // session itself is no longer needed. 
try { - if (notify && !closed ) { // no use to notify if we are closed already + if (notify && !closed) { // no use to notify if we are closed already handler.revisionReady(session.version, session.sourceFiles, copiedFiles, sourceDirectory); } } finally { @@ -260,55 +260,56 @@ public class ReplicationClient implements Closeable { } } } - + /** Throws {@link AlreadyClosedException} if the client has already been closed. */ protected final void ensureOpen() { if (closed) { throw new AlreadyClosedException("this update client has already been closed"); } } - + /** - * Called when an exception is hit by the replication thread. The default - * implementation prints the full stacktrace to the {@link InfoStream} set in - * {@link #setInfoStream(InfoStream)}, or the {@link InfoStream#getDefault() - * default} one. You can override to log the exception elswhere. - *

- * NOTE: if you override this method to throw the exception further, - * the replication thread will be terminated. The only way to restart it is to - * call {@link #stopUpdateThread()} followed by - * {@link #startUpdateThread(long, String)}. + * Called when an exception is hit by the replication thread. The default implementation prints + * the full stacktrace to the {@link InfoStream} set in {@link #setInfoStream(InfoStream)}, or the + * {@link InfoStream#getDefault() default} one. You can override to log the exception elsewhere. + * + *

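If InfoStream logging is not enough, the method can be overridden; the sketch below keeps the default logging and escalates only errors. The wrapper class is illustrative, and note the caveat in the paragraph that follows: anything rethrown here terminates the update thread.

    import org.apache.lucene.replicator.ReplicationClient;
    import org.apache.lucene.replicator.ReplicationClient.ReplicationHandler;
    import org.apache.lucene.replicator.ReplicationClient.SourceDirectoryFactory;
    import org.apache.lucene.replicator.Replicator;

    class EscalatingClientSketch {
      static ReplicationClient newClient(
          Replicator replicator, ReplicationHandler handler, SourceDirectoryFactory factory) {
        return new ReplicationClient(replicator, handler, factory) {
          @Override
          protected void handleUpdateException(Throwable t) {
            super.handleUpdateException(t); // keep the default InfoStream stack trace
            if (t instanceof Error) {
              throw (Error) t; // terminates the update thread; restart it explicitly
            }
          }
        };
      }
    }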
    NOTE: if you override this method to throw the exception further, the replication + * thread will be terminated. The only way to restart it is to call {@link #stopUpdateThread()} + * followed by {@link #startUpdateThread(long, String)}. */ protected void handleUpdateException(Throwable t) { final StringWriter sw = new StringWriter(); t.printStackTrace(new PrintWriter(sw)); if (infoStream.isEnabled(INFO_STREAM_COMPONENT)) { - infoStream.message(INFO_STREAM_COMPONENT, "an error occurred during revision update: " + sw.toString()); + infoStream.message( + INFO_STREAM_COMPONENT, "an error occurred during revision update: " + sw.toString()); } } - + /** - * Returns the files required for replication. By default, this method returns - * all files that exist in the new revision, but not in the handler. + * Returns the files required for replication. By default, this method returns all files that + * exist in the new revision, but not in the handler. */ - protected Map> requiredFiles(Map> newRevisionFiles) { - Map> handlerRevisionFiles = handler.currentRevisionFiles(); + protected Map> requiredFiles( + Map> newRevisionFiles) { + Map> handlerRevisionFiles = handler.currentRevisionFiles(); if (handlerRevisionFiles == null) { return newRevisionFiles; } - - Map> requiredFiles = new HashMap<>(); - for (Entry> e : handlerRevisionFiles.entrySet()) { + + Map> requiredFiles = new HashMap<>(); + for (Entry> e : handlerRevisionFiles.entrySet()) { // put the handler files in a Set, for faster contains() checks later Set handlerFiles = new HashSet<>(); for (RevisionFile file : e.getValue()) { handlerFiles.add(file.fileName); } - + // make sure to preserve revisionFiles order ArrayList res = new ArrayList<>(); String source = e.getKey(); - assert newRevisionFiles.containsKey(source) : "source not found in newRevisionFiles: " + newRevisionFiles; + assert newRevisionFiles.containsKey(source) + : "source not found in newRevisionFiles: " + newRevisionFiles; for (RevisionFile file : newRevisionFiles.get(source)) { if (!handlerFiles.contains(file.fileName)) { res.add(file); @@ -316,10 +317,10 @@ public class ReplicationClient implements Closeable { } requiredFiles.put(source, res); } - + return requiredFiles; } - + @Override public synchronized void close() { if (!closed) { @@ -327,13 +328,12 @@ public class ReplicationClient implements Closeable { closed = true; } } - + /** - * Start the update thread with the specified interval in milliseconds. For - * debugging purposes, you can optionally set the name to set on - * {@link Thread#setName(String)}. If you pass {@code null}, a default name - * will be set. - * + * Start the update thread with the specified interval in milliseconds. For debugging purposes, + * you can optionally set the name to set on {@link Thread#setName(String)}. If you pass {@code + * null}, a default name will be set. + * * @throws IllegalStateException if the thread has already been started */ public synchronized void startUpdateThread(long intervalMillis, String threadName) { @@ -349,10 +349,10 @@ public class ReplicationClient implements Closeable { // we rely on isAlive to return true in isUpdateThreadAlive, assert to be on the safe side assert updateThread.isAlive() : "updateThread started but not alive?"; } - + /** - * Stop the update thread. If the update thread is not running, silently does - * nothing. This method returns after the update thread has stopped. + * Stop the update thread. If the update thread is not running, silently does nothing. 
This method + * returns after the update thread has stopped. */ public synchronized void stopUpdateThread() { if (updateThread != null) { @@ -369,18 +369,17 @@ updateThread = null; } } - + /** - * Returns true if the update thread is alive. The update thread is alive if - * it has been {@link #startUpdateThread(long, String) started} and not - * {@link #stopUpdateThread() stopped}, as well as didn't hit an error which - * caused it to terminate (i.e. {@link #handleUpdateException(Throwable)} - * threw the exception further). + * Returns true if the update thread is alive. The update thread is alive if it has been {@link + * #startUpdateThread(long, String) started} and not {@link #stopUpdateThread() stopped}, and has + * not hit an error which caused it to terminate (i.e. {@link + * #handleUpdateException(Throwable)} threw the exception further). */ public synchronized boolean isUpdateThreadAlive() { return updateThread != null && updateThread.isAlive(); } - + @Override public String toString() { String res = "ReplicationClient"; @@ -389,10 +388,9 @@ } return res; } - + /** - * Executes the update operation immediately, irregardless if an update thread - * is running or not. + * Executes the update operation immediately, regardless of whether an update thread is running or not. */ public void updateNow() throws IOException { ensureOpen(); @@ -411,5 +409,4 @@ } this.infoStream = infoStream; } - } diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/Replicator.java b/lucene/replicator/src/java/org/apache/lucene/replicator/Replicator.java index 971776db333..32fb68eb4d2 100644 --- a/lucene/replicator/src/java/org/apache/lucene/replicator/Replicator.java +++ b/lucene/replicator/src/java/org/apache/lucene/replicator/Replicator.java @@ -21,59 +21,48 @@ import java.io.IOException; import java.io.InputStream; /** - * An interface for replicating files. Allows a producer to - * {@link #publish(Revision) publish} {@link Revision}s and consumers to - * {@link #checkForUpdate(String) check for updates}. When a client needs to be - * updated, it is given a {@link SessionToken} through which it can - * {@link #obtainFile(String, String, String) obtain} the files of that - * revision. After the client has finished obtaining all the files, it should - * {@link #release(String) release} the given session, so that the files can be - * reclaimed if they are not needed anymore. - *

    - * A client is always updated to the newest revision available. That is, if a - * client is on revision r1 and revisions r2 and r3 - * were published, then when the cllient will next check for update, it will - * receive r3. - * + * An interface for replicating files. Allows a producer to {@link #publish(Revision) publish} + * {@link Revision}s and consumers to {@link #checkForUpdate(String) check for updates}. When a + * client needs to be updated, it is given a {@link SessionToken} through which it can {@link + * #obtainFile(String, String, String) obtain} the files of that revision. After the client has + * finished obtaining all the files, it should {@link #release(String) release} the given session, + * so that the files can be reclaimed if they are not needed anymore. + * + *

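Driving one update round by hand against this interface looks roughly like the following sketch (the wrapper class is illustrative and the actual file copy is left as a comment):

    import java.io.InputStream;
    import java.util.List;
    import java.util.Map;
    import org.apache.lucene.replicator.Replicator;
    import org.apache.lucene.replicator.RevisionFile;
    import org.apache.lucene.replicator.SessionToken;

    class ManualUpdateSketch {
      static void updateOnce(Replicator replicator, String currentVersion) throws Exception {
        SessionToken session = replicator.checkForUpdate(currentVersion);
        if (session == null) {
          return; // already on the newest published revision
        }
        try {
          for (Map.Entry<String, List<RevisionFile>> e : session.sourceFiles.entrySet()) {
            for (RevisionFile file : e.getValue()) {
              try (InputStream in = replicator.obtainFile(session.id, e.getKey(), file.fileName)) {
                // copy 'in' into the local directory for source e.getKey()
              }
            }
          }
        } finally {
          replicator.release(session.id); // lets the revision files be reclaimed
        }
      }
    }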
A client is always updated to the newest revision available. That is, if a client is on + * revision <code>r1</code> and revisions <code>r2</code> and <code>r3</code> were published, then when the + * client next checks for an update, it will receive <code>r3</code>. + * + * @lucene.experimental */ public interface Replicator extends Closeable { - + /** - * Publish a new {@link Revision} for consumption by clients. It is the - * caller's responsibility to verify that the revision files exist and can be - * read by clients. When the revision is no longer needed, it will be - * {@link Revision#release() released} by the replicator. + * Publish a new {@link Revision} for consumption by clients. It is the caller's responsibility to + * verify that the revision files exist and can be read by clients. When the revision is no longer + * needed, it will be {@link Revision#release() released} by the replicator. */ public void publish(Revision revision) throws IOException; - + /** - * Check whether the given version is up-to-date and returns a - * {@link SessionToken} which can be used for fetching the revision files, - * otherwise returns {@code null}. - *

- * NOTE: when the returned session token is no longer needed, you - * should call {@link #release(String)} so that the session resources can be - * reclaimed, including the revision files. + * Checks whether the given version is up-to-date and returns a {@link SessionToken} which can be + * used for fetching the revision files, otherwise returns {@code null}. + * + *

    NOTE: when the returned session token is no longer needed, you should call {@link + * #release(String)} so that the session resources can be reclaimed, including the revision files. */ public SessionToken checkForUpdate(String currVersion) throws IOException; - - /** - * Notify that the specified {@link SessionToken} is no longer needed by the - * caller. - */ + + /** Notify that the specified {@link SessionToken} is no longer needed by the caller. */ public void release(String sessionID) throws IOException; - + /** - * Returns an {@link InputStream} for the requested file and source in the - * context of the given {@link SessionToken#id session}. - *

    - * NOTE: it is the caller's responsibility to close the returned - * stream. - * - * @throws SessionExpiredException if the specified session has already - * expired + * Returns an {@link InputStream} for the requested file and source in the context of the given + * {@link SessionToken#id session}. + * + *

    NOTE: it is the caller's responsibility to close the returned stream. + * + * @throws SessionExpiredException if the specified session has already expired */ - public InputStream obtainFile(String sessionID, String source, String fileName) throws IOException; - + public InputStream obtainFile(String sessionID, String source, String fileName) + throws IOException; } diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/Revision.java b/lucene/replicator/src/java/org/apache/lucene/replicator/Revision.java index c8099a776c6..20c4c8e8454 100644 --- a/lucene/replicator/src/java/org/apache/lucene/replicator/Revision.java +++ b/lucene/replicator/src/java/org/apache/lucene/replicator/Revision.java @@ -20,55 +20,48 @@ import java.io.IOException; import java.io.InputStream; import java.util.List; import java.util.Map; - import org.apache.lucene.store.IndexInput; /** - * A revision comprises lists of files that come from different sources and need - * to be replicated together to e.g. guarantee that all resources are in sync. - * In most cases an application will replicate a single index, and so the - * revision will contain files from a single source. However, some applications - * may require to treat a collection of indexes as a single entity so that the - * files from all sources are replicated together, to guarantee consistency - * between them. For example, an application which indexes facets will need to - * replicate both the search and taxonomy indexes together, to guarantee that - * they match at the client side. - * + * A revision comprises lists of files that come from different sources and need to be replicated + * together to e.g. guarantee that all resources are in sync. In most cases an application will + * replicate a single index, and so the revision will contain files from a single source. However, + * some applications may require to treat a collection of indexes as a single entity so that the + * files from all sources are replicated together, to guarantee consistency between them. For + * example, an application which indexes facets will need to replicate both the search and taxonomy + * indexes together, to guarantee that they match at the client side. + * * @lucene.experimental */ public interface Revision extends Comparable { - + /** - * Compares the revision to the given version string. Behaves like - * {@link Comparable#compareTo(Object)}. + * Compares the revision to the given version string. Behaves like {@link + * Comparable#compareTo(Object)}. */ public int compareTo(String version); - + /** - * Returns a string representation of the version of this revision. The - * version is used by {@link #compareTo(String)} as well as to - * serialize/deserialize revision information. Therefore it must be self - * descriptive as well as be able to identify one revision from another. + * Returns a string representation of the version of this revision. The version is used by {@link + * #compareTo(String)} as well as to serialize/deserialize revision information. Therefore it must + * be self descriptive as well as be able to identify one revision from another. */ public String getVersion(); - + /** - * Returns the files that comprise this revision, as a mapping from a source - * to a list of files. + * Returns the files that comprise this revision, as a mapping from a source to a list of files. */ - public Map> getSourceFiles(); - + public Map> getSourceFiles(); + /** - * Returns an {@link IndexInput} for the given fileName and source. 
It is the - * caller's responsibility to close the {@link IndexInput} when it has been - * consumed. + * Returns an {@link IndexInput} for the given fileName and source. It is the caller's + * responsibility to close the {@link IndexInput} when it has been consumed. */ public InputStream open(String source, String fileName) throws IOException; - + /** - * Called when this revision can be safely released, i.e. where there are no - * more references to it. + * Called when this revision can be safely released, i.e. where there are no more references to + * it. */ public void release() throws IOException; - } diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/RevisionFile.java b/lucene/replicator/src/java/org/apache/lucene/replicator/RevisionFile.java index 2ca52057a4d..101cbde65fa 100644 --- a/lucene/replicator/src/java/org/apache/lucene/replicator/RevisionFile.java +++ b/lucene/replicator/src/java/org/apache/lucene/replicator/RevisionFile.java @@ -17,20 +17,19 @@ package org.apache.lucene.replicator; /** - * Describes a file in a {@link Revision}. A file has a source, which allows a - * single revision to contain files from multiple sources (e.g. multiple - * indexes). - * + * Describes a file in a {@link Revision}. A file has a source, which allows a single revision to + * contain files from multiple sources (e.g. multiple indexes). + * * @lucene.experimental */ public class RevisionFile { - + /** The name of the file. */ public final String fileName; - + /** The size of the file denoted by {@link #fileName}. */ public long size = -1; - + /** Constructor with the given file name. */ public RevisionFile(String fileName) { if (fileName == null || fileName.isEmpty()) { @@ -38,21 +37,20 @@ public class RevisionFile { } this.fileName = fileName; } - + @Override public boolean equals(Object obj) { RevisionFile other = (RevisionFile) obj; return fileName.equals(other.fileName) && size == other.size; } - + @Override public int hashCode() { return fileName.hashCode() ^ (int) (size ^ (size >>> 32)); } - + @Override public String toString() { return "fileName=" + fileName + " size=" + size; } - } diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/SessionExpiredException.java b/lucene/replicator/src/java/org/apache/lucene/replicator/SessionExpiredException.java index d7e2441da85..ab156bf453b 100644 --- a/lucene/replicator/src/java/org/apache/lucene/replicator/SessionExpiredException.java +++ b/lucene/replicator/src/java/org/apache/lucene/replicator/SessionExpiredException.java @@ -19,35 +19,26 @@ package org.apache.lucene.replicator; import java.io.IOException; /** - * Exception indicating that a revision update session was expired due to lack - * of activity. - * + * Exception indicating that a revision update session was expired due to lack of activity. 
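A consumer that copies many files under one session should be prepared for this exception mid-stream. One recovery pattern (a sketch; the wrapper class and method name are illustrative) is to discard the partial session and start a fresh update round:

    import java.io.IOException;
    import java.io.InputStream;
    import org.apache.lucene.replicator.Replicator;
    import org.apache.lucene.replicator.SessionExpiredException;
    import org.apache.lucene.replicator.SessionToken;

    class ExpiryRetrySketch {
      static boolean copyFile(
          Replicator replicator, SessionToken session, String source, String fileName)
          throws IOException {
        try (InputStream in = replicator.obtainFile(session.id, source, fileName)) {
          // consume 'in' (copy it into the local directory)
          return true;
        } catch (SessionExpiredException e) {
          // the session was reclaimed after inactivity: discard partial files and
          // call checkForUpdate(...) again to obtain a fresh SessionToken
          return false;
        }
      }
    }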
+ * * @see LocalReplicator#DEFAULT_SESSION_EXPIRATION_THRESHOLD * @see LocalReplicator#setExpirationThreshold(long) - * * @lucene.experimental */ public class SessionExpiredException extends IOException { - - /** - * @see IOException#IOException(String, Throwable) - */ + + /** @see IOException#IOException(String, Throwable) */ public SessionExpiredException(String message, Throwable cause) { super(message, cause); } - - /** - * @see IOException#IOException(String) - */ + + /** @see IOException#IOException(String) */ public SessionExpiredException(String message) { super(message); } - - /** - * @see IOException#IOException(Throwable) - */ + + /** @see IOException#IOException(Throwable) */ public SessionExpiredException(Throwable cause) { super(cause); } - } diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/SessionToken.java b/lucene/replicator/src/java/org/apache/lucene/replicator/SessionToken.java index 28ab311e62a..ef7ee223c4a 100644 --- a/lucene/replicator/src/java/org/apache/lucene/replicator/SessionToken.java +++ b/lucene/replicator/src/java/org/apache/lucene/replicator/SessionToken.java @@ -26,35 +26,30 @@ import java.util.Map; import java.util.Map.Entry; /** - * Token for a replication session, for guaranteeing that source replicated - * files will be kept safe until the replication completes. - * + * Token for a replication session, for guaranteeing that source replicated files will be kept safe + * until the replication completes. + * * @see Replicator#checkForUpdate(String) * @see Replicator#release(String) * @see LocalReplicator#DEFAULT_SESSION_EXPIRATION_THRESHOLD - * * @lucene.experimental */ public final class SessionToken { - + /** - * ID of this session. - * Should be passed when releasing the session, thereby acknowledging the + * ID of this session. Should be passed when releasing the session, thereby acknowledging the * {@link Replicator Replicator} that this session is no longer in use. + * * @see Replicator#release(String) */ public final String id; - - /** - * @see Revision#getVersion() - */ + + /** @see Revision#getVersion() */ public final String version; - - /** - * @see Revision#getSourceFiles() - */ - public final Map> sourceFiles; - + + /** @see Revision#getSourceFiles() */ + public final Map> sourceFiles; + /** Constructor which deserializes from the given {@link DataInput}. */ public SessionToken(DataInput in) throws IOException { this.id = in.readUTF(); @@ -75,20 +70,20 @@ public final class SessionToken { --numSources; } } - + /** Constructor with the given id and revision. */ public SessionToken(String id, Revision revision) { this.id = id; this.version = revision.getVersion(); this.sourceFiles = revision.getSourceFiles(); } - + /** Serialize the token data for communication between server and client. 
*/ public void serialize(DataOutput out) throws IOException { out.writeUTF(id); out.writeUTF(version); out.writeInt(sourceFiles.size()); - for (Entry> e : sourceFiles.entrySet()) { + for (Entry> e : sourceFiles.entrySet()) { out.writeUTF(e.getKey()); List files = e.getValue(); out.writeInt(files.size()); @@ -98,10 +93,9 @@ public final class SessionToken { } } } - + @Override public String toString() { return "id=" + id + " version=" + version + " files=" + sourceFiles; } - -} \ No newline at end of file +} diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/http/HttpClientBase.java b/lucene/replicator/src/java/org/apache/lucene/replicator/http/HttpClientBase.java index 9a5713b7585..11458ce39e7 100644 --- a/lucene/replicator/src/java/org/apache/lucene/replicator/http/HttpClientBase.java +++ b/lucene/replicator/src/java/org/apache/lucene/replicator/http/HttpClientBase.java @@ -23,7 +23,6 @@ import java.io.ObjectInputStream; import java.io.UnsupportedEncodingException; import java.net.URLEncoder; import java.util.concurrent.Callable; - import org.apache.http.HttpEntity; import org.apache.http.HttpResponse; import org.apache.http.HttpStatus; @@ -41,81 +40,87 @@ import org.apache.lucene.util.SuppressForbidden; /** * Base class for Http clients. - * + * * @lucene.experimental - * */ + */ public abstract class HttpClientBase implements Closeable { - + /** Default connection timeout for this client, in milliseconds. */ public static final int DEFAULT_CONNECTION_TIMEOUT = 1000; - + /** Default socket timeout for this client, in milliseconds. */ public static final int DEFAULT_SO_TIMEOUT = 60000; - + // TODO compression? - + /** The URL stting to execute requests against. */ protected final String url; - + private volatile boolean closed = false; - + private final CloseableHttpClient httpc; private final RequestConfig defaultConfig; - + /** - * @param conMgr - * connection manager to use for this http client. NOTE:The - * provided {@link HttpClientConnectionManager} will not be - * {@link HttpClientConnectionManager#shutdown()} by this class. - * @param defaultConfig - * the default {@link RequestConfig} to set on the client. If - * {@code null} a default config is created w/ the default connection - * and socket timeouts. + * @param conMgr connection manager to use for this http client. NOTE:The provided {@link + * HttpClientConnectionManager} will not be {@link HttpClientConnectionManager#shutdown()} by + * this class. + * @param defaultConfig the default {@link RequestConfig} to set on the client. If {@code null} a + * default config is created w/ the default connection and socket timeouts. 
*/ - protected HttpClientBase(String host, int port, String path, HttpClientConnectionManager conMgr, RequestConfig defaultConfig) { + protected HttpClientBase( + String host, + int port, + String path, + HttpClientConnectionManager conMgr, + RequestConfig defaultConfig) { url = normalizedURL(host, port, path); if (defaultConfig == null) { - this.defaultConfig = RequestConfig.custom() - .setConnectionRequestTimeout(DEFAULT_CONNECTION_TIMEOUT) - .setSocketTimeout(DEFAULT_SO_TIMEOUT).build(); + this.defaultConfig = + RequestConfig.custom() + .setConnectionRequestTimeout(DEFAULT_CONNECTION_TIMEOUT) + .setSocketTimeout(DEFAULT_SO_TIMEOUT) + .build(); } else { this.defaultConfig = defaultConfig; } - httpc = HttpClientBuilder.create().setConnectionManager(conMgr).setDefaultRequestConfig(this.defaultConfig).build(); + httpc = + HttpClientBuilder.create() + .setConnectionManager(conMgr) + .setDefaultRequestConfig(this.defaultConfig) + .build(); } - + /** Throws {@link AlreadyClosedException} if this client is already closed. */ protected final void ensureOpen() throws AlreadyClosedException { if (closed) { throw new AlreadyClosedException("HttpClient already closed"); } } - - /** - * Create a URL out of the given parameters, translate an empty/null path to '/' - */ + + /** Create a URL out of the given parameters, translate an empty/null path to '/' */ private static String normalizedURL(String host, int port, String path) { if (path == null || path.length() == 0) { path = "/"; } return "http://" + host + ":" + port + path; } - + /** - * Internal: response status after invocation, and in case or error attempt to read the - * exception sent by the server. + * Internal: response status after invocation, and in case or error attempt to read the + * exception sent by the server. */ protected void verifyStatus(HttpResponse response) throws IOException { StatusLine statusLine = response.getStatusLine(); if (statusLine.getStatusCode() != HttpStatus.SC_OK) { try { - throwKnownError(response, statusLine); + throwKnownError(response, statusLine); } finally { EntityUtils.consumeQuietly(response.getEntity()); } } } - + @SuppressForbidden(reason = "XXX: security hole") protected void throwKnownError(HttpResponse response, StatusLine statusLine) throws IOException { ObjectInputStream in = null; @@ -125,24 +130,25 @@ public abstract class HttpClientBase implements Closeable { // the response stream is not an exception - could be an error in servlet.init(). throw new RuntimeException("Unknown error: " + statusLine, t); } - + Throwable t; try { t = (Throwable) in.readObject(); assert t != null; - } catch (Throwable th) { + } catch (Throwable th) { throw new RuntimeException("Failed to read exception object: " + statusLine, th); } finally { in.close(); } throw IOUtils.rethrowAlways(t); } - + /** - * internal: execute a request and return its result - * The params argument is treated as: name1,value1,name2,value2,... + * internal: execute a request and return its result The params argument is + * treated as: name1,value1,name2,value2,... */ - protected HttpResponse executePOST(String request, HttpEntity entity, String... params) throws IOException { + protected HttpResponse executePOST(String request, HttpEntity entity, String... 
params) + throws IOException { ensureOpen(); HttpPost m = new HttpPost(queryString(request, params)); m.setEntity(entity); @@ -150,10 +156,10 @@ public abstract class HttpClientBase implements Closeable { verifyStatus(response); return response; } - + /** - * internal: execute a request and return its result - * The params argument is treated as: name1,value1,name2,value2,... + * internal: execute a request and return its result The params argument is + * treated as: name1,value1,name2,value2,... */ protected HttpResponse executeGET(String request, String... params) throws IOException { ensureOpen(); @@ -162,28 +168,33 @@ public abstract class HttpClientBase implements Closeable { verifyStatus(response); return response; } - + private String queryString(String request, String... params) throws UnsupportedEncodingException { StringBuilder query = new StringBuilder(url).append('/').append(request).append('?'); if (params != null) { for (int i = 0; i < params.length; i += 2) { - query.append(params[i]).append('=').append(URLEncoder.encode(params[i+1], "UTF8")).append('&'); + query + .append(params[i]) + .append('=') + .append(URLEncoder.encode(params[i + 1], "UTF8")) + .append('&'); } } return query.substring(0, query.length() - 1); } - + /** Internal utility: input stream of the provided response */ public InputStream responseInputStream(HttpResponse response) throws IOException { return responseInputStream(response, false); } - + // TODO: can we simplify this Consuming !?!?!? /** - * Internal utility: input stream of the provided response, which optionally - * consumes the response's resources when the input stream is exhausted. + * Internal utility: input stream of the provided response, which optionally consumes the + * response's resources when the input stream is exhausted. */ - public InputStream responseInputStream(HttpResponse response, boolean consume) throws IOException { + public InputStream responseInputStream(HttpResponse response, boolean consume) + throws IOException { final HttpEntity entity = response.getEntity(); final InputStream in = entity.getContent(); if (!consume) { @@ -191,29 +202,34 @@ public abstract class HttpClientBase implements Closeable { } return new InputStream() { private boolean consumed = false; + @Override public int read() throws IOException { final int res = in.read(); consume(res); return res; } + @Override public void close() throws IOException { in.close(); consume(-1); } + @Override public int read(byte[] b) throws IOException { final int res = in.read(b); consume(res); return res; } + @Override public int read(byte[] b, int off, int len) throws IOException { final int res = in.read(b, off, len); consume(res); return res; } + private void consume(int minusOne) { if (!consumed && minusOne == -1) { try { @@ -226,29 +242,30 @@ public abstract class HttpClientBase implements Closeable { } }; } - + /** - * Returns true iff this instance was {@link #close() closed}, otherwise - * returns false. Note that if you override {@link #close()}, you must call - * {@code super.close()}, in order for this instance to be properly closed. + * Returns true iff this instance was {@link #close() closed}, otherwise returns false. Note that + * if you override {@link #close()}, you must call {@code super.close()}, in order for this + * instance to be properly closed. */ protected final boolean isClosed() { return closed; } - + /** * Same as {@link #doAction(HttpResponse, boolean, Callable)} but always do consume at the end. 
*/ protected T doAction(HttpResponse response, Callable call) throws IOException { return doAction(response, true, call); } - + /** - * Do a specific action and validate after the action that the status is still OK, - * and if not, attempt to extract the actual server side exception. Optionally - * release the response at exit, depending on consume parameter. + * Do a specific action and validate after the action that the status is still OK, and if not, + * attempt to extract the actual server side exception. Optionally release the response at exit, + * depending on consume parameter. */ - protected T doAction(HttpResponse response, boolean consume, Callable call) throws IOException { + protected T doAction(HttpResponse response, boolean consume, Callable call) + throws IOException { Throwable th = null; try { return call.call(); @@ -263,13 +280,12 @@ public abstract class HttpClientBase implements Closeable { } } } - throw IOUtils.rethrowAlways(th); + throw IOUtils.rethrowAlways(th); } - + @Override public void close() throws IOException { httpc.close(); closed = true; } - } diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/http/HttpReplicator.java b/lucene/replicator/src/java/org/apache/lucene/replicator/http/HttpReplicator.java index 95323a1f3e9..dd12172d983 100644 --- a/lucene/replicator/src/java/org/apache/lucene/replicator/http/HttpReplicator.java +++ b/lucene/replicator/src/java/org/apache/lucene/replicator/http/HttpReplicator.java @@ -20,7 +20,6 @@ import java.io.DataInputStream; import java.io.IOException; import java.io.InputStream; import java.util.concurrent.Callable; - import org.apache.http.HttpResponse; import org.apache.http.conn.HttpClientConnectionManager; import org.apache.lucene.replicator.Replicator; @@ -29,76 +28,82 @@ import org.apache.lucene.replicator.SessionToken; import org.apache.lucene.replicator.http.ReplicationService.ReplicationAction; /** - * An HTTP implementation of {@link Replicator}. Assumes the API supported by - * {@link ReplicationService}. - * + * An HTTP implementation of {@link Replicator}. Assumes the API supported by {@link + * ReplicationService}. + * * @lucene.experimental */ public class HttpReplicator extends HttpClientBase implements Replicator { - + /** Construct with specified connection manager. 
*/ public HttpReplicator(String host, int port, String path, HttpClientConnectionManager conMgr) { super(host, port, path, conMgr, null); } - + @Override public SessionToken checkForUpdate(String currVersion) throws IOException { String[] params = null; if (currVersion != null) { - params = new String[] { ReplicationService.REPLICATE_VERSION_PARAM, currVersion }; + params = new String[] {ReplicationService.REPLICATE_VERSION_PARAM, currVersion}; } final HttpResponse response = executeGET(ReplicationAction.UPDATE.name(), params); - return doAction(response, new Callable() { - @Override - public SessionToken call() throws Exception { - final DataInputStream dis = new DataInputStream(responseInputStream(response)); - try { - if (dis.readByte() == 0) { - return null; - } else { - return new SessionToken(dis); + return doAction( + response, + new Callable() { + @Override + public SessionToken call() throws Exception { + final DataInputStream dis = new DataInputStream(responseInputStream(response)); + try { + if (dis.readByte() == 0) { + return null; + } else { + return new SessionToken(dis); + } + } finally { + dis.close(); + } } - } finally { - dis.close(); - } - } - }); + }); } - + @Override - public InputStream obtainFile(String sessionID, String source, String fileName) throws IOException { - String[] params = new String[] { - ReplicationService.REPLICATE_SESSION_ID_PARAM, sessionID, - ReplicationService.REPLICATE_SOURCE_PARAM, source, - ReplicationService.REPLICATE_FILENAME_PARAM, fileName, - }; + public InputStream obtainFile(String sessionID, String source, String fileName) + throws IOException { + String[] params = + new String[] { + ReplicationService.REPLICATE_SESSION_ID_PARAM, sessionID, + ReplicationService.REPLICATE_SOURCE_PARAM, source, + ReplicationService.REPLICATE_FILENAME_PARAM, fileName, + }; final HttpResponse response = executeGET(ReplicationAction.OBTAIN.name(), params); - return doAction(response, false, new Callable() { - @Override - public InputStream call() throws Exception { - return responseInputStream(response, true); - } - }); + return doAction( + response, + false, + new Callable() { + @Override + public InputStream call() throws Exception { + return responseInputStream(response, true); + } + }); } - + @Override public void publish(Revision revision) throws IOException { throw new UnsupportedOperationException( "this replicator implementation does not support remote publishing of revisions"); } - + @Override public void release(String sessionID) throws IOException { - String[] params = new String[] { - ReplicationService.REPLICATE_SESSION_ID_PARAM, sessionID - }; + String[] params = new String[] {ReplicationService.REPLICATE_SESSION_ID_PARAM, sessionID}; final HttpResponse response = executeGET(ReplicationAction.RELEASE.name(), params); - doAction(response, new Callable() { - @Override - public Object call() throws Exception { - return null; // do not remove this call: as it is still validating for us! - } - }); + doAction( + response, + new Callable() { + @Override + public Object call() throws Exception { + return null; // do not remove this call: as it is still validating for us! 
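
For orientation, a minimal sketch of driving this client from a reader node; host, port, and shard values are hypothetical, and SessionToken's public id field is an assumption based on its use in release() above:

    import java.io.IOException;
    import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
    import org.apache.lucene.replicator.SessionToken;

    static void pollOnce() throws IOException {
      // "/replicate/s1" = REPLICATION_CONTEXT + shard ID, per ReplicationService below:
      try (HttpReplicator replicator =
          new HttpReplicator(
              "localhost", 8080, "/replicate/s1", new PoolingHttpClientConnectionManager())) {
        SessionToken session = replicator.checkForUpdate(null); // null: no local revision yet
        if (session != null) {
          try {
            // ... obtainFile(session.id, source, fileName) for each file the token lists ...
          } finally {
            replicator.release(session.id); // always free the server-side session
          }
        }
      }
    }
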
+ } + }); } - } diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/http/ReplicationService.java b/lucene/replicator/src/java/org/apache/lucene/replicator/http/ReplicationService.java index 880a18e89a8..7f9371a59f6 100644 --- a/lucene/replicator/src/java/org/apache/lucene/replicator/http/ReplicationService.java +++ b/lucene/replicator/src/java/org/apache/lucene/replicator/http/ReplicationService.java @@ -25,70 +25,70 @@ import java.util.ArrayList; import java.util.Locale; import java.util.Map; import java.util.StringTokenizer; - import javax.servlet.ServletException; import javax.servlet.ServletOutputStream; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; - import org.apache.http.HttpStatus; import org.apache.lucene.replicator.Replicator; import org.apache.lucene.replicator.SessionToken; import org.apache.lucene.util.SuppressForbidden; /** - * A server-side service for handling replication requests. The service assumes - * requests are sent in the format - * /<context>/<shard>/<action> where + * A server-side service for handling replication requests. The service assumes requests are sent in + * the format /<context>/<shard>/<action> where + * *
 * <ul>
- * <li>{@code context} is the servlet context, e.g. {@link #REPLICATION_CONTEXT}
- * <li>{@code shard} is the ID of the shard, e.g. "s1"
- * <li>{@code action} is one of {@link ReplicationAction} values
+ *   <li>{@code context} is the servlet context, e.g. {@link #REPLICATION_CONTEXT}
+ *   <li>{@code shard} is the ID of the shard, e.g. "s1"
+ *   <li>{@code action} is one of {@link ReplicationAction} values
 * </ul>
- * For example, to check whether there are revision updates for shard "s1" you
- * should send the request: http://host:port/replicate/s1/update.
- * <p>
- * This service is written like a servlet, and
- * {@link #perform(HttpServletRequest, HttpServletResponse)} takes servlet
- * request and response accordingly, so it is quite easy to embed in your
- * application's servlet.
- *
+ *
+ * For example, to check whether there are revision updates for shard "s1" you should send the
+ * request: http://host:port/replicate/s1/update.
+ *
+ * <p>
    This service is written like a servlet, and {@link #perform(HttpServletRequest, + * HttpServletResponse)} takes servlet request and response accordingly, so it is quite easy to + * embed in your application's servlet. + * * @lucene.experimental */ public class ReplicationService { - + /** Actions supported by the {@link ReplicationService}. */ public enum ReplicationAction { - OBTAIN, RELEASE, UPDATE + OBTAIN, + RELEASE, + UPDATE } - + /** The context path for the servlet. */ public static final String REPLICATION_CONTEXT = "/replicate"; - + /** Request parameter name for providing the revision version. */ - public final static String REPLICATE_VERSION_PARAM = "version"; - + public static final String REPLICATE_VERSION_PARAM = "version"; + /** Request parameter name for providing a session ID. */ - public final static String REPLICATE_SESSION_ID_PARAM = "sessionid"; - + public static final String REPLICATE_SESSION_ID_PARAM = "sessionid"; + /** Request parameter name for providing the file's source. */ - public final static String REPLICATE_SOURCE_PARAM = "source"; - + public static final String REPLICATE_SOURCE_PARAM = "source"; + /** Request parameter name for providing the file's name. */ - public final static String REPLICATE_FILENAME_PARAM = "filename"; - + public static final String REPLICATE_FILENAME_PARAM = "filename"; + private static final int SHARD_IDX = 0, ACTION_IDX = 1; - - private final Map replicators; - - public ReplicationService(Map replicators) { + + private final Map replicators; + + public ReplicationService(Map replicators) { super(); this.replicators = replicators; } - + /** - * Returns the path elements that were given in the servlet request, excluding - * the servlet's action context. + * Returns the path elements that were given in the servlet request, excluding the servlet's + * action context. */ private String[] getPathElements(HttpServletRequest req) { String path = req.getServletPath(); @@ -101,7 +101,7 @@ public class ReplicationService { if (path.length() > actionLen && path.charAt(actionLen) == '/') { ++startIdx; } - + // split the string on '/' and remove any empty elements. This is better // than using String.split() since the latter may return empty elements in // the array @@ -112,15 +112,16 @@ public class ReplicationService { } return elements.toArray(new String[0]); } - - private static String extractRequestParam(HttpServletRequest req, String paramName) throws ServletException { + + private static String extractRequestParam(HttpServletRequest req, String paramName) + throws ServletException { String param = req.getParameter(paramName); if (param == null) { throw new ServletException("Missing mandatory parameter: " + paramName); } return param; } - + private static void copy(InputStream in, OutputStream out) throws IOException { byte[] buf = new byte[16384]; int numRead; @@ -128,28 +129,30 @@ public class ReplicationService { out.write(buf, 0, numRead); } } - + /** Executes the replication task. */ @SuppressForbidden(reason = "XXX: security hole") - public void perform(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { + public void perform(HttpServletRequest req, HttpServletResponse resp) + throws ServletException, IOException { String[] pathElements = getPathElements(req); - + if (pathElements.length != 2) { - throw new ServletException("invalid path, must contain shard ID and action, e.g. */s1/update"); + throw new ServletException( + "invalid path, must contain shard ID and action, e.g. 
*/s1/update"); } - + final ReplicationAction action; try { action = ReplicationAction.valueOf(pathElements[ACTION_IDX].toUpperCase(Locale.ENGLISH)); } catch (IllegalArgumentException e) { throw new ServletException("Unsupported action provided: " + pathElements[ACTION_IDX]); } - + final Replicator replicator = replicators.get(pathElements[SHARD_IDX]); if (replicator == null) { throw new ServletException("unrecognized shard ID " + pathElements[SHARD_IDX]); } - + // SOLR-8933 Don't close this stream. ServletOutputStream resOut = resp.getOutputStream(); try { @@ -196,5 +199,4 @@ public class ReplicationService { resp.flushBuffer(); } } - } diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/http/package-info.java b/lucene/replicator/src/java/org/apache/lucene/replicator/http/package-info.java index b4769c2d8cb..fc19fd5725a 100644 --- a/lucene/replicator/src/java/org/apache/lucene/replicator/http/package-info.java +++ b/lucene/replicator/src/java/org/apache/lucene/replicator/http/package-info.java @@ -14,8 +14,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -/** - * HTTP replication implementation - */ -package org.apache.lucene.replicator.http; \ No newline at end of file + +/** HTTP replication implementation */ +package org.apache.lucene.replicator.http; diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/CopyJob.java b/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/CopyJob.java index 74e7c88b79d..e87e060a31d 100644 --- a/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/CopyJob.java +++ b/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/CopyJob.java @@ -25,19 +25,20 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; - import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.util.IOUtils; -/** Handles copying one set of files, e.g. all files for a new NRT point, or files for pre-copying a merged segment. - * This notifies the caller via OnceDone when the job finishes or failed. +/** + * Handles copying one set of files, e.g. all files for a new NRT point, or files for pre-copying a + * merged segment. This notifies the caller via OnceDone when the job finishes or failed. 
* - * @lucene.experimental */ + * @lucene.experimental + */ public abstract class CopyJob implements Comparable { - private final static AtomicLong counter = new AtomicLong(); + private static final AtomicLong counter = new AtomicLong(); protected final ReplicaNode dest; - protected final Map files; + protected final Map files; public final long ord = counter.incrementAndGet(); @@ -50,7 +51,7 @@ public abstract class CopyJob implements Comparable { public final String reason; - protected final List> toCopy; + protected final List> toCopy; protected long totBytes; @@ -64,9 +65,15 @@ public abstract class CopyJob implements Comparable { protected volatile String cancelReason; // toString may concurrently access this: - protected final Map copiedFiles = new ConcurrentHashMap<>(); + protected final Map copiedFiles = new ConcurrentHashMap<>(); - protected CopyJob(String reason, Map files, ReplicaNode dest, boolean highPriority, OnceDone onceDone) throws IOException { + protected CopyJob( + String reason, + Map files, + ReplicaNode dest, + boolean highPriority, + OnceDone onceDone) + throws IOException { this.reason = reason; this.files = files; this.dest = dest; @@ -87,9 +94,12 @@ public abstract class CopyJob implements Comparable { public void run(CopyJob job) throws IOException; } - /** Transfers whatever tmp files were already copied in this previous job and cancels the previous job */ + /** + * Transfers whatever tmp files were already copied in this previous job and cancels the previous + * job + */ public synchronized void transferAndCancel(CopyJob prevJob) throws IOException { - synchronized(prevJob) { + synchronized (prevJob) { dest.message("CopyJob: now transfer prevJob " + prevJob); try { _transferAndCancel(prevJob); @@ -116,19 +126,27 @@ public abstract class CopyJob implements Comparable { prevJob.exc = new Throwable(); // Carry over already copied files that we also want to copy - Iterator> it = toCopy.iterator(); + Iterator> it = toCopy.iterator(); long bytesAlreadyCopied = 0; // Iterate over all files we think we need to copy: while (it.hasNext()) { - Map.Entry ent = it.next(); + Map.Entry ent = it.next(); String fileName = ent.getKey(); String prevTmpFileName = prevJob.copiedFiles.get(fileName); if (prevTmpFileName != null) { - // This fileName is common to both jobs, and the old job already finished copying it (to a temp file), so we keep it: + // This fileName is common to both jobs, and the old job already finished copying it (to a + // temp file), so we keep it: long fileLength = ent.getValue().length; bytesAlreadyCopied += fileLength; - dest.message("xfer: carry over already-copied file " + fileName + " (" + prevTmpFileName + ", " + fileLength + " bytes)"); + dest.message( + "xfer: carry over already-copied file " + + fileName + + " (" + + prevTmpFileName + + ", " + + fileLength + + " bytes)"); copiedFiles.put(fileName, prevTmpFileName); // So we don't try to delete it, below: @@ -137,20 +155,34 @@ public abstract class CopyJob implements Comparable { // So it's not in our copy list anymore: it.remove(); } else if (prevJob.current != null && prevJob.current.name.equals(fileName)) { - // This fileName is common to both jobs, and it's the file that the previous job was in the process of copying. In this case - // we continue copying it from the prevoius job. 
This is important for cases where we are copying over a large file - // because otherwise we could keep failing the NRT copy and restarting this file from the beginning and never catch up: - dest.message("xfer: carry over in-progress file " + fileName + " (" + prevJob.current.tmpName + ") bytesCopied=" + prevJob.current.getBytesCopied() + " of " + prevJob.current.bytesToCopy); + // This fileName is common to both jobs, and it's the file that the previous job was in the + // process of copying. In this case + // we continue copying it from the prevoius job. This is important for cases where we are + // copying over a large file + // because otherwise we could keep failing the NRT copy and restarting this file from the + // beginning and never catch up: + dest.message( + "xfer: carry over in-progress file " + + fileName + + " (" + + prevJob.current.tmpName + + ") bytesCopied=" + + prevJob.current.getBytesCopied() + + " of " + + prevJob.current.bytesToCopy); bytesAlreadyCopied += prevJob.current.getBytesCopied(); assert current == null; - // must set current first, before writing/read to c.in/out in case that hits an exception, so that we then close the temp + // must set current first, before writing/read to c.in/out in case that hits an exception, + // so that we then close the temp // IndexOutput when cancelling ourselves: current = newCopyOneFile(prevJob.current); - // Tell our new (primary) connection we'd like to copy this file first, but resuming from how many bytes we already copied last time: - // We do this even if bytesToCopy == bytesCopied, because we still need to readLong() the checksum from the primary connection: + // Tell our new (primary) connection we'd like to copy this file first, but resuming from + // how many bytes we already copied last time: + // We do this even if bytesToCopy == bytesCopied, because we still need to readLong() the + // checksum from the primary connection: assert prevJob.current.getBytesCopied() <= prevJob.current.bytesToCopy; prevJob.current = null; @@ -169,7 +201,7 @@ public abstract class CopyJob implements Comparable { dest.message("xfer: now delete old temp files: " + prevJob.copiedFiles.values()); IOUtils.deleteFilesIgnoringExceptions(dest.dir, prevJob.copiedFiles.values()); - if (prevJob.current != null) { + if (prevJob.current != null) { IOUtils.closeWhileHandlingException(prevJob.current); if (Node.VERBOSE_FILES) { dest.message("remove partial file " + prevJob.current.tmpName); @@ -184,7 +216,10 @@ public abstract class CopyJob implements Comparable { /** Begin copying files */ public abstract void start() throws IOException; - /** Use current thread (blocking) to do all copying and then return once done, or throw exception on failure */ + /** + * Use current thread (blocking) to do all copying and then return once done, or throw exception + * on failure + */ public abstract void runBlocking() throws Exception; public void cancel(String reason, Throwable exc) throws IOException { @@ -193,10 +228,14 @@ public abstract class CopyJob implements Comparable { return; } - dest.message(String.format(Locale.ROOT, "top: cancel after copying %s; exc=%s:\n files=%s\n copiedFiles=%s", - Node.bytesToString(totBytesCopied), - exc, - files == null ? "null" : files.keySet(), copiedFiles.keySet())); + dest.message( + String.format( + Locale.ROOT, + "top: cancel after copying %s; exc=%s:\n files=%s\n copiedFiles=%s", + Node.bytesToString(totBytesCopied), + exc, + files == null ? 
"null" : files.keySet(), + copiedFiles.keySet())); if (exc == null) { exc = new Throwable(); @@ -208,7 +247,7 @@ public abstract class CopyJob implements Comparable { // Delete all temp files we wrote: IOUtils.deleteFilesIgnoringExceptions(dest.dir, copiedFiles.values()); - if (current != null) { + if (current != null) { IOUtils.closeWhileHandlingException(current); if (Node.VERBOSE_FILES) { dest.message("remove partial file " + current.tmpName); diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/CopyOneFile.java b/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/CopyOneFile.java index 7db296b0036..cf5a05ae102 100644 --- a/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/CopyOneFile.java +++ b/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/CopyOneFile.java @@ -20,13 +20,11 @@ package org.apache.lucene.replicator.nrt; import java.io.Closeable; import java.io.IOException; import java.util.Locale; - import org.apache.lucene.store.DataInput; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexOutput; /** Copies one file from an incoming DataInput to a dest filename in a local Directory */ - public class CopyOneFile implements Closeable { private final DataInput in; private final IndexOutput out; @@ -40,7 +38,9 @@ public class CopyOneFile implements Closeable { private long bytesCopied; - public CopyOneFile(DataInput in, ReplicaNode dest, String name, FileMetaData metaData, byte[] buffer) throws IOException { + public CopyOneFile( + DataInput in, ReplicaNode dest, String name, FileMetaData metaData, byte[] buffer) + throws IOException { this.in = in; this.name = name; this.dest = dest; @@ -49,11 +49,18 @@ public class CopyOneFile implements Closeable { out = dest.createTempOutput(name, "copy", IOContext.DEFAULT); tmpName = out.getName(); - // last 8 bytes are checksum, which we write ourselves after copying all bytes and confirming checksum: + // last 8 bytes are checksum, which we write ourselves after copying all bytes and confirming + // checksum: bytesToCopy = metaData.length - Long.BYTES; if (Node.VERBOSE_FILES) { - dest.message("file " + name + ": start copying to tmp file " + tmpName + " length=" + (8+bytesToCopy)); + dest.message( + "file " + + name + + ": start copying to tmp file " + + tmpName + + " length=" + + (8 + bytesToCopy)); } copyStartNS = System.nanoTime(); @@ -83,21 +90,35 @@ public class CopyOneFile implements Closeable { /** Copy another chunk of bytes, returning true once the copy is done */ public boolean visit() throws IOException { // Copy up to 640 KB per visit: - for(int i=0;i<10;i++) { + for (int i = 0; i < 10; i++) { long bytesLeft = bytesToCopy - bytesCopied; if (bytesLeft == 0) { long checksum = out.getChecksum(); if (checksum != metaData.checksum) { // Bits flipped during copy! - dest.message("file " + tmpName + ": checksum mismatch after copy (bits flipped during network copy?) after-copy checksum=" + checksum + " vs expected=" + metaData.checksum + "; cancel job"); + dest.message( + "file " + + tmpName + + ": checksum mismatch after copy (bits flipped during network copy?) 
after-copy checksum=" + + checksum + + " vs expected=" + + metaData.checksum + + "; cancel job"); throw new IOException("file " + name + ": checksum mismatch after file copy"); } - // Paranoia: make sure the primary node is not smoking crack, by somehow sending us an already corrupted file whose checksum (in its + // Paranoia: make sure the primary node is not smoking crack, by somehow sending us an + // already corrupted file whose checksum (in its // footer) disagrees with reality: long actualChecksumIn = in.readLong(); if (actualChecksumIn != checksum) { - dest.message("file " + tmpName + ": checksum claimed by primary disagrees with the file's footer: claimed checksum=" + checksum + " vs actual=" + actualChecksumIn); + dest.message( + "file " + + tmpName + + ": checksum claimed by primary disagrees with the file's footer: claimed checksum=" + + checksum + + " vs actual=" + + actualChecksumIn); throw new IOException("file " + name + ": checksum mismatch after file copy"); } out.writeLong(checksum); @@ -105,10 +126,13 @@ public class CopyOneFile implements Closeable { close(); if (Node.VERBOSE_FILES) { - dest.message(String.format(Locale.ROOT, "file %s: done copying [%s, %.3fms]", - name, - Node.bytesToString(metaData.length), - (System.nanoTime() - copyStartNS)/1000000.0)); + dest.message( + String.format( + Locale.ROOT, + "file %s: done copying [%s, %.3fms]", + name, + Node.bytesToString(metaData.length), + (System.nanoTime() - copyStartNS) / 1000000.0)); } return true; @@ -118,7 +142,8 @@ public class CopyOneFile implements Closeable { in.readBytes(buffer, 0, toCopy); out.writeBytes(buffer, 0, toCopy); - // TODO: rsync will fsync a range of the file; maybe we should do that here for large files in case we crash/killed + // TODO: rsync will fsync a range of the file; maybe we should do that here for large files in + // case we crash/killed bytesCopied += toCopy; } diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/CopyState.java b/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/CopyState.java index aff45c0ae63..c4ef3135c38 100644 --- a/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/CopyState.java +++ b/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/CopyState.java @@ -20,26 +20,33 @@ package org.apache.lucene.replicator.nrt; import java.util.Collections; import java.util.Map; import java.util.Set; - import org.apache.lucene.index.SegmentInfos; -/** Holds incRef'd file level details for one point-in-time segment infos on the primary node. +/** + * Holds incRef'd file level details for one point-in-time segment infos on the primary node. 
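
The wire contract used by CopyOneFile above is small enough to restate as a sketch (the helper name and signature are assumptions): the sender streams length minus 8 payload bytes followed by the 8-byte checksum, and the receiver recomputes the checksum while writing, checking it against both the expected metadata value and the trailing long before re-appending it:

    import java.io.IOException;
    import org.apache.lucene.store.DataInput;
    import org.apache.lucene.store.IndexOutput;

    static void copyAndVerify(DataInput in, IndexOutput out, long fileLength, long expectedChecksum)
        throws IOException {
      byte[] buffer = new byte[16384];
      long bytesToCopy = fileLength - Long.BYTES; // payload only; trailing 8 bytes are the checksum
      long copied = 0;
      while (copied < bytesToCopy) {
        int chunk = (int) Math.min(buffer.length, bytesToCopy - copied);
        in.readBytes(buffer, 0, chunk);
        out.writeBytes(buffer, 0, chunk);
        copied += chunk;
      }
      long checksum = out.getChecksum(); // checksum of all payload bytes written so far
      if (checksum != expectedChecksum || in.readLong() != checksum) {
        throw new IOException("checksum mismatch after file copy");
      }
      out.writeLong(checksum); // append the now-verified trailer to the local copy
    }
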
* - * @lucene.experimental */ + * @lucene.experimental + */ public class CopyState { - public final Map files; + public final Map files; public final long version; public final long gen; public final byte[] infosBytes; public final Set completedMergeFiles; public final long primaryGen; - + // only non-null on the primary node public final SegmentInfos infos; - public CopyState(Map files, long version, long gen, byte[] infosBytes, - Set completedMergeFiles, long primaryGen, SegmentInfos infos) { + public CopyState( + Map files, + long version, + long gen, + byte[] infosBytes, + Set completedMergeFiles, + long primaryGen, + SegmentInfos infos) { assert completedMergeFiles != null; this.files = Collections.unmodifiableMap(files); this.version = version; diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/FileMetaData.java b/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/FileMetaData.java index 5d63a8ca039..d7eeebf6646 100644 --- a/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/FileMetaData.java +++ b/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/FileMetaData.java @@ -17,13 +17,16 @@ package org.apache.lucene.replicator.nrt; -/** Holds metadata details about a single file that we use to confirm two files (one remote, one local) are in fact "identical". +/** + * Holds metadata details about a single file that we use to confirm two files (one remote, one + * local) are in fact "identical". * - * @lucene.experimental */ - + * @lucene.experimental + */ public class FileMetaData { - // Header and footer of the file must be identical between primary and replica to consider the files equal: + // Header and footer of the file must be identical between primary and replica to consider the + // files equal: public final byte[] header; public final byte[] footer; @@ -44,4 +47,3 @@ public class FileMetaData { return "FileMetaData(length=" + length + ")"; } } - diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/Node.java b/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/Node.java index aa66d9fafe2..660d1e87849 100644 --- a/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/Node.java +++ b/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/Node.java @@ -25,7 +25,6 @@ import java.io.PrintStream; import java.nio.file.NoSuchFileException; import java.util.Locale; import java.util.Map; - import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.DirectoryReader; @@ -36,10 +35,11 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; -/** Common base class for {@link PrimaryNode} and {@link ReplicaNode}. +/** + * Common base class for {@link PrimaryNode} and {@link ReplicaNode}. 
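
Given the FileMetaData fields above, the "identical files" test it supports reduces to this sketch (the helper name is an assumption):

    import java.util.Arrays;
    import org.apache.lucene.replicator.nrt.FileMetaData;

    // Same length plus byte-identical index header and footer => treat as identical:
    static boolean sameFile(FileMetaData local, FileMetaData remote) {
      return local.length == remote.length
          && Arrays.equals(local.header, remote.header)
          && Arrays.equals(local.footer, remote.footer);
    }
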
* - * @lucene.experimental */ - + * @lucene.experimental + */ public abstract class Node implements Closeable { public static boolean VERBOSE_FILES = true; @@ -47,11 +47,16 @@ public abstract class Node implements Closeable { // Keys we store into IndexWriter's commit user data: - /** Key to store the primary gen in the commit data, which increments every time we promote a new primary, so replicas can detect when the - * primary they were talking to is changed */ + /** + * Key to store the primary gen in the commit data, which increments every time we promote a new + * primary, so replicas can detect when the primary they were talking to is changed + */ public static String PRIMARY_GEN_KEY = "__primaryGen"; - /** Key to store the version in the commit data, which increments every time we open a new NRT reader */ + /** + * Key to store the version in the commit data, which increments every time we open a new NRT + * reader + */ public static String VERSION_KEY = "__version"; /** Compact ordinal for this node */ @@ -60,11 +65,15 @@ public abstract class Node implements Closeable { protected final Directory dir; protected final SearcherFactory searcherFactory; - - // Tracks NRT readers, opened from IW (primary) or opened from replicated SegmentInfos pulled across the wire (replica): + + // Tracks NRT readers, opened from IW (primary) or opened from replicated SegmentInfos pulled + // across the wire (replica): protected ReferenceManager mgr; - /** Startup time of original test, carefully propogated to all nodes to produce consistent "seconds since start time" in messages */ + /** + * Startup time of original test, carefully propogated to all nodes to produce consistent "seconds + * since start time" in messages + */ public static long globalStartNS; /** When this node was started */ @@ -79,7 +88,7 @@ public abstract class Node implements Closeable { volatile String state = "idle"; /** File metadata for last sync that succeeded; we use this as a cache */ - protected volatile Map lastFileMetaData; + protected volatile Map lastFileMetaData; public Node(int id, Directory dir, SearcherFactory searcherFactory, PrintStream printStream) { this.id = id; @@ -108,37 +117,45 @@ public abstract class Node implements Closeable { public static void nodeMessage(PrintStream printStream, String message) { if (printStream != null) { long now = System.nanoTime(); - printStream.println(String.format(Locale.ROOT, - "%5.3fs %5.1fs: [%11s] %s", - (now-globalStartNS)/1000000000., - (now-localStartNS)/1000000000., - Thread.currentThread().getName(), - message)); + printStream.println( + String.format( + Locale.ROOT, + "%5.3fs %5.1fs: [%11s] %s", + (now - globalStartNS) / 1000000000., + (now - localStartNS) / 1000000000., + Thread.currentThread().getName(), + message)); } } public static void nodeMessage(PrintStream printStream, int id, String message) { if (printStream != null) { long now = System.nanoTime(); - printStream.println(String.format(Locale.ROOT, - "%5.3fs %5.1fs: N%d [%11s] %s", - (now-globalStartNS)/1000000000., - (now-localStartNS)/1000000000., - id, - Thread.currentThread().getName(), - message)); + printStream.println( + String.format( + Locale.ROOT, + "%5.3fs %5.1fs: N%d [%11s] %s", + (now - globalStartNS) / 1000000000., + (now - localStartNS) / 1000000000., + id, + Thread.currentThread().getName(), + message)); } } public void message(String message) { if (printStream != null) { long now = System.nanoTime(); - printStream.println(String.format(Locale.ROOT, - "%5.3fs %5.1fs: %7s %2s [%11s] %s", - 
(now-globalStartNS)/1000000000., - (now-localStartNS)/1000000000., - state, name(), - Thread.currentThread().getName(), message)); + printStream.println( + String.format( + Locale.ROOT, + "%5.3fs %5.1fs: %7s %2s [%11s] %s", + (now - globalStartNS) / 1000000000., + (now - localStartNS) / 1000000000., + state, + name(), + Thread.currentThread().getName(), + message)); } } @@ -162,21 +179,25 @@ public abstract class Node implements Closeable { if (bytes < 1024) { return bytes + " b"; } else if (bytes < 1024 * 1024) { - return String.format(Locale.ROOT, "%.1f KB", bytes/1024.); + return String.format(Locale.ROOT, "%.1f KB", bytes / 1024.); } else if (bytes < 1024 * 1024 * 1024) { - return String.format(Locale.ROOT, "%.1f MB", bytes/1024./1024.); + return String.format(Locale.ROOT, "%.1f MB", bytes / 1024. / 1024.); } else { - return String.format(Locale.ROOT, "%.1f GB", bytes/1024./1024./1024.); + return String.format(Locale.ROOT, "%.1f GB", bytes / 1024. / 1024. / 1024.); } } - /** Opens the specified file, reads its identifying information, including file length, full index header (includes the unique segment - * ID) and the full footer (includes checksum), and returns the resulting {@link FileMetaData}. + /** + * Opens the specified file, reads its identifying information, including file length, full index + * header (includes the unique segment ID) and the full footer (includes checksum), and returns + * the resulting {@link FileMetaData}. * - *

- * <p>This returns null, logging a message, if there are any problems (the file does not exist, is corrupt, truncated, etc.).
- */
+ * <p>
    This returns null, logging a message, if there are any problems (the file does not exist, is + * corrupt, truncated, etc.). + */ public FileMetaData readLocalFileMetaData(String fileName) throws IOException { - Map cache = lastFileMetaData; + Map cache = lastFileMetaData; FileMetaData result; if (cache != null) { // We may already have this file cached from the last NRT point: @@ -192,30 +213,33 @@ public abstract class Node implements Closeable { byte[] header; byte[] footer; try (IndexInput in = dir.openInput(fileName, IOContext.DEFAULT)) { - try { - length = in.length(); - header = CodecUtil.readIndexHeader(in); - footer = CodecUtil.readFooter(in); - checksum = CodecUtil.retrieveChecksum(in); - } catch (EOFException | CorruptIndexException cie) { - // File exists but is busted: we must copy it. This happens when node had crashed, corrupting an un-fsync'd file. On init we try - // to delete such unreferenced files, but virus checker can block that, leaving this bad file. - if (VERBOSE_FILES) { - message("file " + fileName + ": will copy [existing file is corrupt]"); - } - return null; - } + try { + length = in.length(); + header = CodecUtil.readIndexHeader(in); + footer = CodecUtil.readFooter(in); + checksum = CodecUtil.retrieveChecksum(in); + } catch (EOFException | CorruptIndexException cie) { + // File exists but is busted: we must copy it. This happens when node had crashed, + // corrupting an un-fsync'd file. On init we try + // to delete such unreferenced files, but virus checker can block that, leaving this bad + // file. if (VERBOSE_FILES) { - message("file " + fileName + " has length=" + bytesToString(length)); + message("file " + fileName + ": will copy [existing file is corrupt]"); } - } catch (FileNotFoundException | NoSuchFileException e) { + return null; + } + if (VERBOSE_FILES) { + message("file " + fileName + " has length=" + bytesToString(length)); + } + } catch (FileNotFoundException | NoSuchFileException e) { if (VERBOSE_FILES) { message("file " + fileName + ": will copy [file does not exist]"); } return null; } - // NOTE: checksum is redundant w/ footer, but we break it out separately because when the bits cross the wire we need direct access to + // NOTE: checksum is redundant w/ footer, but we break it out separately because when the bits + // cross the wire we need direct access to // checksum when copying to catch bit flips: result = new FileMetaData(header, footer, length, checksum); } diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/NodeCommunicationException.java b/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/NodeCommunicationException.java index cbbb65c2487..c46d2c27a23 100644 --- a/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/NodeCommunicationException.java +++ b/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/NodeCommunicationException.java @@ -18,8 +18,8 @@ package org.apache.lucene.replicator.nrt; /** - * Should be thrown by subclasses of {@link PrimaryNode} and {@link ReplicaNode} if a non-fatal exception - * occurred while communicating between nodes. + * Should be thrown by subclasses of {@link PrimaryNode} and {@link ReplicaNode} if a non-fatal + * exception occurred while communicating between nodes. 
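
A sketch of the intended use; copyOneChunk is hypothetical and the (String, Throwable) constructor is assumed: wrap a low-level I/O failure so callers can treat it as retryable rather than fatal:

    import java.io.IOException;
    import org.apache.lucene.replicator.nrt.NodeCommunicationException;

    static void copyChunkFromPrimary() {
      try {
        copyOneChunk(); // hypothetical: one network read/write against the primary
      } catch (IOException ioe) {
        // Non-fatal by contract: callers catch this and re-sync or retry.
        throw new NodeCommunicationException("failed to copy chunk from primary", ioe);
      }
    }
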
* * @lucene.experimental */ diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/PreCopyMergedSegmentWarmer.java b/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/PreCopyMergedSegmentWarmer.java index 15b9474ad7a..183448e7efe 100644 --- a/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/PreCopyMergedSegmentWarmer.java +++ b/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/PreCopyMergedSegmentWarmer.java @@ -17,22 +17,22 @@ package org.apache.lucene.replicator.nrt; -/** A merged segment warmer that pre-copies the merged segment out to - * replicas before primary cuts over to the merged segment. This - * ensures that NRT reopen time on replicas is only in proportion to - * flushed segment sizes, not merged segments. */ - +/** + * A merged segment warmer that pre-copies the merged segment out to replicas before primary cuts + * over to the merged segment. This ensures that NRT reopen time on replicas is only in proportion + * to flushed segment sizes, not merged segments. + */ import java.io.IOException; import java.util.HashMap; import java.util.Locale; import java.util.Map; - import org.apache.lucene.index.IndexWriter.IndexReaderWarmer; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.SegmentCommitInfo; import org.apache.lucene.index.SegmentReader; -// TODO: or ... replica node can do merging locally? tricky to keep things in sync, when one node merges more slowly than others... +// TODO: or ... replica node can do merging locally? tricky to keep things in sync, when one node +// merges more slowly than others... class PreCopyMergedSegmentWarmer implements IndexReaderWarmer { @@ -46,8 +46,8 @@ class PreCopyMergedSegmentWarmer implements IndexReaderWarmer { public void warm(LeafReader reader) throws IOException { long startNS = System.nanoTime(); final SegmentCommitInfo info = ((SegmentReader) reader).getSegmentInfo(); - //System.out.println("TEST: warm merged segment files " + info); - Map filesMetaData = new HashMap<>(); + // System.out.println("TEST: warm merged segment files " + info); + Map filesMetaData = new HashMap<>(); for (String fileName : info.files()) { FileMetaData metaData = primary.readLocalFileMetaData(fileName); assert metaData != null; @@ -56,7 +56,12 @@ class PreCopyMergedSegmentWarmer implements IndexReaderWarmer { } primary.preCopyMergedSegmentFiles(info, filesMetaData); - primary.message(String.format(Locale.ROOT, "top: done warm merge " + info + ": took %.3f sec, %.1f MB", (System.nanoTime()-startNS)/1000000000., info.sizeInBytes()/1024/1024.)); + primary.message( + String.format( + Locale.ROOT, + "top: done warm merge " + info + ": took %.3f sec, %.1f MB", + (System.nanoTime() - startNS) / 1000000000., + info.sizeInBytes() / 1024 / 1024.)); primary.finishedMergedFiles.addAll(filesMetaData.keySet()); } } diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/PrimaryNode.java b/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/PrimaryNode.java index 2d24f9bc4d4..8c6ac926e45 100644 --- a/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/PrimaryNode.java +++ b/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/PrimaryNode.java @@ -25,7 +25,6 @@ import java.util.HashSet; import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.SegmentCommitInfo; import org.apache.lucene.index.SegmentInfos; @@ -45,10 +44,10 @@ import 
org.apache.lucene.util.ThreadInterruptedException; * replicas since step 2) could otherwise be done concurrently with replicas copying files over. */ -/** +/** * Node that holds an IndexWriter, indexing documents into its local index. * - * @lucene.experimental + * @lucene.experimental */ public abstract class PrimaryNode extends Node { @@ -57,26 +56,38 @@ public abstract class PrimaryNode extends Node { protected final IndexWriter writer; - // IncRef'd state of the last published NRT point; when a replica comes asking, we give it this as the current NRT point: + // IncRef'd state of the last published NRT point; when a replica comes asking, we give it this as + // the current NRT point: private CopyState copyState; protected final long primaryGen; - /** Contains merged segments that have been copied to all running replicas (as of when that merge started warming). */ + /** + * Contains merged segments that have been copied to all running replicas (as of when that merge + * started warming). + */ final Set finishedMergedFiles = Collections.synchronizedSet(new HashSet()); private final AtomicInteger copyingCount = new AtomicInteger(); - public PrimaryNode(IndexWriter writer, int id, long primaryGen, long forcePrimaryVersion, - SearcherFactory searcherFactory, PrintStream printStream) throws IOException { + public PrimaryNode( + IndexWriter writer, + int id, + long primaryGen, + long forcePrimaryVersion, + SearcherFactory searcherFactory, + PrintStream printStream) + throws IOException { super(id, writer.getDirectory(), searcherFactory, printStream); message("top: now init primary"); this.writer = writer; this.primaryGen = primaryGen; try { - // So that when primary node's IndexWriter finishes a merge, but before it cuts over to the merged segment, - // it copies it out to the replicas. This ensures the whole system's NRT latency remains low even when a + // So that when primary node's IndexWriter finishes a merge, but before it cuts over to the + // merged segment, + // it copies it out to the replicas. This ensures the whole system's NRT latency remains low + // even when a // large merge completes: writer.getConfig().setMergedSegmentWarmer(new PreCopyMergedSegmentWarmer(this)); @@ -85,10 +96,10 @@ public abstract class PrimaryNode extends Node { message("commitData: " + writer.getLiveCommitData()); // Record our primaryGen in the userData, and set initial version to 0: - Map commitData = new HashMap<>(); - Iterable> iter = writer.getLiveCommitData(); + Map commitData = new HashMap<>(); + Iterable> iter = writer.getLiveCommitData(); if (iter != null) { - for(Map.Entry ent : iter) { + for (Map.Entry ent : iter) { commitData.put(ent.getKey(), ent.getValue()); } } @@ -101,8 +112,10 @@ public abstract class PrimaryNode extends Node { } writer.setLiveCommitData(commitData.entrySet(), false); - // We forcefully advance the SIS version to an unused future version. This is necessary if the previous primary crashed and we are - // starting up on an "older" index, else versions can be illegally reused but show different results: + // We forcefully advance the SIS version to an unused future version. 
This is necessary if + // the previous primary crashed and we are + // starting up on an "older" index, else versions can be illegally reused but show different + // results: if (forcePrimaryVersion != -1) { message("now forcePrimaryVersion to version=" + forcePrimaryVersion); writer.advanceSegmentInfosVersion(forcePrimaryVersion); @@ -119,23 +132,33 @@ public abstract class PrimaryNode extends Node { } } - /** Returns the current primary generation, which is incremented each time a new primary is started for this index */ + /** + * Returns the current primary generation, which is incremented each time a new primary is started + * for this index + */ public long getPrimaryGen() { return primaryGen; } - // TODO: in the future, we should separate "flush" (returns an incRef'd SegmentInfos) from "refresh" (open new NRT reader from - // IndexWriter) so that the latter can be done concurrently while copying files out to replicas, minimizing the refresh time from the - // replicas. But fixing this is tricky because e.g. IndexWriter may complete a big merge just after returning the incRef'd SegmentInfos - // and before we can open a new reader causing us to close the just-merged readers only to then open them again from the (now stale) - // SegmentInfos. To fix this "properly" I think IW.inc/decRefDeleter must also incread the ReaderPool entry + // TODO: in the future, we should separate "flush" (returns an incRef'd SegmentInfos) from + // "refresh" (open new NRT reader from + // IndexWriter) so that the latter can be done concurrently while copying files out to replicas, + // minimizing the refresh time from the + // replicas. But fixing this is tricky because e.g. IndexWriter may complete a big merge just + // after returning the incRef'd SegmentInfos + // and before we can open a new reader causing us to close the just-merged readers only to then + // open them again from the (now stale) + // SegmentInfos. To fix this "properly" I think IW.inc/decRefDeleter must also incread the + // ReaderPool entry - /** Flush all index operations to disk and opens a new near-real-time reader. - * new NRT point, to make the changes visible to searching. Returns true if there were changes. */ + /** + * Flush all index operations to disk and opens a new near-real-time reader. new NRT point, to + * make the changes visible to searching. Returns true if there were changes. 
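
A concrete primary typically drives this from a periodic loop; a sketch, with the node instance and cadence assumed:

    import java.io.IOException;
    import org.apache.lucene.replicator.nrt.PrimaryNode;

    static void publishLoop(PrimaryNode primary) throws IOException, InterruptedException {
      while (true) {
        if (primary.flushAndRefresh()) {
          // A new NRT point was opened; replicas polling for CopyState will now see it.
          primary.message("published new NRT point");
        }
        Thread.sleep(1000); // refresh cadence is application-specific
      }
    }
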
+ */ public boolean flushAndRefresh() throws IOException { message("top: now flushAndRefresh"); Set completedMergeFiles; - synchronized(finishedMergedFiles) { + synchronized (finishedMergedFiles) { completedMergeFiles = Set.copyOf(finishedMergedFiles); } mgr.maybeRefreshBlocking(); @@ -143,7 +166,13 @@ public abstract class PrimaryNode extends Node { if (result) { message("top: opened NRT reader version=" + curInfos.getVersion()); finishedMergedFiles.removeAll(completedMergeFiles); - message("flushAndRefresh: version=" + curInfos.getVersion() + " completedMergeFiles=" + completedMergeFiles + " finishedMergedFiles=" + finishedMergedFiles); + message( + "flushAndRefresh: version=" + + curInfos.getVersion() + + " completedMergeFiles=" + + completedMergeFiles + + " finishedMergedFiles=" + + finishedMergedFiles); } else { message("top: no changes in flushAndRefresh; still version=" + curInfos.getVersion()); } @@ -155,9 +184,9 @@ public abstract class PrimaryNode extends Node { } public synchronized long getLastCommitVersion() { - Iterable> iter = writer.getLiveCommitData(); + Iterable> iter = writer.getLiveCommitData(); assert iter != null; - for(Map.Entry ent : iter) { + for (Map.Entry ent : iter) { if (ent.getKey().equals(VERSION_KEY)) { return Long.parseLong(ent.getValue()); } @@ -169,9 +198,10 @@ public abstract class PrimaryNode extends Node { @Override public void commit() throws IOException { - Map commitData = new HashMap<>(); + Map commitData = new HashMap<>(); commitData.put(PRIMARY_GEN_KEY, Long.toString(primaryGen)); - // TODO (opto): it's a bit wasteful that we put "last refresh" version here, not the actual version we are committing, because it means + // TODO (opto): it's a bit wasteful that we put "last refresh" version here, not the actual + // version we are committing, because it means // on xlog replay we are replaying more ops than necessary. 
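
The user-data keys written here can be read back the same way getLastCommitVersion() does above; a sketch (the helper name is an assumption):

    import java.util.Map;
    import org.apache.lucene.index.IndexWriter;

    static long readPrimaryGen(IndexWriter writer) {
      Iterable<Map.Entry<String, String>> iter = writer.getLiveCommitData();
      if (iter != null) {
        for (Map.Entry<String, String> ent : iter) {
          if (Node.PRIMARY_GEN_KEY.equals(ent.getKey())) {
            return Long.parseLong(ent.getValue());
          }
        }
      }
      return -1; // no primary gen recorded yet
    }
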
commitData.put(VERSION_KEY, Long.toString(copyState.version)); message("top: commit commitData=" + commitData); @@ -182,7 +212,8 @@ public abstract class PrimaryNode extends Node { /** IncRef the current CopyState and return it */ public synchronized CopyState getCopyState() throws IOException { ensureOpen(false); - //message("top: getCopyState replicaID=" + replicaID + " replicaNodeID=" + replicaNodeID + " version=" + curInfos.getVersion() + " infos=" + curInfos.toString()); + // message("top: getCopyState replicaID=" + replicaID + " replicaNodeID=" + replicaNodeID + " + // version=" + curInfos.getVersion() + " infos=" + curInfos.toString()); assert curInfos == copyState.infos; writer.incRefDeleter(copyState.infos); int count = copyingCount.incrementAndGet(); @@ -192,7 +223,7 @@ public abstract class PrimaryNode extends Node { /** Called once replica is done (or failed) copying an NRT point */ public void releaseCopyState(CopyState copyState) throws IOException { - //message("top: releaseCopyState version=" + copyState.version); + // message("top: releaseCopyState version=" + copyState.version); assert copyState.infos != null; writer.decRefDeleter(copyState.infos); int count = copyingCount.decrementAndGet(); @@ -229,7 +260,11 @@ public abstract class PrimaryNode extends Node { } if (curInfos != null && infos.getVersion() == curInfos.getVersion()) { // no change - message("top: skip switch to infos: version=" + infos.getVersion() + " is unchanged: " + infos.toString()); + message( + "top: skip switch to infos: version=" + + infos.getVersion() + + " is unchanged: " + + infos.toString()); return false; } @@ -244,17 +279,18 @@ public abstract class PrimaryNode extends Node { // Serialize the SegmentInfos. ByteBuffersDataOutput buffer = new ByteBuffersDataOutput(); - try (ByteBuffersIndexOutput tmpIndexOutput = new ByteBuffersIndexOutput(buffer, "temporary", "temporary")) { + try (ByteBuffersIndexOutput tmpIndexOutput = + new ByteBuffersIndexOutput(buffer, "temporary", "temporary")) { infos.write(tmpIndexOutput); } byte[] infosBytes = buffer.toArrayCopy(); - Map filesMetaData = new HashMap(); - for(SegmentCommitInfo info : infos) { - for(String fileName : info.files()) { + Map filesMetaData = new HashMap(); + for (SegmentCommitInfo info : infos) { + for (String fileName : info.files()) { FileMetaData metaData = readLocalFileMetaData(fileName); // NOTE: we hold a refCount on this infos, so this file better exist: - assert metaData != null: "file \"" + fileName + "\" is missing metadata"; + assert metaData != null : "file \"" + fileName + "\" is missing metadata"; assert filesMetaData.containsKey(fileName) == false; filesMetaData.put(fileName, metaData); } @@ -262,10 +298,22 @@ public abstract class PrimaryNode extends Node { lastFileMetaData = Collections.unmodifiableMap(filesMetaData); - message("top: set copyState primaryGen=" + primaryGen + " version=" + infos.getVersion() + " files=" + filesMetaData.keySet()); - copyState = new CopyState(lastFileMetaData, - infos.getVersion(), infos.getGeneration(), infosBytes, completedMergeFiles, - primaryGen, curInfos); + message( + "top: set copyState primaryGen=" + + primaryGen + + " version=" + + infos.getVersion() + + " files=" + + filesMetaData.keySet()); + copyState = + new CopyState( + lastFileMetaData, + infos.getVersion(), + infos.getGeneration(), + infosBytes, + completedMergeFiles, + primaryGen, + curInfos); return true; } @@ -309,5 +357,6 @@ public abstract class PrimaryNode extends Node { } /** Called when a merge has finished, but before IW 
switches to the merged segment */ - protected abstract void preCopyMergedSegmentFiles(SegmentCommitInfo info, Map files) throws IOException; + protected abstract void preCopyMergedSegmentFiles( + SegmentCommitInfo info, Map files) throws IOException; } diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/ReplicaFileDeleter.java b/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/ReplicaFileDeleter.java index 86dbc52c716..120dad64e96 100644 --- a/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/ReplicaFileDeleter.java +++ b/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/ReplicaFileDeleter.java @@ -25,7 +25,6 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; - import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; @@ -33,7 +32,7 @@ import org.apache.lucene.store.IOContext; // TODO: can we factor/share with IFD: this is doing exactly the same thing, but on the replica side class ReplicaFileDeleter { - private final Map refCounts = new HashMap(); + private final Map refCounts = new HashMap(); private final Directory dir; private final Node node; @@ -42,10 +41,10 @@ class ReplicaFileDeleter { this.node = node; } - /** Used only by asserts: returns true if the file exists - * (can be opened), false if it cannot be opened, and - * (unlike Java's File.exists) throws IOException if - * there's some unexpected error. */ + /** + * Used only by asserts: returns true if the file exists (can be opened), false if it cannot be + * opened, and (unlike Java's File.exists) throws IOException if there's some unexpected error. + */ private static boolean slowFileExists(Directory dir, String fileName) throws IOException { try { dir.openInput(fileName, IOContext.DEFAULT).close(); @@ -56,9 +55,9 @@ class ReplicaFileDeleter { } public synchronized void incRef(Collection fileNames) throws IOException { - for(String fileName : fileNames) { + for (String fileName : fileNames) { - assert slowFileExists(dir, fileName): "file " + fileName + " does not exist!"; + assert slowFileExists(dir, fileName) : "file " + fileName + " does not exist!"; Integer curCount = refCounts.get(fileName); if (curCount == null) { @@ -71,9 +70,9 @@ class ReplicaFileDeleter { public synchronized void decRef(Collection fileNames) throws IOException { Set toDelete = new HashSet<>(); - for(String fileName : fileNames) { + for (String fileName : fileNames) { Integer curCount = refCounts.get(fileName); - assert curCount != null: "fileName=" + fileName; + assert curCount != null : "fileName=" + fileName; assert curCount.intValue() > 0; if (curCount.intValue() == 1) { refCounts.remove(fileName); @@ -85,9 +84,11 @@ class ReplicaFileDeleter { delete(toDelete); - // TODO: this local IR could incRef files here, like we do now with IW's NRT readers ... then we can assert this again: + // TODO: this local IR could incRef files here, like we do now with IW's NRT readers ... 
then we + // can assert this again: - // we can't assert this, e.g a search can be running when we switch to a new NRT point, holding a previous IndexReader still open for + // we can't assert this, e.g a search can be running when we switch to a new NRT point, holding + // a previous IndexReader still open for // a bit: /* // We should never attempt deletion of a still-open file: @@ -104,7 +105,8 @@ class ReplicaFileDeleter { node.message("now delete " + toDelete.size() + " files: " + toDelete); } - // First pass: delete any segments_N files. We do these first to be certain stale commit points are removed + // First pass: delete any segments_N files. We do these first to be certain stale commit points + // are removed // before we remove any files they reference, in case we crash right now: for (String fileName : toDelete) { assert refCounts.containsKey(fileName) == false; @@ -115,13 +117,12 @@ class ReplicaFileDeleter { // Only delete other files if we were able to remove the segments_N files; this way we never // leave a corrupt commit in the index even in the presense of virus checkers: - for(String fileName : toDelete) { + for (String fileName : toDelete) { assert refCounts.containsKey(fileName) == false; if (fileName.startsWith(IndexFileNames.SEGMENTS) == false) { delete(fileName); } } - } private synchronized void delete(String fileName) throws IOException { @@ -153,10 +154,10 @@ class ReplicaFileDeleter { public synchronized void deleteUnknownFiles(String segmentsFileName) throws IOException { Set toDelete = new HashSet<>(); - for(String fileName : dir.listAll()) { - if (refCounts.containsKey(fileName) == false && - fileName.equals("write.lock") == false && - fileName.equals(segmentsFileName) == false) { + for (String fileName : dir.listAll()) { + if (refCounts.containsKey(fileName) == false + && fileName.equals("write.lock") == false + && fileName.equals(segmentsFileName) == false) { node.message("will delete unknown file \"" + fileName + "\""); toDelete.add(fileName); } diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/ReplicaNode.java b/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/ReplicaNode.java index 174a11f82c6..29088a867e9 100644 --- a/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/ReplicaNode.java +++ b/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/ReplicaNode.java @@ -33,7 +33,6 @@ import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicBoolean; - import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.SegmentInfos; @@ -53,10 +52,12 @@ import org.apache.lucene.store.Lock; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.Version; -/** Replica node, that pulls index changes from the primary node by copying newly flushed or merged index files. - * - * @lucene.experimental */ - +/** + * Replica node, that pulls index changes from the primary node by copying newly flushed or merged + * index files. 
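
ReplicaFileDeleter above boils down to per-file reference counts; a condensed standalone sketch of that scheme (names assumed):

    import java.util.HashMap;
    import java.util.Map;

    static final Map<String, Integer> refCounts = new HashMap<>();

    static synchronized void incRef(String fileName) {
      refCounts.merge(fileName, 1, Integer::sum);
    }

    // Returns true when the last reference is dropped and the file may be deleted:
    static synchronized boolean decRef(String fileName) {
      int cur = refCounts.get(fileName); // caller must have incRef'd first
      if (cur == 1) {
        refCounts.remove(fileName);
        return true;
      }
      refCounts.put(fileName, cur - 1);
      return false;
    }
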
+ * + * @lucene.experimental + */ public abstract class ReplicaNode extends Node { ReplicaFileDeleter deleter; @@ -82,11 +83,14 @@ public abstract class ReplicaNode extends Node { /** Primary gen last time we successfully replicated: */ protected long lastPrimaryGen; - public ReplicaNode(int id, Directory dir, SearcherFactory searcherFactory, PrintStream printStream) throws IOException { + public ReplicaNode( + int id, Directory dir, SearcherFactory searcherFactory, PrintStream printStream) + throws IOException { super(id, dir, searcherFactory, printStream); if (dir.getPendingDeletions().isEmpty() == false) { - throw new IllegalArgumentException("Directory " + dir + " still has pending deleted files; cannot initialize IndexWriter"); + throw new IllegalArgumentException( + "Directory " + dir + " still has pending deleted files; cannot initialize IndexWriter"); } boolean success = false; @@ -94,9 +98,10 @@ public abstract class ReplicaNode extends Node { try { message("top: init replica dir=" + dir); - // Obtain a write lock on this index since we "act like" an IndexWriter, to prevent any other IndexWriter or ReplicaNode from using it: + // Obtain a write lock on this index since we "act like" an IndexWriter, to prevent any other + // IndexWriter or ReplicaNode from using it: writeFileLock = dir.obtainLock(IndexWriter.WRITE_LOCK_NAME); - + state = "init"; deleter = new ReplicaFileDeleter(this, dir); success = true; @@ -111,7 +116,10 @@ public abstract class ReplicaNode extends Node { } } - /** Start up this replica, which possibly requires heavy copying of files from the primary node, if we were down for a long time */ + /** + * Start up this replica, which possibly requires heavy copying of files from the primary node, if + * we were down for a long time + */ protected synchronized void start(long curPrimaryGen) throws IOException { if (state.equals("init") == false) { @@ -124,13 +132,18 @@ public abstract class ReplicaNode extends Node { // Figure out what state our local index is in now: String segmentsFileName = SegmentInfos.getLastCommitSegmentsFileName(dir); - // Also look for any pending_segments_N, in case we crashed mid-commit. We must "inflate" our infos gen to at least this, since - // otherwise we may wind up re-using the pending_segments_N file name on commit, and then our deleter can get angry because it still + // Also look for any pending_segments_N, in case we crashed mid-commit. 
We must "inflate" our + // infos gen to at least this, since + // otherwise we may wind up re-using the pending_segments_N file name on commit, and then our + // deleter can get angry because it still // wants to delete this file: long maxPendingGen = -1; - for(String fileName : dir.listAll()) { + for (String fileName : dir.listAll()) { if (fileName.startsWith(IndexFileNames.PENDING_SEGMENTS)) { - long gen = Long.parseLong(fileName.substring(IndexFileNames.PENDING_SEGMENTS.length()+1), Character.MAX_RADIX); + long gen = + Long.parseLong( + fileName.substring(IndexFileNames.PENDING_SEGMENTS.length() + 1), + Character.MAX_RADIX); if (gen > maxPendingGen) { maxPendingGen = gen; } @@ -162,7 +175,8 @@ public abstract class ReplicaNode extends Node { message("top: delete unknown files on init: all files=" + Arrays.toString(dir.listAll())); deleter.deleteUnknownFiles(segmentsFileName); - message("top: done delete unknown files on init: all files=" + Arrays.toString(dir.listAll())); + message( + "top: done delete unknown files on init: all files=" + Arrays.toString(dir.listAll())); String s = infos.getUserData().get(PRIMARY_GEN_KEY); long myPrimaryGen; @@ -180,21 +194,28 @@ public abstract class ReplicaNode extends Node { assert myPrimaryGen < curPrimaryGen; - // Primary changed while we were down. In this case, we must sync from primary before opening a reader, because it's possible current - // files we have will need to be overwritten with different ones (if index rolled back and "forked"), and we can't overwrite open + // Primary changed while we were down. In this case, we must sync from primary before + // opening a reader, because it's possible current + // files we have will need to be overwritten with different ones (if index rolled back and + // "forked"), and we can't overwrite open // files on Windows: final long initSyncStartNS = System.nanoTime(); - message("top: init: primary changed while we were down myPrimaryGen=" + myPrimaryGen + - " vs curPrimaryGen=" + curPrimaryGen + - "; sync now before mgr init"); + message( + "top: init: primary changed while we were down myPrimaryGen=" + + myPrimaryGen + + " vs curPrimaryGen=" + + curPrimaryGen + + "; sync now before mgr init"); // Try until we succeed in copying over the latest NRT point: CopyJob job = null; - // We may need to overwrite files referenced by our latest commit, either right now on initial sync, or on a later sync. To make - // sure the index is never even in an "apparently" corrupt state (where an old segments_N references invalid files) we forcefully + // We may need to overwrite files referenced by our latest commit, either right now on + // initial sync, or on a later sync. To make + // sure the index is never even in an "apparently" corrupt state (where an old segments_N + // references invalid files) we forcefully // remove the commit now, and refuse to start the replica if this delete fails: message("top: now delete starting commit point " + segmentsFileName); @@ -203,9 +224,13 @@ public abstract class ReplicaNode extends Node { deleter.decRef(Collections.singleton(segmentsFileName)); if (dir.getPendingDeletions().isEmpty() == false) { - // If e.g. virus checker blocks us from deleting, we absolutely cannot start this node else there is a definite window during + // If e.g. 
virus checker blocks us from deleting, we absolutely cannot start this node + // else there is a definite window during // which if we crash, we cause corruption: - throw new RuntimeException("replica cannot start: existing segments file=" + segmentsFileName + " must be removed in order to start, but the file delete failed"); + throw new RuntimeException( + "replica cannot start: existing segments file=" + + segmentsFileName + + " must be removed in order to start, but the file delete failed"); } // So we don't later try to decRef it (illegally) again: @@ -213,17 +238,21 @@ assert didRemove; while (true) { - job = newCopyJob("sync on startup replica=" + name() + " myVersion=" + infos.getVersion(), - null, - null, - true, - null); + job = + newCopyJob( + "sync on startup replica=" + name() + " myVersion=" + infos.getVersion(), + null, + null, + true, + null); job.start(); message("top: init: sync sis.version=" + job.getCopyState().version); - // Force this copy job to finish while we wait, now. Note that this can be very time consuming! - // NOTE: newNRTPoint detects we are still in init (mgr is null) and does not cancel our copy if a flush happens + // Force this copy job to finish while we wait, now. Note that this can be very time + // consuming! + // NOTE: newNRTPoint detects we are still in init (mgr is null) and does not cancel our + // copy if a flush happens try { job.runBlocking(); job.finish(); @@ -244,9 +273,9 @@ lastPrimaryGen = job.getCopyState().primaryGen; byte[] infosBytes = job.getCopyState().infosBytes; - SegmentInfos syncInfos = SegmentInfos.readCommit(dir, - toIndexInput(job.getCopyState().infosBytes), - job.getCopyState().gen); + SegmentInfos syncInfos = + SegmentInfos.readCommit( + dir, toIndexInput(job.getCopyState().infosBytes), job.getCopyState().gen); // Must always commit to a larger generation than what's currently in the index: syncInfos.updateGeneration(infos); @@ -264,11 +293,14 @@ message("top: init: set lastNRTFiles=" + lastNRTFiles); lastFileMetaData = job.getCopyState().files; - message(String.format(Locale.ROOT, "top: %d: start: done sync: took %.3fs for %s, opened NRT reader version=%d", - id, - (System.nanoTime()-initSyncStartNS)/1000000000.0, - bytesToString(job.getTotalBytesCopied()), - job.getCopyState().version)); + message( + String.format( + Locale.ROOT, + "top: %d: start: done sync: took %.3fs for %s, opened NRT reader version=%d", + id, + (System.nanoTime() - initSyncStartNS) / 1000000000.0, + bytesToString(job.getTotalBytesCopied()), + job.getCopyState().version)); doCommit = true; } else { @@ -278,11 +310,13 @@ } if (infos.getGeneration() < maxPendingGen) { - message("top: move infos generation from " + infos.getGeneration() + " to " + maxPendingGen); + message( + "top: move infos generation from " + infos.getGeneration() + " to " + maxPendingGen); infos.setNextWriteGeneration(maxPendingGen); } - // Notify primary we started, to give it a chance to send any warming merges our way to reduce NRT latency of first sync: + // Notify primary we started, to give it a chance to send any warming merges our way to reduce + // NRT latency of first sync: sendNewReplica(); // Finally, we are open for business, since our index now "agrees" with the primary: @@ -290,7 +324,8 @@ // Must commit after init mgr: if 
(doCommit) { - // Very important to commit what we just sync'd over, because we removed the pre-existing commit point above if we had to + // Very important to commit what we just sync'd over, because we removed the pre-existing + // commit point above if we had to // overwrite any files it referenced: commit(); } @@ -307,14 +342,13 @@ public abstract class ReplicaNode extends Node { throw IOUtils.rethrowAlways(t); } } - + final Object commitLock = new Object(); @Override public void commit() throws IOException { - synchronized(commitLock) { - + synchronized (commitLock) { SegmentInfos infos; Collection indexFiles; @@ -324,12 +358,18 @@ public abstract class ReplicaNode extends Node { deleter.incRef(indexFiles); } - message("top: commit primaryGen=" + lastPrimaryGen + " infos=" + infos.toString() + " files=" + indexFiles); + message( + "top: commit primaryGen=" + + lastPrimaryGen + + " infos=" + + infos.toString() + + " files=" + + indexFiles); // fsync all index files we are now referencing dir.sync(indexFiles); - Map commitData = new HashMap<>(); + Map commitData = new HashMap<>(); commitData.put(PRIMARY_GEN_KEY, Long.toString(lastPrimaryGen)); commitData.put(VERSION_KEY, Long.toString(getCurrentSearchingVersion())); infos.setUserData(commitData, false); @@ -337,13 +377,22 @@ public abstract class ReplicaNode extends Node { // write and fsync a new segments_N infos.commit(dir); - // Notify current infos (which may have changed while we were doing dir.sync above) what generation we are up to; this way future + // Notify current infos (which may have changed while we were doing dir.sync above) what + // generation we are up to; this way future // commits are guaranteed to go to the next (unwritten) generations: if (mgr != null) { ((SegmentInfosSearcherManager) mgr).getCurrentInfos().updateGeneration(infos); } String segmentsFileName = infos.getSegmentsFileName(); - message("top: commit wrote segments file " + segmentsFileName + " version=" + infos.getVersion() + " sis=" + infos.toString() + " commitData=" + commitData); + message( + "top: commit wrote segments file " + + segmentsFileName + + " version=" + + infos.getVersion() + + " sis=" + + infos.toString() + + " commitData=" + + commitData); deleter.incRef(Collections.singletonList(segmentsFileName)); message("top: commit decRef lastCommitFiles=" + lastCommitFiles); deleter.decRef(lastCommitFiles); @@ -356,13 +405,18 @@ public abstract class ReplicaNode extends Node { protected void finishNRTCopy(CopyJob job, long startNS) throws IOException { CopyState copyState = job.getCopyState(); - message("top: finishNRTCopy: version=" + copyState.version + (job.getFailed() ? " FAILED" : "") + " job=" + job); + message( + "top: finishNRTCopy: version=" + + copyState.version + + (job.getFailed() ? 
" FAILED" : "") + + " job=" + + job); - // NOTE: if primary crashed while we were still copying then the job will hit an exc trying to read bytes for the files from the primary node, + // NOTE: if primary crashed while we were still copying then the job will hit an exc trying to + // read bytes for the files from the primary node, // and the job will be marked as failed here: synchronized (this) { - if ("syncing".equals(state)) { state = "idle"; } @@ -384,9 +438,8 @@ public abstract class ReplicaNode extends Node { // Turn byte[] back to SegmentInfos: byte[] infosBytes = copyState.infosBytes; - SegmentInfos infos = SegmentInfos.readCommit(dir, - toIndexInput(copyState.infosBytes), - copyState.gen); + SegmentInfos infos = + SegmentInfos.readCommit(dir, toIndexInput(copyState.infosBytes), copyState.gen); assert infos.getVersion() == copyState.version; message(" version=" + infos.getVersion() + " segments=" + infos.toString()); @@ -396,12 +449,14 @@ public abstract class ReplicaNode extends Node { ((SegmentInfosSearcherManager) mgr).setCurrentInfos(infos); } - // Must first incRef new NRT files, then decRef old ones, to make sure we don't remove an NRT file that's in common to both: + // Must first incRef new NRT files, then decRef old ones, to make sure we don't remove an NRT + // file that's in common to both: Collection newFiles = copyState.files.keySet(); message("top: incRef newNRTFiles=" + newFiles); deleter.incRef(newFiles); - // If any of our new files were previously copied merges, we clear them now, so we don't try to later delete a non-existent file: + // If any of our new files were previously copied merges, we clear them now, so we don't try + // to later delete a non-existent file: pendingMergeFiles.removeAll(newFiles); message("top: after remove from pending merges pendingMergeFiles=" + pendingMergeFiles); @@ -411,12 +466,14 @@ public abstract class ReplicaNode extends Node { lastNRTFiles.addAll(newFiles); message("top: set lastNRTFiles=" + lastNRTFiles); - // At this point we can remove any completed merge segment files that we still do not reference. This can happen when a merge - // finishes, copies its files out to us, but is then merged away (or dropped due to 100% deletions) before we ever cutover to it + // At this point we can remove any completed merge segment files that we still do not + // reference. 
This can happen when a merge + // finishes, copies its files out to us, but is then merged away (or dropped due to 100% + // deletions) before we ever cutover to it // in an NRT point: if (copyState.completedMergeFiles.isEmpty() == false) { message("now remove-if-not-ref'd completed merge files: " + copyState.completedMergeFiles); - for(String fileName : copyState.completedMergeFiles) { + for (String fileName : copyState.completedMergeFiles) { if (pendingMergeFiles.contains(fileName)) { pendingMergeFiles.remove(fileName); deleter.deleteIfNoRef(fileName); @@ -435,45 +492,64 @@ public abstract class ReplicaNode extends Node { mgr.release(s); } - message(String.format(Locale.ROOT, "top: done sync: took %.3fs for %s, opened NRT reader version=%d markerCount=%d", - (System.nanoTime()-startNS)/1000000000.0, - bytesToString(job.getTotalBytesCopied()), - copyState.version, - markerCount)); + message( + String.format( + Locale.ROOT, + "top: done sync: took %.3fs for %s, opened NRT reader version=%d markerCount=%d", + (System.nanoTime() - startNS) / 1000000000.0, + bytesToString(job.getTotalBytesCopied()), + copyState.version, + markerCount)); } private ChecksumIndexInput toIndexInput(byte[] input) { return new BufferedChecksumIndexInput( new ByteBuffersIndexInput( - new ByteBuffersDataInput( - Arrays.asList(ByteBuffer.wrap(input))), "SegmentInfos")); + new ByteBuffersDataInput(Arrays.asList(ByteBuffer.wrap(input))), "SegmentInfos")); } - /** Start a background copying job, to copy the specified files from the current primary node. If files is null then the latest copy - * state should be copied. If prevJob is not null, then the new copy job is replacing it and should 1) cancel the previous one, and - * 2) optionally salvage e.g. partially copied and, shared with the new copy job, files. */ - protected abstract CopyJob newCopyJob(String reason, Map files, Map prevFiles, - boolean highPriority, CopyJob.OnceDone onceDone) throws IOException; + /** + * Start a background copying job, to copy the specified files from the current primary node. If + * files is null then the latest copy state should be copied. If prevJob is not null, then the new + * copy job is replacing it and should 1) cancel the previous one, and 2) optionally salvage e.g. + * partially copied and, shared with the new copy job, files. + */ + protected abstract CopyJob newCopyJob( + String reason, + Map files, + Map prevFiles, + boolean highPriority, + CopyJob.OnceDone onceDone) + throws IOException; /** Runs this job async'd */ protected abstract void launch(CopyJob job); - /** Tell primary we (replica) just started, so primary can tell us to warm any already warming merges. This lets us keep low nrt refresh - * time for the first nrt sync after we started. */ + /** + * Tell primary we (replica) just started, so primary can tell us to warm any already warming + * merges. This lets us keep low nrt refresh time for the first nrt sync after we started. + */ protected abstract void sendNewReplica() throws IOException; - /** Call this to notify this replica node that a new NRT infos is available on the primary. - * We kick off a job (runs in the background) to copy files across, and open a new reader once that's done. */ + /** + * Call this to notify this replica node that a new NRT infos is available on the primary. We kick + * off a job (runs in the background) to copy files across, and open a new reader once that's + * done. 
+ */ public synchronized CopyJob newNRTPoint(long newPrimaryGen, long version) throws IOException { if (isClosed()) { throw new AlreadyClosedException("this replica is closed: state=" + state); } - // Cutover (possibly) to new primary first, so we discard any pre-copied merged segments up front, before checking for which files need - // copying. While it's possible the pre-copied merged segments could still be useful to us, in the case that the new primary is either - // the same primary (just e.g. rebooted), or a promoted replica that had a newer NRT point than we did that included the pre-copied - // merged segments, it's still a bit risky to rely solely on checksum/file length to catch the difference, so we defensively discard + // Cutover (possibly) to new primary first, so we discard any pre-copied merged segments up + // front, before checking for which files need + // copying. While it's possible the pre-copied merged segments could still be useful to us, in + // the case that the new primary is either + // the same primary (just e.g. rebooted), or a promoted replica that had a newer NRT point than + // we did that included the pre-copied + // merged segments, it's still a bit risky to rely solely on checksum/file length to catch the + // difference, so we defensively discard // here and re-copy in that case: maybeNewPrimary(newPrimaryGen); @@ -495,8 +571,14 @@ public abstract class ReplicaNode extends Node { } if (version < curVersion) { - // This can happen, if two syncs happen close together, and due to thread scheduling, the incoming older version runs after the newer version - message("top: new NRT point (version=" + version + ") is older than current (version=" + curVersion + "); skipping"); + // This can happen, if two syncs happen close together, and due to thread scheduling, the + // incoming older version runs after the newer version + message( + "top: new NRT point (version=" + + version + + ") is older than current (version=" + + curVersion + + "); skipping"); return null; } @@ -505,24 +587,26 @@ public abstract class ReplicaNode extends Node { message("top: newNRTPoint"); CopyJob job = null; try { - job = newCopyJob("NRT point sync version=" + version, - null, - lastFileMetaData, - true, - new CopyJob.OnceDone() { - @Override - public void run(CopyJob job) { - try { - finishNRTCopy(job, startNS); - } catch (IOException ioe) { - throw new RuntimeException(ioe); - } - } - }); + job = + newCopyJob( + "NRT point sync version=" + version, + null, + lastFileMetaData, + true, + new CopyJob.OnceDone() { + @Override + public void run(CopyJob job) { + try { + finishNRTCopy(job, startNS); + } catch (IOException ioe) { + throw new RuntimeException(ioe); + } + } + }); } catch (NodeCommunicationException nce) { // E.g. 
primary could crash/close when we are asking it for the copy state: message("top: ignoring communication exception creating CopyJob: " + nce); - //nce.printStackTrace(printStream); + // nce.printStackTrace(printStream); if (state.equals("syncing")) { state = "idle"; } @@ -542,13 +626,20 @@ public abstract class ReplicaNode extends Node { curNRTCopy = job; - for(String fileName : curNRTCopy.getFileNamesToCopy()) { - assert lastCommitFiles.contains(fileName) == false: "fileName=" + fileName + " is in lastCommitFiles and is being copied?"; + for (String fileName : curNRTCopy.getFileNamesToCopy()) { + assert lastCommitFiles.contains(fileName) == false + : "fileName=" + fileName + " is in lastCommitFiles and is being copied?"; synchronized (mergeCopyJobs) { for (CopyJob mergeJob : mergeCopyJobs) { if (mergeJob.getFileNames().contains(fileName)) { - // TODO: we could maybe transferAndCancel here? except CopyJob can't transferAndCancel more than one currently - message("top: now cancel merge copy job=" + mergeJob + ": file " + fileName + " is now being copied via NRT point"); + // TODO: we could maybe transferAndCancel here? except CopyJob can't transferAndCancel + // more than one currently + message( + "top: now cancel merge copy job=" + + mergeJob + + ": file " + + fileName + + " is now being copied via NRT point"); mergeJob.cancel("newNRTPoint is copying over the same file", null); } } @@ -567,7 +658,8 @@ public abstract class ReplicaNode extends Node { return null; } - // Runs in the background jobs thread, maybe slowly/throttled, and calls finishSync once it's done: + // Runs in the background jobs thread, maybe slowly/throttled, and calls finishSync once it's + // done: launch(curNRTCopy); return curNRTCopy; } @@ -578,7 +670,10 @@ public abstract class ReplicaNode extends Node { @Override public boolean isClosed() { - return "closed".equals(state) || "closing".equals(state) || "crashing".equals(state) || "crashed".equals(state); + return "closed".equals(state) + || "closing".equals(state) + || "crashing".equals(state) + || "crashed".equals(state); } @Override @@ -604,11 +699,11 @@ public abstract class ReplicaNode extends Node { lastCommitFiles.clear(); message("top: delete if no ref pendingMergeFiles=" + pendingMergeFiles); - for(String fileName : pendingMergeFiles) { + for (String fileName : pendingMergeFiles) { deleter.deleteIfNoRef(fileName); } pendingMergeFiles.clear(); - + message("top: close dir"); IOUtils.close(writeFileLock, dir); } @@ -619,14 +714,21 @@ public abstract class ReplicaNode extends Node { /** Called when the primary changed */ protected synchronized void maybeNewPrimary(long newPrimaryGen) throws IOException { if (newPrimaryGen != lastPrimaryGen) { - message("top: now change lastPrimaryGen from " + lastPrimaryGen + " to " + newPrimaryGen + " pendingMergeFiles=" + pendingMergeFiles); + message( + "top: now change lastPrimaryGen from " + + lastPrimaryGen + + " to " + + newPrimaryGen + + " pendingMergeFiles=" + + pendingMergeFiles); message("top: delete if no ref pendingMergeFiles=" + pendingMergeFiles); - for(String fileName : pendingMergeFiles) { + for (String fileName : pendingMergeFiles) { deleter.deleteIfNoRef(fileName); } - assert newPrimaryGen > lastPrimaryGen: "newPrimaryGen=" + newPrimaryGen + " vs lastPrimaryGen=" + lastPrimaryGen; + assert newPrimaryGen > lastPrimaryGen + : "newPrimaryGen=" + newPrimaryGen + " vs lastPrimaryGen=" + lastPrimaryGen; lastPrimaryGen = newPrimaryGen; pendingMergeFiles.clear(); } else { @@ -634,7 +736,9 @@ public abstract class 
ReplicaNode extends Node { } } - protected synchronized CopyJob launchPreCopyMerge(AtomicBoolean finished, long newPrimaryGen, Map files) throws IOException { + protected synchronized CopyJob launchPreCopyMerge( + AtomicBoolean finished, long newPrimaryGen, Map files) + throws IOException { CopyJob job; @@ -643,60 +747,78 @@ public abstract class ReplicaNode extends Node { Set fileNames = files.keySet(); message("now pre-copy warm merge files=" + fileNames + " primaryGen=" + newPrimaryGen); - for(String fileName : fileNames) { - assert pendingMergeFiles.contains(fileName) == false: "file \"" + fileName + "\" is already being warmed!"; - assert lastNRTFiles.contains(fileName) == false: "file \"" + fileName + "\" is already NRT visible!"; + for (String fileName : fileNames) { + assert pendingMergeFiles.contains(fileName) == false + : "file \"" + fileName + "\" is already being warmed!"; + assert lastNRTFiles.contains(fileName) == false + : "file \"" + fileName + "\" is already NRT visible!"; } - job = newCopyJob("warm merge on " + name() + " filesNames=" + fileNames, - files, null, false, - new CopyJob.OnceDone() { + job = + newCopyJob( + "warm merge on " + name() + " filesNames=" + fileNames, + files, + null, + false, + new CopyJob.OnceDone() { - @Override - public void run(CopyJob job) throws IOException { - // Signals that this replica has finished - mergeCopyJobs.remove(job); - message("done warming merge " + fileNames + " failed?=" + job.getFailed()); - synchronized(this) { - if (job.getFailed() == false) { - if (lastPrimaryGen != primaryGenStart) { - message("merge pre copy finished but primary has changed; cancelling job files=" + fileNames); - job.cancel("primary changed during merge copy", null); - } else { - boolean abort = false; - for (String fileName : fileNames) { - if (lastNRTFiles.contains(fileName)) { - message("abort merge finish: file " + fileName + " is referenced by last NRT point"); - abort = true; - } - if (lastCommitFiles.contains(fileName)) { - message("abort merge finish: file " + fileName + " is referenced by last commit point"); - abort = true; - } - } - if (abort) { - // Even though in newNRTPoint we have similar logic, which cancels any merge copy jobs if an NRT point - // shows up referencing the files we are warming (because primary got impatient and gave up on us), we also - // need it here in case replica is way far behind and fails to even receive the merge pre-copy request - // until after the newNRTPoint referenced those files: - job.cancel("merged segment was separately copied via NRT point", null); - } else { - job.finish(); - message("merge pre copy finished files=" + fileNames); - for(String fileName : fileNames) { - assert pendingMergeFiles.contains(fileName) == false : "file \"" + fileName + "\" is already in pendingMergeFiles"; - message("add file " + fileName + " to pendingMergeFiles"); - pendingMergeFiles.add(fileName); - } - } - } - } else { - message("merge copy finished with failure"); - } - } - finished.set(true); - } - }); + @Override + public void run(CopyJob job) throws IOException { + // Signals that this replica has finished + mergeCopyJobs.remove(job); + message("done warming merge " + fileNames + " failed?=" + job.getFailed()); + synchronized (this) { + if (job.getFailed() == false) { + if (lastPrimaryGen != primaryGenStart) { + message( + "merge pre copy finished but primary has changed; cancelling job files=" + + fileNames); + job.cancel("primary changed during merge copy", null); + } else { + boolean abort = false; + for (String 
fileName : fileNames) { + if (lastNRTFiles.contains(fileName)) { + message( + "abort merge finish: file " + + fileName + + " is referenced by last NRT point"); + abort = true; + } + if (lastCommitFiles.contains(fileName)) { + message( + "abort merge finish: file " + + fileName + + " is referenced by last commit point"); + abort = true; + } + } + if (abort) { + // Even though in newNRTPoint we have similar logic, which cancels any merge + // copy jobs if an NRT point + // shows up referencing the files we are warming (because primary got + // impatient and gave up on us), we also + // need it here in case replica is way far behind and fails to even receive + // the merge pre-copy request + // until after the newNRTPoint referenced those files: + job.cancel("merged segment was separately copied via NRT point", null); + } else { + job.finish(); + message("merge pre copy finished files=" + fileNames); + for (String fileName : fileNames) { + assert pendingMergeFiles.contains(fileName) == false + : "file \"" + fileName + "\" is already in pendingMergeFiles"; + message("add file " + fileName + " to pendingMergeFiles"); + pendingMergeFiles.add(fileName); + } + } + } + } else { + message("merge copy finished with failure"); + } + } + finished.set(true); + } + }); job.start(); @@ -709,16 +831,20 @@ public abstract class ReplicaNode extends Node { return job; } - public IndexOutput createTempOutput(String prefix, String suffix, IOContext ioContext) throws IOException { + public IndexOutput createTempOutput(String prefix, String suffix, IOContext ioContext) + throws IOException { return dir.createTempOutput(prefix, suffix, IOContext.DEFAULT); } - /** Compares incoming per-file identity (id, checksum, header, footer) versus what we have locally and returns the subset of the incoming - * files that need copying */ - public List> getFilesToCopy(Map files) throws IOException { + /** + * Compares incoming per-file identity (id, checksum, header, footer) versus what we have locally + * and returns the subset of the incoming files that need copying + */ + public List> getFilesToCopy(Map files) + throws IOException { - List> toCopy = new ArrayList<>(); - for (Map.Entry ent : files.entrySet()) { + List> toCopy = new ArrayList<>(); + for (Map.Entry ent : files.entrySet()) { String fileName = ent.getKey(); FileMetaData fileMetaData = ent.getValue(); if (fileIsIdentical(fileName, fileMetaData) == false) { @@ -729,9 +855,12 @@ public abstract class ReplicaNode extends Node { return toCopy; } - /** Carefully determine if the file on the primary, identified by its {@code String fileName} along with the {@link FileMetaData} - * "summarizing" its contents, is precisely the same file that we have locally. If the file does not exist locally, or if its header - * (includes the segment id), length, footer (including checksum) differ, then this returns false, else true. */ + /** + * Carefully determine if the file on the primary, identified by its {@code String fileName} along + * with the {@link FileMetaData} "summarizing" its contents, is precisely the same file that we + * have locally. If the file does not exist locally, or if its header (includes the segment id), + * length, footer (including checksum) differ, then this returns false, else true. 
+ */ private boolean fileIsIdentical(String fileName, FileMetaData srcMetaData) throws IOException { FileMetaData destMetaData = readLocalFileMetaData(fileName); @@ -740,8 +869,8 @@ public abstract class ReplicaNode extends Node { return false; } - if (Arrays.equals(destMetaData.header, srcMetaData.header) == false || - Arrays.equals(destMetaData.footer, srcMetaData.footer) == false) { + if (Arrays.equals(destMetaData.header, srcMetaData.header) == false + || Arrays.equals(destMetaData.footer, srcMetaData.footer) == false) { // Segment name was reused! This is rare but possible and otherwise devastating: if (Node.VERBOSE_FILES) { message("file " + fileName + ": will copy [header/footer is different]"); @@ -752,7 +881,7 @@ public abstract class ReplicaNode extends Node { } } - private ConcurrentMap copying = new ConcurrentHashMap<>(); + private ConcurrentMap copying = new ConcurrentHashMap<>(); // Used only to catch bugs, ensuring a given file name is only ever being copied by one job: public void startCopyFile(String name) { diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/SegmentInfosSearcherManager.java b/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/SegmentInfosSearcherManager.java index d18ee1029d9..2927601bda6 100644 --- a/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/SegmentInfosSearcherManager.java +++ b/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/SegmentInfosSearcherManager.java @@ -21,7 +21,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; @@ -35,10 +34,12 @@ import org.apache.lucene.search.SearcherFactory; import org.apache.lucene.search.SearcherManager; import org.apache.lucene.store.Directory; -/** A SearcherManager that refreshes via an externally provided (NRT) SegmentInfos, either from {@link IndexWriter} or via - * nrt replication to another index. +/** + * A SearcherManager that refreshes via an externally provided (NRT) SegmentInfos, either from + * {@link IndexWriter} or via nrt replication to another index. 
* - * @lucene.experimental */ + * + * @lucene.experimental + */ class SegmentInfosSearcherManager extends ReferenceManager { private volatile SegmentInfos currentInfos; private final Directory dir; @@ -46,7 +47,9 @@ class SegmentInfosSearcherManager extends ReferenceManager { private final AtomicInteger openReaderCount = new AtomicInteger(); private final SearcherFactory searcherFactory; - public SegmentInfosSearcherManager(Directory dir, Node node, SegmentInfos infosIn, SearcherFactory searcherFactory) throws IOException { + public SegmentInfosSearcherManager( + Directory dir, Node node, SegmentInfos infosIn, SearcherFactory searcherFactory) + throws IOException { this.dir = dir; this.node = node; if (searcherFactory == null) { @@ -55,7 +58,9 @@ class SegmentInfosSearcherManager extends ReferenceManager { this.searcherFactory = searcherFactory; currentInfos = infosIn; node.message("SegmentInfosSearcherManager.init: use incoming infos=" + infosIn.toString()); - current = SearcherManager.getSearcher(searcherFactory, StandardDirectoryReader.open(dir, currentInfos, null), null); + current = + SearcherManager.getSearcher( + searcherFactory, StandardDirectoryReader.open(dir, currentInfos, null), null); addReaderClosedListener(current.getIndexReader()); } @@ -78,8 +83,10 @@ class SegmentInfosSearcherManager extends ReferenceManager { return currentInfos; } - /** Switch to new segments, refreshing if necessary. Note that it's the caller job to ensure there's a held refCount for the - * incoming infos, so all files exist. */ + /** + * Switch to new segments, refreshing if necessary. Note that it's the caller's job to ensure + * there's a held refCount for the incoming infos, so all files exist. + */ public void setCurrentInfos(SegmentInfos infos) throws IOException { if (currentInfos != null) { // So that if we commit, we will go to the next @@ -98,7 +105,7 @@ class SegmentInfosSearcherManager extends ReferenceManager { subs = null; } else { subs = new ArrayList<>(); - for(LeafReaderContext ctx : old.getIndexReader().leaves()) { + for (LeafReaderContext ctx : old.getIndexReader().leaves()) { subs.add(ctx.reader()); } } @@ -116,17 +123,19 @@ class SegmentInfosSearcherManager extends ReferenceManager { throw new IllegalStateException("StandardDirectoryReader must support caching"); } openReaderCount.incrementAndGet(); - cacheHelper.addClosedListener(new IndexReader.ClosedListener() { - @Override - public void onClose(IndexReader.CacheKey cacheKey) { - onReaderClosed(); - } - }); + cacheHelper.addClosedListener( + new IndexReader.ClosedListener() { + @Override + public void onClose(IndexReader.CacheKey cacheKey) { + onReaderClosed(); + } + }); } - /** Tracks how many readers are still open, so that when we are closed, - * we can additionally wait until all in-flight searchers are - * closed. */ + /** + * Tracks how many readers are still open, so that when we are closed, we can additionally wait + * until all in-flight searchers are closed. 
+ */ synchronized void onReaderClosed() { if (openReaderCount.decrementAndGet() == 0) { notifyAll(); diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/package-info.java b/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/package-info.java index a5477628db8..872764cab24 100644 --- a/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/package-info.java +++ b/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/package-info.java @@ -14,8 +14,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -/** - * Near-real-time replication framework - */ + +/** Near-real-time replication framework */ package org.apache.lucene.replicator.nrt; diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/package-info.java b/lucene/replicator/src/java/org/apache/lucene/replicator/package-info.java index 1dd4842c245..85d6e851503 100644 --- a/lucene/replicator/src/java/org/apache/lucene/replicator/package-info.java +++ b/lucene/replicator/src/java/org/apache/lucene/replicator/package-info.java @@ -14,48 +14,47 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -/** + +/** * Files replication framework * - * The - * Replicator allows replicating files between a server and client(s). Producers publish - * revisions and consumers update to the latest revision available. - * ReplicationClient is a helper utility for performing the update operation. It can - * be invoked either - * manually or periodically by - * starting an update thread. - * HttpReplicator can be used to replicate revisions by consumers that reside on - * a different node than the producer. + *

    The Replicator allows replicating files between a server and + * client(s). Producers publish revisions and consumers update to the + * latest revision available. ReplicationClient is a helper + * utility for performing the update operation. It can be invoked either manually or periodically by starting an update + * thread. HttpReplicator can be used to replicate + * revisions by consumers that reside on a different node than the producer. * - *

    - * The replication framework supports replicating any type of files, with built-in support for a single search index as - * well as an index and taxonomy pair. For a single index, the application should publish an - * IndexRevision and set - * IndexReplicationHandler on the client. For an index and taxonomy pair, the - * application should publish an IndexAndTaxonomyRevision and set - * IndexAndTaxonomyReplicationHandler on the client. + *

    The replication framework supports replicating any type of files, with built-in support for a + * single search index as well as an index and taxonomy pair. For a single index, the application + * should publish an IndexRevision and set IndexReplicationHandler on the client. For an index and + * taxonomy pair, the application should publish an IndexAndTaxonomyRevision and set IndexAndTaxonomyReplicationHandler on the + * client. * - *

    - * When the replication client detects that there is a newer revision available, it copies the files of the revision and - * then invokes the handler to complete the operation (e.g. copy the files to the index directory, fsync them, reopen an - * index reader etc.). By default, only files that do not exist in the handler's - * current revision files are copied, - * however this can be overridden by extending the client. + *

    When the replication client detects that there is a newer revision available, it copies the + * files of the revision and then invokes the handler to complete the operation (e.g. copy the files + * to the index directory, fsync them, reopen an index reader etc.). By default, only files that do + * not exist in the handler's current revision + * files are copied, however this can be overridden by extending the client. + * + *

    An example usage of the Replicator: * - *

    - * An example usage of the Replicator: - * *

    - * // ++++++++++++++ SERVER SIDE ++++++++++++++ // 
    + * // ++++++++++++++ SERVER SIDE ++++++++++++++ //
      * IndexWriter publishWriter; // the writer used for indexing
      * Replicator replicator = new LocalReplicator();
      * replicator.publish(new IndexRevision(publishWriter));
      *
    - * // ++++++++++++++ CLIENT SIDE ++++++++++++++ // 
    + * // ++++++++++++++ CLIENT SIDE ++++++++++++++ //
      * // either LocalReplicator, or HttpReplicator if client and server are on different nodes
      * Replicator replicator;
    - * 
    + *
      * // callback invoked after handler finished handling the revision and e.g. can reopen the reader.
      * Callable<Boolean> callback = null; // can also be null if no callback is needed
      * ReplicationHandler handler = new IndexReplicationHandler(indexDir, callback);
    @@ -64,7 +63,7 @@
      *
      * // invoke client manually
      * client.updateNow();
    - * 
    + *
      * // or, periodically
      * client.startUpdateThread(100); // check for update every 100 milliseconds
      * 
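For readers who want to run the javadoc example above against a remote producer, here is a minimal, self-contained sketch of the client side using HttpReplicator. The host, port, and "s1" shard context are illustrative assumptions (mirroring the replicator test suite), not values taken from this patch; the two-argument startUpdateThread overload matches the test code later in this patch.

import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.concurrent.Callable;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
import org.apache.lucene.replicator.IndexReplicationHandler;
import org.apache.lucene.replicator.PerSessionDirectoryFactory;
import org.apache.lucene.replicator.ReplicationClient;
import org.apache.lucene.replicator.Replicator;
import org.apache.lucene.replicator.http.HttpReplicator;
import org.apache.lucene.replicator.http.ReplicationService;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class HttpReplicationClientSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical local paths: where the replicated index lives, plus scratch
    // space for in-flight copy sessions.
    Directory indexDir = FSDirectory.open(Paths.get("client-index"));
    Path workDir = Paths.get("client-work");

    // HttpReplicator talks to a ReplicationService hosted on the producer node;
    // "s1" is an assumed shard context, as used in the replicator tests.
    Replicator replicator =
        new HttpReplicator(
            "localhost",
            8080,
            ReplicationService.REPLICATION_CONTEXT + "/s1",
            new PoolingHttpClientConnectionManager());

    // Callback runs after each revision is installed; a real client would
    // reopen its IndexReader here. Null means no callback.
    Callable<Boolean> callback = null;
    ReplicationClient client =
        new ReplicationClient(
            replicator,
            new IndexReplicationHandler(indexDir, callback),
            new PerSessionDirectoryFactory(workDir));

    client.updateNow(); // one-shot update
    client.startUpdateThread(100, "s1"); // or poll every 100 ms
  }
}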
    diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/ReplicatorTestCase.java b/lucene/replicator/src/test/org/apache/lucene/replicator/ReplicatorTestCase.java index e2cd3117f5e..d6d8bc4980d 100644 --- a/lucene/replicator/src/test/org/apache/lucene/replicator/ReplicatorTestCase.java +++ b/lucene/replicator/src/test/org/apache/lucene/replicator/ReplicatorTestCase.java @@ -34,11 +34,11 @@ import org.eclipse.jetty.util.ssl.SslContextFactory; import org.eclipse.jetty.util.thread.QueuedThreadPool; import org.junit.AfterClass; -@ThreadLeakLingering(linger = 80000) // Jetty might ignore interrupt for a minute +@ThreadLeakLingering(linger = 80000) // Jetty might ignore interrupt for a minute public abstract class ReplicatorTestCase extends LuceneTestCase { - + private static HttpClientConnectionManager clientConnectionManager; - + @AfterClass public static void afterClassReplicatorTestCase() throws Exception { if (clientConnectionManager != null) { @@ -46,10 +46,10 @@ public abstract class ReplicatorTestCase extends LuceneTestCase { clientConnectionManager = null; } } - + /** - * Returns a new {@link Server HTTP Server} instance. To obtain its port, use - * {@link #serverPort(Server)}. + * Returns a new {@link Server HTTP Server} instance. To obtain its port, use {@link + * #serverPort(Server)}. */ public static synchronized Server newHttpServer(Handler handler) throws Exception { // if this property is true, then jetty will be configured to use SSL @@ -58,31 +58,27 @@ public abstract class ReplicatorTestCase extends LuceneTestCase { // // This means we will use the same truststore, keystore (and keys) for // the server as well as any client actions taken by this JVM in - // talking to that server, but for the purposes of testing that should + // talking to that server, but for the purposes of testing that should // be good enough final boolean useSsl = Boolean.getBoolean("tests.jettySsl"); final SslContextFactory.Server sslcontext = new SslContextFactory.Server(); - + if (useSsl) { if (null != System.getProperty("javax.net.ssl.keyStore")) { - sslcontext.setKeyStorePath - (System.getProperty("javax.net.ssl.keyStore")); + sslcontext.setKeyStorePath(System.getProperty("javax.net.ssl.keyStore")); } if (null != System.getProperty("javax.net.ssl.keyStorePassword")) { - sslcontext.setKeyStorePassword - (System.getProperty("javax.net.ssl.keyStorePassword")); + sslcontext.setKeyStorePassword(System.getProperty("javax.net.ssl.keyStorePassword")); } if (null != System.getProperty("javax.net.ssl.trustStore")) { - sslcontext.setKeyStorePath - (System.getProperty("javax.net.ssl.trustStore")); + sslcontext.setKeyStorePath(System.getProperty("javax.net.ssl.trustStore")); } if (null != System.getProperty("javax.net.ssl.trustStorePassword")) { - sslcontext.setTrustStorePassword - (System.getProperty("javax.net.ssl.trustStorePassword")); + sslcontext.setTrustStorePassword(System.getProperty("javax.net.ssl.trustStorePassword")); } sslcontext.setNeedClientAuth(Boolean.getBoolean("tests.jettySsl.clientAuth")); } - + final QueuedThreadPool threadPool = new QueuedThreadPool(); threadPool.setDaemon(true); threadPool.setMaxThreads(10000); @@ -93,59 +89,61 @@ public abstract class ReplicatorTestCase extends LuceneTestCase { server.setStopAtShutdown(true); server.manage(threadPool); - final ServerConnector connector; if (useSsl) { HttpConfiguration configuration = new HttpConfiguration(); configuration.setSecureScheme("https"); configuration.addCustomizer(new SecureRequestCustomizer()); 
@SuppressWarnings("resource") - ServerConnector c = new ServerConnector(server, new SslConnectionFactory(sslcontext, "http/1.1"), - new HttpConnectionFactory(configuration)); + ServerConnector c = + new ServerConnector( + server, + new SslConnectionFactory(sslcontext, "http/1.1"), + new HttpConnectionFactory(configuration)); connector = c; } else { @SuppressWarnings("resource") ServerConnector c = new ServerConnector(server, new HttpConnectionFactory()); connector = c; } - + connector.setPort(0); connector.setHost("127.0.0.1"); server.setConnectors(new Connector[] {connector}); - server.setSessionIdManager(new DefaultSessionIdManager(server, new Random(random().nextLong()))); + server.setSessionIdManager( + new DefaultSessionIdManager(server, new Random(random().nextLong()))); server.setHandler(handler); - + server.start(); - + return server; } - + /** Returns a {@link Server}'s port. */ public static int serverPort(Server server) { - return ((ServerConnector)server.getConnectors()[0]).getLocalPort(); + return ((ServerConnector) server.getConnectors()[0]).getLocalPort(); } - + /** Returns a {@link Server}'s host. */ public static String serverHost(Server server) { - return ((ServerConnector)server.getConnectors()[0]).getHost(); + return ((ServerConnector) server.getConnectors()[0]).getHost(); } - + /** - * Stops the given HTTP Server instance. This method does its best to guarantee - * that no threads will be left running following this method. + * Stops the given HTTP Server instance. This method does its best to guarantee that no threads + * will be left running following this method. */ public static void stopHttpServer(Server httpServer) throws Exception { httpServer.stop(); httpServer.join(); } - + /** * Returns a {@link HttpClientConnectionManager}. - *

    - * NOTE: do not {@link HttpClientConnectionManager#shutdown()} this - * connection manager, it will be close automatically after all tests have - * finished. + * + *

NOTE: do not {@link HttpClientConnectionManager#shutdown()} this connection manager, + * it will be closed automatically after all tests have finished. */ public static synchronized HttpClientConnectionManager getClientConnectionManager() { if (clientConnectionManager == null) { @@ -154,8 +152,7 @@ public abstract class ReplicatorTestCase extends LuceneTestCase { ccm.setMaxTotal(128); clientConnectionManager = ccm; } - + return clientConnectionManager; } - } diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/TestIndexAndTaxonomyReplicationClient.java b/lucene/replicator/src/test/org/apache/lucene/replicator/TestIndexAndTaxonomyReplicationClient.java index 3303a6cc735..4e80e308c87 100644 --- a/lucene/replicator/src/test/org/apache/lucene/replicator/TestIndexAndTaxonomyReplicationClient.java +++ b/lucene/replicator/src/test/org/apache/lucene/replicator/TestIndexAndTaxonomyReplicationClient.java @@ -25,7 +25,6 @@ import java.util.HashMap; import java.util.concurrent.Callable; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.lucene.document.Document; import org.apache.lucene.facet.DrillDownQuery; import org.apache.lucene.facet.FacetField; @@ -58,15 +57,15 @@ import org.junit.Before; import org.junit.Test; public class TestIndexAndTaxonomyReplicationClient extends ReplicatorTestCase { - + private static class IndexAndTaxonomyReadyCallback implements Callable, Closeable { - + private final Directory indexDir, taxoDir; private DirectoryReader indexReader; private DirectoryTaxonomyReader taxoReader; private FacetsConfig config; private long lastIndexGeneration = -1; - + public IndexAndTaxonomyReadyCallback(Directory indexDir, Directory taxoDir) throws IOException { this.indexDir = indexDir; this.taxoDir = taxoDir; @@ -78,7 +77,7 @@ public class TestIndexAndTaxonomyReplicationClient extends ReplicatorTestCase { taxoReader = new DirectoryTaxonomyReader(taxoDir); } } - + @Override public Boolean call() throws Exception { if (indexReader == null) { @@ -88,14 +87,17 @@ public class TestIndexAndTaxonomyReplicationClient extends ReplicatorTestCase { } else { // verify search index DirectoryReader newReader = DirectoryReader.openIfChanged(indexReader); - assertNotNull("should not have reached here if no changes were made to the index", newReader); + assertNotNull( + "should not have reached here if no changes were made to the index", newReader); long newGeneration = newReader.getIndexCommit().getGeneration(); - assertTrue("expected newer generation; current=" + lastIndexGeneration + " new=" + newGeneration, newGeneration > lastIndexGeneration); + assertTrue( + "expected newer generation; current=" + lastIndexGeneration + " new=" + newGeneration, + newGeneration > lastIndexGeneration); indexReader.close(); indexReader = newReader; lastIndexGeneration = newGeneration; TestUtil.checkIndex(indexDir); - + // verify taxonomy index DirectoryTaxonomyReader newTaxoReader = TaxonomyReader.openIfChanged(taxoReader); if (newTaxoReader != null) { @@ -103,7 +105,7 @@ public class TestIndexAndTaxonomyReplicationClient extends ReplicatorTestCase { taxoReader = newTaxoReader; } TestUtil.checkIndex(taxoDir); - + // verify faceted search int id = Integer.parseInt(indexReader.getIndexCommit().getUserData().get(VERSION_ID), 16); IndexSearcher searcher = new IndexSearcher(indexReader); @@ -111,7 +113,7 @@ public class TestIndexAndTaxonomyReplicationClient extends ReplicatorTestCase { searcher.search(new MatchAllDocsQuery(), fc); Facets 
facets = new FastTaxonomyFacetCounts(taxoReader, config, fc); assertEquals(1, facets.getSpecificValue("A", Integer.toString(id, 16)).intValue()); - + DrillDownQuery drillDown = new DrillDownQuery(config); drillDown.add("A", Integer.toString(id, 16)); TopDocs docs = searcher.search(drillDown, 10); @@ -119,13 +121,13 @@ public class TestIndexAndTaxonomyReplicationClient extends ReplicatorTestCase { } return null; } - + @Override public void close() throws IOException { IOUtils.close(indexReader, taxoReader); } } - + private Directory publishIndexDir, publishTaxoDir; private MockDirectoryWrapper handlerIndexDir, handlerTaxoDir; private Replicator replicator; @@ -137,9 +139,9 @@ public class TestIndexAndTaxonomyReplicationClient extends ReplicatorTestCase { private FacetsConfig config; private IndexAndTaxonomyReadyCallback callback; private Path clientWorkDir; - + private static final String VERSION_ID = "version"; - + private void assertHandlerRevision(int expectedID, Directory dir) throws IOException { // loop as long as client is alive. test-framework will terminate us if // there's a serious bug, e.g. client doesn't really update. otherwise, @@ -151,11 +153,12 @@ public class TestIndexAndTaxonomyReplicationClient extends ReplicatorTestCase { } catch (InterruptedException e) { throw new ThreadInterruptedException(e); } - + try { DirectoryReader reader = DirectoryReader.open(dir); try { - int handlerID = Integer.parseInt(reader.getIndexCommit().getUserData().get(VERSION_ID), 16); + int handlerID = + Integer.parseInt(reader.getIndexCommit().getUserData().get(VERSION_ID), 16); if (expectedID == handlerID) { return; } @@ -169,23 +172,26 @@ public class TestIndexAndTaxonomyReplicationClient extends ReplicatorTestCase { } } } - + private Revision createRevision(final int id) throws IOException { publishIndexWriter.addDocument(newDocument(publishTaxoWriter, id)); - publishIndexWriter.setLiveCommitData(new HashMap() {{ - put(VERSION_ID, Integer.toString(id, 16)); - }}.entrySet()); + publishIndexWriter.setLiveCommitData( + new HashMap() { + { + put(VERSION_ID, Integer.toString(id, 16)); + } + }.entrySet()); publishIndexWriter.commit(); publishTaxoWriter.commit(); return new IndexAndTaxonomyRevision(publishIndexWriter, publishTaxoWriter); } - + private Document newDocument(TaxonomyWriter taxoWriter, int id) throws IOException { Document doc = new Document(); doc.add(new FacetField("A", Integer.toString(id, 16))); return config.build(taxoWriter, doc); } - + @Override @Before public void setUp() throws Exception { @@ -200,7 +206,7 @@ public class TestIndexAndTaxonomyReplicationClient extends ReplicatorTestCase { callback = new IndexAndTaxonomyReadyCallback(handlerIndexDir, handlerTaxoDir); handler = new IndexAndTaxonomyReplicationHandler(handlerIndexDir, handlerTaxoDir, callback); client = new ReplicationClient(replicator, handler, sourceDirFactory); - + IndexWriterConfig conf = newIndexWriterConfig(null); conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy())); publishIndexWriter = new IndexWriter(publishIndexDir, conf); @@ -208,82 +214,89 @@ public class TestIndexAndTaxonomyReplicationClient extends ReplicatorTestCase { config = new FacetsConfig(); config.setHierarchical("A", true); } - + @After @Override public void tearDown() throws Exception { publishIndexWriter.close(); - IOUtils.close(client, callback, publishTaxoWriter, replicator, publishIndexDir, publishTaxoDir, - handlerIndexDir, handlerTaxoDir); + IOUtils.close( + client, + callback, + publishTaxoWriter, + 
replicator, + publishIndexDir, + publishTaxoDir, + handlerIndexDir, + handlerTaxoDir); super.tearDown(); } - + @Test public void testNoUpdateThread() throws Exception { assertNull("no version expected at start", handler.currentVersion()); - + // Callback validates the replicated index replicator.publish(createRevision(1)); client.updateNow(); - + // make sure updating twice, when in fact there's nothing to update, works client.updateNow(); - + replicator.publish(createRevision(2)); client.updateNow(); - + // Publish two revisions without update, handler should be upgraded to latest replicator.publish(createRevision(3)); replicator.publish(createRevision(4)); client.updateNow(); } - + @Test public void testRestart() throws Exception { replicator.publish(createRevision(1)); client.updateNow(); - + replicator.publish(createRevision(2)); client.updateNow(); - + client.stopUpdateThread(); client.close(); client = new ReplicationClient(replicator, handler, sourceDirFactory); - + // Publish two revisions without update, handler should be upgraded to latest replicator.publish(createRevision(3)); replicator.publish(createRevision(4)); client.updateNow(); } - + @Test public void testUpdateThread() throws Exception { client.startUpdateThread(10, "indexTaxo"); - + replicator.publish(createRevision(1)); assertHandlerRevision(1, handlerIndexDir); - + replicator.publish(createRevision(2)); assertHandlerRevision(2, handlerIndexDir); - + // Publish two revisions without update, handler should be upgraded to latest replicator.publish(createRevision(3)); replicator.publish(createRevision(4)); assertHandlerRevision(4, handlerIndexDir); } - + @Test public void testRecreateTaxonomy() throws Exception { replicator.publish(createRevision(1)); client.updateNow(); - + // recreate index and taxonomy Directory newTaxo = newDirectory(); new DirectoryTaxonomyWriter(newTaxo).close(); publishTaxoWriter.replaceTaxonomy(newTaxo); publishIndexWriter.deleteAll(); replicator.publish(createRevision(2)); - + client.updateNow(); newTaxo.close(); } @@ -305,141 +318,149 @@ public class TestIndexAndTaxonomyReplicationClient extends ReplicatorTestCase { // wrap sourceDirFactory to return a MockDirWrapper so we can simulate errors final SourceDirectoryFactory in = sourceDirFactory; final AtomicInteger failures = new AtomicInteger(atLeast(10)); - sourceDirFactory = new SourceDirectoryFactory() { - - private long clientMaxSize = 100, handlerIndexMaxSize = 100, handlerTaxoMaxSize = 100; - private double clientExRate = 1.0, handlerIndexExRate = 1.0, handlerTaxoExRate = 1.0; - - @Override - public void cleanupSession(String sessionID) throws IOException { - in.cleanupSession(sessionID); - } - - @SuppressWarnings("synthetic-access") - @Override - public Directory getDirectory(String sessionID, String source) throws IOException { - Directory dir = in.getDirectory(sessionID, source); - if (random().nextBoolean() && failures.get() > 0) { // client should fail, return wrapped dir - MockDirectoryWrapper mdw = new MockDirectoryWrapper(random(), dir); - mdw.setRandomIOExceptionRateOnOpen(clientExRate); - mdw.setMaxSizeInBytes(clientMaxSize); - mdw.setRandomIOExceptionRate(clientExRate); - mdw.setCheckIndexOnClose(false); - clientMaxSize *= 2; - clientExRate /= 2; - return mdw; - } - - if (failures.get() > 0 && random().nextBoolean()) { // handler should fail - if (random().nextBoolean()) { // index dir fail - handlerIndexDir.setMaxSizeInBytes(handlerIndexMaxSize); - handlerIndexDir.setRandomIOExceptionRate(handlerIndexExRate); - 
handlerIndexDir.setRandomIOExceptionRateOnOpen(handlerIndexExRate); - handlerIndexMaxSize *= 2; - handlerIndexExRate /= 2; - } else { // taxo dir fail - handlerTaxoDir.setMaxSizeInBytes(handlerTaxoMaxSize); - handlerTaxoDir.setRandomIOExceptionRate(handlerTaxoExRate); - handlerTaxoDir.setRandomIOExceptionRateOnOpen(handlerTaxoExRate); - handlerTaxoDir.setCheckIndexOnClose(false); - handlerTaxoMaxSize *= 2; - handlerTaxoExRate /= 2; - } - } else { - // disable all errors - handlerIndexDir.setMaxSizeInBytes(0); - handlerIndexDir.setRandomIOExceptionRate(0.0); - handlerIndexDir.setRandomIOExceptionRateOnOpen(0.0); - handlerTaxoDir.setMaxSizeInBytes(0); - handlerTaxoDir.setRandomIOExceptionRate(0.0); - handlerTaxoDir.setRandomIOExceptionRateOnOpen(0.0); - } + sourceDirFactory = + new SourceDirectoryFactory() { - return dir; - } - }; - - handler = new IndexAndTaxonomyReplicationHandler(handlerIndexDir, handlerTaxoDir, new Callable() { - @Override - public Boolean call() throws Exception { - if (random().nextDouble() < 0.2 && failures.get() > 0) { - throw new RuntimeException("random exception from callback"); - } - return null; - } - }); + private long clientMaxSize = 100, handlerIndexMaxSize = 100, handlerTaxoMaxSize = 100; + private double clientExRate = 1.0, handlerIndexExRate = 1.0, handlerTaxoExRate = 1.0; + + @Override + public void cleanupSession(String sessionID) throws IOException { + in.cleanupSession(sessionID); + } + + @SuppressWarnings("synthetic-access") + @Override + public Directory getDirectory(String sessionID, String source) throws IOException { + Directory dir = in.getDirectory(sessionID, source); + if (random().nextBoolean() + && failures.get() > 0) { // client should fail, return wrapped dir + MockDirectoryWrapper mdw = new MockDirectoryWrapper(random(), dir); + mdw.setRandomIOExceptionRateOnOpen(clientExRate); + mdw.setMaxSizeInBytes(clientMaxSize); + mdw.setRandomIOExceptionRate(clientExRate); + mdw.setCheckIndexOnClose(false); + clientMaxSize *= 2; + clientExRate /= 2; + return mdw; + } + + if (failures.get() > 0 && random().nextBoolean()) { // handler should fail + if (random().nextBoolean()) { // index dir fail + handlerIndexDir.setMaxSizeInBytes(handlerIndexMaxSize); + handlerIndexDir.setRandomIOExceptionRate(handlerIndexExRate); + handlerIndexDir.setRandomIOExceptionRateOnOpen(handlerIndexExRate); + handlerIndexMaxSize *= 2; + handlerIndexExRate /= 2; + } else { // taxo dir fail + handlerTaxoDir.setMaxSizeInBytes(handlerTaxoMaxSize); + handlerTaxoDir.setRandomIOExceptionRate(handlerTaxoExRate); + handlerTaxoDir.setRandomIOExceptionRateOnOpen(handlerTaxoExRate); + handlerTaxoDir.setCheckIndexOnClose(false); + handlerTaxoMaxSize *= 2; + handlerTaxoExRate /= 2; + } + } else { + // disable all errors + handlerIndexDir.setMaxSizeInBytes(0); + handlerIndexDir.setRandomIOExceptionRate(0.0); + handlerIndexDir.setRandomIOExceptionRateOnOpen(0.0); + handlerTaxoDir.setMaxSizeInBytes(0); + handlerTaxoDir.setRandomIOExceptionRate(0.0); + handlerTaxoDir.setRandomIOExceptionRateOnOpen(0.0); + } + + return dir; + } + }; + + handler = + new IndexAndTaxonomyReplicationHandler( + handlerIndexDir, + handlerTaxoDir, + new Callable() { + @Override + public Boolean call() throws Exception { + if (random().nextDouble() < 0.2 && failures.get() > 0) { + throw new RuntimeException("random exception from callback"); + } + return null; + } + }); final AtomicBoolean failed = new AtomicBoolean(); // wrap handleUpdateException so we can act on the thrown exception - client = new 
ReplicationClient(replicator, handler, sourceDirFactory) { - @SuppressWarnings("synthetic-access") - @Override - protected void handleUpdateException(Throwable t) { - if (t instanceof IOException) { - try { - if (VERBOSE) { - System.out.println("hit exception during update: " + t); - t.printStackTrace(System.out); - } - - // test that the index can be read and also some basic statistics - DirectoryReader reader = DirectoryReader.open(handlerIndexDir.getDelegate()); - try { - int numDocs = reader.numDocs(); - int version = Integer.parseInt(reader.getIndexCommit().getUserData().get(VERSION_ID), 16); - assertEquals(numDocs, version); - } finally { - reader.close(); - } - // verify index is fully consistent - TestUtil.checkIndex(handlerIndexDir.getDelegate()); - - // verify taxonomy index is fully consistent (since we only add one - // category to all documents, there's nothing much more to validate. - ByteArrayOutputStream bos = new ByteArrayOutputStream(1024); - CheckIndex.Status indexStatus = null; - - try (CheckIndex checker = new CheckIndex(handlerTaxoDir.getDelegate())) { - checker.setFailFast(true); - checker.setInfoStream(new PrintStream(bos, false, IOUtils.UTF_8), false); + client = + new ReplicationClient(replicator, handler, sourceDirFactory) { + @SuppressWarnings("synthetic-access") + @Override + protected void handleUpdateException(Throwable t) { + if (t instanceof IOException) { try { - indexStatus = checker.checkIndex(null); - } catch (IOException | RuntimeException ioe) { - // ok: we fallback below - } - } + if (VERBOSE) { + System.out.println("hit exception during update: " + t); + t.printStackTrace(System.out); + } - } catch (IOException e) { - failed.set(true); - throw new RuntimeException(e); - } catch (RuntimeException e) { - failed.set(true); - throw e; - } finally { - // count-down number of failures - failures.decrementAndGet(); - assert failures.get() >= 0 : "handler failed too many times: " + failures.get(); - if (VERBOSE) { - if (failures.get() == 0) { - System.out.println("no more failures expected"); - } else { - System.out.println("num failures left: " + failures.get()); + // test that the index can be read and also some basic statistics + DirectoryReader reader = DirectoryReader.open(handlerIndexDir.getDelegate()); + try { + int numDocs = reader.numDocs(); + int version = + Integer.parseInt(reader.getIndexCommit().getUserData().get(VERSION_ID), 16); + assertEquals(numDocs, version); + } finally { + reader.close(); + } + // verify index is fully consistent + TestUtil.checkIndex(handlerIndexDir.getDelegate()); + + // verify taxonomy index is fully consistent (since we only add one + // category to all documents, there's nothing much more to validate. 
+ ByteArrayOutputStream bos = new ByteArrayOutputStream(1024); + CheckIndex.Status indexStatus = null; + + try (CheckIndex checker = new CheckIndex(handlerTaxoDir.getDelegate())) { + checker.setFailFast(true); + checker.setInfoStream(new PrintStream(bos, false, IOUtils.UTF_8), false); + try { + indexStatus = checker.checkIndex(null); + } catch (IOException | RuntimeException ioe) { + // ok: we fallback below + } + } + + } catch (IOException e) { + failed.set(true); + throw new RuntimeException(e); + } catch (RuntimeException e) { + failed.set(true); + throw e; + } finally { + // count-down number of failures + failures.decrementAndGet(); + assert failures.get() >= 0 : "handler failed too many times: " + failures.get(); + if (VERBOSE) { + if (failures.get() == 0) { + System.out.println("no more failures expected"); + } else { + System.out.println("num failures left: " + failures.get()); + } + } } + } else { + failed.set(true); + if (t instanceof RuntimeException) { + throw (RuntimeException) t; + } + throw new RuntimeException(t); } } - } else { - failed.set(true); - if (t instanceof RuntimeException) { - throw (RuntimeException) t; - } - throw new RuntimeException(t); - } - } - }; - + }; + client.startUpdateThread(10, "indexAndTaxo"); - + final Directory baseHandlerIndexDir = handlerIndexDir.getDelegate(); int numRevisions = atLeast(20) + 2; for (int i = 2; i < numRevisions && failed.get() == false; i++) { @@ -448,7 +469,7 @@ public class TestIndexAndTaxonomyReplicationClient extends ReplicatorTestCase { } // disable errors -- maybe randomness didn't exhaust all allowed failures, - // and we don't want e.g. CheckIndex to hit false errors. + // and we don't want e.g. CheckIndex to hit false errors. handlerIndexDir.setMaxSizeInBytes(0); handlerIndexDir.setRandomIOExceptionRate(0.0); handlerIndexDir.setRandomIOExceptionRateOnOpen(0.0); @@ -456,5 +477,4 @@ public class TestIndexAndTaxonomyReplicationClient extends ReplicatorTestCase { handlerTaxoDir.setRandomIOExceptionRate(0.0); handlerTaxoDir.setRandomIOExceptionRateOnOpen(0.0); } - } diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/TestIndexAndTaxonomyRevision.java b/lucene/replicator/src/test/org/apache/lucene/replicator/TestIndexAndTaxonomyRevision.java index 78c74135d46..d57d30a2e42 100644 --- a/lucene/replicator/src/test/org/apache/lucene/replicator/TestIndexAndTaxonomyRevision.java +++ b/lucene/replicator/src/test/org/apache/lucene/replicator/TestIndexAndTaxonomyRevision.java @@ -21,7 +21,6 @@ import java.io.InputStream; import java.util.List; import java.util.Map; import java.util.Map.Entry; - import org.apache.lucene.document.Document; import org.apache.lucene.facet.FacetField; import org.apache.lucene.facet.FacetsConfig; @@ -38,39 +37,41 @@ import org.apache.lucene.util.IOUtils; import org.junit.Test; public class TestIndexAndTaxonomyRevision extends ReplicatorTestCase { - + private Document newDocument(TaxonomyWriter taxoWriter) throws IOException { FacetsConfig config = new FacetsConfig(); Document doc = new Document(); doc.add(new FacetField("A", "1")); return config.build(taxoWriter, doc); } - + @Test public void testNoCommit() throws Exception { Directory indexDir = newDirectory(); IndexWriterConfig conf = new IndexWriterConfig(null); conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy())); IndexWriter indexWriter = new IndexWriter(indexDir, conf); - + Directory taxoDir = newDirectory(); SnapshotDirectoryTaxonomyWriter taxoWriter = new SnapshotDirectoryTaxonomyWriter(taxoDir); 
// should fail when there are no commits to snapshot
-    expectThrows(IllegalStateException.class, () -> {
-      new IndexAndTaxonomyRevision(indexWriter, taxoWriter);
-    });
+    expectThrows(
+        IllegalStateException.class,
+        () -> {
+          new IndexAndTaxonomyRevision(indexWriter, taxoWriter);
+        });
 
     indexWriter.close();
     IOUtils.close(taxoWriter, taxoDir, indexDir);
   }
-  
+
   @Test
   public void testRevisionRelease() throws Exception {
     Directory indexDir = newDirectory();
     IndexWriterConfig conf = new IndexWriterConfig(null);
     conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
     IndexWriter indexWriter = new IndexWriter(indexDir, conf);
-  
+
     Directory taxoDir = newDirectory();
     SnapshotDirectoryTaxonomyWriter taxoWriter = new SnapshotDirectoryTaxonomyWriter(taxoDir);
     try {
@@ -82,8 +83,10 @@ public class TestIndexAndTaxonomyRevision extends ReplicatorTestCase {
       rev1.release();
       assertTrue(slowFileExists(indexDir, IndexFileNames.SEGMENTS + "_1"));
       assertTrue(slowFileExists(taxoDir, IndexFileNames.SEGMENTS + "_1"));
-      
-      rev1 = new IndexAndTaxonomyRevision(indexWriter, taxoWriter); // create revision again, so the files are snapshotted
+
+      rev1 =
+          new IndexAndTaxonomyRevision(
+              indexWriter, taxoWriter); // create revision again, so the files are snapshotted
       indexWriter.addDocument(newDocument(taxoWriter));
       indexWriter.commit();
       taxoWriter.commit();
@@ -95,14 +98,14 @@ public class TestIndexAndTaxonomyRevision extends ReplicatorTestCase {
       IOUtils.close(indexWriter, taxoWriter, taxoDir, indexDir);
     }
   }
-  
+
   @Test
   public void testSegmentsFileLast() throws Exception {
     Directory indexDir = newDirectory();
     IndexWriterConfig conf = new IndexWriterConfig(null);
     conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
     IndexWriter indexWriter = new IndexWriter(indexDir, conf);
-  
+
     Directory taxoDir = newDirectory();
     SnapshotDirectoryTaxonomyWriter taxoWriter = new SnapshotDirectoryTaxonomyWriter(taxoDir);
     try {
@@ -110,7 +113,7 @@ public class TestIndexAndTaxonomyRevision extends ReplicatorTestCase {
       indexWriter.commit();
       taxoWriter.commit();
       Revision rev = new IndexAndTaxonomyRevision(indexWriter, taxoWriter);
-      Map<String,List<RevisionFile>> sourceFiles = rev.getSourceFiles();
+      Map<String, List<RevisionFile>> sourceFiles = rev.getSourceFiles();
       assertEquals(2, sourceFiles.size());
       for (List<RevisionFile> files : sourceFiles.values()) {
         String lastFile = files.get(files.size() - 1).fileName;
@@ -121,14 +124,14 @@ public class TestIndexAndTaxonomyRevision extends ReplicatorTestCase {
       IOUtils.close(indexWriter, taxoWriter, taxoDir, indexDir);
     }
   }
-  
+
   @Test
   public void testOpen() throws Exception {
     Directory indexDir = newDirectory();
     IndexWriterConfig conf = new IndexWriterConfig(null);
     conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
     IndexWriter indexWriter = new IndexWriter(indexDir, conf);
-  
+
     Directory taxoDir = newDirectory();
     SnapshotDirectoryTaxonomyWriter taxoWriter = new SnapshotDirectoryTaxonomyWriter(taxoDir);
     try {
@@ -136,7 +139,7 @@ public class TestIndexAndTaxonomyRevision extends ReplicatorTestCase {
       indexWriter.commit();
       taxoWriter.commit();
       Revision rev = new IndexAndTaxonomyRevision(indexWriter, taxoWriter);
-      for (Entry<String,List<RevisionFile>> e : rev.getSourceFiles().entrySet()) {
+      for (Entry<String, List<RevisionFile>> e : rev.getSourceFiles().entrySet()) {
         String source = e.getKey();
         @SuppressWarnings("resource") // silly, both directories are closed in the end
         Directory dir = source.equals(IndexAndTaxonomyRevision.INDEX_SOURCE) ?
indexDir : taxoDir; @@ -167,5 +170,4 @@ public class TestIndexAndTaxonomyRevision extends ReplicatorTestCase { IOUtils.close(indexWriter, taxoWriter, taxoDir, indexDir); } } - } diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/TestIndexReplicationClient.java b/lucene/replicator/src/test/org/apache/lucene/replicator/TestIndexReplicationClient.java index 129af31fb72..1c8b8208996 100644 --- a/lucene/replicator/src/test/org/apache/lucene/replicator/TestIndexReplicationClient.java +++ b/lucene/replicator/src/test/org/apache/lucene/replicator/TestIndexReplicationClient.java @@ -21,7 +21,6 @@ import java.io.IOException; import java.util.HashMap; import java.util.concurrent.Callable; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; @@ -39,13 +38,13 @@ import org.junit.Before; import org.junit.Test; public class TestIndexReplicationClient extends ReplicatorTestCase { - + private static class IndexReadyCallback implements Callable, Closeable { - + private final Directory indexDir; - private DirectoryReader reader; + private DirectoryReader reader; private long lastGeneration = -1; - + public IndexReadyCallback(Directory indexDir) throws IOException { this.indexDir = indexDir; if (DirectoryReader.indexExists(indexDir)) { @@ -53,7 +52,7 @@ public class TestIndexReplicationClient extends ReplicatorTestCase { lastGeneration = reader.getIndexCommit().getGeneration(); } } - + @Override public Boolean call() throws Exception { if (reader == null) { @@ -61,9 +60,12 @@ public class TestIndexReplicationClient extends ReplicatorTestCase { lastGeneration = reader.getIndexCommit().getGeneration(); } else { DirectoryReader newReader = DirectoryReader.openIfChanged(reader); - assertNotNull("should not have reached here if no changes were made to the index", newReader); + assertNotNull( + "should not have reached here if no changes were made to the index", newReader); long newGeneration = newReader.getIndexCommit().getGeneration(); - assertTrue("expected newer generation; current=" + lastGeneration + " new=" + newGeneration, newGeneration > lastGeneration); + assertTrue( + "expected newer generation; current=" + lastGeneration + " new=" + newGeneration, + newGeneration > lastGeneration); reader.close(); reader = newReader; lastGeneration = newGeneration; @@ -71,13 +73,13 @@ public class TestIndexReplicationClient extends ReplicatorTestCase { } return null; } - + @Override public void close() throws IOException { IOUtils.close(reader); } } - + private MockDirectoryWrapper publishDir, handlerDir; private Replicator replicator; private SourceDirectoryFactory sourceDirFactory; @@ -85,9 +87,9 @@ public class TestIndexReplicationClient extends ReplicatorTestCase { private ReplicationHandler handler; private IndexWriter publishWriter; private IndexReadyCallback callback; - + private static final String VERSION_ID = "version"; - + private void assertHandlerRevision(int expectedID, Directory dir) throws IOException { // loop as long as client is alive. test-framework will terminate us if // there's a serious bug, e.g. client doesn't really update. 
otherwise, @@ -103,11 +105,18 @@ public class TestIndexReplicationClient extends ReplicatorTestCase { try { DirectoryReader reader = DirectoryReader.open(dir); try { - int handlerID = Integer.parseInt(reader.getIndexCommit().getUserData().get(VERSION_ID), 16); + int handlerID = + Integer.parseInt(reader.getIndexCommit().getUserData().get(VERSION_ID), 16); if (expectedID == handlerID) { return; } else if (VERBOSE) { - System.out.println("expectedID=" + expectedID + " actual=" + handlerID + " generation=" + reader.getIndexCommit().getGeneration()); + System.out.println( + "expectedID=" + + expectedID + + " actual=" + + handlerID + + " generation=" + + reader.getIndexCommit().getGeneration()); } } finally { reader.close(); @@ -119,16 +128,19 @@ public class TestIndexReplicationClient extends ReplicatorTestCase { } } } - + private Revision createRevision(final int id) throws IOException { publishWriter.addDocument(new Document()); - publishWriter.setLiveCommitData(new HashMap() {{ - put(VERSION_ID, Integer.toString(id, 16)); - }}.entrySet()); + publishWriter.setLiveCommitData( + new HashMap() { + { + put(VERSION_ID, Integer.toString(id, 16)); + } + }.entrySet()); publishWriter.commit(); return new IndexRevision(publishWriter); } - + @Override @Before public void setUp() throws Exception { @@ -140,12 +152,12 @@ public class TestIndexReplicationClient extends ReplicatorTestCase { callback = new IndexReadyCallback(handlerDir); handler = new IndexReplicationHandler(handlerDir, callback); client = new ReplicationClient(replicator, handler, sourceDirFactory); - + IndexWriterConfig conf = newIndexWriterConfig(null); conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy())); publishWriter = new IndexWriter(publishDir, conf); } - + @After @Override public void tearDown() throws Exception { @@ -153,52 +165,52 @@ public class TestIndexReplicationClient extends ReplicatorTestCase { IOUtils.close(client, callback, replicator, publishDir, handlerDir); super.tearDown(); } - + @Test public void testNoUpdateThread() throws Exception { assertNull("no version expected at start", handler.currentVersion()); - + // Callback validates the replicated index replicator.publish(createRevision(1)); client.updateNow(); - + replicator.publish(createRevision(2)); client.updateNow(); - + // Publish two revisions without update, handler should be upgraded to latest replicator.publish(createRevision(3)); replicator.publish(createRevision(4)); client.updateNow(); } - + @Test public void testUpdateThread() throws Exception { client.startUpdateThread(10, "index"); - + replicator.publish(createRevision(1)); assertHandlerRevision(1, handlerDir); - + replicator.publish(createRevision(2)); assertHandlerRevision(2, handlerDir); - + // Publish two revisions without update, handler should be upgraded to latest replicator.publish(createRevision(3)); replicator.publish(createRevision(4)); assertHandlerRevision(4, handlerDir); } - + @Test public void testRestart() throws Exception { replicator.publish(createRevision(1)); client.updateNow(); - + replicator.publish(createRevision(2)); client.updateNow(); - + client.stopUpdateThread(); client.close(); client = new ReplicationClient(replicator, handler, sourceDirFactory); - + // Publish two revisions without update, handler should be upgraded to latest replicator.publish(createRevision(3)); replicator.publish(createRevision(4)); @@ -218,105 +230,112 @@ public class TestIndexReplicationClient extends ReplicatorTestCase { client.updateNow(); client.close(); 
callback.close(); - + // wrap sourceDirFactory to return a MockDirWrapper so we can simulate errors final SourceDirectoryFactory in = sourceDirFactory; final AtomicInteger failures = new AtomicInteger(atLeast(10)); - sourceDirFactory = new SourceDirectoryFactory() { - - private long clientMaxSize = 100, handlerMaxSize = 100; - private double clientExRate = 1.0, handlerExRate = 1.0; - - @Override - public void cleanupSession(String sessionID) throws IOException { - in.cleanupSession(sessionID); - } - - @SuppressWarnings("synthetic-access") - @Override - public Directory getDirectory(String sessionID, String source) throws IOException { - Directory dir = in.getDirectory(sessionID, source); - if (random().nextBoolean() && failures.get() > 0) { // client should fail, return wrapped dir - MockDirectoryWrapper mdw = new MockDirectoryWrapper(random(), dir); - mdw.setRandomIOExceptionRateOnOpen(clientExRate); - mdw.setMaxSizeInBytes(clientMaxSize); - mdw.setRandomIOExceptionRate(clientExRate); - mdw.setCheckIndexOnClose(false); - clientMaxSize *= 2; - clientExRate /= 2; - return mdw; - } + sourceDirFactory = + new SourceDirectoryFactory() { - if (failures.get() > 0 && random().nextBoolean()) { // handler should fail - handlerDir.setMaxSizeInBytes(handlerMaxSize); - handlerDir.setRandomIOExceptionRateOnOpen(handlerExRate); - handlerDir.setRandomIOExceptionRate(handlerExRate); - handlerMaxSize *= 2; - handlerExRate /= 2; - } else { - // disable errors - handlerDir.setMaxSizeInBytes(0); - handlerDir.setRandomIOExceptionRate(0.0); - handlerDir.setRandomIOExceptionRateOnOpen(0.0); - } - return dir; - } - }; - - handler = new IndexReplicationHandler(handlerDir, new Callable() { - @Override - public Boolean call() throws Exception { - if (random().nextDouble() < 0.2 && failures.get() > 0) { - throw new RuntimeException("random exception from callback"); - } - return null; - } - }); - - // wrap handleUpdateException so we can act on the thrown exception - client = new ReplicationClient(replicator, handler, sourceDirFactory) { - @SuppressWarnings("synthetic-access") - @Override - protected void handleUpdateException(Throwable t) { - if (t instanceof IOException) { - if (VERBOSE) { - System.out.println("hit exception during update: " + t); - t.printStackTrace(System.out); + private long clientMaxSize = 100, handlerMaxSize = 100; + private double clientExRate = 1.0, handlerExRate = 1.0; + + @Override + public void cleanupSession(String sessionID) throws IOException { + in.cleanupSession(sessionID); } - try { - // test that the index can be read and also some basic statistics - DirectoryReader reader = DirectoryReader.open(handlerDir.getDelegate()); - try { - int numDocs = reader.numDocs(); - int version = Integer.parseInt(reader.getIndexCommit().getUserData().get(VERSION_ID), 16); - assertEquals(numDocs, version); - } finally { - reader.close(); + + @SuppressWarnings("synthetic-access") + @Override + public Directory getDirectory(String sessionID, String source) throws IOException { + Directory dir = in.getDirectory(sessionID, source); + if (random().nextBoolean() + && failures.get() > 0) { // client should fail, return wrapped dir + MockDirectoryWrapper mdw = new MockDirectoryWrapper(random(), dir); + mdw.setRandomIOExceptionRateOnOpen(clientExRate); + mdw.setMaxSizeInBytes(clientMaxSize); + mdw.setRandomIOExceptionRate(clientExRate); + mdw.setCheckIndexOnClose(false); + clientMaxSize *= 2; + clientExRate /= 2; + return mdw; } - // verify index consistency - TestUtil.checkIndex(handlerDir.getDelegate()); - 
} catch (IOException e) { - // exceptions here are bad, don't ignore them - throw new RuntimeException(e); - } finally { - // count-down number of failures - failures.decrementAndGet(); - assert failures.get() >= 0 : "handler failed too many times: " + failures.get(); - if (VERBOSE) { - if (failures.get() == 0) { - System.out.println("no more failures expected"); - } else { - System.out.println("num failures left: " + failures.get()); + + if (failures.get() > 0 && random().nextBoolean()) { // handler should fail + handlerDir.setMaxSizeInBytes(handlerMaxSize); + handlerDir.setRandomIOExceptionRateOnOpen(handlerExRate); + handlerDir.setRandomIOExceptionRate(handlerExRate); + handlerMaxSize *= 2; + handlerExRate /= 2; + } else { + // disable errors + handlerDir.setMaxSizeInBytes(0); + handlerDir.setRandomIOExceptionRate(0.0); + handlerDir.setRandomIOExceptionRateOnOpen(0.0); + } + return dir; + } + }; + + handler = + new IndexReplicationHandler( + handlerDir, + new Callable() { + @Override + public Boolean call() throws Exception { + if (random().nextDouble() < 0.2 && failures.get() > 0) { + throw new RuntimeException("random exception from callback"); + } + return null; } + }); + + // wrap handleUpdateException so we can act on the thrown exception + client = + new ReplicationClient(replicator, handler, sourceDirFactory) { + @SuppressWarnings("synthetic-access") + @Override + protected void handleUpdateException(Throwable t) { + if (t instanceof IOException) { + if (VERBOSE) { + System.out.println("hit exception during update: " + t); + t.printStackTrace(System.out); + } + try { + // test that the index can be read and also some basic statistics + DirectoryReader reader = DirectoryReader.open(handlerDir.getDelegate()); + try { + int numDocs = reader.numDocs(); + int version = + Integer.parseInt(reader.getIndexCommit().getUserData().get(VERSION_ID), 16); + assertEquals(numDocs, version); + } finally { + reader.close(); + } + // verify index consistency + TestUtil.checkIndex(handlerDir.getDelegate()); + } catch (IOException e) { + // exceptions here are bad, don't ignore them + throw new RuntimeException(e); + } finally { + // count-down number of failures + failures.decrementAndGet(); + assert failures.get() >= 0 : "handler failed too many times: " + failures.get(); + if (VERBOSE) { + if (failures.get() == 0) { + System.out.println("no more failures expected"); + } else { + System.out.println("num failures left: " + failures.get()); + } + } + } + } else { + if (t instanceof RuntimeException) throw (RuntimeException) t; + throw new RuntimeException(t); } } - } else { - if (t instanceof RuntimeException) throw (RuntimeException) t; - throw new RuntimeException(t); - } - } - }; - + }; + client.startUpdateThread(10, "index"); final Directory baseHandlerDir = handlerDir.getDelegate(); @@ -325,12 +344,11 @@ public class TestIndexReplicationClient extends ReplicatorTestCase { replicator.publish(createRevision(i)); assertHandlerRevision(i, baseHandlerDir); } - + // disable errors -- maybe randomness didn't exhaust all allowed failures, - // and we don't want e.g. CheckIndex to hit false errors. + // and we don't want e.g. CheckIndex to hit false errors. 
handlerDir.setMaxSizeInBytes(0); handlerDir.setRandomIOExceptionRate(0.0); handlerDir.setRandomIOExceptionRateOnOpen(0.0); } - } diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/TestIndexRevision.java b/lucene/replicator/src/test/org/apache/lucene/replicator/TestIndexRevision.java index 993a7edb311..9d67e7eaf75 100644 --- a/lucene/replicator/src/test/org/apache/lucene/replicator/TestIndexRevision.java +++ b/lucene/replicator/src/test/org/apache/lucene/replicator/TestIndexRevision.java @@ -19,7 +19,6 @@ package org.apache.lucene.replicator; import java.io.InputStream; import java.util.List; import java.util.Map; - import org.apache.lucene.document.Document; import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.index.IndexWriter; @@ -33,7 +32,7 @@ import org.apache.lucene.util.IOUtils; import org.junit.Test; public class TestIndexRevision extends ReplicatorTestCase { - + @Test public void testNoSnapshotDeletionPolicy() throws Exception { Directory dir = newDirectory(); @@ -41,14 +40,16 @@ public class TestIndexRevision extends ReplicatorTestCase { conf.setIndexDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()); IndexWriter writer = new IndexWriter(dir, conf); // should fail when IndexDeletionPolicy is not Snapshot - expectThrows(IllegalArgumentException.class, () -> { - new IndexRevision(writer); - }); + expectThrows( + IllegalArgumentException.class, + () -> { + new IndexRevision(writer); + }); writer.close(); IOUtils.close(dir); } - + @Test public void testNoCommit() throws Exception { Directory dir = newDirectory(); @@ -56,14 +57,16 @@ public class TestIndexRevision extends ReplicatorTestCase { conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy())); IndexWriter writer = new IndexWriter(dir, conf); // should fail when there are no commits to snapshot" - expectThrows(IllegalStateException.class, () -> { - new IndexRevision(writer); - }); + expectThrows( + IllegalStateException.class, + () -> { + new IndexRevision(writer); + }); writer.close(); IOUtils.close(dir); } - + @Test public void testRevisionRelease() throws Exception { Directory dir = newDirectory(); @@ -77,7 +80,7 @@ public class TestIndexRevision extends ReplicatorTestCase { // releasing that revision should not delete the files rev1.release(); assertTrue(slowFileExists(dir, IndexFileNames.SEGMENTS + "_1")); - + rev1 = new IndexRevision(writer); // create revision again, so the files are snapshotted writer.addDocument(new Document()); writer.commit(); @@ -88,7 +91,7 @@ public class TestIndexRevision extends ReplicatorTestCase { IOUtils.close(writer, dir); } } - + @Test public void testSegmentsFileLast() throws Exception { Directory dir = newDirectory(); @@ -110,7 +113,7 @@ public class TestIndexRevision extends ReplicatorTestCase { IOUtils.close(dir); } } - + @Test public void testOpen() throws Exception { Directory dir = newDirectory(); @@ -150,5 +153,4 @@ public class TestIndexRevision extends ReplicatorTestCase { IOUtils.close(dir); } } - } diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/TestLocalReplicator.java b/lucene/replicator/src/test/org/apache/lucene/replicator/TestLocalReplicator.java index 3b113f705f6..042ff934e32 100644 --- a/lucene/replicator/src/test/org/apache/lucene/replicator/TestLocalReplicator.java +++ b/lucene/replicator/src/test/org/apache/lucene/replicator/TestLocalReplicator.java @@ -23,7 +23,6 @@ import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map.Entry; - import 
org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexFileNames; @@ -38,13 +37,13 @@ import org.junit.Before; import org.junit.Test; public class TestLocalReplicator extends ReplicatorTestCase { - + private static final String VERSION_ID = "version"; - + private LocalReplicator replicator; private Directory sourceDir; private IndexWriter sourceWriter; - + @Before @Override public void setUp() throws Exception { @@ -55,7 +54,7 @@ public class TestLocalReplicator extends ReplicatorTestCase { sourceWriter = new IndexWriter(sourceDir, conf); replicator = new LocalReplicator(); } - + @After @Override public void tearDown() throws Exception { @@ -63,50 +62,59 @@ public class TestLocalReplicator extends ReplicatorTestCase { IOUtils.close(replicator, sourceDir); super.tearDown(); } - + private Revision createRevision(final int id) throws IOException { sourceWriter.addDocument(new Document()); - sourceWriter.setLiveCommitData(new HashMap() {{ - put(VERSION_ID, Integer.toString(id, 16)); - }}.entrySet()); + sourceWriter.setLiveCommitData( + new HashMap() { + { + put(VERSION_ID, Integer.toString(id, 16)); + } + }.entrySet()); sourceWriter.commit(); return new IndexRevision(sourceWriter); } - + @Test public void testCheckForUpdateNoRevisions() throws Exception { assertNull(replicator.checkForUpdate(null)); } - + @Test public void testObtainFileAlreadyClosed() throws IOException { replicator.publish(createRevision(1)); SessionToken res = replicator.checkForUpdate(null); assertNotNull(res); assertEquals(1, res.sourceFiles.size()); - Entry> entry = res.sourceFiles.entrySet().iterator().next(); + Entry> entry = res.sourceFiles.entrySet().iterator().next(); replicator.close(); - expectThrows(AlreadyClosedException.class, () -> { - replicator.obtainFile(res.id, entry.getKey(), entry.getValue().get(0).fileName); - }); + expectThrows( + AlreadyClosedException.class, + () -> { + replicator.obtainFile(res.id, entry.getKey(), entry.getValue().get(0).fileName); + }); } - + @Test public void testPublishAlreadyClosed() throws IOException { replicator.close(); - expectThrows(AlreadyClosedException.class, () -> { - replicator.publish(createRevision(2)); - }); + expectThrows( + AlreadyClosedException.class, + () -> { + replicator.publish(createRevision(2)); + }); } - + @Test public void testUpdateAlreadyClosed() throws IOException { replicator.close(); - expectThrows(AlreadyClosedException.class, () -> { - replicator.checkForUpdate(null); - }); + expectThrows( + AlreadyClosedException.class, + () -> { + replicator.checkForUpdate(null); + }); } - + @Test public void testPublishSameRevision() throws IOException { Revision rev = createRevision(1); @@ -118,35 +126,39 @@ public class TestLocalReplicator extends ReplicatorTestCase { replicator.publish(new IndexRevision(sourceWriter)); res = replicator.checkForUpdate(res.version); assertNull(res); - + // now make sure that publishing same revision doesn't leave revisions // "locked", i.e. 
that replicator releases revisions even when they are not // kept replicator.publish(createRevision(2)); assertEquals(1, DirectoryReader.listCommits(sourceDir).size()); } - + @Test public void testPublishOlderRev() throws IOException { replicator.publish(createRevision(1)); Revision old = new IndexRevision(sourceWriter); replicator.publish(createRevision(2)); // should fail to publish an older revision - expectThrows(IllegalArgumentException.class, () -> { - replicator.publish(old); - }); + expectThrows( + IllegalArgumentException.class, + () -> { + replicator.publish(old); + }); assertEquals(1, DirectoryReader.listCommits(sourceDir).size()); } - + @Test public void testObtainMissingFile() throws IOException { replicator.publish(createRevision(1)); SessionToken res = replicator.checkForUpdate(null); - expectThrowsAnyOf(Arrays.asList(FileNotFoundException.class, NoSuchFileException.class), () -> { - replicator.obtainFile(res.id, res.sourceFiles.keySet().iterator().next(), "madeUpFile"); - }); + expectThrowsAnyOf( + Arrays.asList(FileNotFoundException.class, NoSuchFileException.class), + () -> { + replicator.obtainFile(res.id, res.sourceFiles.keySet().iterator().next(), "madeUpFile"); + }); } - + @Test public void testSessionExpiration() throws IOException, InterruptedException { replicator.publish(createRevision(1)); @@ -154,11 +166,16 @@ public class TestLocalReplicator extends ReplicatorTestCase { replicator.setExpirationThreshold(5); // expire quickly Thread.sleep(50); // sufficient for expiration // should fail to obtain a file for an expired session - expectThrows(SessionExpiredException.class, () -> { - replicator.obtainFile(session.id, session.sourceFiles.keySet().iterator().next(), session.sourceFiles.values().iterator().next().get(0).fileName); - }); + expectThrows( + SessionExpiredException.class, + () -> { + replicator.obtainFile( + session.id, + session.sourceFiles.keySet().iterator().next(), + session.sourceFiles.values().iterator().next().get(0).fileName); + }); } - + @Test public void testUpdateToLatest() throws IOException { replicator.publish(createRevision(1)); @@ -168,7 +185,7 @@ public class TestLocalReplicator extends ReplicatorTestCase { assertNotNull(res); assertEquals(0, rev.compareTo(res.version)); } - + @Test public void testRevisionRelease() throws Exception { replicator.publish(createRevision(1)); @@ -176,7 +193,8 @@ public class TestLocalReplicator extends ReplicatorTestCase { replicator.publish(createRevision(2)); // now the files of revision 1 can be deleted assertTrue(slowFileExists(sourceDir, IndexFileNames.SEGMENTS + "_2")); - assertFalse("segments_1 should not be found in index directory after revision is released", slowFileExists(sourceDir, IndexFileNames.SEGMENTS + "_1")); + assertFalse( + "segments_1 should not be found in index directory after revision is released", + slowFileExists(sourceDir, IndexFileNames.SEGMENTS + "_1")); } - } diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/TestSessionToken.java b/lucene/replicator/src/test/org/apache/lucene/replicator/TestSessionToken.java index ed2643a39dd..f6cdb509f1a 100644 --- a/lucene/replicator/src/test/org/apache/lucene/replicator/TestSessionToken.java +++ b/lucene/replicator/src/test/org/apache/lucene/replicator/TestSessionToken.java @@ -22,7 +22,6 @@ import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.IOException; import java.util.List; - import org.apache.lucene.document.Document; import org.apache.lucene.index.IndexWriter; import 
org.apache.lucene.index.IndexWriterConfig; @@ -32,7 +31,7 @@ import org.apache.lucene.util.IOUtils; import org.junit.Test; public class TestSessionToken extends ReplicatorTestCase { - + @Test public void testSerialization() throws IOException { Directory dir = newDirectory(); @@ -42,7 +41,7 @@ public class TestSessionToken extends ReplicatorTestCase { writer.addDocument(new Document()); writer.commit(); Revision rev = new IndexRevision(writer); - + SessionToken session1 = new SessionToken("17", rev); ByteArrayOutputStream baos = new ByteArrayOutputStream(); session1.serialize(new DataOutputStream(baos)); @@ -60,5 +59,4 @@ public class TestSessionToken extends ReplicatorTestCase { writer.close(); IOUtils.close(dir); } - } diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/http/ReplicationServlet.java b/lucene/replicator/src/test/org/apache/lucene/replicator/http/ReplicationServlet.java index ccafc2a5268..99c1de32e46 100644 --- a/lucene/replicator/src/test/org/apache/lucene/replicator/http/ReplicationServlet.java +++ b/lucene/replicator/src/test/org/apache/lucene/replicator/http/ReplicationServlet.java @@ -17,23 +17,23 @@ package org.apache.lucene.replicator.http; import java.io.IOException; - import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; public class ReplicationServlet extends HttpServlet { - + private final ReplicationService service; private boolean respondWithError = false; - + public ReplicationServlet(ReplicationService service) { this.service = service; } - + @Override - protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { + protected void doGet(HttpServletRequest req, HttpServletResponse resp) + throws ServletException, IOException { if (respondWithError) { resp.sendError(500, "Fake error"); } else { @@ -44,5 +44,4 @@ public class ReplicationServlet extends HttpServlet { public void setRespondWithError(boolean respondWithError) { this.respondWithError = respondWithError; } - } diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/http/TestHttpReplicator.java b/lucene/replicator/src/test/org/apache/lucene/replicator/http/TestHttpReplicator.java index 6f65552e111..d5b3ab81bab 100644 --- a/lucene/replicator/src/test/org/apache/lucene/replicator/http/TestHttpReplicator.java +++ b/lucene/replicator/src/test/org/apache/lucene/replicator/http/TestHttpReplicator.java @@ -19,7 +19,6 @@ package org.apache.lucene.replicator.http; import java.io.IOException; import java.nio.file.Path; import java.util.Collections; - import org.apache.http.impl.conn.BasicHttpClientConnectionManager; import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; @@ -51,18 +50,20 @@ public class TestHttpReplicator extends ReplicatorTestCase { private String host; private Directory serverIndexDir, handlerIndexDir; private ReplicationServlet replicationServlet; - + private void startServer() throws Exception { ServletHandler replicationHandler = new ServletHandler(); - ReplicationService service = new ReplicationService(Collections.singletonMap("s1", serverReplicator)); + ReplicationService service = + new ReplicationService(Collections.singletonMap("s1", serverReplicator)); replicationServlet = new ReplicationServlet(service); ServletHolder servlet = new ServletHolder(replicationServlet); - replicationHandler.addServletWithMapping(servlet, 
ReplicationService.REPLICATION_CONTEXT + "/*"); + replicationHandler.addServletWithMapping( + servlet, ReplicationService.REPLICATION_CONTEXT + "/*"); server = newHttpServer(replicationHandler); port = serverPort(server); host = serverHost(server); } - + @Before @Override public void setUp() throws Exception { @@ -75,13 +76,13 @@ public class TestHttpReplicator extends ReplicatorTestCase { serverIndexDir = newDirectory(); serverReplicator = new LocalReplicator(); startServer(); - + IndexWriterConfig conf = newIndexWriterConfig(null); conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy())); writer = new IndexWriter(serverIndexDir, conf); reader = DirectoryReader.open(writer); } - + @Override public void tearDown() throws Exception { stopHttpServer(server); @@ -89,7 +90,7 @@ public class TestHttpReplicator extends ReplicatorTestCase { IOUtils.close(reader, handlerIndexDir, serverIndexDir); super.tearDown(); } - + private void publishRevision(int id) throws IOException { Document doc = new Document(); writer.addDocument(doc); @@ -97,54 +98,65 @@ public class TestHttpReplicator extends ReplicatorTestCase { writer.commit(); serverReplicator.publish(new IndexRevision(writer)); } - + private void reopenReader() throws IOException { DirectoryReader newReader = DirectoryReader.openIfChanged(reader); assertNotNull(newReader); reader.close(); reader = newReader; } - + @Test public void testBasic() throws Exception { - Replicator replicator = new HttpReplicator(host, port, ReplicationService.REPLICATION_CONTEXT + "/s1", - getClientConnectionManager()); - ReplicationClient client = new ReplicationClient(replicator, new IndexReplicationHandler(handlerIndexDir, null), - new PerSessionDirectoryFactory(clientWorkDir)); - + Replicator replicator = + new HttpReplicator( + host, + port, + ReplicationService.REPLICATION_CONTEXT + "/s1", + getClientConnectionManager()); + ReplicationClient client = + new ReplicationClient( + replicator, + new IndexReplicationHandler(handlerIndexDir, null), + new PerSessionDirectoryFactory(clientWorkDir)); + publishRevision(1); client.updateNow(); reopenReader(); assertEquals(1, Integer.parseInt(reader.getIndexCommit().getUserData().get("ID"), 16)); - + publishRevision(2); client.updateNow(); reopenReader(); assertEquals(2, Integer.parseInt(reader.getIndexCommit().getUserData().get("ID"), 16)); - + client.close(); } - - @Test + + @Test public void testServerErrors() throws Exception { // tests the behaviour of the client when the server sends an error // must use BasicClientConnectionManager to test whether the client is closed correctly BasicHttpClientConnectionManager conMgr = new BasicHttpClientConnectionManager(); - Replicator replicator = new HttpReplicator(host, port, ReplicationService.REPLICATION_CONTEXT + "/s1", conMgr); - ReplicationClient client = new ReplicationClient(replicator, new IndexReplicationHandler(handlerIndexDir, null), - new PerSessionDirectoryFactory(clientWorkDir)); - + Replicator replicator = + new HttpReplicator(host, port, ReplicationService.REPLICATION_CONTEXT + "/s1", conMgr); + ReplicationClient client = + new ReplicationClient( + replicator, + new IndexReplicationHandler(handlerIndexDir, null), + new PerSessionDirectoryFactory(clientWorkDir)); + try { publishRevision(5); replicationServlet.setRespondWithError(true); expectThrows(Exception.class, client::updateNow); - + replicationServlet.setRespondWithError(false); client.updateNow(); // now it should work reopenReader(); assertEquals(5, 
Integer.parseInt(reader.getIndexCommit().getUserData().get("ID"), 16));
-      
+
       client.close();
     } finally {
       replicationServlet.setRespondWithError(false);
diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/Jobs.java b/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/Jobs.java
index 87feb33119a..75ac1792132 100644
--- a/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/Jobs.java
+++ b/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/Jobs.java
@@ -20,12 +20,12 @@ package org.apache.lucene.replicator.nrt;
 import java.io.Closeable;
 import java.io.IOException;
 import java.util.PriorityQueue;
-
 import org.apache.lucene.store.AlreadyClosedException;
 
-/** Runs CopyJob(s) in background thread; each ReplicaNode has an instance of this
- * running. At a given there could be one NRT copy job running, and multiple
- * pre-warm merged segments jobs. */
+/**
+ * Runs CopyJob(s) in background thread; each ReplicaNode has an instance of this running. At a
+ * given time there could be one NRT copy job running, and multiple pre-warm merged segments jobs.
+ */
 class Jobs extends Thread implements Closeable {
 
   private final PriorityQueue<CopyJob> queue = new PriorityQueue<>();
@@ -38,7 +38,10 @@ class Jobs extends Thread implements Closeable {
 
   private boolean finish;
 
-  /** Returns null if we are closing, else, returns the top job or waits for one to arrive if the queue is empty. */
+  /**
+   * Returns null if we are closing, else, returns the top job or waits for one to arrive if the
+   * queue is empty.
+   */
   private synchronized SimpleCopyJob getNextJob() {
     while (true) {
       if (finish) {
@@ -111,7 +114,7 @@ class Jobs extends Thread implements Closeable {
 
     node.message("top: jobs now exit run thread");
 
-    synchronized(this) {
+    synchronized (this) {
       // Gracefully cancel any jobs we didn't finish:
       while (queue.isEmpty() == false) {
         SimpleCopyJob job = (SimpleCopyJob) queue.poll();
@@ -145,7 +148,8 @@ class Jobs extends Thread implements Closeable {
   public synchronized void cancelConflictingJobs(CopyJob newJob) throws IOException {
     for (CopyJob job : queue) {
       if (job.conflicts(newJob)) {
-        node.message("top: now cancel existing conflicting job=" + job + " due to newJob=" + newJob);
+        node.message(
+            "top: now cancel existing conflicting job=" + job + " due to newJob=" + newJob);
         job.cancel("conflicts with new job", null);
       }
     }
diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/NodeProcess.java b/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/NodeProcess.java
index daeffc79f1c..0392fa4669c 100644
--- a/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/NodeProcess.java
+++ b/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/NodeProcess.java
@@ -21,10 +21,12 @@ import java.io.Closeable;
 import java.io.IOException;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.locks.ReentrantLock;
-
 import org.apache.lucene.document.Document;
 
-/** Parent JVM hold this "wrapper" to refer to each child JVM. This is roughly equivalent e.g. to a client-side "sugar" API. */
+/**
+ * Parent JVM holds this "wrapper" to refer to each child JVM. This is roughly equivalent e.g. to a
+ * client-side "sugar" API.
+ */ class NodeProcess implements Closeable { final Process p; @@ -50,7 +52,15 @@ class NodeProcess implements Closeable { final AtomicBoolean nodeIsClosing; - public NodeProcess(Process p, int id, int tcpPort, Thread pumper, boolean isPrimary, long initCommitVersion, long initInfosVersion, AtomicBoolean nodeIsClosing) { + public NodeProcess( + Process p, + int id, + int tcpPort, + Thread pumper, + boolean isPrimary, + long initCommitVersion, + long initInfosVersion, + AtomicBoolean nodeIsClosing) { this.p = p; this.id = id; this.tcpPort = tcpPort; @@ -59,7 +69,8 @@ class NodeProcess implements Closeable { this.initCommitVersion = initCommitVersion; this.initInfosVersion = initInfosVersion; this.nodeIsClosing = nodeIsClosing; - assert initInfosVersion >= initCommitVersion: "initInfosVersion=" + initInfosVersion + " initCommitVersion=" + initCommitVersion; + assert initInfosVersion >= initCommitVersion + : "initInfosVersion=" + initInfosVersion + " initCommitVersion=" + initCommitVersion; lock = new ReentrantLock(); } @@ -114,8 +125,11 @@ class NodeProcess implements Closeable { } } - /** Ask the primary node process to flush. We send it all currently up replicas so it can notify them about the new NRT point. Returns the newly - * flushed version, or a negative (current) version if there were no changes. */ + /** + * Ask the primary node process to flush. We send it all currently up replicas so it can notify + * them about the new NRT point. Returns the newly flushed version, or a negative (current) + * version if there were no changes. + */ public synchronized long flush(int atLeastMarkerCount) throws IOException { assert isPrimary; try (Connection c = new Connection(tcpPort)) { @@ -135,11 +149,11 @@ class NodeProcess implements Closeable { public synchronized boolean shutdown() { lock.lock(); try { - //System.out.println("PARENT: now shutdown node=" + id + " isOpen=" + isOpen); + // System.out.println("PARENT: now shutdown node=" + id + " isOpen=" + isOpen); if (isOpen) { // Ask the child process to shutdown gracefully: isOpen = false; - //System.out.println("PARENT: send CMD_CLOSE to node=" + id); + // System.out.println("PARENT: send CMD_CLOSE to node=" + id); try (Connection c = new Connection(tcpPort)) { c.out.writeByte(SimplePrimaryNode.CMD_CLOSE); c.flush(); @@ -248,4 +262,3 @@ class NodeProcess implements Closeable { c.in.readByte(); } } - diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimpleCopyJob.java b/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimpleCopyJob.java index 7e148810b47..f7cd7f1da24 100644 --- a/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimpleCopyJob.java +++ b/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimpleCopyJob.java @@ -25,18 +25,27 @@ import java.util.Map; import java.util.Set; import org.apache.lucene.util.IOUtils; -/** Handles one set of files that need copying, either because we have a - * new NRT point, or we are pre-copying merged files for merge warming. */ +/** + * Handles one set of files that need copying, either because we have a new NRT point, or we are + * pre-copying merged files for merge warming. 
+ */
 class SimpleCopyJob extends CopyJob {
   final Connection c;
 
   final byte[] copyBuffer = new byte[65536];
   final CopyState copyState;
 
-  private Iterator<Map.Entry<String,FileMetaData>> iter;
+  private Iterator<Map.Entry<String, FileMetaData>> iter;
 
-  public SimpleCopyJob(String reason, Connection c, CopyState copyState, SimpleReplicaNode dest, Map<String,FileMetaData> files, boolean highPriority, OnceDone onceDone)
-    throws IOException {
+  public SimpleCopyJob(
+      String reason,
+      Connection c,
+      CopyState copyState,
+      SimpleReplicaNode dest,
+      Map<String, FileMetaData> files,
+      boolean highPriority,
+      OnceDone onceDone)
+      throws IOException {
     super(reason, files, dest, highPriority, onceDone);
     dest.message("create SimpleCopyJob o" + ord);
     this.c = c;
@@ -59,7 +68,7 @@ class SimpleCopyJob extends CopyJob {
       totBytes += current.metaData.length;
     }
 
-    for (Map.Entry<String,FileMetaData> ent : toCopy) {
+    for (Map.Entry<String, FileMetaData> ent : toCopy) {
       String fileName = ent.getKey();
       FileMetaData metaData = ent.getValue();
       totBytes += metaData.length;
@@ -72,15 +81,26 @@ class SimpleCopyJob extends CopyJob {
       c.s.shutdownOutput();
 
       if (current != null) {
-        // Do this only at the end, after sending all requested files, so we don't deadlock due to socket buffering waiting for primary to
+        // Do this only at the end, after sending all requested files, so we don't deadlock due to
+        // socket buffering waiting for primary to
        // send us this length:
         long len = c.in.readVLong();
         if (len != current.metaData.length) {
-          throw new IllegalStateException("file " + current.name + ": meta data says length=" + current.metaData.length + " but c.in says " + len);
+          throw new IllegalStateException(
+              "file "
+                  + current.name
+                  + ": meta data says length="
+                  + current.metaData.length
+                  + " but c.in says "
+                  + len);
         }
       }
 
-      dest.message("SimpleCopyJob.init: done start files count=" + toCopy.size() + " totBytes=" + totBytes);
+      dest.message(
+          "SimpleCopyJob.init: done start files count="
+              + toCopy.size()
+              + " totBytes="
+              + totBytes);
 
     } catch (Throwable t) {
       cancel("exc during start", t);
@@ -99,7 +119,7 @@ class SimpleCopyJob extends CopyJob {
   @Override
   public Set<String> getFileNamesToCopy() {
     Set<String> fileNames = new HashSet<>();
-    for(Map.Entry<String,FileMetaData> ent : toCopy) {
+    for (Map.Entry<String, FileMetaData> ent : toCopy) {
       fileNames.add(ent.getKey());
     }
     return fileNames;
@@ -127,16 +147,20 @@ class SimpleCopyJob extends CopyJob {
 
   @Override
   public void finish() throws IOException {
-    dest.message(String.format(Locale.ROOT,
-        "top: file copy done; took %.1f msec to copy %d bytes; now rename %d tmp files",
-        (System.nanoTime() - startNS)/1000000.0,
-        totBytesCopied,
-        copiedFiles.size()));
+    dest.message(
+        String.format(
+            Locale.ROOT,
+            "top: file copy done; took %.1f msec to copy %d bytes; now rename %d tmp files",
+            (System.nanoTime() - startNS) / 1000000.0,
+            totBytesCopied,
+            copiedFiles.size()));
 
-    // NOTE: if any of the files we copied overwrote a file in the current commit point, we (ReplicaNode) removed the commit point up
-    // front so that the commit is not corrupt. This way if we hit exc here, or if we crash here, we won't leave a corrupt commit in
+    // NOTE: if any of the files we copied overwrote a file in the current commit point, we
+    // (ReplicaNode) removed the commit point up
+    // front so that the commit is not corrupt. 
This way if we hit exc here, or if we crash here, + // we won't leave a corrupt commit in // the index: - for(Map.Entry ent : copiedFiles.entrySet()) { + for (Map.Entry ent : copiedFiles.entrySet()) { String tmpFileName = ent.getValue(); String fileName = ent.getKey(); @@ -144,8 +168,10 @@ class SimpleCopyJob extends CopyJob { dest.message("rename file " + tmpFileName + " to " + fileName); } - // NOTE: if this throws exception, then some files have been moved to their true names, and others are leftover .tmp files. I don't - // think heroic exception handling is necessary (no harm will come, except some leftover files), nor warranted here (would make the + // NOTE: if this throws exception, then some files have been moved to their true names, and + // others are leftover .tmp files. I don't + // think heroic exception handling is necessary (no harm will come, except some leftover + // files), nor warranted here (would make the // code more complex, for the exceptional cases when something is wrong w/ your IO system): dest.dir.rename(tmpFileName, fileName); } @@ -166,12 +192,18 @@ class SimpleCopyJob extends CopyJob { return true; } - Map.Entry next = iter.next(); + Map.Entry next = iter.next(); FileMetaData metaData = next.getValue(); String fileName = next.getKey(); long len = c.in.readVLong(); if (len != metaData.length) { - throw new IllegalStateException("file " + fileName + ": meta data says length=" + metaData.length + " but c.in says " + len); + throw new IllegalStateException( + "file " + + fileName + + ": meta data says length=" + + metaData.length + + " but c.in says " + + len); } current = new CopyOneFile(c.in, dest, fileName, metaData, copyBuffer); } @@ -180,7 +212,8 @@ class SimpleCopyJob extends CopyJob { // This file is done copying copiedFiles.put(current.name, current.tmpName); totBytesCopied += current.getBytesCopied(); - assert totBytesCopied <= totBytes: "totBytesCopied=" + totBytesCopied + " totBytes=" + totBytes; + assert totBytesCopied <= totBytes + : "totBytesCopied=" + totBytesCopied + " totBytes=" + totBytes; current = null; return false; } @@ -213,15 +246,30 @@ class SimpleCopyJob extends CopyJob { public boolean getFailed() { return exc != null; } - + @Override public String toString() { - return "SimpleCopyJob(ord=" + ord + " " + reason + " highPriority=" + highPriority + " files count=" + files.size() + " bytesCopied=" + totBytesCopied + " (of " + totBytes + ") filesCopied=" + copiedFiles.size() + ")"; + return "SimpleCopyJob(ord=" + + ord + + " " + + reason + + " highPriority=" + + highPriority + + " files count=" + + files.size() + + " bytesCopied=" + + totBytesCopied + + " (of " + + totBytes + + ") filesCopied=" + + copiedFiles.size() + + ")"; } @Override public void runBlocking() throws IOException { - while (visit() == false); + while (visit() == false) + ; if (getFailed()) { throw new RuntimeException("copy failed: " + cancelReason, exc); @@ -236,13 +284,13 @@ class SimpleCopyJob extends CopyJob { @Override public synchronized boolean conflicts(CopyJob _other) { Set filesToCopy = new HashSet<>(); - for(Map.Entry ent : toCopy) { + for (Map.Entry ent : toCopy) { filesToCopy.add(ent.getKey()); } SimpleCopyJob other = (SimpleCopyJob) _other; synchronized (other) { - for(Map.Entry ent : other.toCopy) { + for (Map.Entry ent : other.toCopy) { if (filesToCopy.contains(ent.getKey())) { return true; } diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimplePrimaryNode.java 
b/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimplePrimaryNode.java index bb39135ec66..51f1fc2f9dc 100644 --- a/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimplePrimaryNode.java +++ b/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimplePrimaryNode.java @@ -35,7 +35,6 @@ import java.util.Map; import java.util.Random; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; - import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; @@ -68,7 +67,6 @@ import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.ThreadInterruptedException; /** A primary node that uses simple TCP connections to send commands and copy files */ - class SimplePrimaryNode extends PrimaryNode { final int tcpPort; @@ -88,10 +86,10 @@ class SimplePrimaryNode extends PrimaryNode { static class MergePreCopy { final List connections = Collections.synchronizedList(new ArrayList<>()); - final Map files; + final Map files; private boolean finished; - public MergePreCopy(Map files) { + public MergePreCopy(Map files) { this.files = files; } @@ -114,9 +112,24 @@ class SimplePrimaryNode extends PrimaryNode { } } - public SimplePrimaryNode(Random random, Path indexPath, int id, int tcpPort, long primaryGen, long forcePrimaryVersion, SearcherFactory searcherFactory, - boolean doFlipBitsDuringCopy, boolean doCheckIndexOnClose) throws IOException { - super(initWriter(id, random, indexPath, doCheckIndexOnClose), id, primaryGen, forcePrimaryVersion, searcherFactory, System.out); + public SimplePrimaryNode( + Random random, + Path indexPath, + int id, + int tcpPort, + long primaryGen, + long forcePrimaryVersion, + SearcherFactory searcherFactory, + boolean doFlipBitsDuringCopy, + boolean doCheckIndexOnClose) + throws IOException { + super( + initWriter(id, random, indexPath, doCheckIndexOnClose), + id, + primaryGen, + forcePrimaryVersion, + searcherFactory, + System.out); this.tcpPort = tcpPort; this.random = new Random(random.nextLong()); this.doFlipBitsDuringCopy = doFlipBitsDuringCopy; @@ -124,12 +137,17 @@ class SimplePrimaryNode extends PrimaryNode { /** Records currently alive replicas. 
*/ public synchronized void setReplicas(int[] replicaIDs, int[] replicaTCPPorts) { - message("top: set replicasIDs=" + Arrays.toString(replicaIDs) + " tcpPorts=" + Arrays.toString(replicaTCPPorts)); + message( + "top: set replicasIDs=" + + Arrays.toString(replicaIDs) + + " tcpPorts=" + + Arrays.toString(replicaTCPPorts)); this.replicaIDs = replicaIDs; this.replicaTCPPorts = replicaTCPPorts; } - private static IndexWriter initWriter(int id, Random random, Path indexPath, boolean doCheckIndexOnClose) throws IOException { + private static IndexWriter initWriter( + int id, Random random, Path indexPath, boolean doCheckIndexOnClose) throws IOException { Directory dir = SimpleReplicaNode.getDirectory(random, id, indexPath, doCheckIndexOnClose); MockAnalyzer analyzer = new MockAnalyzer(random); @@ -137,7 +155,7 @@ class SimplePrimaryNode extends PrimaryNode { IndexWriterConfig iwc = LuceneTestCase.newIndexWriterConfig(random, analyzer); MergePolicy mp = iwc.getMergePolicy(); - //iwc.setInfoStream(new PrintStreamInfoStream(System.out)); + // iwc.setInfoStream(new PrintStreamInfoStream(System.out)); // Force more frequent merging so we stress merge warming: if (mp instanceof TieredMergePolicy) { @@ -156,14 +174,23 @@ class SimplePrimaryNode extends PrimaryNode { } @Override - protected void preCopyMergedSegmentFiles(SegmentCommitInfo info, Map files) throws IOException { + protected void preCopyMergedSegmentFiles(SegmentCommitInfo info, Map files) + throws IOException { int[] replicaTCPPorts = this.replicaTCPPorts; if (replicaTCPPorts == null) { message("no replicas; skip warming " + info); return; } - message("top: warm merge " + info + " to " + replicaTCPPorts.length + " replicas; tcpPort=" + tcpPort + ": files=" + files.keySet()); + message( + "top: warm merge " + + info + + " to " + + replicaTCPPorts.length + + " replicas; tcpPort=" + + tcpPort + + ": files=" + + files.keySet()); MergePreCopy preCopy = new MergePreCopy(files); warmingSegments.add(preCopy); @@ -185,15 +212,17 @@ class SimplePrimaryNode extends PrimaryNode { message("warm connection " + c.s); preCopy.connections.add(c); } catch (Throwable t) { - message("top: ignore exception trying to warm to replica port " + replicaTCPPort + ": " + t); - //t.printStackTrace(System.out); + message( + "top: ignore exception trying to warm to replica port " + replicaTCPPort + ": " + t); + // t.printStackTrace(System.out); } } long startNS = System.nanoTime(); long lastWarnNS = startNS; - // TODO: maybe ... place some sort of time limit on how long we are willing to wait for slow replica(s) to finish copying? + // TODO: maybe ... place some sort of time limit on how long we are willing to wait for slow + // replica(s) to finish copying? 
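    // (A sketch of that TODO, not part of the original change: assuming a hard budget is
    // acceptable — say a hypothetical 5 minutes — the wait loop below could be bounded with
    //
    //   long deadlineNS = System.nanoTime() + TimeUnit.MINUTES.toNanos(5);
    //   while (preCopy.finished() == false && System.nanoTime() < deadlineNS) { ... }
    //
    // closing any surviving connections with IOUtils.closeWhileHandlingException once the
    // deadline passes, the same way the isClosed() branch below does.)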
while (preCopy.finished() == false) { try { Thread.sleep(10); @@ -203,7 +232,7 @@ class SimplePrimaryNode extends PrimaryNode { if (isClosed()) { message("top: primary is closing: now cancel segment warming"); - synchronized(preCopy.connections) { + synchronized (preCopy.connections) { IOUtils.closeWhileHandlingException(preCopy.connections); } return; @@ -211,12 +240,20 @@ class SimplePrimaryNode extends PrimaryNode { long ns = System.nanoTime(); if (ns - lastWarnNS > 1000000000L) { - message(String.format(Locale.ROOT, "top: warning: still warming merge " + info + " to " + preCopy.connections.size() + " replicas for %.1f sec...", (ns - startNS)/1000000000.0)); + message( + String.format( + Locale.ROOT, + "top: warning: still warming merge " + + info + + " to " + + preCopy.connections.size() + + " replicas for %.1f sec...", + (ns - startNS) / 1000000000.0)); lastWarnNS = ns; } // Process keep-alives: - synchronized(preCopy.connections) { + synchronized (preCopy.connections) { Iterator it = preCopy.connections.iterator(); while (it.hasNext()) { Connection c = it.next(); @@ -234,7 +271,13 @@ class SimplePrimaryNode extends PrimaryNode { if (b != 1) { throw new IllegalArgumentException(); } - message("connection socket=" + c.s + " is done warming its merge " + info + " files=" + files.keySet()); + message( + "connection socket=" + + c.s + + " is done warming its merge " + + info + + " files=" + + files.keySet()); IOUtils.closeWhileHandlingException(c); it.remove(); done = true; @@ -244,21 +287,37 @@ class SimplePrimaryNode extends PrimaryNode { // If > 2 sec since we saw a keep-alive, assume this replica is dead: if (done == false && nowNS - c.lastKeepAliveNS > 2000000000L) { - message("top: warning: replica socket=" + c.s + " for segment=" + info + " seems to be dead; closing files=" + files.keySet()); + message( + "top: warning: replica socket=" + + c.s + + " for segment=" + + info + + " seems to be dead; closing files=" + + files.keySet()); IOUtils.closeWhileHandlingException(c); it.remove(); done = true; } if (done == false && random.nextInt(1000) == 17) { - message("top: warning: now randomly dropping replica from merge warming; files=" + files.keySet()); + message( + "top: warning: now randomly dropping replica from merge warming; files=" + + files.keySet()); IOUtils.closeWhileHandlingException(c); it.remove(); done = true; } } catch (Throwable t) { - message("top: ignore exception trying to read byte during warm for segment=" + info + " to replica socket=" + c.s + ": " + t + " files=" + files.keySet()); + message( + "top: ignore exception trying to read byte during warm for segment=" + + info + + " to replica socket=" + + c.s + + ": " + + t + + " files=" + + files.keySet()); IOUtils.closeWhileHandlingException(c); it.remove(); } @@ -271,7 +330,8 @@ class SimplePrimaryNode extends PrimaryNode { } /** Flushes all indexing ops to disk and notifies all replicas that they should now copy */ - private void handleFlush(DataInput topIn, DataOutput topOut, BufferedOutputStream bos) throws IOException { + private void handleFlush(DataInput topIn, DataOutput topOut, BufferedOutputStream bos) + throws IOException { Thread.currentThread().setName("flush"); int atLeastMarkerCount = topIn.readVInt(); @@ -289,8 +349,9 @@ class SimplePrimaryNode extends PrimaryNode { // Something did get flushed (there were indexing ops since the last flush): verifyAtLeastMarkerCount(atLeastMarkerCount, null); - - // Tell caller the version before pushing to replicas, so that even if we crash after this, caller will 
know what version we + + // Tell caller the version before pushing to replicas, so that even if we crash after this, + // caller will know what version we // (possibly) pushed to some replicas. Alternatively we could make this 2 separate ops? long version = getCopyStateVersion(); message("send flushed version=" + version); @@ -298,7 +359,7 @@ class SimplePrimaryNode extends PrimaryNode { bos.flush(); // Notify current replicas: - for(int i=0;i seen = new ArrayList<>(); - for(ScoreDoc hit : hits.scoreDocs) { + for (ScoreDoc hit : hits.scoreDocs) { Document doc = searcher.doc(hit.doc); seen.add(Integer.parseInt(doc.get("docid").substring(1))); } Collections.sort(seen); message("saw markers:"); - for(int marker : seen) { + for (int marker : seen) { message("saw m" + marker); } - throw new IllegalStateException("at flush: marker count " + hitCount + " but expected at least " + expectedAtLeastCount + " version=" + version); + throw new IllegalStateException( + "at flush: marker count " + + hitCount + + " but expected at least " + + expectedAtLeastCount + + " version=" + + version); } if (out != null) { diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimpleReplicaNode.java b/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimpleReplicaNode.java index 874b5685dcb..17f6fd17771 100644 --- a/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimpleReplicaNode.java +++ b/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimpleReplicaNode.java @@ -32,7 +32,6 @@ import java.util.Map; import java.util.Random; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; - import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.Term; @@ -64,18 +63,27 @@ class SimpleReplicaNode extends ReplicaNode { /** Changes over time, as primary node crashes and moves around */ int curPrimaryTCPPort; - public SimpleReplicaNode(Random random, int id, int tcpPort, Path indexPath, long curPrimaryGen, int primaryTCPPort, - SearcherFactory searcherFactory, boolean doCheckIndexOnClose) throws IOException { - super(id, getDirectory(random, id, indexPath, doCheckIndexOnClose), searcherFactory, System.out); + public SimpleReplicaNode( + Random random, + int id, + int tcpPort, + Path indexPath, + long curPrimaryGen, + int primaryTCPPort, + SearcherFactory searcherFactory, + boolean doCheckIndexOnClose) + throws IOException { + super( + id, getDirectory(random, id, indexPath, doCheckIndexOnClose), searcherFactory, System.out); this.tcpPort = tcpPort; this.random = new Random(random.nextLong()); // Random IO throttling on file copies: 5 - 20 MB/sec: - double mbPerSec = 5 * (1.0 + 3*random.nextDouble()); + double mbPerSec = 5 * (1.0 + 3 * random.nextDouble()); message(String.format(Locale.ROOT, "top: will rate limit file fetch to %.2f MB/sec", mbPerSec)); fetchRateLimiter = new RateLimiter.SimpleRateLimiter(mbPerSec); this.curPrimaryTCPPort = primaryTCPPort; - + start(curPrimaryGen); // Handles fetching files from primary: @@ -95,7 +103,7 @@ class SimpleReplicaNode extends ReplicaNode { // Can't be sync'd when calling jobs since it can lead to deadlock: jobs.close(); message("top: jobs closed"); - synchronized(mergeCopyJobs) { + synchronized (mergeCopyJobs) { for (CopyJob job : mergeCopyJobs) { message("top: cancel merge copy job " + job); job.cancel("jobs closing", null); @@ -105,12 +113,18 @@ class SimpleReplicaNode extends ReplicaNode { } @Override - protected CopyJob 
newCopyJob(String reason, Map files, Map prevFiles, - boolean highPriority, CopyJob.OnceDone onceDone) throws IOException { + protected CopyJob newCopyJob( + String reason, + Map files, + Map prevFiles, + boolean highPriority, + CopyJob.OnceDone onceDone) + throws IOException { Connection c; CopyState copyState; - // Exceptions in here mean something went wrong talking over the socket, which are fine (e.g. primary node crashed): + // Exceptions in here mean something went wrong talking over the socket, which are fine (e.g. + // primary node crashed): try { c = new Connection(curPrimaryTCPPort); c.out.writeByte(SimplePrimaryNode.CMD_FETCH_FILES); @@ -132,14 +146,17 @@ class SimpleReplicaNode extends ReplicaNode { return new SimpleCopyJob(reason, c, copyState, this, files, highPriority, onceDone); } - static Directory getDirectory(Random random, int id, Path path, boolean doCheckIndexOnClose) throws IOException { + static Directory getDirectory(Random random, int id, Path path, boolean doCheckIndexOnClose) + throws IOException { MockDirectoryWrapper dir = LuceneTestCase.newMockFSDirectory(path); - + dir.setAssertNoUnrefencedFilesOnClose(true); dir.setCheckIndexOnClose(doCheckIndexOnClose); - // Corrupt any index files not referenced by current commit point; this is important (increases test evilness) because we may have done - // a hard crash of the previous JVM writing to this directory and so MDW's corrupt-unknown-files-on-close never ran: + // Corrupt any index files not referenced by current commit point; this is important (increases + // test evilness) because we may have done + // a hard crash of the previous JVM writing to this directory and so MDW's + // corrupt-unknown-files-on-close never ran: Node.nodeMessage(System.out, id, "top: corrupt unknown files"); dir.corruptUnknownFiles(); @@ -152,8 +169,16 @@ class SimpleReplicaNode extends ReplicaNode { static final byte CMD_PRE_COPY_MERGE = 17; /** Handles incoming request to the naive TCP server wrapping this node */ - void handleOneConnection(ServerSocket ss, AtomicBoolean stop, InputStream is, Socket socket, DataInput in, DataOutput out, BufferedOutputStream bos) throws IOException, InterruptedException { - //message("one connection: " + socket); + void handleOneConnection( + ServerSocket ss, + AtomicBoolean stop, + InputStream is, + Socket socket, + DataInput in, + DataOutput out, + BufferedOutputStream bos) + throws IOException, InterruptedException { + // message("one connection: " + socket); outer: while (true) { byte cmd; @@ -173,161 +198,177 @@ class SimpleReplicaNode extends ReplicaNode { break; } - switch(cmd) { - case CMD_NEW_NRT_POINT: - { - long version = in.readVLong(); - long newPrimaryGen = in.readVLong(); - Thread.currentThread().setName("recv-" + version); - curPrimaryTCPPort = in.readInt(); - message("newNRTPoint primaryTCPPort=" + curPrimaryTCPPort + " version=" + version + " newPrimaryGen=" + newPrimaryGen); - newNRTPoint(newPrimaryGen, version); - } - break; - - case SimplePrimaryNode.CMD_GET_SEARCHING_VERSION: - // This is called when primary has crashed and we need to elect a new primary from all the still running replicas: - - // Tricky: if a sync is just finishing up, i.e. 
managed to finish copying all files just before we crashed primary, and is now - // in the process of opening a new reader, we need to wait for it, to be sure we really pick the most current replica: - if (isCopying()) { - message("top: getSearchingVersion: now wait for finish sync"); - // TODO: use immediate concurrency instead of polling: - while (isCopying() && stop.get() == false) { - Thread.sleep(10); - message("top: curNRTCopy=" + curNRTCopy); + switch (cmd) { + case CMD_NEW_NRT_POINT: + { + long version = in.readVLong(); + long newPrimaryGen = in.readVLong(); + Thread.currentThread().setName("recv-" + version); + curPrimaryTCPPort = in.readInt(); + message( + "newNRTPoint primaryTCPPort=" + + curPrimaryTCPPort + + " version=" + + version + + " newPrimaryGen=" + + newPrimaryGen); + newNRTPoint(newPrimaryGen, version); } - message("top: getSearchingVersion: done wait for finish sync"); - } - if (stop.get() == false) { - out.writeVLong(getCurrentSearchingVersion()); - } else { - message("top: getSearchingVersion: stop waiting for finish sync: stop is set"); - } - break; + break; - case SimplePrimaryNode.CMD_SEARCH: - { - Thread.currentThread().setName("search"); - IndexSearcher searcher = mgr.acquire(); - try { - long version = ((DirectoryReader) searcher.getIndexReader()).getVersion(); - int hitCount = searcher.count(new TermQuery(new Term("body", "the"))); - //node.message("version=" + version + " searcher=" + searcher); - out.writeVLong(version); - out.writeVInt(hitCount); - bos.flush(); - } finally { - mgr.release(searcher); - } - } - continue outer; + case SimplePrimaryNode.CMD_GET_SEARCHING_VERSION: + // This is called when primary has crashed and we need to elect a new primary from all the + // still running replicas: - case SimplePrimaryNode.CMD_SEARCH_ALL: - { - Thread.currentThread().setName("search all"); - IndexSearcher searcher = mgr.acquire(); - try { - long version = ((DirectoryReader) searcher.getIndexReader()).getVersion(); - int hitCount = searcher.count(new MatchAllDocsQuery()); - //node.message("version=" + version + " searcher=" + searcher); - out.writeVLong(version); - out.writeVInt(hitCount); - bos.flush(); - } finally { - mgr.release(searcher); - } - } - continue outer; - - case SimplePrimaryNode.CMD_MARKER_SEARCH: - { - Thread.currentThread().setName("msearch"); - int expectedAtLeastCount = in.readVInt(); - IndexSearcher searcher = mgr.acquire(); - try { - long version = ((DirectoryReader) searcher.getIndexReader()).getVersion(); - int hitCount = searcher.count(new TermQuery(new Term("marker", "marker"))); - if (hitCount < expectedAtLeastCount) { - message("marker search: expectedAtLeastCount=" + expectedAtLeastCount + " but hitCount=" + hitCount); - TopDocs hits = searcher.search(new TermQuery(new Term("marker", "marker")), expectedAtLeastCount); - List seen = new ArrayList<>(); - for(ScoreDoc hit : hits.scoreDocs) { - Document doc = searcher.doc(hit.doc); - seen.add(Integer.parseInt(doc.get("docid").substring(1))); - } - Collections.sort(seen); - message("saw markers:"); - for(int marker : seen) { - message("saw m" + marker); - } + // Tricky: if a sync is just finishing up, i.e. 
managed to finish copying all files just + // before we crashed primary, and is now + // in the process of opening a new reader, we need to wait for it, to be sure we really + // pick the most current replica: + if (isCopying()) { + message("top: getSearchingVersion: now wait for finish sync"); + // TODO: use immediate concurrency instead of polling: + while (isCopying() && stop.get() == false) { + Thread.sleep(10); + message("top: curNRTCopy=" + curNRTCopy); } - - out.writeVLong(version); - out.writeVInt(hitCount); - bos.flush(); - } finally { - mgr.release(searcher); + message("top: getSearchingVersion: done wait for finish sync"); } - } - continue outer; + if (stop.get() == false) { + out.writeVLong(getCurrentSearchingVersion()); + } else { + message("top: getSearchingVersion: stop waiting for finish sync: stop is set"); + } + break; - case SimplePrimaryNode.CMD_COMMIT: - Thread.currentThread().setName("commit"); - commit(); - out.writeByte((byte) 1); - break; - - case SimplePrimaryNode.CMD_CLOSE: - Thread.currentThread().setName("close"); - ss.close(); - out.writeByte((byte) 1); - break outer; - - case CMD_PRE_COPY_MERGE: - Thread.currentThread().setName("merge copy"); - - long newPrimaryGen = in.readVLong(); - curPrimaryTCPPort = in.readVInt(); - Map files = SimpleServer.readFilesMetaData(in); - message("done reading files to copy files=" + files.keySet()); - AtomicBoolean finished = new AtomicBoolean(); - CopyJob job = launchPreCopyMerge(finished, newPrimaryGen, files); - message("done launching copy job files=" + files.keySet()); - - // Silly keep alive mechanism, else if e.g. we (replica node) crash, the primary - // won't notice for a very long time: - boolean success = false; - try { - int count = 0; - while (true) { - if (finished.get() || stop.get()) { - break; - } - Thread.sleep(10); - count++; - if (count == 100) { - // Once per second or so, we send a keep alive - message("send merge pre copy keep alive... files=" + files.keySet()); - - // To be evil, we sometimes fail to keep-alive, e.g. 
simulating a long GC pausing us: - if (random.nextBoolean()) { - out.writeByte((byte) 0); - count = 0; - } + case SimplePrimaryNode.CMD_SEARCH: + { + Thread.currentThread().setName("search"); + IndexSearcher searcher = mgr.acquire(); + try { + long version = ((DirectoryReader) searcher.getIndexReader()).getVersion(); + int hitCount = searcher.count(new TermQuery(new Term("body", "the"))); + // node.message("version=" + version + " searcher=" + searcher); + out.writeVLong(version); + out.writeVInt(hitCount); + bos.flush(); + } finally { + mgr.release(searcher); } } + continue outer; + case SimplePrimaryNode.CMD_SEARCH_ALL: + { + Thread.currentThread().setName("search all"); + IndexSearcher searcher = mgr.acquire(); + try { + long version = ((DirectoryReader) searcher.getIndexReader()).getVersion(); + int hitCount = searcher.count(new MatchAllDocsQuery()); + // node.message("version=" + version + " searcher=" + searcher); + out.writeVLong(version); + out.writeVInt(hitCount); + bos.flush(); + } finally { + mgr.release(searcher); + } + } + continue outer; + + case SimplePrimaryNode.CMD_MARKER_SEARCH: + { + Thread.currentThread().setName("msearch"); + int expectedAtLeastCount = in.readVInt(); + IndexSearcher searcher = mgr.acquire(); + try { + long version = ((DirectoryReader) searcher.getIndexReader()).getVersion(); + int hitCount = searcher.count(new TermQuery(new Term("marker", "marker"))); + if (hitCount < expectedAtLeastCount) { + message( + "marker search: expectedAtLeastCount=" + + expectedAtLeastCount + + " but hitCount=" + + hitCount); + TopDocs hits = + searcher.search( + new TermQuery(new Term("marker", "marker")), expectedAtLeastCount); + List seen = new ArrayList<>(); + for (ScoreDoc hit : hits.scoreDocs) { + Document doc = searcher.doc(hit.doc); + seen.add(Integer.parseInt(doc.get("docid").substring(1))); + } + Collections.sort(seen); + message("saw markers:"); + for (int marker : seen) { + message("saw m" + marker); + } + } + + out.writeVLong(version); + out.writeVInt(hitCount); + bos.flush(); + } finally { + mgr.release(searcher); + } + } + continue outer; + + case SimplePrimaryNode.CMD_COMMIT: + Thread.currentThread().setName("commit"); + commit(); out.writeByte((byte) 1); - bos.flush(); - success = true; - } finally { - message("done merge copy files=" + files.keySet() + " success=" + success); - } - break; + break; - default: - throw new IllegalArgumentException("unrecognized cmd=" + cmd); + case SimplePrimaryNode.CMD_CLOSE: + Thread.currentThread().setName("close"); + ss.close(); + out.writeByte((byte) 1); + break outer; + + case CMD_PRE_COPY_MERGE: + Thread.currentThread().setName("merge copy"); + + long newPrimaryGen = in.readVLong(); + curPrimaryTCPPort = in.readVInt(); + Map files = SimpleServer.readFilesMetaData(in); + message("done reading files to copy files=" + files.keySet()); + AtomicBoolean finished = new AtomicBoolean(); + CopyJob job = launchPreCopyMerge(finished, newPrimaryGen, files); + message("done launching copy job files=" + files.keySet()); + + // Silly keep alive mechanism, else if e.g. we (replica node) crash, the primary + // won't notice for a very long time: + boolean success = false; + try { + int count = 0; + while (true) { + if (finished.get() || stop.get()) { + break; + } + Thread.sleep(10); + count++; + if (count == 100) { + // Once per second or so, we send a keep alive + message("send merge pre copy keep alive... files=" + files.keySet()); + + // To be evil, we sometimes fail to keep-alive, e.g. 
simulating a long GC pausing + // us: + if (random.nextBoolean()) { + out.writeByte((byte) 0); + count = 0; + } + } + } + + out.writeByte((byte) 1); + bos.flush(); + success = true; + } finally { + message("done merge copy files=" + files.keySet() + " success=" + success); + } + break; + + default: + throw new IllegalArgumentException("unrecognized cmd=" + cmd); } bos.flush(); @@ -349,7 +390,9 @@ class SimpleReplicaNode extends ReplicaNode { } @Override - public IndexOutput createTempOutput(String prefix, String suffix, IOContext ioContext) throws IOException { - return new RateLimitedIndexOutput(fetchRateLimiter, super.createTempOutput(prefix, suffix, ioContext)); + public IndexOutput createTempOutput(String prefix, String suffix, IOContext ioContext) + throws IOException { + return new RateLimitedIndexOutput( + fetchRateLimiter, super.createTempOutput(prefix, suffix, ioContext)); } } diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimpleServer.java b/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimpleServer.java index 7e1e065f64a..4d91dfc8957 100644 --- a/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimpleServer.java +++ b/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimpleServer.java @@ -36,28 +36,29 @@ import java.util.Iterator; import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; - import org.apache.lucene.store.DataInput; import org.apache.lucene.store.DataOutput; import org.apache.lucene.store.InputStreamDataInput; import org.apache.lucene.store.OutputStreamDataOutput; import org.apache.lucene.util.Constants; import org.apache.lucene.util.IOUtils; +import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; import org.apache.lucene.util.LuceneTestCase.SuppressSysoutChecks; -import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.SuppressForbidden; import org.apache.lucene.util.TestUtil; -/** Child process with silly naive TCP socket server to handle - * between-node commands, launched for each node by TestNRTReplication. */ +/** + * Child process with silly naive TCP socket server to handle between-node commands, launched for + * each node by TestNRTReplication. 
+ */ @SuppressCodecs({"MockRandom", "Direct", "SimpleText"}) @SuppressSysoutChecks(bugUrl = "Stuff gets printed, important stuff for debugging a failure") @SuppressForbidden(reason = "We need Unsafe to actually crush :-)") public class SimpleServer extends LuceneTestCase { - final static Set clientThreads = Collections.synchronizedSet(new HashSet<>()); - final static AtomicBoolean stop = new AtomicBoolean(); + static final Set clientThreads = Collections.synchronizedSet(new HashSet<>()); + static final AtomicBoolean stop = new AtomicBoolean(); /** Handles one client connection */ private static class ClientHandler extends Thread { @@ -82,14 +83,15 @@ public class SimpleServer extends LuceneTestCase { public void run() { boolean success = false; try { - //node.message("using stream buffer size=" + bufferSize); + // node.message("using stream buffer size=" + bufferSize); InputStream is = new BufferedInputStream(socket.getInputStream(), bufferSize); DataInput in = new InputStreamDataInput(is); BufferedOutputStream bos = new BufferedOutputStream(socket.getOutputStream(), bufferSize); DataOutput out = new OutputStreamDataOutput(bos); if (node instanceof SimplePrimaryNode) { - ((SimplePrimaryNode) node).handleOneConnection(random(), ss, stop, is, socket, in, out, bos); + ((SimplePrimaryNode) node) + .handleOneConnection(random(), ss, stop, is, socket, in, out, bos); } else { ((SimpleReplicaNode) node).handleOneConnection(ss, stop, is, socket, in, out, bos); } @@ -101,7 +103,8 @@ public class SimpleServer extends LuceneTestCase { success = true; } catch (Throwable t) { - if (t instanceof SocketException == false && t instanceof NodeCommunicationException == false) { + if (t instanceof SocketException == false + && t instanceof NodeCommunicationException == false) { node.message("unexpected exception handling client connection; now failing test:"); t.printStackTrace(System.out); IOUtils.closeWhileHandlingException(ss); @@ -128,17 +131,14 @@ public class SimpleServer extends LuceneTestCase { } } - /** - * currently, this only works/tested on Sun and IBM. - */ + /** currently, this only works/tested on Sun and IBM. */ - // poached from TestIndexWriterOnJRECrash ... should we factor out to TestUtil? seems dangerous to give it such "publicity"? + // poached from TestIndexWriterOnJRECrash ... should we factor out to TestUtil? seems dangerous + // to give it such "publicity"? private static void crashJRE() { final String vendor = Constants.JAVA_VENDOR; - final boolean supportsUnsafeNpeDereference = - vendor.startsWith("Oracle") || - vendor.startsWith("Sun") || - vendor.startsWith("Apple"); + final boolean supportsUnsafeNpeDereference = + vendor.startsWith("Oracle") || vendor.startsWith("Sun") || vendor.startsWith("Apple"); try { if (supportsUnsafeNpeDereference) { @@ -151,7 +151,7 @@ public class SimpleServer extends LuceneTestCase { m.invoke(o, 0L, 0L); } catch (Throwable e) { System.out.println("Couldn't kill the JVM via Unsafe."); - e.printStackTrace(System.out); + e.printStackTrace(System.out); } } @@ -159,16 +159,17 @@ public class SimpleServer extends LuceneTestCase { Runtime.getRuntime().halt(-1); } catch (Exception e) { System.out.println("Couldn't kill the JVM."); - e.printStackTrace(System.out); + e.printStackTrace(System.out); } // We couldn't get the JVM to crash for some reason. 
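    // (Descriptive aside, not part of the original change: the escalation above is
    // 1) reflectively fetch sun.misc.Unsafe's "theUnsafe" singleton and invoke
    // putAddress(0L, 0L), dereferencing address zero so the process dies with SIGSEGV
    // and no shutdown hooks run, then 2) Runtime.getRuntime().halt(-1), which also
    // bypasses shutdown hooks. Only if both somehow return do we fall through to the
    // exception below.)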
throw new RuntimeException("JVM refuses to die!"); } - static void writeFilesMetaData(DataOutput out, Map files) throws IOException { + static void writeFilesMetaData(DataOutput out, Map files) + throws IOException { out.writeVInt(files.size()); - for(Map.Entry ent : files.entrySet()) { + for (Map.Entry ent : files.entrySet()) { out.writeString(ent.getKey()); FileMetaData fmd = ent.getValue(); @@ -181,13 +182,13 @@ public class SimpleServer extends LuceneTestCase { } } - static Map readFilesMetaData(DataInput in) throws IOException { + static Map readFilesMetaData(DataInput in) throws IOException { int fileCount = in.readVInt(); - //System.out.println("readFilesMetaData: fileCount=" + fileCount); - Map files = new HashMap<>(); - for(int i=0;i clientThreads = new ArrayList<>(); + // List clientThreads = new ArrayList<>(); // Naive thread-per-connection server: while (true) { @@ -358,14 +386,15 @@ public class SimpleServer extends LuceneTestCase { it.remove(); } } - //node.message(clientThreads.size() + " client threads are still alive"); + // node.message(clientThreads.size() + " client threads are still alive"); } stop.set(true); - // Make sure all client threads are done, else we get annoying (yet ultimately "harmless") messages about threads still running / + // Make sure all client threads are done, else we get annoying (yet ultimately "harmless") + // messages about threads still running / // lingering for them to finish from the child processes: - for(Thread clientThread : clientThreads) { + for (Thread clientThread : clientThreads) { node.message("top: join clientThread=" + clientThread); clientThread.join(); node.message("top: done join clientThread=" + clientThread); diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimpleTransLog.java b/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimpleTransLog.java index 9968bd8880a..6509d9c997a 100644 --- a/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimpleTransLog.java +++ b/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimpleTransLog.java @@ -24,7 +24,6 @@ import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.nio.file.Path; import java.nio.file.StandardOpenOption; - import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.StringField; @@ -34,10 +33,12 @@ import org.apache.lucene.store.ByteArrayDataInput; import org.apache.lucene.store.ByteBuffersDataOutput; import org.apache.lucene.store.DataInput; -/** This is a stupid yet functional transaction log: it never fsync's, never prunes, it's over-synchronized, it hard-wires id field name to "docid", can - * only handle specific docs/fields used by this test, etc. It's just barely enough to show how a translog could work on top of NRT - * replication to guarantee no data loss when nodes crash */ - +/** + * This is a stupid yet functional transaction log: it never fsync's, never prunes, it's + * over-synchronized, it hard-wires id field name to "docid", can only handle specific docs/fields + * used by this test, etc. 
It's just barely enough to show how a translog could work on top of NRT + * replication to guarantee no data loss when nodes crash + */ class SimpleTransLog implements Closeable { final FileChannel channel; @@ -45,12 +46,14 @@ class SimpleTransLog implements Closeable { final byte[] intBuffer = new byte[4]; final ByteBuffer intByteBuffer = ByteBuffer.wrap(intBuffer); - private final static byte OP_ADD_DOCUMENT = (byte) 0; - private final static byte OP_UPDATE_DOCUMENT = (byte) 1; - private final static byte OP_DELETE_DOCUMENTS = (byte) 2; + private static final byte OP_ADD_DOCUMENT = (byte) 0; + private static final byte OP_UPDATE_DOCUMENT = (byte) 1; + private static final byte OP_DELETE_DOCUMENTS = (byte) 2; public SimpleTransLog(Path path) throws IOException { - channel = FileChannel.open(path, StandardOpenOption.READ, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW); + channel = + FileChannel.open( + path, StandardOpenOption.READ, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW); } public synchronized long getNextLocation() throws IOException { @@ -120,7 +123,10 @@ class SimpleTransLog implements Closeable { } } - /** Replays ops between start and end location against the provided writer. Can run concurrently with ongoing operations. */ + /** + * Replays ops between start and end location against the provided writer. Can run concurrently + * with ongoing operations. + */ public void replay(NodeProcess primary, long start, long end) throws IOException { try (Connection c = new Connection(primary.tcpPort)) { c.out.writeByte(SimplePrimaryNode.CMD_INDEXING); @@ -134,49 +140,51 @@ class SimpleTransLog implements Closeable { intByteBuffer.limit(4); readBytesFromChannel(pos, intByteBuffer); pos += 4; - int len = ((intBuffer[0] & 0xff) << 24) | - (intBuffer[1] & 0xff) << 16 | - (intBuffer[2] & 0xff) << 8 | - (intBuffer[3] & 0xff); + int len = + ((intBuffer[0] & 0xff) << 24) + | (intBuffer[1] & 0xff) << 16 + | (intBuffer[2] & 0xff) << 8 + | (intBuffer[3] & 0xff); byte[] bytes = new byte[len]; readBytesFromChannel(pos, ByteBuffer.wrap(bytes)); pos += len; in.reset(bytes); - + byte op = in.readByte(); - //System.out.println("xlog: replay op=" + op); + // System.out.println("xlog: replay op=" + op); switch (op) { - case 0: - // We replay add as update: - replayAddDocument(c, primary, in); - break; + case 0: + // We replay add as update: + replayAddDocument(c, primary, in); + break; - case 1: - // We replay add as update: - replayAddDocument(c, primary, in); - break; + case 1: + // We replay add as update: + replayAddDocument(c, primary, in); + break; - case 2: - replayDeleteDocuments(c, primary, in); - break; + case 2: + replayDeleteDocuments(c, primary, in); + break; - default: - throw new CorruptIndexException("invalid operation " + op, in); + default: + throw new CorruptIndexException("invalid operation " + op, in); } } assert pos == end; - //System.out.println("xlog: done replay"); + // System.out.println("xlog: done replay"); c.out.writeByte(SimplePrimaryNode.CMD_INDEXING_DONE); c.flush(); - //System.out.println("xlog: done flush"); + // System.out.println("xlog: done flush"); c.in.readByte(); - //System.out.println("xlog: done readByte"); + // System.out.println("xlog: done readByte"); } } - private void replayAddDocument(Connection c, NodeProcess primary, DataInput in) throws IOException { + private void replayAddDocument(Connection c, NodeProcess primary, DataInput in) + throws IOException { String id = in.readString(); Document doc = new Document(); @@ -193,7 +201,7 @@ 
class SimpleTransLog implements Closeable { } String marker = readNullableString(in); if (marker != null) { - //TestStressNRTReplication.message("xlog: replay marker=" + id); + // TestStressNRTReplication.message("xlog: replay marker=" + id); doc.add(new StringField("marker", marker, Field.Store.YES)); } @@ -203,16 +211,19 @@ class SimpleTransLog implements Closeable { primary.addOrUpdateDocument(c, doc, false); } - - private void replayDeleteDocuments(Connection c, NodeProcess primary, DataInput in) throws IOException { + private void replayDeleteDocuments(Connection c, NodeProcess primary, DataInput in) + throws IOException { String id = in.readString(); // nocomit what if this fails? primary.deleteDocument(c, id); } - /** Encodes doc into buffer. NOTE: this is NOT general purpose! It only handles the fields used in this test! */ + /** + * Encodes doc into buffer. NOTE: this is NOT general purpose! It only handles the fields used in + * this test! + */ private synchronized void encode(String id, Document doc) throws IOException { - assert id.equals(doc.get("docid")): "id=" + id + " vs docid=" + doc.get("docid"); + assert id.equals(doc.get("docid")) : "id=" + id + " vs docid=" + doc.get("docid"); buffer.writeString(id); writeNullableString(doc.get("title")); writeNullableString(doc.get("body")); diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/TestNRTReplication.java b/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/TestNRTReplication.java index b41e0f92d14..1c42d07b1fc 100644 --- a/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/TestNRTReplication.java +++ b/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/TestNRTReplication.java @@ -17,6 +17,7 @@ package org.apache.lucene.replicator.nrt; +import com.carrotsearch.randomizedtesting.SeedUtils; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStreamReader; @@ -28,21 +29,18 @@ import java.util.Locale; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.regex.Pattern; - import org.apache.lucene.document.Document; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.store.FSDirectory; import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LineFileDocs; +import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; import org.apache.lucene.util.LuceneTestCase.SuppressSysoutChecks; -import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.SuppressForbidden; import org.apache.lucene.util.TestUtil; -import com.carrotsearch.randomizedtesting.SeedUtils; - // MockRandom's .sd file has no index header/footer: @SuppressCodecs({"MockRandom", "Direct", "SimpleText"}) @SuppressSysoutChecks(bugUrl = "Stuff gets printed, important stuff for debugging a failure") @@ -58,14 +56,17 @@ public class TestNRTReplication extends LuceneTestCase { /** Launches a child "server" (separate JVM), which is either primary or replica node */ @SuppressForbidden(reason = "ProcessBuilder requires java.io.File for CWD") - private NodeProcess startNode(int primaryTCPPort, final int id, Path indexPath, long forcePrimaryVersion, boolean willCrash) throws IOException { + private NodeProcess startNode( + int primaryTCPPort, final int id, Path indexPath, long forcePrimaryVersion, boolean willCrash) + throws IOException { List cmd = new ArrayList<>(); - 
cmd.add(System.getProperty("java.home") - + System.getProperty("file.separator") - + "bin" - + System.getProperty("file.separator") - + "java"); + cmd.add( + System.getProperty("java.home") + + System.getProperty("file.separator") + + "bin" + + System.getProperty("file.separator") + + "java"); cmd.add("-Xmx512m"); long myPrimaryGen; @@ -92,7 +93,8 @@ public class TestNRTReplication extends LuceneTestCase { cmd.add("-Dtests.nrtreplication.forcePrimaryVersion=" + forcePrimaryVersion); } - // Mixin our own counter because this is called from a fresh thread which means the seed otherwise isn't changing each time we spawn a + // Mixin our own counter because this is called from a fresh thread which means the seed + // otherwise isn't changing each time we spawn a // new node: long seed = random().nextLong() * nodeStartCounter.incrementAndGet(); @@ -165,30 +167,52 @@ public class TestNRTReplication extends LuceneTestCase { // Baby sits the child process, pulling its stdout and printing to our stdout: AtomicBoolean nodeClosing = new AtomicBoolean(); - Thread pumper = ThreadPumper.start( - new Runnable() { - @Override - public void run() { - message("now wait for process " + p); - try { - p.waitFor(); - } catch (Throwable t) { - throw new RuntimeException(t); - } + Thread pumper = + ThreadPumper.start( + new Runnable() { + @Override + public void run() { + message("now wait for process " + p); + try { + p.waitFor(); + } catch (Throwable t) { + throw new RuntimeException(t); + } - message("done wait for process " + p); - int exitValue = p.exitValue(); - message("exit value=" + exitValue + " willCrash=" + finalWillCrash); - if (exitValue != 0 && finalWillCrash == false) { - // should fail test - throw new RuntimeException("node " + id + " process had unexpected non-zero exit status=" + exitValue); - } - } - }, r, System.out, null, nodeClosing); + message("done wait for process " + p); + int exitValue = p.exitValue(); + message("exit value=" + exitValue + " willCrash=" + finalWillCrash); + if (exitValue != 0 && finalWillCrash == false) { + // should fail test + throw new RuntimeException( + "node " + id + " process had unexpected non-zero exit status=" + exitValue); + } + } + }, + r, + System.out, + null, + nodeClosing); pumper.setName("pump" + id); - message("top: node=" + id + " started at tcpPort=" + tcpPort + " initCommitVersion=" + initCommitVersion + " initInfosVersion=" + initInfosVersion); - return new NodeProcess(p, id, tcpPort, pumper, primaryTCPPort == -1, initCommitVersion, initInfosVersion, nodeClosing); + message( + "top: node=" + + id + + " started at tcpPort=" + + tcpPort + + " initCommitVersion=" + + initCommitVersion + + " initInfosVersion=" + + initInfosVersion); + return new NodeProcess( + p, + id, + tcpPort, + pumper, + primaryTCPPort == -1, + initCommitVersion, + initInfosVersion, + nodeClosing); } @Override @@ -221,7 +245,7 @@ public class TestNRTReplication extends LuceneTestCase { LineFileDocs docs = new LineFileDocs(random()); Connection primaryC = new Connection(primary.tcpPort); primaryC.out.writeByte(SimplePrimaryNode.CMD_INDEXING); - for(int i=0;i<10;i++) { + for (int i = 0; i < 10; i++) { Document doc = docs.nextDoc(); primary.addOrUpdateDocument(primaryC, doc, false); } @@ -239,7 +263,7 @@ public class TestNRTReplication extends LuceneTestCase { // Delete all docs from primary if (random().nextBoolean()) { // Inefficiently: - for(int id=0;id<10;id++) { + for (int id = 0; id < 10; id++) { primary.deleteDocument(primaryC, Integer.toString(id)); } } else { @@ -249,7 
+273,7 @@ public class TestNRTReplication extends LuceneTestCase { // Replica still shows 10 docs: assertVersionAndHits(replica, primaryVersion1, 10); - + // Refresh primary, which also pushes to replica: long primaryVersion2 = primary.flush(0); assertTrue(primaryVersion2 > primaryVersion1); @@ -258,7 +282,7 @@ public class TestNRTReplication extends LuceneTestCase { waitForVersionAndHits(replica, primaryVersion2, 0); // Index 10 docs again: - for(int i=0;i<10;i++) { + for (int i = 0; i < 10; i++) { Document doc = docs.nextDoc(); primary.addOrUpdateDocument(primaryC, doc, false); } @@ -291,7 +315,7 @@ public class TestNRTReplication extends LuceneTestCase { LineFileDocs docs = new LineFileDocs(random()); Connection primaryC = new Connection(primary.tcpPort); primaryC.out.writeByte(SimplePrimaryNode.CMD_INDEXING); - for(int i=0;i<10;i++) { + for (int i = 0; i < 10; i++) { Document doc = docs.nextDoc(); primary.addOrUpdateDocument(primaryC, doc, false); } @@ -301,7 +325,7 @@ public class TestNRTReplication extends LuceneTestCase { assertTrue(primaryVersion1 > 0); // Index 10 more docs into primary: - for(int i=0;i<10;i++) { + for (int i = 0; i < 10; i++) { Document doc = docs.nextDoc(); primary.addOrUpdateDocument(primaryC, doc, false); } @@ -341,7 +365,7 @@ public class TestNRTReplication extends LuceneTestCase { LineFileDocs docs = new LineFileDocs(random()); try (Connection c = new Connection(primary.tcpPort)) { c.out.writeByte(SimplePrimaryNode.CMD_INDEXING); - for(int i=0;i<10;i++) { + for (int i = 0; i < 10; i++) { Document doc = docs.nextDoc(); primary.addOrUpdateDocument(c, doc, false); } @@ -387,7 +411,7 @@ public class TestNRTReplication extends LuceneTestCase { LineFileDocs docs = new LineFileDocs(random()); try (Connection c = new Connection(primary.tcpPort)) { c.out.writeByte(SimplePrimaryNode.CMD_INDEXING); - for(int i=0;i<10;i++) { + for (int i = 0; i < 10; i++) { Document doc = docs.nextDoc(); primary.addOrUpdateDocument(c, doc, false); } @@ -414,7 +438,8 @@ public class TestNRTReplication extends LuceneTestCase { primary.close(); } - // Start up, index 10 docs, replicate, commit, crash, index more docs, replicate, then restart the replica + // Start up, index 10 docs, replicate, commit, crash, index more docs, replicate, then restart the + // replica @Nightly public void testIndexingWhileReplicaIsDown() throws Exception { @@ -430,7 +455,7 @@ public class TestNRTReplication extends LuceneTestCase { LineFileDocs docs = new LineFileDocs(random()); try (Connection c = new Connection(primary.tcpPort)) { c.out.writeByte(SimplePrimaryNode.CMD_INDEXING); - for(int i=0;i<10;i++) { + for (int i = 0; i < 10; i++) { Document doc = docs.nextDoc(); primary.addOrUpdateDocument(c, doc, false); } @@ -452,7 +477,7 @@ public class TestNRTReplication extends LuceneTestCase { // Index 10 more docs, while replica is down try (Connection c = new Connection(primary.tcpPort)) { c.out.writeByte(SimplePrimaryNode.CMD_INDEXING); - for(int i=0;i<10;i++) { + for (int i = 0; i < 10; i++) { Document doc = docs.nextDoc(); primary.addOrUpdateDocument(c, doc, false); } @@ -478,7 +503,7 @@ public class TestNRTReplication extends LuceneTestCase { replica.close(); primary.close(); } - + // Crash primary and promote a replica @Nightly public void testCrashPrimary1() throws Exception { @@ -495,7 +520,7 @@ public class TestNRTReplication extends LuceneTestCase { LineFileDocs docs = new LineFileDocs(random()); try (Connection c = new Connection(primary.tcpPort)) { c.out.writeByte(SimplePrimaryNode.CMD_INDEXING); - 
for(int i=0;i<10;i++) { + for (int i = 0; i < 10; i++) { Document doc = docs.nextDoc(); primary.addOrUpdateDocument(c, doc, false); } @@ -516,7 +541,7 @@ public class TestNRTReplication extends LuceneTestCase { // Promote replica: replica.commit(); replica.close(); - + primary = startNode(-1, 1, path2, -1, false); // Should still see 10 docs: @@ -541,7 +566,7 @@ public class TestNRTReplication extends LuceneTestCase { LineFileDocs docs = new LineFileDocs(random()); try (Connection c = new Connection(primary.tcpPort)) { c.out.writeByte(SimplePrimaryNode.CMD_INDEXING); - for(int i=0;i<10;i++) { + for (int i = 0; i < 10; i++) { Document doc = docs.nextDoc(); primary.addOrUpdateDocument(c, doc, false); } @@ -559,7 +584,7 @@ public class TestNRTReplication extends LuceneTestCase { // Index 10 docs, but crash before replicating or committing: try (Connection c = new Connection(primary.tcpPort)) { c.out.writeByte(SimplePrimaryNode.CMD_INDEXING); - for(int i=0;i<10;i++) { + for (int i = 0; i < 10; i++) { Document doc = docs.nextDoc(); primary.addOrUpdateDocument(c, doc, false); } @@ -576,7 +601,7 @@ public class TestNRTReplication extends LuceneTestCase { // Index 10 more docs try (Connection c = new Connection(primary.tcpPort)) { c.out.writeByte(SimplePrimaryNode.CMD_INDEXING); - for(int i=0;i<10;i++) { + for (int i = 0; i < 10; i++) { Document doc = docs.nextDoc(); primary.addOrUpdateDocument(c, doc, false); } @@ -593,7 +618,8 @@ public class TestNRTReplication extends LuceneTestCase { replica.close(); } - // Crash primary and then restart it, while a replica node is down, then bring replica node back up and make sure it properly "unforks" itself + // Crash primary and then restart it, while a replica node is down, then bring replica node back + // up and make sure it properly "unforks" itself @Nightly public void testCrashPrimary3() throws Exception { @@ -608,7 +634,7 @@ public class TestNRTReplication extends LuceneTestCase { // Index 10 docs into primary: try (Connection c = new Connection(primary.tcpPort)) { c.out.writeByte(SimplePrimaryNode.CMD_INDEXING); - for(int i=0;i<10;i++) { + for (int i = 0; i < 10; i++) { Document doc = docs.nextDoc(); primary.addOrUpdateDocument(c, doc, false); } @@ -626,7 +652,8 @@ public class TestNRTReplication extends LuceneTestCase { replica.close(); primary.crash(); - // At this point replica is "in the future": it has 10 docs committed, but the primary crashed before committing so it has 0 docs + // At this point replica is "in the future": it has 10 docs committed, but the primary crashed + // before committing so it has 0 docs // Restart primary: primary = startNode(-1, 0, path1, -1, true); @@ -634,7 +661,7 @@ public class TestNRTReplication extends LuceneTestCase { // Index 20 docs into primary: try (Connection c = new Connection(primary.tcpPort)) { c.out.writeByte(SimplePrimaryNode.CMD_INDEXING); - for(int i=0;i<20;i++) { + for (int i = 0; i < 20; i++) { Document doc = docs.nextDoc(); primary.addOrUpdateDocument(c, doc, false); } @@ -643,7 +670,8 @@ public class TestNRTReplication extends LuceneTestCase { // Flush primary, but there are no replicas to sync to: long primaryVersion2 = primary.flush(0); - // Now restart replica, which on init should detect on a "lost branch" because its 10 docs that were committed came from a different + // Now restart replica, which on init should detect on a "lost branch" because its 10 docs that + // were committed came from a different // primary node: replica = startNode(primary.tcpPort, 1, path2, -1, true); @@ -668,7 
+696,7 @@ public class TestNRTReplication extends LuceneTestCase { LineFileDocs docs = new LineFileDocs(random()); try (Connection c = new Connection(primary.tcpPort)) { c.out.writeByte(SimplePrimaryNode.CMD_INDEXING); - for(int i=0;i<100;i++) { + for (int i = 0; i < 100; i++) { Document doc = docs.nextDoc(); primary.addOrUpdateDocument(c, doc, false); } @@ -703,7 +731,11 @@ public class TestNRTReplication extends LuceneTestCase { private void assertWriteLockHeld(Path path) throws Exception { try (FSDirectory dir = FSDirectory.open(path)) { - expectThrows(LockObtainFailedException.class, () -> {dir.obtainLock(IndexWriter.WRITE_LOCK_NAME);}); + expectThrows( + LockObtainFailedException.class, + () -> { + dir.obtainLock(IndexWriter.WRITE_LOCK_NAME); + }); } } @@ -723,7 +755,7 @@ public class TestNRTReplication extends LuceneTestCase { LineFileDocs docs = new LineFileDocs(random()); try (Connection c = new Connection(primary.tcpPort)) { c.out.writeByte(SimplePrimaryNode.CMD_INDEXING); - for(int i=0;i<10;i++) { + for (int i = 0; i < 10; i++) { Document doc = docs.nextDoc(); primary.addOrUpdateDocument(c, doc, false); } @@ -743,11 +775,11 @@ public class TestNRTReplication extends LuceneTestCase { // Lots of new flushes while replica is down: long primaryVersion2 = 0; - for(int iter=0;iter<10;iter++) { + for (int iter = 0; iter < 10; iter++) { // Index 10 docs into primary: try (Connection c = new Connection(primary.tcpPort)) { c.out.writeByte(SimplePrimaryNode.CMD_INDEXING); - for(int i=0;i<10;i++) { + for (int i = 0; i < 10; i++) { Document doc = docs.nextDoc(); primary.addOrUpdateDocument(c, doc, false); } @@ -788,10 +820,10 @@ public class TestNRTReplication extends LuceneTestCase { // Index 50 docs into primary: LineFileDocs docs = new LineFileDocs(random()); long primaryVersion1 = 0; - for (int iter=0;iter<5;iter++) { + for (int iter = 0; iter < 5; iter++) { try (Connection c = new Connection(primary.tcpPort)) { c.out.writeByte(SimplePrimaryNode.CMD_INDEXING); - for(int i=0;i<10;i++) { + for (int i = 0; i < 10; i++) { Document doc = docs.nextDoc(); primary.addOrUpdateDocument(c, doc, false); } @@ -813,7 +845,7 @@ public class TestNRTReplication extends LuceneTestCase { // Index 10 more docs, but don't sync to replicas: try (Connection c = new Connection(primary.tcpPort)) { c.out.writeByte(SimplePrimaryNode.CMD_INDEXING); - for(int i=0;i<10;i++) { + for (int i = 0; i < 10; i++) { Document doc = docs.nextDoc(); primary.addOrUpdateDocument(c, doc, false); } @@ -830,7 +862,7 @@ public class TestNRTReplication extends LuceneTestCase { replica2 = startNode(primary.tcpPort, 2, path3, -1, true); // Only 50 because we didn't commit primary before the crash: - + // It's -1 because it's unpredictable how IW changes segments version on init: assertVersionAndHits(primary, -1, 50); assertVersionAndHits(replica1, primary.initInfosVersion, 50); @@ -843,11 +875,12 @@ public class TestNRTReplication extends LuceneTestCase { } /** Tell primary current replicas. */ - private void sendReplicasToPrimary(NodeProcess primary, NodeProcess... replicas) throws IOException { + private void sendReplicasToPrimary(NodeProcess primary, NodeProcess... 
replicas) + throws IOException { try (Connection c = new Connection(primary.tcpPort)) { c.out.writeByte(SimplePrimaryNode.CMD_SET_REPLICAS); c.out.writeVInt(replicas.length); - for(int id=0;idOne node is primary, and segments are periodically flushed there, then concurrently the N + * replica nodes copy the new files over and open new readers, while primary also opens a new + * reader. * - * Nodes randomly crash and are restarted. If the primary crashes, a replica is promoted. + *

 * <p>Nodes randomly crash and are restarted. If the primary crashes, a replica is promoted.
 *
 * <p>Merges are currently first finished on the primary and then pre-copied out to replicas with a
 * merged segment warmer so they don't block ongoing NRT reopens. Probably replicas could do their
 * own merging instead, but this is more complex and may not be better overall (merging takes a lot
 * of IO resources).

    Slow network is simulated with a RateLimiter. */ // MockRandom's .sd file has no index header/footer: @@ -121,7 +127,9 @@ public class TestStressNRTReplication extends LuceneTestCase { /** Randomly crash the current primary (losing data!) and promote the "next best" replica. */ static final boolean DO_CRASH_PRIMARY = true; - /** Randomly crash (JVM core dumps) a replica; it will later randomly be restarted and sync itself. */ + /** + * Randomly crash (JVM core dumps) a replica; it will later randomly be restarted and sync itself. + */ static final boolean DO_CRASH_REPLICA = true; /** Randomly gracefully close a replica; it will later be restarted and sync itself. */ @@ -157,7 +165,7 @@ public class TestStressNRTReplication extends LuceneTestCase { volatile NodeProcess[] nodes; volatile long[] nodeTimeStamps; volatile boolean[] starting; - + Path[] indexPaths; Path transLogPath; @@ -166,13 +174,16 @@ public class TestStressNRTReplication extends LuceneTestCase { final AtomicInteger markerID = new AtomicInteger(); /** Maps searcher version to how many hits the query body:the matched. */ - final Map hitCounts = new ConcurrentHashMap<>(); + final Map hitCounts = new ConcurrentHashMap<>(); - /** Maps searcher version to how many marker documents matched. This should only ever grow (we never delete marker documents). */ - final Map versionToMarker = new ConcurrentHashMap<>(); + /** + * Maps searcher version to how many marker documents matched. This should only ever grow (we + * never delete marker documents). + */ + final Map versionToMarker = new ConcurrentHashMap<>(); /** Maps searcher version to xlog location when refresh of this version started. */ - final Map versionToTransLogLocation = new ConcurrentHashMap<>(); + final Map versionToTransLogLocation = new ConcurrentHashMap<>(); final AtomicLong nodeStartCounter = new AtomicLong(); @@ -208,20 +219,20 @@ public class TestStressNRTReplication extends LuceneTestCase { transLogPath = createTempDir("NRTReplication").resolve("translog"); transLog = new SimpleTransLog(transLogPath); - //state.rateLimiters = new RateLimiter[numNodes]; + // state.rateLimiters = new RateLimiter[numNodes]; indexPaths = new Path[numNodes]; nodes = new NodeProcess[numNodes]; nodeTimeStamps = new long[numNodes]; Arrays.fill(nodeTimeStamps, Node.globalStartNS); starting = new boolean[numNodes]; - - for(int i=0;i toClose = new ArrayList<>(); - for(NodeProcess node : nodes) { + for (NodeProcess node : nodes) { if (node != primary && node != null) { toClose.add(node); } @@ -346,8 +366,8 @@ public class TestStressNRTReplication extends LuceneTestCase { IOUtils.close(transLog); if (failed.get() == false) { - message("TEST: top: now checkIndex"); - for(Path path : indexPaths) { + message("TEST: top: now checkIndex"); + for (Path path : indexPaths) { message("TEST: check " + path); MockDirectoryWrapper dir = newMockFSDirectory(path); // Just too slow otherwise @@ -360,7 +380,7 @@ public class TestStressNRTReplication extends LuceneTestCase { } private boolean anyNodesStarting() { - for(int id=0;id cmd = new ArrayList<>(); NodeProcess curPrimary = primary; - cmd.add(System.getProperty("java.home") - + System.getProperty("file.separator") - + "bin" - + System.getProperty("file.separator") - + "java"); + cmd.add( + System.getProperty("java.home") + + System.getProperty("file.separator") + + "bin" + + System.getProperty("file.separator") + + "java"); cmd.add("-Xmx512m"); if (curPrimary != null) { @@ -513,7 +576,8 @@ public class TestStressNRTReplication extends LuceneTestCase { 
return null; } - // This is very costly (takes more time to check than it did to index); we do this ourselves in the end instead of each time a replica + // This is very costly (takes more time to check than it did to index); we do this ourselves in + // the end instead of each time a replica // is restarted: // cmd.add("-Dtests.nrtreplication.checkonclose=true"); @@ -546,7 +610,8 @@ public class TestStressNRTReplication extends LuceneTestCase { long myPrimaryGen = primaryGen; cmd.add("-Dtests.nrtreplication.primaryGen=" + myPrimaryGen); - // Mixin our own counter because this is called from a fresh thread which means the seed otherwise isn't changing each time we spawn a + // Mixin our own counter because this is called from a fresh thread which means the seed + // otherwise isn't changing each time we spawn a // new node: long seed = random().nextLong() * nodeStartCounter.incrementAndGet(); @@ -562,13 +627,18 @@ public class TestStressNRTReplication extends LuceneTestCase { if (SEPARATE_CHILD_OUTPUT) { Path childOut = childTempDir.resolve(id + ".log"); message("logging to " + childOut); - childLog = Files.newBufferedWriter(childOut, StandardCharsets.UTF_8, StandardOpenOption.APPEND, StandardOpenOption.CREATE); + childLog = + Files.newBufferedWriter( + childOut, + StandardCharsets.UTF_8, + StandardOpenOption.APPEND, + StandardOpenOption.CREATE); childLog.write("\n\nSTART NEW CHILD:\n"); } else { childLog = null; } - //message("child process command: " + cmd); + // message("child process command: " + cmd); ProcessBuilder pb = new ProcessBuilder(cmd); pb.redirectErrorStream(true); @@ -605,12 +675,14 @@ public class TestStressNRTReplication extends LuceneTestCase { return null; } - // Hackity hack, in case primary crashed/closed and we haven't noticed (reaped the process) yet: + // Hackity hack, in case primary crashed/closed and we haven't noticed (reaped the process) + // yet: if (isPrimary == false) { - for(int i=0;i<100;i++) { + for (int i = 0; i < 100; i++) { NodeProcess primary2 = primary; if (primaryGen != myPrimaryGen || primary2 == null || primary2.nodeIsClosing.get()) { - // OK: primary crashed while we were trying to start, so it's expected/allowed that we could not start the replica: + // OK: primary crashed while we were trying to start, so it's expected/allowed that we + // could not start the replica: message("primary crashed/closed while replica R" + id + " tried to start; skipping"); return null; } else { @@ -656,45 +728,70 @@ public class TestStressNRTReplication extends LuceneTestCase { final boolean finalWillCrash = willCrash; final AtomicBoolean nodeIsClosing = new AtomicBoolean(); - // Baby sits the child process, pulling its stdout and printing to our stdout, calling nodeClosed once it exits: - Thread pumper = ThreadPumper.start( - new Runnable() { - @Override - public void run() { - message("now wait for process " + p); - try { - p.waitFor(); - } catch (Throwable t) { - throw new RuntimeException(t); - } + // Baby sits the child process, pulling its stdout and printing to our stdout, calling + // nodeClosed once it exits: + Thread pumper = + ThreadPumper.start( + new Runnable() { + @Override + public void run() { + message("now wait for process " + p); + try { + p.waitFor(); + } catch (Throwable t) { + throw new RuntimeException(t); + } - message("done wait for process " + p); - int exitValue = p.exitValue(); - message("exit value=" + exitValue + " willCrash=" + finalWillCrash); - if (childLog != null) { - try { - childLog.write("process done; exitValue=" + exitValue 
+ "\n"); - childLog.close(); - } catch (IOException ioe) { - throw new RuntimeException(ioe); - } - } - if (exitValue != 0 && finalWillCrash == false && crashingNodes.remove(id) == false) { - // should fail test - failed.set(true); - if (childLog != null) { - throw new RuntimeException("node " + id + " process had unexpected non-zero exit status=" + exitValue + "; see " + childLog + " for details"); - } else { - throw new RuntimeException("node " + id + " process had unexpected non-zero exit status=" + exitValue); - } - } - nodeClosed(id); - } - }, r, System.out, childLog, nodeIsClosing); + message("done wait for process " + p); + int exitValue = p.exitValue(); + message("exit value=" + exitValue + " willCrash=" + finalWillCrash); + if (childLog != null) { + try { + childLog.write("process done; exitValue=" + exitValue + "\n"); + childLog.close(); + } catch (IOException ioe) { + throw new RuntimeException(ioe); + } + } + if (exitValue != 0 + && finalWillCrash == false + && crashingNodes.remove(id) == false) { + // should fail test + failed.set(true); + if (childLog != null) { + throw new RuntimeException( + "node " + + id + + " process had unexpected non-zero exit status=" + + exitValue + + "; see " + + childLog + + " for details"); + } else { + throw new RuntimeException( + "node " + id + " process had unexpected non-zero exit status=" + exitValue); + } + } + nodeClosed(id); + } + }, + r, + System.out, + childLog, + nodeIsClosing); pumper.setName("pump" + id); - message("top: node=" + id + " started at tcpPort=" + tcpPort + " initCommitVersion=" + initCommitVersion + " initInfosVersion=" + initInfosVersion); - return new NodeProcess(p, id, tcpPort, pumper, isPrimary, initCommitVersion, initInfosVersion, nodeIsClosing); + message( + "top: node=" + + id + + " started at tcpPort=" + + tcpPort + + " initCommitVersion=" + + initCommitVersion + + " initInfosVersion=" + + initInfosVersion); + return new NodeProcess( + p, id, tcpPort, pumper, isPrimary, initCommitVersion, initInfosVersion, nodeIsClosing); } private void nodeClosed(int id) { @@ -715,7 +812,10 @@ public class TestStressNRTReplication extends LuceneTestCase { sendReplicasToPrimary(); } - /** Sends currently alive replicas to primary, which uses this to know who to notify when it does a refresh */ + /** + * Sends currently alive replicas to primary, which uses this to know who to notify when it does a + * refresh + */ private void sendReplicasToPrimary() { NodeProcess curPrimary = primary; if (curPrimary != null) { @@ -730,27 +830,38 @@ public class TestStressNRTReplication extends LuceneTestCase { try (Connection c = new Connection(curPrimary.tcpPort)) { c.out.writeByte(SimplePrimaryNode.CMD_SET_REPLICAS); - c.out.writeVInt(replicas.size()); - for(NodeProcess replica : replicas) { + c.out.writeVInt(replicas.size()); + for (NodeProcess replica : replicas) { c.out.writeVInt(replica.id); c.out.writeVInt(replica.tcpPort); } c.flush(); c.in.readByte(); } catch (Throwable t) { - message("top: ignore exc sending replicas to primary P" + curPrimary.id + " at tcpPort=" + curPrimary.tcpPort); + message( + "top: ignore exc sending replicas to primary P" + + curPrimary.id + + " at tcpPort=" + + curPrimary.tcpPort); t.printStackTrace(System.out); } } } void addVersionMarker(long version, int count) { - //System.out.println("ADD VERSION MARKER version=" + version + " count=" + count); + // System.out.println("ADD VERSION MARKER version=" + version + " count=" + count); if (versionToMarker.containsKey(version)) { int curCount = 
versionToMarker.get(version); if (curCount != count) { - message("top: wrong marker count version=" + version + " count=" + count + " curCount=" + curCount); - throw new IllegalStateException("version=" + version + " count=" + count + " curCount=" + curCount); + message( + "top: wrong marker count version=" + + version + + " count=" + + count + + " curCount=" + + curCount); + throw new IllegalStateException( + "version=" + version + " count=" + count + " curCount=" + curCount); } } else { message("top: record marker count: version=" + version + " count=" + count); @@ -773,12 +884,12 @@ public class TestStressNRTReplication extends LuceneTestCase { try { while (stop.get() == false) { Thread.sleep(TestUtil.nextInt(random(), 50, 500)); - //message("top: restarter cycle"); + // message("top: restarter cycle"); // Randomly crash full cluster: if (DO_FULL_CLUSTER_CRASH && random().nextInt(500) == 17) { message("top: full cluster crash"); - for(int i=0;i downNodes = new ArrayList<>(); StringBuilder b = new StringBuilder(); long nowNS = System.nanoTime(); - for(int i=0;i 0) { Thread.sleep(10); @@ -896,7 +1009,7 @@ public class TestStressNRTReplication extends LuceneTestCase { Query theQuery = new TermQuery(new Term("body", "the")); // Persists connections - Map connections = new HashMap<>(); + Map connections = new HashMap<>(); while (stop.get() == false) { NodeProcess node = nodes[random().nextInt(nodes.length)]; @@ -915,18 +1028,20 @@ public class TestStressNRTReplication extends LuceneTestCase { Thread.currentThread().setName("Searcher node=" + node); - //System.out.println("S: cycle; conns=" + connections); + // System.out.println("S: cycle; conns=" + connections); Connection c = connections.get(node.id); long version; try { if (c == null) { - //System.out.println("S: new connection " + node.id + " " + Thread.currentThread().getName()); + // System.out.println("S: new connection " + node.id + " " + + // Thread.currentThread().getName()); c = new Connection(node.tcpPort); connections.put(node.id, c); } else { - //System.out.println("S: reuse connection " + node.id + " " + Thread.currentThread().getName()); + // System.out.println("S: reuse connection " + node.id + " " + + // Thread.currentThread().getName()); } c.out.writeByte(SimplePrimaryNode.CMD_SEARCH); @@ -959,11 +1074,23 @@ public class TestStressNRTReplication extends LuceneTestCase { // TODO: we never prune this map... if (oldHitCount == null) { hitCounts.put(version, hitCount); - message("top: searcher: record search hitCount version=" + version + " hitCount=" + hitCount + " node=" + node); + message( + "top: searcher: record search hitCount version=" + + version + + " hitCount=" + + hitCount + + " node=" + + node); if (nodeIsPrimary && version > lastPrimaryVersion) { - // It's possible a search request sees a new primary version because it's in the process of flushing, but then the primary - // crashes. In this case we need to ensure new primary forces its version beyond this: - message("top: searcher: set lastPrimaryVersion=" + lastPrimaryVersion + " vs " + version); + // It's possible a search request sees a new primary version because it's in the + // process of flushing, but then the primary + // crashes. 
In this case we need to ensure new primary forces its version beyond + // this: + message( + "top: searcher: set lastPrimaryVersion=" + + lastPrimaryVersion + + " vs " + + version); lastPrimaryVersion = version; } } else { @@ -972,24 +1099,40 @@ public class TestStressNRTReplication extends LuceneTestCase { if (oldHitCount.intValue() != hitCount) { failed.set(true); stop.set(true); - message("top: searcher: wrong version hitCount: version=" + version + " oldHitCount=" + oldHitCount.intValue() + " hitCount=" + hitCount); - fail("version=" + version + " oldHitCount=" + oldHitCount.intValue() + " hitCount=" + hitCount); + message( + "top: searcher: wrong version hitCount: version=" + + version + + " oldHitCount=" + + oldHitCount.intValue() + + " hitCount=" + + hitCount); + fail( + "version=" + + version + + " oldHitCount=" + + oldHitCount.intValue() + + " hitCount=" + + hitCount); } } } catch (IOException ioe) { - //message("top: searcher: ignore exc talking to node " + node + ": " + ioe); - //ioe.printStackTrace(System.out); + // message("top: searcher: ignore exc talking to node " + node + ": " + ioe); + // ioe.printStackTrace(System.out); IOUtils.closeWhileHandlingException(c); connections.remove(node.id); continue; } - // This can be null if primary is flushing, has already refreshed its searcher, but is e.g. still notifying replicas and hasn't - // yet returned the version to us, in which case this searcher thread can see the version before the main thread has added it to + // This can be null if primary is flushing, has already refreshed its searcher, but is + // e.g. still notifying replicas and hasn't + // yet returned the version to us, in which case this searcher thread can see the version + // before the main thread has added it to // versionToMarker: Integer expectedAtLeastHitCount = versionToMarker.get(version); - if (expectedAtLeastHitCount != null && expectedAtLeastHitCount > 0 && random().nextInt(10) == 7) { + if (expectedAtLeastHitCount != null + && expectedAtLeastHitCount > 0 + && random().nextInt(10) == 7) { try { c.out.writeByte(SimplePrimaryNode.CMD_MARKER_SEARCH); c.out.writeVInt(expectedAtLeastHitCount); @@ -1019,19 +1162,27 @@ public class TestStressNRTReplication extends LuceneTestCase { int hitCount = c.in.readVInt(); // Look for data loss: make sure all marker docs are visible: - + if (hitCount < expectedAtLeastHitCount) { - String failMessage = "node=" + node + ": documents were lost version=" + version + " hitCount=" + hitCount + " vs expectedAtLeastHitCount=" + expectedAtLeastHitCount; + String failMessage = + "node=" + + node + + ": documents were lost version=" + + version + + " hitCount=" + + hitCount + + " vs expectedAtLeastHitCount=" + + expectedAtLeastHitCount; message(failMessage); failed.set(true); stop.set(true); fail(failMessage); } } catch (IOException ioe) { - //message("top: searcher: ignore exc talking to node " + node + ": " + ioe); - //throw new RuntimeException(ioe); - //ioe.printStackTrace(System.out); + // message("top: searcher: ignore exc talking to node " + node + ": " + ioe); + // throw new RuntimeException(ioe); + // ioe.printStackTrace(System.out); IOUtils.closeWhileHandlingException(c); connections.remove(node.id); continue; @@ -1057,7 +1208,7 @@ public class TestStressNRTReplication extends LuceneTestCase { @Override public void run() { - LineFileDocs docs=null; + LineFileDocs docs = null; try { docs = new LineFileDocs(random()); int docCount = 0; @@ -1071,7 +1222,7 @@ public class TestStressNRTReplication extends LuceneTestCase { 
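On the read side, the searcher thread turns those markers into a data-loss probe: since marker documents are never deleted, any node serving searcher version V must return at least versionToMarker.get(V) hits for the marker query. A sketch of that check with hypothetical names (not the test's exact code):

import java.util.Map;

class MarkerCheck {
  // "markerHits" is a node's hit count for the marker query at "version"; a null
  // expectation means the refresh for that version is still in flight.
  static void checkNoDataLoss(long version, int markerHits, Map<Long, Integer> versionToMarker) {
    Integer expectedAtLeastHitCount = versionToMarker.get(version);
    if (expectedAtLeastHitCount != null && markerHits < expectedAtLeastHitCount) {
      throw new AssertionError(
          "documents were lost: version=" + version + " hitCount=" + markerHits
              + " vs expectedAtLeastHitCount=" + expectedAtLeastHitCount);
    }
  }
}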
message("top: indexer: updatePct=" + updatePct + " sleepChance=" + sleepChance); long lastTransLogLoc = transLog.getNextLocation(); - + NodeProcess curPrimary = null; Connection c = null; @@ -1098,14 +1249,15 @@ public class TestStressNRTReplication extends LuceneTestCase { // We use the marker docs to check for data loss in search thread: Document doc = new Document(); int id = markerID.getAndIncrement(); - String idString = "m"+id; + String idString = "m" + id; doc.add(newStringField("docid", idString, Field.Store.YES)); doc.add(newStringField("marker", "marker", Field.Store.YES)); curPrimary.addOrUpdateDocument(c, doc, false); transLog.addDocument(idString, doc); // Only increment after primary replies: markerUpto.getAndIncrement(); - //message("index marker=" + idString + "; translog is " + Node.bytesToString(Files.size(transLogPath))); + // message("index marker=" + idString + "; translog is " + + // Node.bytesToString(Files.size(transLogPath))); } if (docCount > 0 && random().nextDouble() < updatePct) { @@ -1183,20 +1335,24 @@ public class TestStressNRTReplication extends LuceneTestCase { static void message(String message) { long now = System.nanoTime(); - System.out.println(String.format(Locale.ROOT, - "%5.3fs : parent [%11s] %s", - (now-Node.globalStartNS)/1000000000., - Thread.currentThread().getName(), - message)); + System.out.println( + String.format( + Locale.ROOT, + "%5.3fs : parent [%11s] %s", + (now - Node.globalStartNS) / 1000000000., + Thread.currentThread().getName(), + message)); } static void message(String message, long localStartNS) { long now = System.nanoTime(); - System.out.println(String.format(Locale.ROOT, - "%5.3fs %5.1fs: parent [%11s] %s", - (now-Node.globalStartNS)/1000000000., - (now-localStartNS)/1000000000., - Thread.currentThread().getName(), - message)); + System.out.println( + String.format( + Locale.ROOT, + "%5.3fs %5.1fs: parent [%11s] %s", + (now - Node.globalStartNS) / 1000000000., + (now - localStartNS) / 1000000000., + Thread.currentThread().getName(), + message)); } } diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/ThreadPumper.java b/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/ThreadPumper.java index ff57ea67c98..41abb49fb79 100644 --- a/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/ThreadPumper.java +++ b/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/ThreadPumper.java @@ -26,37 +26,43 @@ import java.util.regex.Pattern; /** A pipe thread. It'd be nice to reuse guava's implementation for this... 
*/ class ThreadPumper { - public static Thread start(final Runnable onExit, final BufferedReader from, final PrintStream to, final Writer toFile, final AtomicBoolean nodeClosing) { - Thread t = new Thread() { - @Override - public void run() { - try { - long startTimeNS = System.nanoTime(); - Pattern logTimeStart = Pattern.compile("^[0-9\\.]+s .*"); - String line; - while ((line = from.readLine()) != null) { - if (toFile != null) { - toFile.write(line); - toFile.write("\n"); - toFile.flush(); - } else if (logTimeStart.matcher(line).matches()) { - // Already a well-formed log output: - System.out.println(line); - } else { - TestStressNRTReplication.message(line, startTimeNS); - } - if (line.contains("now force close server socket after")) { - nodeClosing.set(true); + public static Thread start( + final Runnable onExit, + final BufferedReader from, + final PrintStream to, + final Writer toFile, + final AtomicBoolean nodeClosing) { + Thread t = + new Thread() { + @Override + public void run() { + try { + long startTimeNS = System.nanoTime(); + Pattern logTimeStart = Pattern.compile("^[0-9\\.]+s .*"); + String line; + while ((line = from.readLine()) != null) { + if (toFile != null) { + toFile.write(line); + toFile.write("\n"); + toFile.flush(); + } else if (logTimeStart.matcher(line).matches()) { + // Already a well-formed log output: + System.out.println(line); + } else { + TestStressNRTReplication.message(line, startTimeNS); + } + if (line.contains("now force close server socket after")) { + nodeClosing.set(true); + } } + // Sub-process finished + } catch (IOException e) { + System.err.println("ignore IOExc reading from forked process pipe: " + e); + } finally { + onExit.run(); } - // Sub-process finished - } catch (IOException e) { - System.err.println("ignore IOExc reading from forked process pipe: " + e); - } finally { - onExit.run(); } - } - }; + }; t.start(); return t; } diff --git a/lucene/sandbox/src/java/org/apache/lucene/payloads/PayloadSpanCollector.java b/lucene/sandbox/src/java/org/apache/lucene/payloads/PayloadSpanCollector.java index cbade087bd2..8997142d509 100644 --- a/lucene/sandbox/src/java/org/apache/lucene/payloads/PayloadSpanCollector.java +++ b/lucene/sandbox/src/java/org/apache/lucene/payloads/PayloadSpanCollector.java @@ -16,18 +16,15 @@ */ package org.apache.lucene.payloads; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; import org.apache.lucene.index.PostingsEnum; import org.apache.lucene.index.Term; import org.apache.lucene.search.spans.SpanCollector; import org.apache.lucene.util.BytesRef; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; - -/** - * SpanCollector for collecting payloads - */ +/** SpanCollector for collecting payloads */ public class PayloadSpanCollector implements SpanCollector { private final Collection<byte[]> payloads = new ArrayList<>(); @@ -35,8 +32,9 @@ public class PayloadSpanCollector implements SpanCollector { @Override public void collectLeaf(PostingsEnum postings, int position, Term term) throws IOException { BytesRef payload = postings.getPayload(); - if (payload == null) + if (payload == null) { return; + } final byte[] bytes = new byte[payload.length]; System.arraycopy(payload.bytes, payload.offset, bytes, 0, payload.length); payloads.add(bytes); @@ -47,9 +45,7 @@ public class PayloadSpanCollector implements SpanCollector { payloads.clear(); } - /** - * @return the collected payloads - */ + /** @return the collected payloads */ public Collection<byte[]> getPayloads() {
return payloads; } diff --git a/lucene/sandbox/src/java/org/apache/lucene/payloads/PayloadSpanUtil.java b/lucene/sandbox/src/java/org/apache/lucene/payloads/PayloadSpanUtil.java index 3cc7701fb46..05a33abf099 100644 --- a/lucene/sandbox/src/java/org/apache/lucene/payloads/PayloadSpanUtil.java +++ b/lucene/sandbox/src/java/org/apache/lucene/payloads/PayloadSpanUtil.java @@ -21,7 +21,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Iterator; import java.util.List; - import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.LeafReaderContext; @@ -38,25 +37,21 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.spans.SpanNearQuery; import org.apache.lucene.search.spans.SpanOrQuery; import org.apache.lucene.search.spans.SpanQuery; -import org.apache.lucene.search.spans.Spans; import org.apache.lucene.search.spans.SpanTermQuery; import org.apache.lucene.search.spans.SpanWeight; +import org.apache.lucene.search.spans.Spans; /** - * Experimental class to get set of payloads for most standard Lucene queries. - * Operates like Highlighter - IndexReader should only contain doc of interest, - * best to use MemoryIndex. + * Experimental class to get set of payloads for most standard Lucene queries. Operates like + * Highlighter - IndexReader should only contain doc of interest, best to use MemoryIndex. * * @lucene.experimental - * */ public class PayloadSpanUtil { private IndexReaderContext context; /** - * @param context - * that contains doc with payloads to extract - * + * @param context that contains doc with payloads to extract * @see IndexReader#getContext() */ public PayloadSpanUtil(IndexReaderContext context) { @@ -65,7 +60,7 @@ public class PayloadSpanUtil { /** * Query should be rewritten for wild/fuzzy support. 
- * + * @param query rewritten query + * @return payloads Collection + * @throws IOException if there is a low-level I/O error @@ -76,8 +71,7 @@ return payloads; } - private void queryToSpanQuery(Query query, Collection<byte[]> payloads) - throws IOException { + private void queryToSpanQuery(Query query, Collection<byte[]> payloads) throws IOException { if (query instanceof BooleanQuery) { for (BooleanClause clause : (BooleanQuery) query) { if (!clause.isProhibited()) { @@ -108,8 +102,8 @@ getPayloads(payloads, (SpanQuery) query); } else if (query instanceof DisjunctionMaxQuery) { - for (Iterator<Query> iterator = ((DisjunctionMaxQuery) query).iterator(); iterator - .hasNext();) { + for (Iterator<Query> iterator = ((DisjunctionMaxQuery) query).iterator(); + iterator.hasNext(); ) { queryToSpanQuery(iterator.next(), payloads); } @@ -126,16 +120,15 @@ } } - @SuppressWarnings({"rawtypes","unchecked"}) final List<SpanQuery>[] disjunctLists = - new List[maxPosition + 1]; + @SuppressWarnings({"rawtypes", "unchecked"}) + final List<SpanQuery>[] disjunctLists = new List[maxPosition + 1]; int distinctPositions = 0; for (int i = 0; i < termArrays.length; ++i) { final Term[] termArray = termArrays[i]; List<SpanQuery> disjuncts = disjunctLists[positions[i]]; if (disjuncts == null) { - disjuncts = (disjunctLists[positions[i]] = new ArrayList<>( - termArray.length)); + disjuncts = (disjunctLists[positions[i]] = new ArrayList<>(termArray.length)); ++distinctPositions; } for (final Term term : termArray) { @@ -149,8 +142,8 @@ for (int i = 0; i < disjunctLists.length; ++i) { List<SpanQuery> disjuncts = disjunctLists[i]; if (disjuncts != null) { - clauses[position++] = new SpanOrQuery(disjuncts - .toArray(new SpanQuery[disjuncts.size()])); + clauses[position++] = + new SpanOrQuery(disjuncts.toArray(new SpanQuery[disjuncts.size()])); } else { ++positionGaps; } @@ -159,15 +152,13 @@ final int slop = mpq.getSlop(); final boolean inorder = (slop == 0); - SpanNearQuery sp = new SpanNearQuery(clauses, slop + positionGaps, - inorder); + SpanNearQuery sp = new SpanNearQuery(clauses, slop + positionGaps, inorder); getPayloads(payloads, sp); } } } - private void getPayloads(Collection<byte[]> payloads, SpanQuery query) - throws IOException { + private void getPayloads(Collection<byte[]> payloads, SpanQuery query) throws IOException { final IndexSearcher searcher = new IndexSearcher(context); searcher.setQueryCache(null); diff --git a/lucene/sandbox/src/java/org/apache/lucene/payloads/package-info.java b/lucene/sandbox/src/java/org/apache/lucene/payloads/package-info.java index b2a6bb3ad32..26d74a31818 100644 --- a/lucene/sandbox/src/java/org/apache/lucene/payloads/package-info.java +++ b/lucene/sandbox/src/java/org/apache/lucene/payloads/package-info.java @@ -15,7 +15,5 @@ * limitations under the License.
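The PayloadSpanUtil javadoc above suggests the intended usage pattern: point it at a reader over just the document of interest and hand it the (rewritten) query. A minimal sketch, assuming an index whose body field was analyzed with a payload-producing filter such as DelimitedPayloadTokenFilter; the field and term are illustrative:

import java.io.IOException;
import java.util.Collection;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.payloads.PayloadSpanUtil;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;

class PayloadLookup {
  // Collects the payloads at every matching "body:brown" position in the
  // (ideally single-document) index held in "dir".
  static Collection<byte[]> payloadsFor(Directory dir) throws IOException {
    try (IndexReader reader = DirectoryReader.open(dir)) {
      PayloadSpanUtil psu = new PayloadSpanUtil(reader.getContext());
      return psu.getPayloadsForQuery(new TermQuery(new Term("body", "brown")));
    }
  }
}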
*/ -/** - * Experimental classes for interacting with payloads - */ +/** Experimental classes for interacting with payloads */ package org.apache.lucene.payloads; diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/codecs/idversion/IDVersionPostingsFormat.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/codecs/idversion/IDVersionPostingsFormat.java index bf20de290bc..614e172e75b 100644 --- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/codecs/idversion/IDVersionPostingsFormat.java +++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/codecs/idversion/IDVersionPostingsFormat.java @@ -17,7 +17,6 @@ package org.apache.lucene.sandbox.codecs.idversion; import java.io.IOException; - import org.apache.lucene.codecs.FieldsConsumer; import org.apache.lucene.codecs.FieldsProducer; import org.apache.lucene.codecs.PostingsFormat; @@ -30,31 +29,27 @@ import org.apache.lucene.search.LiveFieldValues; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IOUtils; -/** A PostingsFormat optimized for primary-key (ID) fields that also - * record a version (long) for each ID, delivered as a payload - * created by {@link #longToBytes} during indexing. At search time, - * the TermsEnum implementation {@link IDVersionSegmentTermsEnum} - * enables fast (using only the terms index when possible) lookup for - * whether a given ID was previously indexed with version > N (see - * {@link IDVersionSegmentTermsEnum#seekExact(BytesRef,long)}. +/** + * A PostingsFormat optimized for primary-key (ID) fields that also record a version (long) for each + * ID, delivered as a payload created by {@link #longToBytes} during indexing. At search time, the + * TermsEnum implementation {@link IDVersionSegmentTermsEnum} enables fast (using only the terms + * index when possible) lookup for whether a given ID was previously indexed with version > N + * (see {@link IDVersionSegmentTermsEnum#seekExact(BytesRef,long)}. * - *
<p> This is most effective if the app assigns monotonically - * increasing global version to each indexed doc. Then, during - * indexing, use {@link - * IDVersionSegmentTermsEnum#seekExact(BytesRef,long)} (along with - * {@link LiveFieldValues}) to decide whether the document you are - * about to index was already indexed with a higher version, and skip - * it if so. + *
<p>This is most effective if the app assigns monotonically increasing global version to each + * indexed doc. Then, during indexing, use {@link + * IDVersionSegmentTermsEnum#seekExact(BytesRef,long)} (along with {@link LiveFieldValues}) to + * decide whether the document you are about to index was already indexed with a higher version, and + * skip it if so. * - *
<p> The field is effectively indexed as DOCS_ONLY and the docID is - * pulsed into the terms dictionary, but the user must feed in the - * version as a payload on the first token. + *
<p>The field is effectively indexed as DOCS_ONLY and the docID is pulsed into the terms + * dictionary, but the user must feed in the version as a payload on the first token. * - *
<p> NOTE: term vectors cannot be indexed with this field (not that - * you should really ever want to do this). + *
<p>NOTE: term vectors cannot be indexed with this field (not that you should really ever want to + * do this). * - * @lucene.experimental */ - + * @lucene.experimental + */ public class IDVersionPostingsFormat extends PostingsFormat { /** version must be >= this. */ @@ -84,10 +79,8 @@ public class IDVersionPostingsFormat extends PostingsFormat { PostingsWriterBase postingsWriter = new IDVersionPostingsWriter(state.liveDocs); boolean success = false; try { - FieldsConsumer ret = new VersionBlockTreeTermsWriter(state, - postingsWriter, - minTermsInBlock, - maxTermsInBlock); + FieldsConsumer ret = + new VersionBlockTreeTermsWriter(state, postingsWriter, minTermsInBlock, maxTermsInBlock); success = true; return ret; } finally { @@ -101,31 +94,38 @@ public class IDVersionPostingsFormat extends PostingsFormat { public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException { PostingsReaderBase postingsReader = new IDVersionPostingsReader(); boolean success = false; - try { - FieldsProducer ret = new VersionBlockTreeTermsReader(postingsReader, state); - success = true; - return ret; - } finally { - if (!success) { - IOUtils.closeWhileHandlingException(postingsReader); - } - } + try { + FieldsProducer ret = new VersionBlockTreeTermsReader(postingsReader, state); + success = true; + return ret; + } finally { + if (!success) { + IOUtils.closeWhileHandlingException(postingsReader); + } + } } public static long bytesToLong(BytesRef bytes) { - return ((bytes.bytes[bytes.offset]&0xFFL) << 56) | - ((bytes.bytes[bytes.offset+1]&0xFFL) << 48) | - ((bytes.bytes[bytes.offset+2]&0xFFL) << 40) | - ((bytes.bytes[bytes.offset+3]&0xFFL) << 32) | - ((bytes.bytes[bytes.offset+4]&0xFFL) << 24) | - ((bytes.bytes[bytes.offset+5]&0xFFL) << 16) | - ((bytes.bytes[bytes.offset+6]&0xFFL) << 8) | - (bytes.bytes[bytes.offset+7]&0xFFL); + return ((bytes.bytes[bytes.offset] & 0xFFL) << 56) + | ((bytes.bytes[bytes.offset + 1] & 0xFFL) << 48) + | ((bytes.bytes[bytes.offset + 2] & 0xFFL) << 40) + | ((bytes.bytes[bytes.offset + 3] & 0xFFL) << 32) + | ((bytes.bytes[bytes.offset + 4] & 0xFFL) << 24) + | ((bytes.bytes[bytes.offset + 5] & 0xFFL) << 16) + | ((bytes.bytes[bytes.offset + 6] & 0xFFL) << 8) + | (bytes.bytes[bytes.offset + 7] & 0xFFL); } public static void longToBytes(long v, BytesRef bytes) { if (v > MAX_VERSION || v < MIN_VERSION) { - throw new IllegalArgumentException("version must be >= MIN_VERSION=" + MIN_VERSION + " and <= MAX_VERSION=" + MAX_VERSION + " (got: " + v + ")"); + throw new IllegalArgumentException( + "version must be >= MIN_VERSION=" + + MIN_VERSION + + " and <= MAX_VERSION=" + + MAX_VERSION + + " (got: " + + v + + ")"); } bytes.offset = 0; bytes.length = 8; @@ -137,6 +137,6 @@ public class IDVersionPostingsFormat extends PostingsFormat { bytes.bytes[5] = (byte) (v >> 16); bytes.bytes[6] = (byte) (v >> 8); bytes.bytes[7] = (byte) v; - assert bytesToLong(bytes) == v: bytesToLong(bytes) + " vs " + v + " bytes=" + bytes; + assert bytesToLong(bytes) == v : bytesToLong(bytes) + " vs " + v + " bytes=" + bytes; } } diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/codecs/idversion/IDVersionPostingsReader.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/codecs/idversion/IDVersionPostingsReader.java index cae6ab48836..b892a0604f7 100644 --- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/codecs/idversion/IDVersionPostingsReader.java +++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/codecs/idversion/IDVersionPostingsReader.java @@ -17,7 +17,6 @@ package
org.apache.lucene.sandbox.codecs.idversion; import java.io.IOException; - import org.apache.lucene.codecs.BlockTermState; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.codecs.PostingsReaderBase; @@ -33,11 +32,13 @@ final class IDVersionPostingsReader extends PostingsReaderBase { @Override public void init(IndexInput termsIn, SegmentReadState state) throws IOException { // Make sure we are talking to the matching postings writer - CodecUtil.checkIndexHeader(termsIn, - IDVersionPostingsWriter.TERMS_CODEC, - IDVersionPostingsWriter.VERSION_START, - IDVersionPostingsWriter.VERSION_CURRENT, - state.segmentInfo.getId(), state.segmentSuffix); + CodecUtil.checkIndexHeader( + termsIn, + IDVersionPostingsWriter.TERMS_CODEC, + IDVersionPostingsWriter.VERSION_START, + IDVersionPostingsWriter.VERSION_CURRENT, + state.segmentInfo.getId(), + state.segmentSuffix); } @Override @@ -46,12 +47,12 @@ final class IDVersionPostingsReader extends PostingsReaderBase { } @Override - public void close() throws IOException { - } + public void close() throws IOException {} @Override - public void decodeTerm(DataInput in, FieldInfo fieldInfo, BlockTermState _termState, boolean absolute) - throws IOException { + public void decodeTerm( + DataInput in, FieldInfo fieldInfo, BlockTermState _termState, boolean absolute) + throws IOException { final IDVersionTermState termState = (IDVersionTermState) _termState; termState.docID = in.readVInt(); if (absolute) { @@ -62,7 +63,9 @@ final class IDVersionPostingsReader extends PostingsReaderBase { } @Override - public PostingsEnum postings(FieldInfo fieldInfo, BlockTermState termState, PostingsEnum reuse, int flags) throws IOException { + public PostingsEnum postings( + FieldInfo fieldInfo, BlockTermState termState, PostingsEnum reuse, int flags) + throws IOException { SingleDocsEnum docsEnum; if (PostingsEnum.featureRequested(flags, PostingsEnum.POSITIONS)) { @@ -89,8 +92,10 @@ final class IDVersionPostingsReader extends PostingsReaderBase { } @Override - public ImpactsEnum impacts(FieldInfo fieldInfo, BlockTermState state, int flags) throws IOException { - throw new UnsupportedOperationException("Should never be called, IDVersionSegmentTermsEnum implements impacts directly"); + public ImpactsEnum impacts(FieldInfo fieldInfo, BlockTermState state, int flags) + throws IOException { + throw new UnsupportedOperationException( + "Should never be called, IDVersionSegmentTermsEnum implements impacts directly"); } @Override @@ -99,8 +104,7 @@ final class IDVersionPostingsReader extends PostingsReaderBase { } @Override - public void checkIntegrity() throws IOException { - } + public void checkIntegrity() throws IOException {} @Override public String toString() { diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/codecs/idversion/IDVersionPostingsWriter.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/codecs/idversion/IDVersionPostingsWriter.java index e454f766ee0..477fa2e4a6c 100644 --- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/codecs/idversion/IDVersionPostingsWriter.java +++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/codecs/idversion/IDVersionPostingsWriter.java @@ -17,7 +17,6 @@ package org.apache.lucene.sandbox.codecs.idversion; import java.io.IOException; - import org.apache.lucene.codecs.BlockTermState; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.codecs.PushPostingsWriterBase; @@ -32,13 +31,13 @@ import org.apache.lucene.util.BytesRef; final class IDVersionPostingsWriter extends 
PushPostingsWriterBase { - final static String TERMS_CODEC = "IDVersionPostingsWriterTerms"; + static final String TERMS_CODEC = "IDVersionPostingsWriterTerms"; // Increment version to change it - final static int VERSION_START = 1; - final static int VERSION_CURRENT = VERSION_START; + static final int VERSION_START = 1; + static final int VERSION_CURRENT = VERSION_START; - final static IDVersionTermState emptyState = new IDVersionTermState(); + static final IDVersionTermState emptyState = new IDVersionTermState(); IDVersionTermState lastState; int lastDocID; @@ -58,19 +57,23 @@ final class IDVersionPostingsWriter extends PushPostingsWriterBase { @Override public void init(IndexOutput termsOut, SegmentWriteState state) throws IOException { - CodecUtil.writeIndexHeader(termsOut, TERMS_CODEC, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix); + CodecUtil.writeIndexHeader( + termsOut, TERMS_CODEC, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix); } @Override public void setField(FieldInfo fieldInfo) { super.setField(fieldInfo); if (fieldInfo.getIndexOptions() != IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) { - throw new IllegalArgumentException("field must be index using IndexOptions.DOCS_AND_FREQS_AND_POSITIONS"); + throw new IllegalArgumentException( + "field must be index using IndexOptions.DOCS_AND_FREQS_AND_POSITIONS"); } - // LUCENE-5693: because CheckIndex cross-checks term vectors with postings even for deleted docs, and because our PF only indexes the + // LUCENE-5693: because CheckIndex cross-checks term vectors with postings even for deleted + // docs, and because our PF only indexes the // non-deleted documents on flush, CheckIndex will see this as corruption: if (fieldInfo.hasVectors()) { - throw new IllegalArgumentException("field cannot index term vectors: CheckIndex will report this as index corruption"); + throw new IllegalArgumentException( + "field cannot index term vectors: CheckIndex will report this as index corruption"); } lastState = emptyState; } @@ -82,12 +85,14 @@ final class IDVersionPostingsWriter extends PushPostingsWriterBase { @Override public void startDoc(int docID, int termDocFreq) throws IOException { - // TODO: LUCENE-5693: we don't need this check if we fix IW to not send deleted docs to us on flush: + // TODO: LUCENE-5693: we don't need this check if we fix IW to not send deleted docs to us on + // flush: if (liveDocs != null && liveDocs.get(docID) == false) { return; } if (lastDocID != -1) { - throw new IllegalArgumentException("term appears in more than one document: " + lastDocID + " and " + docID); + throw new IllegalArgumentException( + "term appears in more than one document: " + lastDocID + " and " + docID); } if (termDocFreq != 1) { throw new IllegalArgumentException("term appears more than once in the document"); @@ -99,7 +104,8 @@ final class IDVersionPostingsWriter extends PushPostingsWriterBase { } @Override - public void addPosition(int position, BytesRef payload, int startOffset, int endOffset) throws IOException { + public void addPosition(int position, BytesRef payload, int startOffset, int endOffset) + throws IOException { if (lastDocID == -1) { // Doc is deleted; skip it return; @@ -117,10 +123,24 @@ final class IDVersionPostingsWriter extends PushPostingsWriterBase { lastVersion = IDVersionPostingsFormat.bytesToLong(payload); if (lastVersion < IDVersionPostingsFormat.MIN_VERSION) { - throw new IllegalArgumentException("version must be >= MIN_VERSION=" + IDVersionPostingsFormat.MIN_VERSION + " (got: " + 
lastVersion + "; payload=" + payload + ")"); + throw new IllegalArgumentException( + "version must be >= MIN_VERSION=" + + IDVersionPostingsFormat.MIN_VERSION + + " (got: " + + lastVersion + + "; payload=" + + payload + + ")"); } if (lastVersion > IDVersionPostingsFormat.MAX_VERSION) { - throw new IllegalArgumentException("version must be <= MAX_VERSION=" + IDVersionPostingsFormat.MAX_VERSION + " (got: " + lastVersion + "; payload=" + payload + ")"); + throw new IllegalArgumentException( + "version must be <= MAX_VERSION=" + + IDVersionPostingsFormat.MAX_VERSION + + " (got: " + + lastVersion + + "; payload=" + + payload + + ")"); } } @@ -151,7 +171,9 @@ final class IDVersionPostingsWriter extends PushPostingsWriterBase { private long lastEncodedVersion; @Override - public void encodeTerm(DataOutput out, FieldInfo fieldInfo, BlockTermState _state, boolean absolute) throws IOException { + public void encodeTerm( + DataOutput out, FieldInfo fieldInfo, BlockTermState _state, boolean absolute) + throws IOException { IDVersionTermState state = (IDVersionTermState) _state; out.writeVInt(state.docID); if (absolute) { @@ -164,6 +186,5 @@ final class IDVersionPostingsWriter extends PushPostingsWriterBase { } @Override - public void close() throws IOException { - } + public void close() throws IOException {} } diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/codecs/idversion/IDVersionSegmentTermsEnum.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/codecs/idversion/IDVersionSegmentTermsEnum.java index 76617feec4d..163060b3943 100644 --- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/codecs/idversion/IDVersionSegmentTermsEnum.java +++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/codecs/idversion/IDVersionSegmentTermsEnum.java @@ -18,7 +18,6 @@ package org.apache.lucene.sandbox.codecs.idversion; import java.io.IOException; import java.io.PrintStream; - import org.apache.lucene.codecs.BlockTermState; import org.apache.lucene.index.BaseTermsEnum; import org.apache.lucene.index.ImpactsEnum; @@ -35,10 +34,11 @@ import org.apache.lucene.util.fst.FST; import org.apache.lucene.util.fst.PairOutputs.Pair; import org.apache.lucene.util.fst.Util; -/** Iterates through terms in this field; this class is public so users - * can cast it to call {@link #seekExact(BytesRef, long)} for - * optimistic-concurrency, and also {@link #getVersion} to get the - * version of the currently seek'd term. */ +/** + * Iterates through terms in this field; this class is public so users can cast it to call {@link + * #seekExact(BytesRef, long)} for optimistic-concurrency, and also {@link #getVersion} to get the + * version of the currently seek'd term. 
+ */ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { // Lazy init: @@ -65,15 +65,15 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { final BytesRefBuilder term = new BytesRefBuilder(); private final FST.BytesReader fstReader; - @SuppressWarnings({"rawtypes","unchecked"}) private FST.Arc>[] arcs = - new FST.Arc[1]; + @SuppressWarnings({"rawtypes", "unchecked"}) + private FST.Arc>[] arcs = new FST.Arc[1]; IDVersionSegmentTermsEnum(VersionFieldReader fr) throws IOException { this.fr = fr; - //if (DEBUG) System.out.println("BTTR.init seg=" + segment); + // if (DEBUG) System.out.println("BTTR.init seg=" + segment); stack = new IDVersionSegmentTermsEnumFrame[0]; - + // Used to hold seek by TermState, or cached seek staticFrame = new IDVersionSegmentTermsEnumFrame(this, -1); @@ -85,12 +85,12 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { // Init w/ root block; don't use index since it may // not (and need not) have been loaded - for(int arcIdx=0;arcIdx(); } currentFrame = staticFrame; - final FST.Arc> arc; + final FST.Arc> arc; if (fr.index != null) { arc = fr.index.getFirstArc(arcs[0]); // Empty string prefix must have an output in the index! @@ -99,18 +99,18 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { arc = null; } currentFrame = staticFrame; - //currentFrame = pushFrame(arc, rootCode, 0); - //currentFrame.loadBlock(); + // currentFrame = pushFrame(arc, rootCode, 0); + // currentFrame.loadBlock(); validIndexPrefix = 0; // if (DEBUG) { // System.out.println("init frame state " + currentFrame.ord); // printSeekState(); // } - //System.out.println(); + // System.out.println(); // computeBlockStats().print(System.out); } - + // Not private to avoid synthetic access$NNN methods void initIndexInput() { if (this.in == null) { @@ -120,9 +120,11 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { private IDVersionSegmentTermsEnumFrame getFrame(int ord) throws IOException { if (ord >= stack.length) { - final IDVersionSegmentTermsEnumFrame[] next = new IDVersionSegmentTermsEnumFrame[ArrayUtil.oversize(1+ord, RamUsageEstimator.NUM_BYTES_OBJECT_REF)]; + final IDVersionSegmentTermsEnumFrame[] next = + new IDVersionSegmentTermsEnumFrame + [ArrayUtil.oversize(1 + ord, RamUsageEstimator.NUM_BYTES_OBJECT_REF)]; System.arraycopy(stack, 0, next, 0, stack.length); - for(int stackOrd=stack.length;stackOrd> getArc(int ord) { + private FST.Arc> getArc(int ord) { if (ord >= arcs.length) { - @SuppressWarnings({"rawtypes","unchecked"}) final FST.Arc>[] next = - new FST.Arc[ArrayUtil.oversize(1+ord, RamUsageEstimator.NUM_BYTES_OBJECT_REF)]; + @SuppressWarnings({"rawtypes", "unchecked"}) + final FST.Arc>[] next = + new FST.Arc[ArrayUtil.oversize(1 + ord, RamUsageEstimator.NUM_BYTES_OBJECT_REF)]; System.arraycopy(arcs, 0, next, 0, arcs.length); - for(int arcOrd=arcs.length;arcOrd(); } arcs = next; @@ -145,11 +148,14 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { } // Pushes a frame we seek'd to - IDVersionSegmentTermsEnumFrame pushFrame(FST.Arc> arc, Pair frameData, int length) throws IOException { - scratchReader.reset(frameData.output1.bytes, frameData.output1.offset, frameData.output1.length); + IDVersionSegmentTermsEnumFrame pushFrame( + FST.Arc> arc, Pair frameData, int length) + throws IOException { + scratchReader.reset( + frameData.output1.bytes, frameData.output1.offset, frameData.output1.length); final long code = scratchReader.readVLong(); final long fpSeek = code >>> 
VersionBlockTreeTermsWriter.OUTPUT_FLAGS_NUM_BITS; - final IDVersionSegmentTermsEnumFrame f = getFrame(1+currentFrame.ord); + final IDVersionSegmentTermsEnumFrame f = getFrame(1 + currentFrame.ord); f.maxIDVersion = Long.MAX_VALUE - frameData.output2; f.hasTerms = (code & VersionBlockTreeTermsWriter.OUTPUT_FLAG_HAS_TERMS) != 0; f.hasTermsOrig = f.hasTerms; @@ -164,11 +170,15 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { // Pushes next'd frame or seek'd frame; we later // lazy-load the frame only when needed - IDVersionSegmentTermsEnumFrame pushFrame(FST.Arc> arc, long fp, int length) throws IOException { - final IDVersionSegmentTermsEnumFrame f = getFrame(1+currentFrame.ord); + IDVersionSegmentTermsEnumFrame pushFrame(FST.Arc> arc, long fp, int length) + throws IOException { + final IDVersionSegmentTermsEnumFrame f = getFrame(1 + currentFrame.ord); f.arc = arc; if (f.fpOrig == fp && f.nextEnt != -1) { - //if (DEBUG) System.out.println(" push reused frame ord=" + f.ord + " fp=" + f.fp + " isFloor?=" + f.isFloor + " hasTerms=" + f.hasTerms + " pref=" + term + " nextEnt=" + f.nextEnt + " targetBeforeCurrentLength=" + targetBeforeCurrentLength + " term.length=" + term.length + " vs prefix=" + f.prefix); + // if (DEBUG) System.out.println(" push reused frame ord=" + f.ord + " fp=" + f.fp + " + // isFloor?=" + f.isFloor + " hasTerms=" + f.hasTerms + " pref=" + term + " nextEnt=" + + // f.nextEnt + " targetBeforeCurrentLength=" + targetBeforeCurrentLength + " term.length=" + + // term.length + " vs prefix=" + f.prefix); if (f.prefix > targetBeforeCurrentLength) { f.rewind(); } else { @@ -186,7 +196,8 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { // if (DEBUG) { // final int sav = term.length; // term.length = length; - // System.out.println(" push new frame ord=" + f.ord + " fp=" + f.fp + " hasTerms=" + f.hasTerms + " isFloor=" + f.isFloor + " pref=" + brToString(term)); + // System.out.println(" push new frame ord=" + f.ord + " fp=" + f.fp + " hasTerms=" + + // f.hasTerms + " isFloor=" + f.isFloor + " pref=" + brToString(term)); // term.length = sav; // } } @@ -224,17 +235,17 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { } } - /** Get the version of the currently seek'd term; only valid if we are - * positioned. */ + /** Get the version of the currently seek'd term; only valid if we are positioned. */ public long getVersion() { return ((IDVersionTermState) currentFrame.state).idVersion; } - /** Optimized version of {@link #seekExact(BytesRef)} that can - * sometimes fail-fast if the version indexed with the requested ID - * is less than the specified minIDVersion. Applications that index - * a monotonically increasing global version with each document can - * use this for fast optimistic concurrency. */ + /** + * Optimized version of {@link #seekExact(BytesRef)} that can sometimes fail-fast if the version + * indexed with the requested ID is less than the specified minIDVersion. Applications that index + * a monotonically increasing global version with each document can use this for fast optimistic + * concurrency. 
+ */ public boolean seekExact(final BytesRef target, long minIDVersion) throws IOException { if (fr.index == null) { @@ -246,13 +257,15 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { assert clearEOF(); // if (DEBUG) { - // System.out.println("\nBTTR.seekExact seg=" + fr.parent.segment + " target=" + fr.fieldInfo.name + ":" + brToString(target) + " minIDVersion=" + minIDVersion + " current=" + brToString(term) + " (exists?=" + termExists + ") validIndexPrefix=" + validIndexPrefix); + // System.out.println("\nBTTR.seekExact seg=" + fr.parent.segment + " target=" + + // fr.fieldInfo.name + ":" + brToString(target) + " minIDVersion=" + minIDVersion + " current=" + // + brToString(term) + " (exists?=" + termExists + ") validIndexPrefix=" + validIndexPrefix); // printSeekState(System.out); // } - FST.Arc> arc; + FST.Arc> arc; int targetUpto; - Pair output; + Pair output; long startFrameFP = currentFrame.fp; @@ -260,7 +273,8 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { boolean changed = false; - // TODO: we could stop earlier w/ the version check, every time we traverse an index arc we can check? + // TODO: we could stop earlier w/ the version check, every time we traverse an index arc we can + // check? if (currentFrame != staticFrame) { @@ -281,7 +295,13 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { targetUpto = 0; IDVersionSegmentTermsEnumFrame lastFrame = stack[0]; - assert validIndexPrefix <= term.length(): "validIndexPrefix=" + validIndexPrefix + " term.length=" + term.length() + " seg=" + fr.parent; + assert validIndexPrefix <= term.length() + : "validIndexPrefix=" + + validIndexPrefix + + " term.length=" + + term.length() + + " seg=" + + fr.parent; final int targetLimit = Math.min(target.length, validIndexPrefix); @@ -292,23 +312,31 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { // First compare up to valid seek frames: while (targetUpto < targetLimit) { - cmp = (term.byteAt(targetUpto)&0xFF) - (target.bytes[target.offset + targetUpto]&0xFF); + cmp = (term.byteAt(targetUpto) & 0xFF) - (target.bytes[target.offset + targetUpto] & 0xFF); // if (DEBUG) { - // System.out.println(" cycle targetUpto=" + targetUpto + " (vs limit=" + targetLimit + ") cmp=" + cmp + " (targetLabel=" + (char) (target.bytes[target.offset + targetUpto]) + " vs termLabel=" + (char) (term.bytes[targetUpto]) + ")" + " arc.output=" + arc.output + " output=" + output); + // System.out.println(" cycle targetUpto=" + targetUpto + " (vs limit=" + targetLimit + // + ") cmp=" + cmp + " (targetLabel=" + (char) (target.bytes[target.offset + targetUpto]) + + // " vs termLabel=" + (char) (term.bytes[targetUpto]) + ")" + " arc.output=" + arc.output + // + " output=" + output); // } if (cmp != 0) { break; } - arc = arcs[1+targetUpto]; - //if (arc.label != (target.bytes[target.offset + targetUpto] & 0xFF)) { - //System.out.println("FAIL: arc.label=" + (char) arc.label + " targetLabel=" + (char) (target.bytes[target.offset + targetUpto] & 0xFF)); - //} - assert arc.label() == (target.bytes[target.offset + targetUpto] & 0xFF): "arc.label=" + (char) arc.label() + " targetLabel=" + (char) (target.bytes[target.offset + targetUpto] & 0xFF); + arc = arcs[1 + targetUpto]; + // if (arc.label != (target.bytes[target.offset + targetUpto] & 0xFF)) { + // System.out.println("FAIL: arc.label=" + (char) arc.label + " targetLabel=" + (char) + // (target.bytes[target.offset + targetUpto] & 0xFF)); + // } + assert arc.label() == 
(target.bytes[target.offset + targetUpto] & 0xFF) + : "arc.label=" + + (char) arc.label() + + " targetLabel=" + + (char) (target.bytes[target.offset + targetUpto] & 0xFF); if (arc.output() != VersionBlockTreeTermsWriter.NO_OUTPUT) { output = VersionBlockTreeTermsWriter.FST_OUTPUTS.add(output, arc.output()); } if (arc.isFinal()) { - lastFrame = stack[1+lastFrame.ord]; + lastFrame = stack[1 + lastFrame.ord]; } targetUpto++; } @@ -322,9 +350,12 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { // equal or after the current term final int targetLimit2 = Math.min(target.length, term.length()); while (targetUpto < targetLimit2) { - cmp = (term.byteAt(targetUpto)&0xFF) - (target.bytes[target.offset + targetUpto]&0xFF); + cmp = + (term.byteAt(targetUpto) & 0xFF) - (target.bytes[target.offset + targetUpto] & 0xFF); // if (DEBUG) { - // System.out.println(" cycle2 targetUpto=" + targetUpto + " (vs limit=" + targetLimit + ") cmp=" + cmp + " (targetLabel=" + (char) (target.bytes[target.offset + targetUpto]) + " vs termLabel=" + (char) (term.bytes[targetUpto]) + ")"); + // System.out.println(" cycle2 targetUpto=" + targetUpto + " (vs limit=" + + // targetLimit + ") cmp=" + cmp + " (targetLabel=" + (char) (target.bytes[target.offset + + // targetUpto]) + " vs termLabel=" + (char) (term.bytes[targetUpto]) + ")"); // } if (cmp != 0) { break; @@ -343,7 +374,8 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { // term, ie, app is seeking multiple terms // in sorted order // if (DEBUG) { - // System.out.println(" target is after current (shares prefixLen=" + targetUpto + "); frame.ord=" + lastFrame.ord + "; targetUpto=" + targetUpto); + // System.out.println(" target is after current (shares prefixLen=" + targetUpto + "); + // frame.ord=" + lastFrame.ord + "; targetUpto=" + targetUpto); // } currentFrame = lastFrame; @@ -355,7 +387,8 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { targetBeforeCurrentLength = 0; changed = true; // if (DEBUG) { - // System.out.println(" target is before current (shares prefixLen=" + targetUpto + "); rewind frame ord=" + lastFrame.ord); + // System.out.println(" target is before current (shares prefixLen=" + targetUpto + "); + // rewind frame ord=" + lastFrame.ord); // } currentFrame = lastFrame; currentFrame.rewind(); @@ -367,7 +400,8 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { if (currentFrame.maxIDVersion < minIDVersion) { // The max version for all terms in this block is lower than the minVersion // if (DEBUG) { - // System.out.println(" target is same as current maxIDVersion=" + currentFrame.maxIDVersion + " is < minIDVersion=" + minIDVersion + "; return false"); + // System.out.println(" target is same as current maxIDVersion=" + + // currentFrame.maxIDVersion + " is < minIDVersion=" + minIDVersion + "; return false"); // } return false; } @@ -376,11 +410,15 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { if (((IDVersionTermState) currentFrame.state).idVersion < minIDVersion) { // This term's version is lower than the minVersion // if (DEBUG) { - // System.out.println(" target is same as current but version=" + ((IDVersionTermState) currentFrame.state).idVersion + " is < minIDVersion=" + minIDVersion + "; return false"); + // System.out.println(" target is same as current but version=" + + // ((IDVersionTermState) currentFrame.state).idVersion + " is < minIDVersion=" + + // minIDVersion + "; return false"); // } return false; } - // System.out.println(" 
term version=" + ((IDVersionTermState) currentFrame.state).idVersion + " frame version=" + currentFrame.maxIDVersion + " frame ord=" + currentFrame.ord); + // System.out.println(" term version=" + ((IDVersionTermState) + // currentFrame.state).idVersion + " frame version=" + currentFrame.maxIDVersion + " frame + // ord=" + currentFrame.ord); // if (DEBUG) { // System.out.println(" target is same as current; return true"); @@ -391,16 +429,16 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { // System.out.println(" target is same as current but term doesn't exist"); // } } - //validIndexPrefix = currentFrame.depth; - //term.length = target.length; - //return termExists; + // validIndexPrefix = currentFrame.depth; + // term.length = target.length; + // return termExists; } } else { targetBeforeCurrentLength = -1; arc = fr.index.getFirstArc(arcs[0]); - //System.out.println("first arc=" + arc); + // System.out.println("first arc=" + arc); // Empty string prefix must have an output (block) in the index! assert arc.isFinal(); @@ -414,66 +452,78 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { currentFrame = staticFrame; - //term.length = 0; + // term.length = 0; targetUpto = 0; - currentFrame = pushFrame(arc, VersionBlockTreeTermsWriter.FST_OUTPUTS.add(output, arc.nextFinalOutput()), 0); + currentFrame = + pushFrame( + arc, VersionBlockTreeTermsWriter.FST_OUTPUTS.add(output, arc.nextFinalOutput()), 0); } // if (DEBUG) { - // System.out.println(" start index loop targetUpto=" + targetUpto + " output=" + output + " currentFrame.ord=" + currentFrame.ord + " targetBeforeCurrentLength=" + targetBeforeCurrentLength + " termExists=" + termExists); + // System.out.println(" start index loop targetUpto=" + targetUpto + " output=" + output + " + // currentFrame.ord=" + currentFrame.ord + " targetBeforeCurrentLength=" + + // targetBeforeCurrentLength + " termExists=" + termExists); // } - // We are done sharing the common prefix with the incoming target and where we are currently seek'd; now continue walking the index: + // We are done sharing the common prefix with the incoming target and where we are currently + // seek'd; now continue walking the index: while (targetUpto < target.length) { final int targetLabel = target.bytes[target.offset + targetUpto] & 0xFF; - final FST.Arc> nextArc = fr.index.findTargetArc(targetLabel, arc, getArc(1+targetUpto), fstReader); + final FST.Arc> nextArc = + fr.index.findTargetArc(targetLabel, arc, getArc(1 + targetUpto), fstReader); if (nextArc == null) { // Index is exhausted // if (DEBUG) { - // System.out.println(" index: index exhausted label=" + ((char) targetLabel) + " " + Integer.toHexString(targetLabel) + " termExists=" + termExists); + // System.out.println(" index: index exhausted label=" + ((char) targetLabel) + " " + + // Integer.toHexString(targetLabel) + " termExists=" + termExists); // } - + validIndexPrefix = currentFrame.prefix; - //validIndexPrefix = targetUpto; + // validIndexPrefix = targetUpto; currentFrame.scanToFloorFrame(target); if (!currentFrame.hasTerms) { termExists = false; term.setByteAt(targetUpto, (byte) targetLabel); - term.setLength(1+targetUpto); + term.setLength(1 + targetUpto); // if (DEBUG) { // System.out.println(" FAST NOT_FOUND term=" + brToString(term)); // } return false; } - //System.out.println(" check maxVersion=" + currentFrame.maxIDVersion + " vs " + minIDVersion); + // System.out.println(" check maxVersion=" + currentFrame.maxIDVersion + " vs " + + // minIDVersion); // if (DEBUG) 
{ - // System.out.println(" frame.maxIDVersion=" + currentFrame.maxIDVersion + " vs minIDVersion=" + minIDVersion); + // System.out.println(" frame.maxIDVersion=" + currentFrame.maxIDVersion + " vs + // minIDVersion=" + minIDVersion); // } if (currentFrame.maxIDVersion < minIDVersion) { // The max version for all terms in this block is lower than the minVersion if (currentFrame.fp != startFrameFP || changed) { - //if (targetUpto+1 > term.length) { + // if (targetUpto+1 > term.length) { termExists = false; term.setByteAt(targetUpto, (byte) targetLabel); - term.setLength(1+targetUpto); + term.setLength(1 + targetUpto); // if (DEBUG) { // System.out.println(" reset current term"); // } validIndexPrefix = Math.min(validIndexPrefix, term.length()); } - //if (currentFrame.ord != startFrameOrd) { - //termExists = false; - //} + // if (currentFrame.ord != startFrameOrd) { + // termExists = false; + // } // if (DEBUG) { - // System.out.println(" FAST version NOT_FOUND term=" + brToString(term) + " targetUpto=" + targetUpto + " currentFrame.maxIDVersion=" + currentFrame.maxIDVersion + " validIndexPrefix=" + validIndexPrefix + " startFrameFP=" + startFrameFP + " vs " + currentFrame.fp + " termExists=" + termExists); + // System.out.println(" FAST version NOT_FOUND term=" + brToString(term) + " + // targetUpto=" + targetUpto + " currentFrame.maxIDVersion=" + currentFrame.maxIDVersion + + // " validIndexPrefix=" + validIndexPrefix + " startFrameFP=" + startFrameFP + " vs " + + // currentFrame.fp + " termExists=" + termExists); // } return false; } @@ -483,13 +533,14 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { // if (DEBUG) { // System.out.println(" scan currentFrame ord=" + currentFrame.ord); // } - final SeekStatus result = currentFrame.scanToTerm(target, true); + final SeekStatus result = currentFrame.scanToTerm(target, true); if (result == SeekStatus.FOUND) { currentFrame.decodeMetaData(); if (((IDVersionTermState) currentFrame.state).idVersion < minIDVersion) { // This term's version is lower than the minVersion // if (DEBUG) { - // System.out.println(" return NOT_FOUND: idVersion=" + ((IDVersionTermState) currentFrame.state).idVersion + " vs minIDVersion=" + minIDVersion); + // System.out.println(" return NOT_FOUND: idVersion=" + ((IDVersionTermState) + // currentFrame.state).idVersion + " vs minIDVersion=" + minIDVersion); // } return false; } @@ -501,7 +552,8 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { return true; } else { // if (DEBUG) { - // System.out.println(" got " + result + "; return NOT_FOUND term=" + brToString(term)); + // System.out.println(" got " + result + "; return NOT_FOUND term=" + + // brToString(term)); // } return false; } @@ -510,7 +562,8 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { arc = nextArc; if (term.byteAt(targetUpto) != (byte) targetLabel) { // if (DEBUG) { - // System.out.println(" now set termExists=false targetUpto=" + targetUpto + " term=" + term.bytes[targetUpto] + " targetLabel=" + targetLabel); + // System.out.println(" now set termExists=false targetUpto=" + targetUpto + " term=" + + // term.bytes[targetUpto] + " targetLabel=" + targetLabel); // } changed = true; term.setByteAt(targetUpto, (byte) targetLabel); @@ -523,19 +576,25 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { } // if (DEBUG) { - // System.out.println(" index: follow label=" + (char) ((target.bytes[target.offset + targetUpto]&0xff)) + " arc.output=" + arc.output + " arc.nfo=" + 
arc.nextFinalOutput); + // System.out.println(" index: follow label=" + (char) ((target.bytes[target.offset + + // targetUpto]&0xff)) + " arc.output=" + arc.output + " arc.nfo=" + arc.nextFinalOutput); // } targetUpto++; if (arc.isFinal()) { // if (DEBUG) System.out.println(" arc is final!"); - currentFrame = pushFrame(arc, VersionBlockTreeTermsWriter.FST_OUTPUTS.add(output, arc.nextFinalOutput()), targetUpto); - // if (DEBUG) System.out.println(" curFrame.ord=" + currentFrame.ord + " hasTerms=" + currentFrame.hasTerms); + currentFrame = + pushFrame( + arc, + VersionBlockTreeTermsWriter.FST_OUTPUTS.add(output, arc.nextFinalOutput()), + targetUpto); + // if (DEBUG) System.out.println(" curFrame.ord=" + currentFrame.ord + " hasTerms=" + + // currentFrame.hasTerms); } } } - //validIndexPrefix = targetUpto; + // validIndexPrefix = targetUpto; validIndexPrefix = currentFrame.prefix; currentFrame.scanToFloorFrame(target); @@ -551,7 +610,8 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { } // if (DEBUG) { - // System.out.println(" frame.maxIDVersion=" + currentFrame.maxIDVersion + " vs minIDVersion=" + minIDVersion); + // System.out.println(" frame.maxIDVersion=" + currentFrame.maxIDVersion + " vs + // minIDVersion=" + minIDVersion); // } if (currentFrame.maxIDVersion < minIDVersion) { @@ -563,7 +623,7 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { currentFrame.loadBlock(); - final SeekStatus result = currentFrame.scanToTerm(target, true); + final SeekStatus result = currentFrame.scanToTerm(target, true); if (result == SeekStatus.FOUND) { // if (DEBUG) { // System.out.println(" return FOUND term=" + term.utf8ToString() + " " + term); @@ -576,7 +636,8 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { return true; } else { // if (DEBUG) { - // System.out.println(" got result " + result + "; return NOT_FOUND term=" + term.utf8ToString()); + // System.out.println(" got result " + result + "; return NOT_FOUND term=" + + // term.utf8ToString()); // } return false; @@ -593,14 +654,16 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { assert clearEOF(); - //if (DEBUG) { - //System.out.println("\nBTTR.seekCeil seg=" + segment + " target=" + fieldInfo.name + ":" + target.utf8ToString() + " " + target + " current=" + brToString(term) + " (exists?=" + termExists + ") validIndexPrefix= " + validIndexPrefix); - //printSeekState(); - //} + // if (DEBUG) { + // System.out.println("\nBTTR.seekCeil seg=" + segment + " target=" + fieldInfo.name + ":" + + // target.utf8ToString() + " " + target + " current=" + brToString(term) + " (exists?=" + + // termExists + ") validIndexPrefix= " + validIndexPrefix); + // printSeekState(); + // } - FST.Arc> arc; + FST.Arc> arc; int targetUpto; - Pair output; + Pair output; targetBeforeCurrentLength = currentFrame.ord; @@ -613,15 +676,15 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { // seeks to foobaz, we can re-use the seek state // for the first 5 bytes. 
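Aside: the comment above describes re-using the prior seek state for the longest byte prefix shared between the previously sought term and the new target. A minimal standalone sketch of that computation, under hypothetical helper names (this is an illustration, not code from this class):

    import org.apache.lucene.util.BytesRef;

    // How many leading bytes of cached seek state stay valid: simply the
    // length of the common byte prefix of the previous term and the new target.
    static int reusablePrefixLen(BytesRef prev, BytesRef target) {
      int limit = Math.min(prev.length, target.length);
      int upto = 0;
      while (upto < limit
          && prev.bytes[prev.offset + upto] == target.bytes[target.offset + upto]) {
        upto++;
      }
      return upto; // arcs and frames for the first `upto` bytes can be re-used
    }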
- //if (DEBUG) { - //System.out.println(" re-use current seek state validIndexPrefix=" + validIndexPrefix); - //} + // if (DEBUG) { + // System.out.println(" re-use current seek state validIndexPrefix=" + validIndexPrefix); + // } arc = arcs[0]; assert arc.isFinal(); output = arc.output(); targetUpto = 0; - + IDVersionSegmentTermsEnumFrame lastFrame = stack[0]; assert validIndexPrefix <= term.length(); @@ -634,15 +697,22 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { // First compare up to valid seek frames: while (targetUpto < targetLimit) { - cmp = (term.byteAt(targetUpto)&0xFF) - (target.bytes[target.offset + targetUpto]&0xFF); - //if (DEBUG) { - //System.out.println(" cycle targetUpto=" + targetUpto + " (vs limit=" + targetLimit + ") cmp=" + cmp + " (targetLabel=" + (char) (target.bytes[target.offset + targetUpto]) + " vs termLabel=" + (char) (term.bytes[targetUpto]) + ")" + " arc.output=" + arc.output + " output=" + output); - //} + cmp = (term.byteAt(targetUpto) & 0xFF) - (target.bytes[target.offset + targetUpto] & 0xFF); + // if (DEBUG) { + // System.out.println(" cycle targetUpto=" + targetUpto + " (vs limit=" + targetLimit + + // ") cmp=" + cmp + " (targetLabel=" + (char) (target.bytes[target.offset + targetUpto]) + " + // vs termLabel=" + (char) (term.bytes[targetUpto]) + ")" + " arc.output=" + arc.output + + // " output=" + output); + // } if (cmp != 0) { break; } - arc = arcs[1+targetUpto]; - assert arc.label() == (target.bytes[target.offset + targetUpto] & 0xFF): "arc.label=" + (char) arc.label() + " targetLabel=" + (char) (target.bytes[target.offset + targetUpto] & 0xFF); + arc = arcs[1 + targetUpto]; + assert arc.label() == (target.bytes[target.offset + targetUpto] & 0xFF) + : "arc.label=" + + (char) arc.label() + + " targetLabel=" + + (char) (target.bytes[target.offset + targetUpto] & 0xFF); // TODO: we could save the outputs in local // byte[][] instead of making new objs ever // seek; but, often the FST doesn't have any @@ -652,22 +722,24 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { output = VersionBlockTreeTermsWriter.FST_OUTPUTS.add(output, arc.output()); } if (arc.isFinal()) { - lastFrame = stack[1+lastFrame.ord]; + lastFrame = stack[1 + lastFrame.ord]; } targetUpto++; } - if (cmp == 0) { final int targetUptoMid = targetUpto; // Second compare the rest of the term, but // don't save arc/output/frame: final int targetLimit2 = Math.min(target.length, term.length()); while (targetUpto < targetLimit2) { - cmp = (term.byteAt(targetUpto)&0xFF) - (target.bytes[target.offset + targetUpto]&0xFF); - //if (DEBUG) { - //System.out.println(" cycle2 targetUpto=" + targetUpto + " (vs limit=" + targetLimit + ") cmp=" + cmp + " (targetLabel=" + (char) (target.bytes[target.offset + targetUpto]) + " vs termLabel=" + (char) (term.bytes[targetUpto]) + ")"); - //} + cmp = + (term.byteAt(targetUpto) & 0xFF) - (target.bytes[target.offset + targetUpto] & 0xFF); + // if (DEBUG) { + // System.out.println(" cycle2 targetUpto=" + targetUpto + " (vs limit=" + targetLimit + // + ") cmp=" + cmp + " (targetLabel=" + (char) (target.bytes[target.offset + targetUpto]) + // + " vs termLabel=" + (char) (term.bytes[targetUpto]) + ")"); + // } if (cmp != 0) { break; } @@ -684,9 +756,10 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { // Common case: target term is after current // term, ie, app is seeking multiple terms // in sorted order - //if (DEBUG) { - //System.out.println(" target is after current (shares prefixLen=" + 
targetUpto + "); clear frame.scanned ord=" + lastFrame.ord); - //} + // if (DEBUG) { + // System.out.println(" target is after current (shares prefixLen=" + targetUpto + "); + // clear frame.scanned ord=" + lastFrame.ord); + // } currentFrame = lastFrame; } else if (cmp > 0) { @@ -695,23 +768,24 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { // keep the currentFrame but we must rewind it // (so we scan from the start) targetBeforeCurrentLength = 0; - //if (DEBUG) { - //System.out.println(" target is before current (shares prefixLen=" + targetUpto + "); rewind frame ord=" + lastFrame.ord); - //} + // if (DEBUG) { + // System.out.println(" target is before current (shares prefixLen=" + targetUpto + "); + // rewind frame ord=" + lastFrame.ord); + // } currentFrame = lastFrame; currentFrame.rewind(); } else { // Target is exactly the same as current term assert term.length() == target.length; if (termExists) { - //if (DEBUG) { - //System.out.println(" target is same as current; return FOUND"); - //} + // if (DEBUG) { + // System.out.println(" target is same as current; return FOUND"); + // } return SeekStatus.FOUND; } else { - //if (DEBUG) { - //System.out.println(" target is same as current but term doesn't exist"); - //} + // if (DEBUG) { + // System.out.println(" target is same as current but term doesn't exist"); + // } } } @@ -724,39 +798,46 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { assert arc.isFinal(); assert arc.output() != null; - //if (DEBUG) { - //System.out.println(" no seek state; push root frame"); - //} + // if (DEBUG) { + // System.out.println(" no seek state; push root frame"); + // } output = arc.output(); currentFrame = staticFrame; - //term.length = 0; + // term.length = 0; targetUpto = 0; - currentFrame = pushFrame(arc, VersionBlockTreeTermsWriter.FST_OUTPUTS.add(output, arc.nextFinalOutput()), 0); + currentFrame = + pushFrame( + arc, VersionBlockTreeTermsWriter.FST_OUTPUTS.add(output, arc.nextFinalOutput()), 0); } - //if (DEBUG) { - //System.out.println(" start index loop targetUpto=" + targetUpto + " output=" + output + " currentFrame.ord+1=" + currentFrame.ord + " targetBeforeCurrentLength=" + targetBeforeCurrentLength); - //} + // if (DEBUG) { + // System.out.println(" start index loop targetUpto=" + targetUpto + " output=" + output + " + // currentFrame.ord+1=" + currentFrame.ord + " targetBeforeCurrentLength=" + + // targetBeforeCurrentLength); + // } - // We are done sharing the common prefix with the incoming target and where we are currently seek'd; now continue walking the index: + // We are done sharing the common prefix with the incoming target and where we are currently + // seek'd; now continue walking the index: while (targetUpto < target.length) { final int targetLabel = target.bytes[target.offset + targetUpto] & 0xFF; - final FST.Arc> nextArc = fr.index.findTargetArc(targetLabel, arc, getArc(1+targetUpto), fstReader); + final FST.Arc> nextArc = + fr.index.findTargetArc(targetLabel, arc, getArc(1 + targetUpto), fstReader); if (nextArc == null) { // Index is exhausted // if (DEBUG) { - // System.out.println(" index: index exhausted label=" + ((char) targetLabel) + " " + toHex(targetLabel)); + // System.out.println(" index: index exhausted label=" + ((char) targetLabel) + " " + + // toHex(targetLabel)); // } - + validIndexPrefix = currentFrame.prefix; - //validIndexPrefix = targetUpto; + // validIndexPrefix = targetUpto; currentFrame.scanToFloorFrame(target); @@ -768,20 +849,20 @@ public final class 
IDVersionSegmentTermsEnum extends BaseTermsEnum { termExists = false; if (next() != null) { - //if (DEBUG) { - //System.out.println(" return NOT_FOUND term=" + brToString(term) + " " + term); - //} + // if (DEBUG) { + // System.out.println(" return NOT_FOUND term=" + brToString(term) + " " + term); + // } return SeekStatus.NOT_FOUND; } else { - //if (DEBUG) { - //System.out.println(" return END"); - //} + // if (DEBUG) { + // System.out.println(" return END"); + // } return SeekStatus.END; } } else { - //if (DEBUG) { - //System.out.println(" return " + result + " term=" + brToString(term) + " " + term); - //} + // if (DEBUG) { + // System.out.println(" return " + result + " term=" + brToString(term) + " " + term); + // } return result; } } else { @@ -794,20 +875,26 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { output = VersionBlockTreeTermsWriter.FST_OUTPUTS.add(output, arc.output()); } - //if (DEBUG) { - //System.out.println(" index: follow label=" + toHex(target.bytes[target.offset + targetUpto]&0xff) + " arc.output=" + arc.output + " arc.nfo=" + arc.nextFinalOutput); - //} + // if (DEBUG) { + // System.out.println(" index: follow label=" + toHex(target.bytes[target.offset + + // targetUpto]&0xff) + " arc.output=" + arc.output + " arc.nfo=" + arc.nextFinalOutput); + // } targetUpto++; if (arc.isFinal()) { - //if (DEBUG) System.out.println(" arc is final!"); - currentFrame = pushFrame(arc, VersionBlockTreeTermsWriter.FST_OUTPUTS.add(output, arc.nextFinalOutput()), targetUpto); - //if (DEBUG) System.out.println(" curFrame.ord=" + currentFrame.ord + " hasTerms=" + currentFrame.hasTerms); + // if (DEBUG) System.out.println(" arc is final!"); + currentFrame = + pushFrame( + arc, + VersionBlockTreeTermsWriter.FST_OUTPUTS.add(output, arc.nextFinalOutput()), + targetUpto); + // if (DEBUG) System.out.println(" curFrame.ord=" + currentFrame.ord + " hasTerms=" + + // currentFrame.hasTerms); } } } - //validIndexPrefix = targetUpto; + // validIndexPrefix = targetUpto; validIndexPrefix = currentFrame.prefix; currentFrame.scanToFloorFrame(target); @@ -820,14 +907,14 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { term.copyBytes(target); termExists = false; if (next() != null) { - //if (DEBUG) { - //System.out.println(" return NOT_FOUND term=" + term.utf8ToString() + " " + term); - //} + // if (DEBUG) { + // System.out.println(" return NOT_FOUND term=" + term.utf8ToString() + " " + term); + // } return SeekStatus.NOT_FOUND; } else { - //if (DEBUG) { - //System.out.println(" return END"); - //} + // if (DEBUG) { + // System.out.println(" return END"); + // } return SeekStatus.END; } } else { @@ -843,31 +930,100 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { out.println(" prior seek state:"); int ord = 0; boolean isSeekFrame = true; - while(true) { + while (true) { IDVersionSegmentTermsEnumFrame f = getFrame(ord); assert f != null; final BytesRef prefix = new BytesRef(term.bytes(), 0, f.prefix); if (f.nextEnt == -1) { - out.println(" frame " + (isSeekFrame ? "(seek)" : "(next)") + " ord=" + ord + " fp=" + f.fp + (f.isFloor ? (" (fpOrig=" + f.fpOrig + ")") : "") + " prefixLen=" + f.prefix + " prefix=" + brToString(prefix) + (f.nextEnt == -1 ? 
"" : (" (of " + f.entCount + ")")) + " hasTerms=" + f.hasTerms + " isFloor=" + f.isFloor + " code=" + ((f.fp< output = Util.get(fr.index, prefix); + Pair output = Util.get(fr.index, prefix); if (output == null) { out.println(" broken seek state: prefix is not final in index"); throw new RuntimeException("seek state is broken"); } else if (isSeekFrame && !f.isFloor) { - final ByteArrayDataInput reader = new ByteArrayDataInput(output.output1.bytes, output.output1.offset, output.output1.length); + final ByteArrayDataInput reader = + new ByteArrayDataInput( + output.output1.bytes, output.output1.offset, output.output1.length); final long codeOrig = reader.readVLong(); - final long code = (f.fp << VersionBlockTreeTermsWriter.OUTPUT_FLAGS_NUM_BITS) | (f.hasTerms ? VersionBlockTreeTermsWriter.OUTPUT_FLAG_HAS_TERMS:0) | (f.isFloor ? VersionBlockTreeTermsWriter.OUTPUT_FLAG_IS_FLOOR:0); + final long code = + (f.fp << VersionBlockTreeTermsWriter.OUTPUT_FLAGS_NUM_BITS) + | (f.hasTerms ? VersionBlockTreeTermsWriter.OUTPUT_FLAG_HAS_TERMS : 0) + | (f.isFloor ? VersionBlockTreeTermsWriter.OUTPUT_FLAG_IS_FLOOR : 0); if (codeOrig != code) { - out.println(" broken seek state: output code=" + codeOrig + " doesn't match frame code=" + code); + out.println( + " broken seek state: output code=" + + codeOrig + + " doesn't match frame code=" + + code); throw new RuntimeException("seek state is broken"); } } @@ -884,14 +1040,14 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { } /* Decodes only the term bytes of the next term. If caller then asks for - metadata, ie docFreq, totalTermFreq or pulls a D/&PEnum, we then (lazily) - decode all metadata up to the current term. */ + metadata, ie docFreq, totalTermFreq or pulls a D/&PEnum, we then (lazily) + decode all metadata up to the current term. */ @Override public BytesRef next() throws IOException { if (in == null) { // Fresh TermsEnum; seek to first term: - final FST.Arc> arc; + final FST.Arc> arc; if (fr.index != null) { arc = fr.index.getFirstArc(arcs[0]); // Empty string prefix must have an output in the index! @@ -906,10 +1062,12 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { targetBeforeCurrentLength = currentFrame.ord; assert !eof; - //if (DEBUG) { - //System.out.println("\nBTTR.next seg=" + segment + " term=" + brToString(term) + " termExists?=" + termExists + " field=" + fieldInfo.name + " termBlockOrd=" + currentFrame.state.termBlockOrd + " validIndexPrefix=" + validIndexPrefix); - //printSeekState(); - //} + // if (DEBUG) { + // System.out.println("\nBTTR.next seg=" + segment + " term=" + brToString(term) + " + // termExists?=" + termExists + " field=" + fieldInfo.name + " termBlockOrd=" + + // currentFrame.state.termBlockOrd + " validIndexPrefix=" + validIndexPrefix); + // printSeekState(); + // } if (currentFrame == staticFrame) { // If seek was previously called and the term was @@ -918,7 +1076,8 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { // docFreq, etc. 
But, if they then call next(), // this method catches up all internal state so next() // works properly: - //if (DEBUG) System.out.println(" re-seek to pending term=" + term.utf8ToString() + " " + term); + // if (DEBUG) System.out.println(" re-seek to pending term=" + term.utf8ToString() + " " + + // term); final boolean result = seekExact(term.get()); assert result; } @@ -928,9 +1087,9 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { if (!currentFrame.isLastInFloor) { currentFrame.loadNextFloorBlock(); } else { - //if (DEBUG) System.out.println(" pop frame"); + // if (DEBUG) System.out.println(" pop frame"); if (currentFrame.ord == 0) { - //if (DEBUG) System.out.println(" return null"); + // if (DEBUG) System.out.println(" return null"); assert setEOF(); term.clear(); validIndexPrefix = 0; @@ -939,7 +1098,7 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { return null; } final long lastFP = currentFrame.fpOrig; - currentFrame = stack[currentFrame.ord-1]; + currentFrame = stack[currentFrame.ord - 1]; if (currentFrame.nextEnt == -1 || currentFrame.lastSubFP != lastFP) { // We popped into a frame that's not loaded @@ -952,25 +1111,26 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { // Note that the seek state (last seek) has been // invalidated beyond this depth validIndexPrefix = Math.min(validIndexPrefix, currentFrame.prefix); - //if (DEBUG) { - //System.out.println(" reset validIndexPrefix=" + validIndexPrefix); - //} + // if (DEBUG) { + // System.out.println(" reset validIndexPrefix=" + validIndexPrefix); + // } } } - while(true) { + while (true) { if (currentFrame.next()) { // Push to new block: - //if (DEBUG) System.out.println(" push frame"); + // if (DEBUG) System.out.println(" push frame"); currentFrame = pushFrame(null, currentFrame.lastSubFP, term.length()); // This is a "next" frame -- even if it's // floor'd we must pretend it isn't so we don't // try to scan to the right floor frame: currentFrame.isFloor = false; - //currentFrame.hasTerms = true; + // currentFrame.hasTerms = true; currentFrame.loadBlock(); } else { - //if (DEBUG) System.out.println(" return term=" + term.utf8ToString() + " " + term + " currentFrame.ord=" + currentFrame.ord); + // if (DEBUG) System.out.println(" return term=" + term.utf8ToString() + " " + term + " + // currentFrame.ord=" + currentFrame.ord); return term.get(); } } @@ -997,13 +1157,13 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { @Override public PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException { assert !eof; - //if (DEBUG) { - //System.out.println("BTTR.docs seg=" + segment); - //} + // if (DEBUG) { + // System.out.println("BTTR.docs seg=" + segment); + // } currentFrame.decodeMetaData(); - //if (DEBUG) { - //System.out.println(" state=" + currentFrame.state); - //} + // if (DEBUG) { + // System.out.println(" state=" + currentFrame.state); + // } return fr.parent.postingsReader.postings(fr.fieldInfo, currentFrame.state, reuse, flags); } @@ -1017,7 +1177,8 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { @Override public void seekExact(BytesRef target, TermState otherState) { // if (DEBUG) { - // System.out.println("BTTR.seekExact termState seg=" + segment + " target=" + target.utf8ToString() + " " + target + " state=" + otherState); + // System.out.println("BTTR.seekExact termState seg=" + segment + " target=" + + // target.utf8ToString() + " " + target + " state=" + otherState); // } assert clearEOF(); if 
(target.compareTo(term.get()) != 0 || !termExists) { @@ -1034,13 +1195,13 @@ public final class IDVersionSegmentTermsEnum extends BaseTermsEnum { // } } } - + @Override public TermState termState() throws IOException { assert !eof; currentFrame.decodeMetaData(); TermState ts = currentFrame.state.clone(); - //if (DEBUG) System.out.println("BTTR.termState seg=" + segment + " state=" + ts); + // if (DEBUG) System.out.println("BTTR.termState seg=" + segment + " state=" + ts); return ts; } diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/codecs/idversion/IDVersionSegmentTermsEnumFrame.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/codecs/idversion/IDVersionSegmentTermsEnumFrame.java index 9e959954e31..9f75b23519b 100644 --- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/codecs/idversion/IDVersionSegmentTermsEnumFrame.java +++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/codecs/idversion/IDVersionSegmentTermsEnumFrame.java @@ -17,7 +17,6 @@ package org.apache.lucene.sandbox.codecs.idversion; import java.io.IOException; - import org.apache.lucene.codecs.BlockTermState; import org.apache.lucene.index.TermsEnum.SeekStatus; import org.apache.lucene.store.ByteArrayDataInput; @@ -39,7 +38,7 @@ final class IDVersionSegmentTermsEnumFrame { /** Highest version of any term in this block. */ long maxIDVersion; - FST.Arc> arc; + FST.Arc> arc; // File pointer where this block was loaded from long fp; @@ -101,13 +100,15 @@ final class IDVersionSegmentTermsEnumFrame { if (numBytes > floorData.length) { floorData = new byte[ArrayUtil.oversize(numBytes, 1)]; } - System.arraycopy(source.bytes, source.offset+in.getPosition(), floorData, 0, numBytes); + System.arraycopy(source.bytes, source.offset + in.getPosition(), floorData, 0, numBytes); floorDataReader.reset(floorData, 0, numBytes); numFollowFloorBlocks = floorDataReader.readVInt(); nextFloorLabel = floorDataReader.readByte() & 0xff; - //if (DEBUG) { - //System.out.println(" setFloorData fpOrig=" + fpOrig + " bytes=" + new BytesRef(source.bytes, source.offset + in.getPosition(), numBytes) + " numFollowFloorBlocks=" + numFollowFloorBlocks + " nextFloorLabel=" + toHex(nextFloorLabel)); - //} + // if (DEBUG) { + // System.out.println(" setFloorData fpOrig=" + fpOrig + " bytes=" + new + // BytesRef(source.bytes, source.offset + in.getPosition(), numBytes) + " numFollowFloorBlocks=" + // + numFollowFloorBlocks + " nextFloorLabel=" + toHex(nextFloorLabel)); + // } } public int getTermBlockOrd() { @@ -115,24 +116,24 @@ final class IDVersionSegmentTermsEnumFrame { } void loadNextFloorBlock() throws IOException { - //if (DEBUG) { - //System.out.println(" loadNextFloorBlock fp=" + fp + " fpEnd=" + fpEnd); - //} - assert arc == null || isFloor: "arc=" + arc + " isFloor=" + isFloor; + // if (DEBUG) { + // System.out.println(" loadNextFloorBlock fp=" + fp + " fpEnd=" + fpEnd); + // } + assert arc == null || isFloor : "arc=" + arc + " isFloor=" + isFloor; fp = fpEnd; nextEnt = -1; loadBlock(); } /* Does initial decode of next block of terms; this - doesn't actually decode the docFreq, totalTermFreq, - postings details (frq/prx offset, etc.) metadata; - it just loads them as byte[] blobs which are then - decoded on-demand if the metadata is ever requested - for any term in this block. This enables terms-only - intensive consumes (eg certain MTQs, respelling) to - not pay the price of decoding metadata they won't - use. */ + doesn't actually decode the docFreq, totalTermFreq, + postings details (frq/prx offset, etc.) 
metadata; + it just loads them as byte[] blobs which are then + decoded on-demand if the metadata is ever requested + for any term in this block. This enables terms-only + intensive consumes (eg certain MTQs, respelling) to + not pay the price of decoding metadata they won't + use. */ void loadBlock() throws IOException { // Clone the IndexInput lazily, so that consumers @@ -144,7 +145,7 @@ final class IDVersionSegmentTermsEnumFrame { // Already loaded return; } - //System.out.println("blc=" + blockLoadCount); + // System.out.println("blc=" + blockLoadCount); ste.in.seek(fp); int code = ste.in.readVInt(); @@ -169,12 +170,12 @@ final class IDVersionSegmentTermsEnumFrame { suffixesReader.reset(suffixBytes, 0, numBytes); /*if (DEBUG) { - if (arc == null) { - System.out.println(" loadBlock (next) fp=" + fp + " entCount=" + entCount + " prefixLen=" + prefix + " isLastInFloor=" + isLastInFloor + " leaf?=" + isLeafBlock); - } else { - System.out.println(" loadBlock (seek) fp=" + fp + " entCount=" + entCount + " prefixLen=" + prefix + " hasTerms?=" + hasTerms + " isFloor?=" + isFloor + " isLastInFloor=" + isLastInFloor + " leaf?=" + isLeafBlock); - } - }*/ + if (arc == null) { + System.out.println(" loadBlock (next) fp=" + fp + " entCount=" + entCount + " prefixLen=" + prefix + " isLastInFloor=" + isLastInFloor + " leaf?=" + isLeafBlock); + } else { + System.out.println(" loadBlock (seek) fp=" + fp + " entCount=" + entCount + " prefixLen=" + prefix + " hasTerms?=" + hasTerms + " isFloor?=" + isFloor + " isLastInFloor=" + isLastInFloor + " leaf?=" + isLeafBlock); + } + }*/ metaDataUpto = 0; @@ -256,8 +257,10 @@ final class IDVersionSegmentTermsEnumFrame { // Decodes next entry; returns true if it's a sub-block public boolean nextLeaf() { - //if (DEBUG) System.out.println(" frame.next ord=" + ord + " nextEnt=" + nextEnt + " entCount=" + entCount); - assert nextEnt != -1 && nextEnt < entCount: "nextEnt=" + nextEnt + " entCount=" + entCount + " fp=" + fp; + // if (DEBUG) System.out.println(" frame.next ord=" + ord + " nextEnt=" + nextEnt + " + // entCount=" + entCount); + assert nextEnt != -1 && nextEnt < entCount + : "nextEnt=" + nextEnt + " entCount=" + entCount + " fp=" + fp; nextEnt++; suffix = suffixesReader.readVInt(); startBytePos = suffixesReader.getPosition(); @@ -270,8 +273,10 @@ final class IDVersionSegmentTermsEnumFrame { } public boolean nextNonLeaf() { - // if (DEBUG) System.out.println(" frame.next ord=" + ord + " nextEnt=" + nextEnt + " entCount=" + entCount); - assert nextEnt != -1 && nextEnt < entCount: "nextEnt=" + nextEnt + " entCount=" + entCount + " fp=" + fp; + // if (DEBUG) System.out.println(" frame.next ord=" + ord + " nextEnt=" + nextEnt + " + // entCount=" + entCount); + assert nextEnt != -1 && nextEnt < entCount + : "nextEnt=" + nextEnt + " entCount=" + entCount + " fp=" + fp; nextEnt++; final int code = suffixesReader.readVInt(); suffix = code >>> 1; @@ -296,7 +301,7 @@ final class IDVersionSegmentTermsEnumFrame { return true; } } - + // TODO: make this array'd so we can do bin search? // likely not worth it? 
need to measure how many // floor blocks we "typically" get @@ -304,7 +309,8 @@ final class IDVersionSegmentTermsEnumFrame { if (!isFloor || target.length <= prefix) { // if (DEBUG) { - // System.out.println(" scanToFloorFrame skip: isFloor=" + isFloor + " target.length=" + target.length + " vs prefix=" + prefix); + // System.out.println(" scanToFloorFrame skip: isFloor=" + isFloor + " target.length=" + + // target.length + " vs prefix=" + prefix); // } return; } @@ -312,7 +318,9 @@ final class IDVersionSegmentTermsEnumFrame { final int targetLabel = target.bytes[target.offset + prefix] & 0xFF; // if (DEBUG) { - // System.out.println(" scanToFloorFrame fpOrig=" + fpOrig + " targetLabel=" + ((char) targetLabel) + " vs nextFloorLabel=" + ((char) nextFloorLabel) + " numFollowFloorBlocks=" + numFollowFloorBlocks); + // System.out.println(" scanToFloorFrame fpOrig=" + fpOrig + " targetLabel=" + ((char) + // targetLabel) + " vs nextFloorLabel=" + ((char) nextFloorLabel) + " numFollowFloorBlocks=" + + // numFollowFloorBlocks); // } if (targetLabel < nextFloorLabel) { @@ -330,16 +338,18 @@ final class IDVersionSegmentTermsEnumFrame { newFP = fpOrig + (code >>> 1); hasTerms = (code & 1) != 0; // if (DEBUG) { - // System.out.println(" label=" + ((char) nextFloorLabel) + " fp=" + newFP + " hasTerms?=" + hasTerms + " numFollowFloor=" + numFollowFloorBlocks); + // System.out.println(" label=" + ((char) nextFloorLabel) + " fp=" + newFP + " + // hasTerms?=" + hasTerms + " numFollowFloor=" + numFollowFloorBlocks); // } - + isLastInFloor = numFollowFloorBlocks == 1; numFollowFloorBlocks--; if (isLastInFloor) { nextFloorLabel = 256; // if (DEBUG) { - // System.out.println(" stop! last block nextFloorLabel=" + ((char) nextFloorLabel)); + // System.out.println(" stop! last block nextFloorLabel=" + ((char) + // nextFloorLabel)); // } break; } else { @@ -366,10 +376,11 @@ final class IDVersionSegmentTermsEnumFrame { // } } } - + public void decodeMetaData() throws IOException { - //if (DEBUG) System.out.println("\nBTTR.decodeMetadata seg=" + ste.fr.parent.segment + " mdUpto=" + metaDataUpto + " vs termBlockOrd=" + state.termBlockOrd); + // if (DEBUG) System.out.println("\nBTTR.decodeMetadata seg=" + ste.fr.parent.segment + " + // mdUpto=" + metaDataUpto + " vs termBlockOrd=" + state.termBlockOrd); assert nextEnt >= 0; @@ -392,7 +403,7 @@ final class IDVersionSegmentTermsEnumFrame { // stats state.docFreq = 1; state.totalTermFreq = 1; - //if (DEBUG) System.out.println(" dF=" + state.docFreq); + // if (DEBUG) System.out.println(" dF=" + state.docFreq); // metadata ste.fr.parent.postingsReader.decodeTerm(bytesReader, ste.fr.fieldInfo, state, absolute); @@ -404,7 +415,7 @@ final class IDVersionSegmentTermsEnumFrame { // Used only by assert private boolean prefixMatches(BytesRef target) { - for(int bytePos=0;bytePos fields = new TreeMap<>(); + private final TreeMap fields = new TreeMap<>(); /** Sole constructor. 
*/ - public VersionBlockTreeTermsReader(PostingsReaderBase postingsReader, SegmentReadState state) throws IOException { - + public VersionBlockTreeTermsReader(PostingsReaderBase postingsReader, SegmentReadState state) + throws IOException { + this.postingsReader = postingsReader; - String termsFile = IndexFileNames.segmentFileName(state.segmentInfo.name, - state.segmentSuffix, - VersionBlockTreeTermsWriter.TERMS_EXTENSION); + String termsFile = + IndexFileNames.segmentFileName( + state.segmentInfo.name, + state.segmentSuffix, + VersionBlockTreeTermsWriter.TERMS_EXTENSION); in = state.directory.openInput(termsFile, state.context); boolean success = false; IndexInput indexIn = null; try { - int termsVersion = CodecUtil.checkIndexHeader(in, VersionBlockTreeTermsWriter.TERMS_CODEC_NAME, - VersionBlockTreeTermsWriter.VERSION_START, - VersionBlockTreeTermsWriter.VERSION_CURRENT, - state.segmentInfo.getId(), state.segmentSuffix); - - String indexFile = IndexFileNames.segmentFileName(state.segmentInfo.name, - state.segmentSuffix, - VersionBlockTreeTermsWriter.TERMS_INDEX_EXTENSION); + int termsVersion = + CodecUtil.checkIndexHeader( + in, + VersionBlockTreeTermsWriter.TERMS_CODEC_NAME, + VersionBlockTreeTermsWriter.VERSION_START, + VersionBlockTreeTermsWriter.VERSION_CURRENT, + state.segmentInfo.getId(), + state.segmentSuffix); + + String indexFile = + IndexFileNames.segmentFileName( + state.segmentInfo.name, + state.segmentSuffix, + VersionBlockTreeTermsWriter.TERMS_INDEX_EXTENSION); indexIn = state.directory.openInput(indexFile, state.context); - int indexVersion = CodecUtil.checkIndexHeader(indexIn, VersionBlockTreeTermsWriter.TERMS_INDEX_CODEC_NAME, - VersionBlockTreeTermsWriter.VERSION_START, - VersionBlockTreeTermsWriter.VERSION_CURRENT, - state.segmentInfo.getId(), state.segmentSuffix); - + int indexVersion = + CodecUtil.checkIndexHeader( + indexIn, + VersionBlockTreeTermsWriter.TERMS_INDEX_CODEC_NAME, + VersionBlockTreeTermsWriter.VERSION_START, + VersionBlockTreeTermsWriter.VERSION_CURRENT, + state.segmentInfo.getId(), + state.segmentSuffix); + if (indexVersion != termsVersion) { - throw new CorruptIndexException("mixmatched version files: " + in + "=" + termsVersion + "," + indexIn + "=" + indexVersion, indexIn); + throw new CorruptIndexException( + "mixmatched version files: " + + in + + "=" + + termsVersion + + "," + + indexIn + + "=" + + indexVersion, + indexIn); } - + // verify CodecUtil.checksumEntireFile(indexIn); // Have PostingsReader init itself postingsReader.init(in, state); - + // NOTE: data file is too costly to verify checksum against all the bytes on open, // but for now we at least verify proper structure of the checksum footer: which looks // for FOOTER_MAGIC + algorithmID. 
This is cheap and can detect some forms of corruption @@ -111,7 +131,7 @@ public final class VersionBlockTreeTermsReader extends FieldsProducer { throw new CorruptIndexException("invalid numFields: " + numFields, in); } - for(int i=0;i= 0; @@ -120,9 +140,10 @@ public final class VersionBlockTreeTermsReader extends FieldsProducer { in.readBytes(code.bytes, 0, numBytes); code.length = numBytes; final long version = in.readVLong(); - final Pair rootCode = VersionBlockTreeTermsWriter.FST_OUTPUTS.newPair(code, version); + final Pair rootCode = + VersionBlockTreeTermsWriter.FST_OUTPUTS.newPair(code, version); final FieldInfo fieldInfo = state.fieldInfos.fieldInfo(field); - assert fieldInfo != null: "field=" + field; + assert fieldInfo != null : "field=" + field; final long sumTotalTermFreq = numTerms; final long sumDocFreq = numTerms; assert numTerms <= Integer.MAX_VALUE; @@ -130,19 +151,35 @@ public final class VersionBlockTreeTermsReader extends FieldsProducer { BytesRef minTerm = readBytesRef(in); BytesRef maxTerm = readBytesRef(in); - if (docCount < 0 || docCount > state.segmentInfo.maxDoc()) { // #docs with field must be <= #docs - throw new CorruptIndexException("invalid docCount: " + docCount + " maxDoc: " + state.segmentInfo.maxDoc(), in); + // #docs with field must be <= #docs + if (docCount < 0 || docCount > state.segmentInfo.maxDoc()) { + throw new CorruptIndexException( + "invalid docCount: " + docCount + " maxDoc: " + state.segmentInfo.maxDoc(), in); } - if (sumDocFreq < docCount) { // #postings must be >= #docs with field - throw new CorruptIndexException("invalid sumDocFreq: " + sumDocFreq + " docCount: " + docCount, in); + if (sumDocFreq < docCount) { // #postings must be >= #docs with field + throw new CorruptIndexException( + "invalid sumDocFreq: " + sumDocFreq + " docCount: " + docCount, in); } if (sumTotalTermFreq < sumDocFreq) { // #positions must be >= #postings - throw new CorruptIndexException("invalid sumTotalTermFreq: " + sumTotalTermFreq + " sumDocFreq: " + sumDocFreq, in); + throw new CorruptIndexException( + "invalid sumTotalTermFreq: " + sumTotalTermFreq + " sumDocFreq: " + sumDocFreq, in); } final long indexStartFP = indexIn.readVLong(); - VersionFieldReader previous = fields.put(fieldInfo.name, - new VersionFieldReader(this, fieldInfo, numTerms, rootCode, sumTotalTermFreq, sumDocFreq, docCount, - indexStartFP, indexIn, minTerm, maxTerm)); + VersionFieldReader previous = + fields.put( + fieldInfo.name, + new VersionFieldReader( + this, + fieldInfo, + numTerms, + rootCode, + sumTotalTermFreq, + sumDocFreq, + docCount, + indexStartFP, + indexIn, + minTerm, + maxTerm)); if (previous != null) { throw new CorruptIndexException("duplicate field: " + fieldInfo.name, in); } @@ -182,7 +219,7 @@ public final class VersionBlockTreeTermsReader extends FieldsProducer { public void close() throws IOException { try { IOUtils.close(in, postingsReader); - } finally { + } finally { // Clear so refs to terms index is GCable even if // app hangs onto us: fields.clear(); @@ -224,12 +261,12 @@ public final class VersionBlockTreeTermsReader extends FieldsProducer { @Override public long ramBytesUsed() { long sizeInBytes = postingsReader.ramBytesUsed(); - for(VersionFieldReader reader : fields.values()) { + for (VersionFieldReader reader : fields.values()) { sizeInBytes += reader.ramBytesUsed(); } return sizeInBytes; } - + @Override public Collection getChildResources() { List resources = new ArrayList<>(Accountables.namedAccountables("field", fields)); @@ -241,13 +278,18 @@ public final 
class VersionBlockTreeTermsReader extends FieldsProducer { public void checkIntegrity() throws IOException { // term dictionary CodecUtil.checksumEntireFile(in); - + // postings postingsReader.checkIntegrity(); } - + @Override public String toString() { - return getClass().getSimpleName() + "(fields=" + fields.size() + ",delegate=" + postingsReader.toString() + ")"; + return getClass().getSimpleName() + + "(fields=" + + fields.size() + + ",delegate=" + + postingsReader.toString() + + ")"; } } diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/codecs/idversion/VersionBlockTreeTermsWriter.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/codecs/idversion/VersionBlockTreeTermsWriter.java index 8e666fbd422..0f959a4e31d 100644 --- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/codecs/idversion/VersionBlockTreeTermsWriter.java +++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/codecs/idversion/VersionBlockTreeTermsWriter.java @@ -19,7 +19,6 @@ package org.apache.lucene.sandbox.codecs.idversion; import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.lucene.codecs.BlockTermState; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.codecs.FieldsConsumer; @@ -43,10 +42,10 @@ import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.IntsRefBuilder; import org.apache.lucene.util.StringHelper; -import org.apache.lucene.util.fst.FSTCompiler; import org.apache.lucene.util.fst.ByteSequenceOutputs; import org.apache.lucene.util.fst.BytesRefFSTEnum; import org.apache.lucene.util.fst.FST; +import org.apache.lucene.util.fst.FSTCompiler; import org.apache.lucene.util.fst.PairOutputs; import org.apache.lucene.util.fst.PairOutputs.Pair; import org.apache.lucene.util.fst.PositiveIntOutputs; @@ -54,7 +53,7 @@ import org.apache.lucene.util.fst.Util; /* TODO: - + - Currently there is a one-to-one mapping of indexed term to term block, but we could decouple the two, ie, put more terms into the index than there are blocks. @@ -83,32 +82,35 @@ import org.apache.lucene.util.fst.Util; */ /** - * This is just like {@link BlockTreeTermsWriter}, except it also stores a version per term, and adds a method to its TermsEnum - * implementation to seekExact only if the version is >= the specified version. The version is added to the terms index to avoid seeking if - * no term in the block has a high enough version. The term blocks file is .tiv and the terms index extension is .tipv. + * This is just like {@link BlockTreeTermsWriter}, except it also stores a version per term, and + * adds a method to its TermsEnum implementation to seekExact only if the version is >= the + * specified version. The version is added to the terms index to avoid seeking if no term in the + * block has a high enough version. The term blocks file is .tiv and the terms index extension is + * .tipv. 
* * @lucene.experimental */ - public final class VersionBlockTreeTermsWriter extends FieldsConsumer { - static final PairOutputs FST_OUTPUTS = new PairOutputs<>(ByteSequenceOutputs.getSingleton(), - PositiveIntOutputs.getSingleton()); + static final PairOutputs FST_OUTPUTS = + new PairOutputs<>(ByteSequenceOutputs.getSingleton(), PositiveIntOutputs.getSingleton()); - static final Pair NO_OUTPUT = FST_OUTPUTS.getNoOutput(); + static final Pair NO_OUTPUT = FST_OUTPUTS.getNoOutput(); - /** Suggested default value for the {@code - * minItemsInBlock} parameter to {@link - * #VersionBlockTreeTermsWriter(SegmentWriteState,PostingsWriterBase,int,int)}. */ - public final static int DEFAULT_MIN_BLOCK_SIZE = 25; + /** + * Suggested default value for the {@code minItemsInBlock} parameter to {@link + * #VersionBlockTreeTermsWriter(SegmentWriteState,PostingsWriterBase,int,int)}. + */ + public static final int DEFAULT_MIN_BLOCK_SIZE = 25; - /** Suggested default value for the {@code - * maxItemsInBlock} parameter to {@link - * #VersionBlockTreeTermsWriter(SegmentWriteState,PostingsWriterBase,int,int)}. */ - public final static int DEFAULT_MAX_BLOCK_SIZE = 48; + /** + * Suggested default value for the {@code maxItemsInBlock} parameter to {@link + * #VersionBlockTreeTermsWriter(SegmentWriteState,PostingsWriterBase,int,int)}. + */ + public static final int DEFAULT_MAX_BLOCK_SIZE = 48; // public final static boolean DEBUG = false; - //private final static boolean SAVE_DOT_FILES = false; + // private final static boolean SAVE_DOT_FILES = false; static final int OUTPUT_FLAGS_NUM_BITS = 2; static final int OUTPUT_FLAGS_MASK = 0x3; @@ -117,7 +119,8 @@ public final class VersionBlockTreeTermsWriter extends FieldsConsumer { /** Extension of terms file */ static final String TERMS_EXTENSION = "tiv"; - final static String TERMS_CODEC_NAME = "VersionBlockTreeTermsDict"; + + static final String TERMS_CODEC_NAME = "VersionBlockTreeTermsDict"; /** Initial terms format. */ public static final int VERSION_START = 1; @@ -127,7 +130,8 @@ public final class VersionBlockTreeTermsWriter extends FieldsConsumer { /** Extension of terms index file */ static final String TERMS_INDEX_EXTENSION = "tipv"; - final static String TERMS_INDEX_CODEC_NAME = "VersionBlockTreeTermsIndex"; + + static final String TERMS_INDEX_CODEC_NAME = "VersionBlockTreeTermsIndex"; private final IndexOutput out; private final IndexOutput indexOut; @@ -140,17 +144,22 @@ public final class VersionBlockTreeTermsWriter extends FieldsConsumer { private static class FieldMetaData { public final FieldInfo fieldInfo; - public final Pair rootCode; + public final Pair rootCode; public final long numTerms; public final long indexStartFP; public final BytesRef minTerm; public final BytesRef maxTerm; - public FieldMetaData(FieldInfo fieldInfo, Pair rootCode, long numTerms, long indexStartFP, - BytesRef minTerm, BytesRef maxTerm) { + public FieldMetaData( + FieldInfo fieldInfo, + Pair rootCode, + long numTerms, + long indexStartFP, + BytesRef minTerm, + BytesRef maxTerm) { assert numTerms > 0; this.fieldInfo = fieldInfo; - assert rootCode != null: "field=" + fieldInfo.name + " numTerms=" + numTerms; + assert rootCode != null : "field=" + fieldInfo.name + " numTerms=" + numTerms; this.rootCode = rootCode; this.indexStartFP = indexStartFP; this.numTerms = numTerms; @@ -161,21 +170,23 @@ public final class VersionBlockTreeTermsWriter extends FieldsConsumer { private final List fields = new ArrayList<>(); - /** Create a new writer. 
The number of items (terms or - * sub-blocks) per block will aim to be between - * minItemsPerBlock and maxItemsPerBlock, though in some - * cases the blocks may be smaller than the min. */ + /** + * Create a new writer. The number of items (terms or sub-blocks) per block will aim to be between + * minItemsPerBlock and maxItemsPerBlock, though in some cases the blocks may be smaller than the + * min. + */ public VersionBlockTreeTermsWriter( - SegmentWriteState state, - PostingsWriterBase postingsWriter, - int minItemsInBlock, - int maxItemsInBlock) - throws IOException - { + SegmentWriteState state, + PostingsWriterBase postingsWriter, + int minItemsInBlock, + int maxItemsInBlock) + throws IOException { BlockTreeTermsWriter.validateSettings(minItemsInBlock, maxItemsInBlock); maxDoc = state.segmentInfo.maxDoc(); - final String termsFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, TERMS_EXTENSION); + final String termsFileName = + IndexFileNames.segmentFileName( + state.segmentInfo.name, state.segmentSuffix, TERMS_EXTENSION); out = state.directory.createOutput(termsFileName, state.context); boolean success = false; IndexOutput indexOut = null; @@ -183,20 +194,28 @@ public final class VersionBlockTreeTermsWriter extends FieldsConsumer { fieldInfos = state.fieldInfos; this.minItemsInBlock = minItemsInBlock; this.maxItemsInBlock = maxItemsInBlock; - CodecUtil.writeIndexHeader(out, TERMS_CODEC_NAME, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix); + CodecUtil.writeIndexHeader( + out, TERMS_CODEC_NAME, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix); - //DEBUG = state.segmentName.equals("_4a"); + // DEBUG = state.segmentName.equals("_4a"); - final String termsIndexFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, TERMS_INDEX_EXTENSION); + final String termsIndexFileName = + IndexFileNames.segmentFileName( + state.segmentInfo.name, state.segmentSuffix, TERMS_INDEX_EXTENSION); indexOut = state.directory.createOutput(termsIndexFileName, state.context); - CodecUtil.writeIndexHeader(indexOut, TERMS_INDEX_CODEC_NAME, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix); + CodecUtil.writeIndexHeader( + indexOut, + TERMS_INDEX_CODEC_NAME, + VERSION_CURRENT, + state.segmentInfo.getId(), + state.segmentSuffix); this.postingsWriter = postingsWriter; // segment = state.segmentInfo.name; // System.out.println("BTW.init seg=" + state.segmentName); - postingsWriter.init(out, state); // have consumer write its format/header + postingsWriter.init(out, state); // have consumer write its format/header success = true; } finally { if (!success) { @@ -208,19 +227,19 @@ public final class VersionBlockTreeTermsWriter extends FieldsConsumer { /** Writes the terms file trailer. */ private void writeTrailer(IndexOutput out, long dirStart) throws IOException { - out.writeLong(dirStart); + out.writeLong(dirStart); } /** Writes the index file trailer. 
*/ private void writeIndexTrailer(IndexOutput indexOut, long dirStart) throws IOException { - indexOut.writeLong(dirStart); + indexOut.writeLong(dirStart); } @Override public void write(Fields fields, NormsProducer norms) throws IOException { String lastField = null; - for(String field : fields) { + for (String field : fields) { assert lastField == null || lastField.compareTo(field) < 0; lastField = field; @@ -243,10 +262,12 @@ public final class VersionBlockTreeTermsWriter extends FieldsConsumer { termsWriter.finish(); } } - + static long encodeOutput(long fp, boolean hasTerms, boolean isFloor) { assert fp < (1L << 62); - return (fp << 2) | (hasTerms ? OUTPUT_FLAG_HAS_TERMS : 0) | (isFloor ? OUTPUT_FLAG_IS_FLOOR : 0); + return (fp << 2) + | (hasTerms ? OUTPUT_FLAG_HAS_TERMS : 0) + | (isFloor ? OUTPUT_FLAG_IS_FLOOR : 0); } private static class PendingEntry { @@ -297,15 +318,22 @@ public final class VersionBlockTreeTermsWriter extends FieldsConsumer { private static final class PendingBlock extends PendingEntry { public final BytesRef prefix; public final long fp; - public FST> index; - public List>> subIndices; + public FST> index; + public List>> subIndices; public final boolean hasTerms; public final boolean isFloor; public final int floorLeadByte; /** Max version for all terms in this block. */ private final long maxVersion; - public PendingBlock(BytesRef prefix, long maxVersion, long fp, boolean hasTerms, boolean isFloor, int floorLeadByte, List>> subIndices) { + public PendingBlock( + BytesRef prefix, + long maxVersion, + long fp, + boolean hasTerms, + boolean isFloor, + int floorLeadByte, + List>> subIndices) { super(false); this.prefix = prefix; this.maxVersion = maxVersion; @@ -321,9 +349,14 @@ public final class VersionBlockTreeTermsWriter extends FieldsConsumer { return "BLOCK: " + brToString(prefix); } - public void compileIndex(List blocks, ByteBuffersDataOutput scratchBytes, IntsRefBuilder scratchIntsRef) throws IOException { + public void compileIndex( + List blocks, + ByteBuffersDataOutput scratchBytes, + IntsRefBuilder scratchIntsRef) + throws IOException { - assert (isFloor && blocks.size() > 1) || (isFloor == false && blocks.size() == 1): "isFloor=" + isFloor + " blocks=" + blocks; + assert (isFloor && blocks.size() > 1) || (isFloor == false && blocks.size() == 1) + : "isFloor=" + isFloor + " blocks=" + blocks; assert this == blocks.get(0); assert scratchBytes.size() == 0; @@ -335,34 +368,40 @@ public final class VersionBlockTreeTermsWriter extends FieldsConsumer { // outputs sharing in the FST scratchBytes.writeVLong(encodeOutput(fp, hasTerms, isFloor)); if (isFloor) { - scratchBytes.writeVInt(blocks.size()-1); - for (int i=1;i= minItemsInBlock && end - nextBlockStart > maxItemsInBlock) { + // The count is too large for one block, so we must break it into "floor" blocks, where + // we record the leading label of the suffix of the first term in each floor block, so + // at search time we can jump to the right floor block. We just use a naive greedy + // segmenter here: make a new floor block as soon as we have at least minItemsInBlock. 
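Aside: a hedged sketch of the naive greedy floor-block segmentation that the comment below describes; this is a simplified hypothetical helper, ignoring the sub-block and hasTerms bookkeeping the real writeBlocks performs:

    import java.util.ArrayList;
    import java.util.List;

    // Returns the start index of each floor block for `count` pending entries.
    // A block is closed once it holds at least `min` entries and more than
    // `max` entries would otherwise remain; the final block may be under-sized.
    static List<Integer> floorBlockStarts(int count, int min, int max) {
      List<Integer> starts = new ArrayList<>();
      int blockStart = 0;
      for (int i = 0; i < count; i++) {
        if (i - blockStart >= min && count - blockStart > max) {
          starts.add(blockStart); // close the current floor block
          blockStart = i;         // start a new one
        }
      }
      starts.add(blockStart); // the (possibly too-small) final block
      return starts;
    }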
+ // This is not always best: it often produces a too-small block as the final block: boolean isFloor = itemsInBlock < count; - newBlocks.add(writeBlock(prefixLength, isFloor, nextFloorLeadLabel, nextBlockStart, i, hasTerms, hasSubBlocks)); + newBlocks.add( + writeBlock( + prefixLength, + isFloor, + nextFloorLeadLabel, + nextBlockStart, + i, + hasTerms, + hasSubBlocks)); hasTerms = false; hasSubBlocks = false; @@ -510,7 +563,15 @@ public final class VersionBlockTreeTermsWriter extends FieldsConsumer { if (nextBlockStart < end) { int itemsInBlock = end - nextBlockStart; boolean isFloor = itemsInBlock < count; - newBlocks.add(writeBlock(prefixLength, isFloor, nextFloorLeadLabel, nextBlockStart, end, hasTerms, hasSubBlocks)); + newBlocks.add( + writeBlock( + prefixLength, + isFloor, + nextFloorLeadLabel, + nextBlockStart, + end, + hasTerms, + hasSubBlocks)); } assert newBlocks.isEmpty() == false; @@ -522,7 +583,7 @@ public final class VersionBlockTreeTermsWriter extends FieldsConsumer { firstBlock.compileIndex(newBlocks, scratchBytes, scratchIntsRef); // Remove slice from the top of the pending stack, that we just wrote: - pending.subList(pending.size()-count, pending.size()).clear(); + pending.subList(pending.size() - count, pending.size()).clear(); // Append new block pending.add(firstBlock); @@ -530,18 +591,29 @@ public final class VersionBlockTreeTermsWriter extends FieldsConsumer { newBlocks.clear(); } - /** Writes the specified slice (start is inclusive, end is exclusive) - * from pending stack as a new block. If isFloor is true, there - * were too many (more than maxItemsInBlock) entries sharing the - * same prefix, and so we broke it into multiple floor blocks where - * we record the starting label of the suffix of each floor block. */ - private PendingBlock writeBlock(int prefixLength, boolean isFloor, int floorLeadLabel, int start, int end, boolean hasTerms, boolean hasSubBlocks) throws IOException { + /** + * Writes the specified slice (start is inclusive, end is exclusive) from pending stack as a new + * block. If isFloor is true, there were too many (more than maxItemsInBlock) entries sharing + * the same prefix, and so we broke it into multiple floor blocks where we record the starting + * label of the suffix of each floor block. + */ + private PendingBlock writeBlock( + int prefixLength, + boolean isFloor, + int floorLeadLabel, + int start, + int end, + boolean hasTerms, + boolean hasSubBlocks) + throws IOException { assert end > start; long startFP = out.getFilePointer(); - // if (DEBUG) System.out.println(" writeBlock fp=" + startFP + " isFloor=" + isFloor + " floorLeadLabel=" + floorLeadLabel + " start=" + start + " end=" + end + " hasTerms=" + hasTerms + " hasSubBlocks=" + hasSubBlocks); + // if (DEBUG) System.out.println(" writeBlock fp=" + startFP + " isFloor=" + isFloor + " + // floorLeadLabel=" + floorLeadLabel + " start=" + start + " end=" + end + " hasTerms=" + + // hasTerms + " hasSubBlocks=" + hasSubBlocks); boolean hasFloorLeadLabel = isFloor && floorLeadLabel != -1; @@ -559,7 +631,11 @@ public final class VersionBlockTreeTermsWriter extends FieldsConsumer { out.writeVInt(code); // if (DEBUG) { - // System.out.println(" writeBlock " + (isFloor ? "(floor) " : "") + "seg=" + segment + " pending.size()=" + pending.size() + " prefixLength=" + prefixLength + " indexPrefix=" + brToString(prefix) + " entCount=" + length + " startFP=" + startFP + (isFloor ? 
(" floorLeadByte=" + Integer.toHexString(floorLeadByte&0xff)) : "") + " isLastInFloor=" + isLastInFloor); + // System.out.println(" writeBlock " + (isFloor ? "(floor) " : "") + "seg=" + segment + " + // pending.size()=" + pending.size() + " prefixLength=" + prefixLength + " indexPrefix=" + + // brToString(prefix) + " entCount=" + length + " startFP=" + startFP + (isFloor ? (" + // floorLeadByte=" + Integer.toHexString(floorLeadByte&0xff)) : "") + " isLastInFloor=" + + // isLastInFloor); // } // 1st pass: pack term suffix bytes into byte[] blob @@ -569,7 +645,7 @@ public final class VersionBlockTreeTermsWriter extends FieldsConsumer { // compact format in this case: boolean isLeafBlock = hasSubBlocks == false; - final List>> subIndices; + final List>> subIndices; boolean absolute = true; long maxVersionInBlock = -1; @@ -577,12 +653,12 @@ public final class VersionBlockTreeTermsWriter extends FieldsConsumer { if (isLeafBlock) { // Only terms: subIndices = null; - for (int i=start;i=pos;i--) { + for (int i = lastTerm.length() - 1; i >= pos; i--) { // How many items on top of the stack share the current suffix // we are closing: int prefixTopSize = pending.size() - prefixStarts[i]; if (prefixTopSize >= minItemsInBlock) { - // if (DEBUG) System.out.println("pushTerm i=" + i + " prefixTopSize=" + prefixTopSize + " minItemsInBlock=" + minItemsInBlock); - writeBlocks(i+1, prefixTopSize); - prefixStarts[i] -= prefixTopSize-1; + // if (DEBUG) System.out.println("pushTerm i=" + i + " prefixTopSize=" + prefixTopSize + " + // minItemsInBlock=" + minItemsInBlock); + writeBlocks(i + 1, prefixTopSize); + prefixStarts[i] -= prefixTopSize - 1; } } @@ -755,7 +837,7 @@ public final class VersionBlockTreeTermsWriter extends FieldsConsumer { } // Init new tail: - for(int i=pos;i 0; out.writeVLong(field.numTerms); out.writeVInt(field.rootCode.output1.length); - out.writeBytes(field.rootCode.output1.bytes, field.rootCode.output1.offset, field.rootCode.output1.length); + out.writeBytes( + field.rootCode.output1.bytes, + field.rootCode.output1.offset, + field.rootCode.output1.length); out.writeVLong(field.rootCode.output2); indexOut.writeVLong(field.indexStartFP); writeBytesRef(out, field.minTerm); diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/codecs/idversion/VersionFieldReader.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/codecs/idversion/VersionFieldReader.java index d58f6cfec3d..64f762091e6 100644 --- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/codecs/idversion/VersionFieldReader.java +++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/codecs/idversion/VersionFieldReader.java @@ -19,7 +19,6 @@ package org.apache.lucene.sandbox.codecs.idversion; import java.io.IOException; import java.util.Collection; import java.util.Collections; - import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Terms; @@ -42,40 +41,56 @@ final class VersionFieldReader extends Terms implements Accountable { final int docCount; final long indexStartFP; final long rootBlockFP; - final Pair rootCode; + final Pair rootCode; final BytesRef minTerm; final BytesRef maxTerm; final VersionBlockTreeTermsReader parent; - final FST> index; - //private boolean DEBUG; + final FST> index; + // private boolean DEBUG; - VersionFieldReader(VersionBlockTreeTermsReader parent, FieldInfo fieldInfo, long numTerms, Pair rootCode, long sumTotalTermFreq, long sumDocFreq, int docCount, - long indexStartFP, IndexInput indexIn, BytesRef minTerm, BytesRef maxTerm) 
throws IOException { + VersionFieldReader( + VersionBlockTreeTermsReader parent, + FieldInfo fieldInfo, + long numTerms, + Pair rootCode, + long sumTotalTermFreq, + long sumDocFreq, + int docCount, + long indexStartFP, + IndexInput indexIn, + BytesRef minTerm, + BytesRef maxTerm) + throws IOException { assert numTerms > 0; this.fieldInfo = fieldInfo; - //DEBUG = BlockTreeTermsReader.DEBUG && fieldInfo.name.equals("id"); + // DEBUG = BlockTreeTermsReader.DEBUG && fieldInfo.name.equals("id"); this.parent = parent; this.numTerms = numTerms; - this.sumTotalTermFreq = sumTotalTermFreq; - this.sumDocFreq = sumDocFreq; + this.sumTotalTermFreq = sumTotalTermFreq; + this.sumDocFreq = sumDocFreq; this.docCount = docCount; this.indexStartFP = indexStartFP; this.rootCode = rootCode; this.minTerm = minTerm; this.maxTerm = maxTerm; // if (DEBUG) { - // System.out.println("BTTR: seg=" + segment + " field=" + fieldInfo.name + " rootBlockCode=" + rootCode + " divisor=" + indexDivisor); + // System.out.println("BTTR: seg=" + segment + " field=" + fieldInfo.name + " rootBlockCode=" + // + rootCode + " divisor=" + indexDivisor); // } - rootBlockFP = (new ByteArrayDataInput(rootCode.output1.bytes, rootCode.output1.offset, rootCode.output1.length)).readVLong() >>> VersionBlockTreeTermsWriter.OUTPUT_FLAGS_NUM_BITS; + rootBlockFP = + (new ByteArrayDataInput( + rootCode.output1.bytes, rootCode.output1.offset, rootCode.output1.length)) + .readVLong() + >>> VersionBlockTreeTermsWriter.OUTPUT_FLAGS_NUM_BITS; if (indexIn != null) { final IndexInput clone = indexIn.clone(); - //System.out.println("start=" + indexStartFP + " field=" + fieldInfo.name); + // System.out.println("start=" + indexStartFP + " field=" + fieldInfo.name); clone.seek(indexStartFP); index = new FST<>(clone, clone, VersionBlockTreeTermsWriter.FST_OUTPUTS); - + /* if (false) { final String dotFileName = segment + "_" + fieldInfo.name + ".dot"; @@ -117,14 +132,17 @@ final class VersionFieldReader extends Terms implements Accountable { @Override public boolean hasOffsets() { - return fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0; + return fieldInfo + .getIndexOptions() + .compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) + >= 0; } @Override public boolean hasPositions() { return fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0; } - + @Override public boolean hasPayloads() { return fieldInfo.hasPayloads(); @@ -157,9 +175,9 @@ final class VersionFieldReader extends Terms implements Accountable { @Override public long ramBytesUsed() { - return ((index!=null)? index.ramBytesUsed() : 0); + return ((index != null) ? 
index.ramBytesUsed() : 0); } - + @Override public Collection getChildResources() { if (index == null) { @@ -171,6 +189,14 @@ final class VersionFieldReader extends Terms implements Accountable { @Override public String toString() { - return "IDVersionTerms(terms=" + numTerms + ",postings=" + sumDocFreq + ",positions=" + sumTotalTermFreq + ",docs=" + docCount + ")"; + return "IDVersionTerms(terms=" + + numTerms + + ",postings=" + + sumDocFreq + + ",positions=" + + sumTotalTermFreq + + ",docs=" + + docCount + + ")"; } } diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/codecs/idversion/package-info.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/codecs/idversion/package-info.java index e2fee7238b1..288ffa7e500 100644 --- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/codecs/idversion/package-info.java +++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/codecs/idversion/package-info.java @@ -14,9 +14,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -/** - * A primary-key postings format that associates a version (long) with each term and - * can provide fail-fast lookups by ID and version. + +/** + * A primary-key postings format that associates a version (long) with each term and can provide + * fail-fast lookups by ID and version. */ -package org.apache.lucene.sandbox.codecs.idversion; \ No newline at end of file +package org.apache.lucene.sandbox.codecs.idversion; diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/BigIntegerPoint.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/BigIntegerPoint.java index 875cd37251c..886cd1e5c2f 100644 --- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/BigIntegerPoint.java +++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/BigIntegerPoint.java @@ -18,7 +18,6 @@ package org.apache.lucene.sandbox.document; import java.math.BigInteger; import java.util.Arrays; - import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; import org.apache.lucene.index.PointValues; @@ -28,32 +27,35 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; -/** +/** * An indexed 128-bit {@code BigInteger} field. - *

- * <p>
- * Finding all documents within an N-dimensional shape or range at search time is
- * efficient. Multiple values for the same field in one document
- * is allowed.
- * <p>
- * This field defines static factory methods for creating common queries:
+ *
+ * <p>Finding all documents within an N-dimensional shape or range at search time is efficient.
+ * Multiple values for the same field in one document is allowed.
+ *
+ * <p>This field defines static factory methods for creating common queries:
+ *
  * <ul>
  *   <li>{@link #newExactQuery(String, BigInteger)} for matching an exact 1D point.
  *   <li>{@link #newSetQuery(String, BigInteger...)} for matching a set of 1D values.
  *   <li>{@link #newRangeQuery(String, BigInteger, BigInteger)} for matching a 1D range.
- *   <li>{@link #newRangeQuery(String, BigInteger[], BigInteger[])} for matching points/ranges in n-dimensional space.
+ *   <li>{@link #newRangeQuery(String, BigInteger[], BigInteger[])} for matching points/ranges in
+ *       n-dimensional space.
  * </ul>
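For illustration, a minimal sketch of the factory methods listed above combined with normal indexing and search, assuming an existing IndexWriter writer and IndexSearcher searcher over the same index; the field name "price128" and all values are hypothetical:

  import java.math.BigInteger;
  import org.apache.lucene.document.Document;
  import org.apache.lucene.sandbox.document.BigIntegerPoint;
  import org.apache.lucene.search.Query;
  import org.apache.lucene.search.TopDocs;

  // Index a one-dimensional 128-bit point:
  Document doc = new Document();
  doc.add(new BigIntegerPoint("price128", new BigInteger("1234567890123456789012345")));
  writer.addDocument(doc);

  // Inclusive 1D range; for an exclusive bound pass lowerValue.add(BigInteger.ONE)
  // or upperValue.subtract(BigInteger.ONE), as the javadoc below describes:
  Query range =
      BigIntegerPoint.newRangeQuery(
          "price128", BigInteger.ZERO, new BigInteger("2000000000000000000000000000"));
  TopDocs hits = searcher.search(range, 10);

  // Exact and set queries follow the same pattern:
  Query exact = BigIntegerPoint.newExactQuery("price128", BigInteger.valueOf(42));
  Query set = BigIntegerPoint.newSetQuery("price128", BigInteger.ONE, BigInteger.TEN);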
+ *
  * @see PointValues
  */
 public class BigIntegerPoint extends Field {

   /** The number of bytes per dimension: 128 bits. */
   public static final int BYTES = 16;
-
+
   /** A constant holding the minimum value a BigIntegerPoint can have, -2<sup>127</sup>. */
   public static final BigInteger MIN_VALUE = BigInteger.ONE.shiftLeft(BYTES * 8 - 1).negate();

   /** A constant holding the maximum value a BigIntegerPoint can have, 2<sup>127</sup>-1. */
-  public static final BigInteger MAX_VALUE = BigInteger.ONE.shiftLeft(BYTES * 8 - 1).subtract(BigInteger.ONE);
+  public static final BigInteger MAX_VALUE =
+      BigInteger.ONE.shiftLeft(BYTES * 8 - 1).subtract(BigInteger.ONE);

   private static FieldType getType(int numDims) {
     FieldType type = new FieldType();
@@ -65,7 +67,14 @@ public class BigIntegerPoint extends Field {

   /** Change the values of this field */
   public void setBigIntegerValues(BigInteger... point) {
     if (type.pointDimensionCount() != point.length) {
-      throw new IllegalArgumentException("this field (name=" + name + ") uses " + type.pointDimensionCount() + " dimensions; cannot change to (incoming) " + point.length + " dimensions");
+      throw new IllegalArgumentException(
+          "this field (name="
+              + name
+              + ") uses "
+              + type.pointDimensionCount()
+              + " dimensions; cannot change to (incoming) "
+              + point.length
+              + " dimensions");
     }
     fieldsData = pack(point);
   }
@@ -78,7 +87,12 @@ public class BigIntegerPoint extends Field {

   @Override
   public Number numericValue() {
     if (type.pointDimensionCount() != 1) {
-      throw new IllegalStateException("this field (name=" + name + ") uses " + type.pointDimensionCount() + " dimensions; cannot convert to a single numeric value");
+      throw new IllegalStateException(
+          "this field (name="
+              + name
+              + ") uses "
+              + type.pointDimensionCount()
+              + " dimensions; cannot convert to a single numeric value");
     }
     BytesRef bytes = (BytesRef) fieldsData;
     assert bytes.length == BYTES;
@@ -93,7 +107,7 @@ public class BigIntegerPoint extends Field {
       throw new IllegalArgumentException("point must not be 0 dimensions");
     }
     byte[] packed = new byte[point.length * BYTES];
-
+
     for (int dim = 0; dim < point.length; dim++) {
       encodeDimension(point[dim], packed, dim * BYTES);
     }
@@ -101,17 +115,17 @@ public class BigIntegerPoint extends Field {
     return new BytesRef(packed);
   }

-  /** Creates a new BigIntegerPoint, indexing the
-   * provided N-dimensional big integer point.
+  /**
+   * Creates a new BigIntegerPoint, indexing the provided N-dimensional big integer point.
    *
-   * @param name field name
-   * @param point BigInteger[] value
-   * @throws IllegalArgumentException if the field name or value is null.
+   * @param name field name
+   * @param point BigInteger[] value
+   * @throws IllegalArgumentException if the field name or value is null.
    */
   public BigIntegerPoint(String name, BigInteger... point) {
     super(name, pack(point), getType(point.length));
   }
-
+
   @Override
   public String toString() {
     StringBuilder result = new StringBuilder();
@@ -133,12 +147,12 @@ public class BigIntegerPoint extends Field {
   }

   // public helper methods (e.g.
for queries) - + /** Encode single BigInteger dimension */ public static void encodeDimension(BigInteger value, byte dest[], int offset) { NumericUtils.bigIntToSortableBytes(value, BYTES, dest, offset); } - + /** Decode single BigInteger dimension */ public static BigInteger decodeDimension(byte value[], int offset) { return NumericUtils.sortableBytesToBigInt(value, offset, BYTES); @@ -146,11 +160,11 @@ public class BigIntegerPoint extends Field { // static methods for generating queries - /** + /** * Create a query for matching an exact big integer value. - *

- * <p>
- * This is for simple one-dimension points, for multidimensional points use
- * {@link #newRangeQuery(String, BigInteger[], BigInteger[])} instead.
+ *
+ * <p>
    This is for simple one-dimension points, for multidimensional points use {@link + * #newRangeQuery(String, BigInteger[], BigInteger[])} instead. * * @param field field name. must not be {@code null}. * @param value exact value. must not be {@code null}. @@ -161,50 +175,53 @@ public class BigIntegerPoint extends Field { return newRangeQuery(field, value, value); } - /** + /** * Create a range query for big integer values. - *

- * <p>
- * This is for simple one-dimension ranges, for multidimensional ranges use
- * {@link #newRangeQuery(String, BigInteger[], BigInteger[])} instead.
- * <p>
- * You can have half-open ranges (which are in fact </≤ or >/≥ queries)
- * by setting {@code lowerValue = BigIntegerPoint.MIN_VALUE}
- * or {@code upperValue = BigIntegerPoint.MAX_VALUE}.
- * <p>
- * Ranges are inclusive. For exclusive ranges, pass {@code lowerValue.add(BigInteger.ONE)}
- * or {@code upperValue.subtract(BigInteger.ONE)}
+ *
+ * <p>This is for simple one-dimension ranges, for multidimensional ranges use {@link
+ * #newRangeQuery(String, BigInteger[], BigInteger[])} instead.
+ *
+ * <p>You can have half-open ranges (which are in fact </≤ or >/≥ queries) by setting
+ * {@code lowerValue = BigIntegerPoint.MIN_VALUE} or {@code upperValue =
+ * BigIntegerPoint.MAX_VALUE}.
+ *
+ * <p>
    Ranges are inclusive. For exclusive ranges, pass {@code lowerValue.add(BigInteger.ONE)} or + * {@code upperValue.subtract(BigInteger.ONE)} * * @param field field name. must not be {@code null}. * @param lowerValue lower portion of the range (inclusive). must not be {@code null}. * @param upperValue upper portion of the range (inclusive). must not be {@code null}. - * @throws IllegalArgumentException if {@code field} is null, {@code lowerValue} is null, or {@code upperValue} is null. + * @throws IllegalArgumentException if {@code field} is null, {@code lowerValue} is null, or + * {@code upperValue} is null. * @return a query matching documents within this range. */ public static Query newRangeQuery(String field, BigInteger lowerValue, BigInteger upperValue) { PointRangeQuery.checkArgs(field, lowerValue, upperValue); - return newRangeQuery(field, new BigInteger[] { lowerValue }, new BigInteger[] { upperValue }); + return newRangeQuery(field, new BigInteger[] {lowerValue}, new BigInteger[] {upperValue}); } - /** + /** * Create a range query for n-dimensional big integer values. - *

- * <p>
- * You can have half-open ranges (which are in fact </≤ or >/≥ queries)
- * by setting {@code lowerValue[i] = BigIntegerPoint.MIN_VALUE}
- * or {@code upperValue[i] = BigIntegerPoint.MAX_VALUE}.
- * <p>
- * Ranges are inclusive. For exclusive ranges, pass {@code lowerValue[i].add(BigInteger.ONE)}
+ *
+ * <p>You can have half-open ranges (which are in fact </≤ or >/≥ queries) by setting
+ * {@code lowerValue[i] = BigIntegerPoint.MIN_VALUE} or {@code upperValue[i] =
+ * BigIntegerPoint.MAX_VALUE}.
+ *
+ * <p>
    Ranges are inclusive. For exclusive ranges, pass {@code lowerValue[i].add(BigInteger.ONE)} * or {@code upperValue[i].subtract(BigInteger.ONE)} * * @param field field name. must not be {@code null}. * @param lowerValue lower portion of the range (inclusive). must not be {@code null}. * @param upperValue upper portion of the range (inclusive). must not be {@code null}. - * @throws IllegalArgumentException if {@code field} is null, if {@code lowerValue} is null, if {@code upperValue} is null, - * or if {@code lowerValue.length != upperValue.length} + * @throws IllegalArgumentException if {@code field} is null, if {@code lowerValue} is null, if + * {@code upperValue} is null, or if {@code lowerValue.length != upperValue.length} * @return a query matching documents within this range. */ - public static Query newRangeQuery(String field, BigInteger[] lowerValue, BigInteger[] upperValue) { + public static Query newRangeQuery( + String field, BigInteger[] lowerValue, BigInteger[] upperValue) { PointRangeQuery.checkArgs(field, lowerValue, upperValue); - return new PointRangeQuery(field, pack(lowerValue).bytes, pack(upperValue).bytes, lowerValue.length) { + return new PointRangeQuery( + field, pack(lowerValue).bytes, pack(upperValue).bytes, lowerValue.length) { @Override protected String toString(int dimension, byte[] value) { return BigIntegerPoint.decodeDimension(value, 0).toString(); @@ -213,8 +230,9 @@ public class BigIntegerPoint extends Field { } /** - * Create a query matching any of the specified 1D values. This is the points equivalent of {@code TermsQuery}. - * + * Create a query matching any of the specified 1D values. This is the points equivalent of {@code + * TermsQuery}. + * * @param field field name. must not be {@code null}. * @param values all values to match */ @@ -226,22 +244,25 @@ public class BigIntegerPoint extends Field { final BytesRef encoded = new BytesRef(new byte[BYTES]); - return new PointInSetQuery(field, 1, BYTES, - new PointInSetQuery.Stream() { + return new PointInSetQuery( + field, + 1, + BYTES, + new PointInSetQuery.Stream() { - int upto; + int upto; - @Override - public BytesRef next() { - if (upto == sortedValues.length) { - return null; - } else { - encodeDimension(sortedValues[upto], encoded.bytes, 0); - upto++; - return encoded; - } - } - }) { + @Override + public BytesRef next() { + if (upto == sortedValues.length) { + return null; + } else { + encodeDimension(sortedValues[upto], encoded.bytes, 0); + upto++; + return encoded; + } + } + }) { @Override protected String toString(byte[] value) { assert value.length == BYTES; diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/DoublePointMultiRangeBuilder.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/DoublePointMultiRangeBuilder.java index 856939828f9..9b9caba69d0 100644 --- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/DoublePointMultiRangeBuilder.java +++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/DoublePointMultiRangeBuilder.java @@ -17,11 +17,11 @@ package org.apache.lucene.sandbox.document; -import org.apache.lucene.sandbox.search.MultiRangeQuery; - import static org.apache.lucene.document.DoublePoint.decodeDimension; import static org.apache.lucene.document.DoublePoint.pack; +import org.apache.lucene.sandbox.search.MultiRangeQuery; + /** Builder for multi range queries for DoublePoints */ public class DoublePointMultiRangeBuilder extends MultiRangeQuery.Builder { public DoublePointMultiRangeBuilder(String field, int numDims) { @@ 
-40,15 +40,16 @@ public class DoublePointMultiRangeBuilder extends MultiRangeQuery.Builder { public void add(double[] lowerValue, double[] upperValue) { if (upperValue.length != numDims || lowerValue.length != numDims) { - throw new IllegalArgumentException("Passed in range does not conform to specified dimensions"); + throw new IllegalArgumentException( + "Passed in range does not conform to specified dimensions"); } for (int i = 0; i < numDims; i++) { if (upperValue[i] < lowerValue[i]) { - throw new IllegalArgumentException("Upper value of range should be greater than lower value of range"); + throw new IllegalArgumentException( + "Upper value of range should be greater than lower value of range"); } } add(pack(lowerValue).bytes, pack(upperValue).bytes); } } - diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/FloatPointMultiRangeBuilder.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/FloatPointMultiRangeBuilder.java index b225e384a73..fd93aea4493 100644 --- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/FloatPointMultiRangeBuilder.java +++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/FloatPointMultiRangeBuilder.java @@ -17,14 +17,12 @@ package org.apache.lucene.sandbox.document; -import org.apache.lucene.sandbox.search.MultiRangeQuery; - import static org.apache.lucene.document.FloatPoint.decodeDimension; import static org.apache.lucene.document.FloatPoint.pack; -/** - * Builder for multi range queries for FloatPoints - */ +import org.apache.lucene.sandbox.search.MultiRangeQuery; + +/** Builder for multi range queries for FloatPoints */ public class FloatPointMultiRangeBuilder extends MultiRangeQuery.Builder { public FloatPointMultiRangeBuilder(String field, int numDims) { super(field, Float.BYTES, numDims); @@ -42,12 +40,14 @@ public class FloatPointMultiRangeBuilder extends MultiRangeQuery.Builder { public void add(float[] lowerValue, float[] upperValue) { if (upperValue.length != numDims || lowerValue.length != numDims) { - throw new IllegalArgumentException("Passed in range does not conform to specified dimensions"); + throw new IllegalArgumentException( + "Passed in range does not conform to specified dimensions"); } for (int i = 0; i < numDims; i++) { if (upperValue[i] < lowerValue[i]) { - throw new IllegalArgumentException("Upper value of range should be greater than lower value of range"); + throw new IllegalArgumentException( + "Upper value of range should be greater than lower value of range"); } } add(pack(lowerValue).bytes, pack(upperValue).bytes); diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/FloatPointNearestNeighbor.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/FloatPointNearestNeighbor.java index 3e1dc838281..a48155fc0d4 100644 --- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/FloatPointNearestNeighbor.java +++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/FloatPointNearestNeighbor.java @@ -21,7 +21,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.PriorityQueue; - import org.apache.lucene.document.FloatPoint; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.PointValues; @@ -48,8 +47,13 @@ public class FloatPointNearestNeighbor { final BKDReader.IndexTree index; /** The closest possible distance^2 of all points in this cell */ final double distanceSquared; - - Cell(BKDReader.IndexTree index, int readerIndex, byte[] minPacked, byte[] maxPacked, 
double distanceSquared) { + + Cell( + BKDReader.IndexTree index, + int readerIndex, + byte[] minPacked, + byte[] maxPacked, + double distanceSquared) { this.index = index; this.readerIndex = readerIndex; this.minPacked = minPacked.clone(); @@ -63,8 +67,15 @@ public class FloatPointNearestNeighbor { @Override public String toString() { - return "Cell(readerIndex=" + readerIndex + " nodeID=" + index.getNodeID() - + " isLeaf=" + index.isLeafNode() + " distanceSquared=" + distanceSquared + ")"; + return "Cell(readerIndex=" + + readerIndex + + " nodeID=" + + index.getNodeID() + + " isLeaf=" + + index.isLeafNode() + + " distanceSquared=" + + distanceSquared + + ")"; } } @@ -74,7 +85,7 @@ public class FloatPointNearestNeighbor { final int topN; final PriorityQueue hitQueue; final float[] origin; - final private int dims; + private final int dims; double bottomNearestDistanceSquared = Double.POSITIVE_INFINITY; int bottomNearestDistanceDoc = Integer.MAX_VALUE; @@ -98,7 +109,7 @@ public class FloatPointNearestNeighbor { } double distanceSquared = 0.0d; - for (int d = 0, offset = 0 ; d < dims ; ++d, offset += Float.BYTES) { + for (int d = 0, offset = 0; d < dims; ++d, offset += Float.BYTES) { double diff = (double) FloatPoint.decodeDimension(packedValue, offset) - (double) origin[d]; distanceSquared += diff * diff; if (distanceSquared > bottomNearestDistanceSquared) { @@ -106,12 +117,14 @@ public class FloatPointNearestNeighbor { } } - // System.out.println(" visit docID=" + docID + " distanceSquared=" + distanceSquared + " value: " + Arrays.toString(docPoint)); + // System.out.println(" visit docID=" + docID + " distanceSquared=" + distanceSquared + " + // value: " + Arrays.toString(docPoint)); int fullDocID = curDocBase + docID; if (hitQueue.size() == topN) { // queue already full - if (distanceSquared == bottomNearestDistanceSquared && fullDocID > bottomNearestDistanceDoc) { + if (distanceSquared == bottomNearestDistanceSquared + && fullDocID > bottomNearestDistanceDoc) { return; } NearestHit bottom = hitQueue.poll(); @@ -120,7 +133,7 @@ public class FloatPointNearestNeighbor { bottom.distanceSquared = distanceSquared; hitQueue.offer(bottom); updateBottomNearestDistance(); - // System.out.println(" ** keep1, now bottom=" + bottom); + // System.out.println(" ** keep1, now bottom=" + bottom); } else { NearestHit hit = new NearestHit(); hit.docID = fullDocID; @@ -141,7 +154,9 @@ public class FloatPointNearestNeighbor { @Override public PointValues.Relation compare(byte[] minPackedValue, byte[] maxPackedValue) { - if (hitQueue.size() == topN && pointToRectangleDistanceSquared(minPackedValue, maxPackedValue, origin) > bottomNearestDistanceSquared) { + if (hitQueue.size() == topN + && pointToRectangleDistanceSquared(minPackedValue, maxPackedValue, origin) + > bottomNearestDistanceSquared) { return PointValues.Relation.CELL_OUTSIDE_QUERY; } return PointValues.Relation.CELL_CROSSES_QUERY; @@ -159,17 +174,27 @@ public class FloatPointNearestNeighbor { } } - private static NearestHit[] nearest(List readers, List liveDocs, List docBases, final int topN, float[] origin) throws IOException { + private static NearestHit[] nearest( + List readers, + List liveDocs, + List docBases, + final int topN, + float[] origin) + throws IOException { - // System.out.println("NEAREST: readers=" + readers + " liveDocs=" + liveDocs + " origin: " + Arrays.toString(origin)); + // System.out.println("NEAREST: readers=" + readers + " liveDocs=" + liveDocs + " origin: " + + // Arrays.toString(origin)); // Holds closest collected 
points seen so far: // TODO: if we used lucene's PQ we could just updateTop instead of poll/offer: - final PriorityQueue hitQueue = new PriorityQueue<>(topN, (a, b) -> { - // sort by opposite distance natural order - int cmp = Double.compare(a.distanceSquared, b.distanceSquared); - return cmp != 0 ? -cmp : b.docID - a.docID; // tie-break by higher docID - }); + final PriorityQueue hitQueue = + new PriorityQueue<>( + topN, + (a, b) -> { + // sort by opposite distance natural order + int cmp = Double.compare(a.distanceSquared, b.distanceSquared); + return cmp != 0 ? -cmp : b.docID - a.docID; // tie-break by higher docID + }); // Holds all cells, sorted by closest to the point: PriorityQueue cellQueue = new PriorityQueue<>(); @@ -180,21 +205,30 @@ public class FloatPointNearestNeighbor { // Add root cell for each reader into the queue: int bytesPerDim = -1; - for (int i = 0 ; i < readers.size() ; ++i) { + for (int i = 0; i < readers.size(); ++i) { BKDReader reader = readers.get(i); if (bytesPerDim == -1) { bytesPerDim = reader.getBytesPerDimension(); } else if (bytesPerDim != reader.getBytesPerDimension()) { - throw new IllegalStateException("bytesPerDim changed from " + bytesPerDim - + " to " + reader.getBytesPerDimension() + " across readers"); + throw new IllegalStateException( + "bytesPerDim changed from " + + bytesPerDim + + " to " + + reader.getBytesPerDimension() + + " across readers"); } byte[] minPackedValue = reader.getMinPackedValue(); byte[] maxPackedValue = reader.getMaxPackedValue(); BKDReader.IntersectState state = reader.getIntersectState(visitor); states.add(state); - cellQueue.offer(new Cell(state.index, i, reader.getMinPackedValue(), reader.getMaxPackedValue(), - pointToRectangleDistanceSquared(minPackedValue, maxPackedValue, origin))); + cellQueue.offer( + new Cell( + state.index, + i, + reader.getMinPackedValue(), + reader.getMaxPackedValue(), + pointToRectangleDistanceSquared(minPackedValue, maxPackedValue, origin))); } while (cellQueue.size() > 0) { @@ -213,7 +247,7 @@ public class FloatPointNearestNeighbor { visitor.curLiveDocs = liveDocs.get(cell.readerIndex); reader.visitLeafBlockValues(cell.index, states.get(cell.readerIndex)); - //assert hitQueue.peek().distanceSquared >= cell.distanceSquared; + // assert hitQueue.peek().distanceSquared >= cell.distanceSquared; // System.out.println(" now " + hitQueue.size() + " hits"); } else { // System.out.println(" non-leaf"); @@ -225,54 +259,72 @@ public class FloatPointNearestNeighbor { // we must clone the index so that we we can recurse left and right "concurrently": BKDReader.IndexTree newIndex = cell.index.clone(); byte[] splitPackedValue = cell.maxPacked.clone(); - System.arraycopy(splitValue.bytes, splitValue.offset, splitPackedValue, splitDim * bytesPerDim, bytesPerDim); + System.arraycopy( + splitValue.bytes, + splitValue.offset, + splitPackedValue, + splitDim * bytesPerDim, + bytesPerDim); cell.index.pushLeft(); - double distanceLeft = pointToRectangleDistanceSquared(cell.minPacked, splitPackedValue, origin); + double distanceLeft = + pointToRectangleDistanceSquared(cell.minPacked, splitPackedValue, origin); if (distanceLeft <= visitor.bottomNearestDistanceSquared) { - cellQueue.offer(new Cell(cell.index, cell.readerIndex, cell.minPacked, splitPackedValue, distanceLeft)); + cellQueue.offer( + new Cell( + cell.index, cell.readerIndex, cell.minPacked, splitPackedValue, distanceLeft)); } splitPackedValue = cell.minPacked.clone(); - System.arraycopy(splitValue.bytes, splitValue.offset, splitPackedValue, splitDim * 
bytesPerDim, bytesPerDim); + System.arraycopy( + splitValue.bytes, + splitValue.offset, + splitPackedValue, + splitDim * bytesPerDim, + bytesPerDim); newIndex.pushRight(); - double distanceRight = pointToRectangleDistanceSquared(splitPackedValue, cell.maxPacked, origin); + double distanceRight = + pointToRectangleDistanceSquared(splitPackedValue, cell.maxPacked, origin); if (distanceRight <= visitor.bottomNearestDistanceSquared) { - cellQueue.offer(new Cell(newIndex, cell.readerIndex, splitPackedValue, cell.maxPacked, distanceRight)); + cellQueue.offer( + new Cell( + newIndex, cell.readerIndex, splitPackedValue, cell.maxPacked, distanceRight)); } } } NearestHit[] hits = new NearestHit[hitQueue.size()]; - int downTo = hitQueue.size()-1; + int downTo = hitQueue.size() - 1; while (hitQueue.size() != 0) { hits[downTo] = hitQueue.poll(); downTo--; } - //System.out.println(visitor.comp); + // System.out.println(visitor.comp); return hits; } - private static double pointToRectangleDistanceSquared(byte[] minPackedValue, byte[] maxPackedValue, float[] value) { + private static double pointToRectangleDistanceSquared( + byte[] minPackedValue, byte[] maxPackedValue, float[] value) { double sumOfSquaredDiffs = 0.0d; - for (int i = 0, offset = 0 ; i < value.length ; ++i, offset += Float.BYTES) { + for (int i = 0, offset = 0; i < value.length; ++i, offset += Float.BYTES) { double min = FloatPoint.decodeDimension(minPackedValue, offset); if (value[i] < min) { - double diff = min - (double)value[i]; + double diff = min - (double) value[i]; sumOfSquaredDiffs += diff * diff; continue; } double max = FloatPoint.decodeDimension(maxPackedValue, offset); if (value[i] > max) { - double diff = max - (double)value[i]; + double diff = max - (double) value[i]; sumOfSquaredDiffs += diff * diff; } } return sumOfSquaredDiffs; } - public static TopFieldDocs nearest(IndexSearcher searcher, String field, int topN, float... origin) throws IOException { + public static TopFieldDocs nearest( + IndexSearcher searcher, String field, int topN, float... origin) throws IOException { if (topN < 1) { throw new IllegalArgumentException("topN must be at least 1; got " + topN); } @@ -290,10 +342,11 @@ public class FloatPointNearestNeighbor { PointValues points = leaf.reader().getPointValues(field); if (points != null) { if (points instanceof BKDReader == false) { - throw new IllegalArgumentException("can only run on Lucene60PointsReader points implementation, but got " + points); + throw new IllegalArgumentException( + "can only run on Lucene60PointsReader points implementation, but got " + points); } totalHits += points.getDocCount(); - readers.add((BKDReader)points); + readers.add((BKDReader) points); docBases.add(leaf.docBase); liveDocs.add(leaf.reader().getLiveDocs()); } @@ -303,9 +356,10 @@ public class FloatPointNearestNeighbor { // Convert to TopFieldDocs: ScoreDoc[] scoreDocs = new ScoreDoc[hits.length]; - for(int i=0;i - * The API takes floats, but they will be encoded to half-floats before being - * indexed. In case the provided floats cannot be represented accurately as a - * half float, they will be rounded to the closest value that can be - * represented as a half float. In case of tie, values will be rounded to the - * value that has a zero as its least significant bit. - *
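The public entry point above, nearest(IndexSearcher, String, int, float...), returns the topN closest documents as a TopFieldDocs ordered nearest-first. A minimal sketch of calling it, assuming documents were indexed with a 2-dimension FloatPoint and an existing IndexSearcher searcher; the field name "location" and the origin coordinates are hypothetical:

  import org.apache.lucene.sandbox.document.FloatPointNearestNeighbor;
  import org.apache.lucene.search.ScoreDoc;
  import org.apache.lucene.search.TopFieldDocs;

  // Find the 3 points closest to (42.0, 7.5):
  TopFieldDocs nearest = FloatPointNearestNeighbor.nearest(searcher, "location", 3, 42.0f, 7.5f);
  for (ScoreDoc sd : nearest.scoreDocs) { // results are ordered nearest-first
    System.out.println("doc=" + sd.doc);
  }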

- * <p>
- * Finding all documents within an N-dimensional at search time is
- * efficient. Multiple values for the same field in one document
- * is allowed.
- * <p>
- * This field defines static factory methods for creating common queries:
+ * An indexed {@code half-float} field for fast range filters. If you also need to store the value,
+ * you should add a separate {@link StoredField} instance. If you need doc values, you can store
+ * them in a {@link NumericDocValuesField} and use {@link #halfFloatToSortableShort} and {@link
+ * #sortableShortToHalfFloat} for encoding/decoding.
+ *
+ * <p>The API takes floats, but they will be encoded to half-floats before being indexed. In case
+ * the provided floats cannot be represented accurately as a half float, they will be rounded to the
+ * closest value that can be represented as a half float. In case of tie, values will be rounded to
+ * the value that has a zero as its least significant bit.
+ *
+ * <p>Finding all documents within an N-dimensional at search time is efficient. Multiple values for
+ * the same field in one document is allowed.
+ *
+ * <p>This field defines static factory methods for creating common queries:
+ *
  * <ul>
  *   <li>{@link #newExactQuery(String, float)} for matching an exact 1D point.
  *   <li>{@link #newSetQuery(String, float...)} for matching a set of 1D values.
  *   <li>{@link #newRangeQuery(String, float, float)} for matching a 1D range.
- *   <li>{@link #newRangeQuery(String, float[], float[])} for matching points/ranges in n-dimensional space.
+ *   <li>{@link #newRangeQuery(String, float[], float[])} for matching points/ranges in
+ *       n-dimensional space.
  * </ul>
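For illustration, a minimal sketch of these factory methods in use, assuming an existing IndexWriter writer and IndexSearcher searcher; the field name "weight" and the values are hypothetical:

  import org.apache.lucene.document.Document;
  import org.apache.lucene.sandbox.document.HalfFloatPoint;
  import org.apache.lucene.search.Query;
  import org.apache.lucene.search.TopDocs;

  Document doc = new Document();
  doc.add(new HalfFloatPoint("weight", 2.5f)); // encoded to 2 bytes, rounding if necessary
  writer.addDocument(doc);

  // Bounds are inclusive; nextUp/nextDown (defined in this class) turn them into exclusive ones:
  Query q =
      HalfFloatPoint.newRangeQuery(
          "weight", HalfFloatPoint.nextUp(1.0f), HalfFloatPoint.nextDown(3.0f));
  TopDocs hits = searcher.search(q, 10);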
    + * * @see PointValues */ public final class HalfFloatPoint extends Field { @@ -61,10 +60,9 @@ public final class HalfFloatPoint extends Field { public static final int BYTES = 2; /** - * Return the first half float which is immediately greater than {@code v}. - * If the argument is {@link Float#NaN} then the return value is - * {@link Float#NaN}. If the argument is {@link Float#POSITIVE_INFINITY} - * then the return value is {@link Float#POSITIVE_INFINITY}. + * Return the first half float which is immediately greater than {@code v}. If the argument is + * {@link Float#NaN} then the return value is {@link Float#NaN}. If the argument is {@link + * Float#POSITIVE_INFINITY} then the return value is {@link Float#POSITIVE_INFINITY}. */ public static float nextUp(float v) { if (Float.isNaN(v) || v == Float.POSITIVE_INFINITY) { @@ -81,10 +79,9 @@ public final class HalfFloatPoint extends Field { } /** - * Return the first half float which is immediately smaller than {@code v}. - * If the argument is {@link Float#NaN} then the return value is - * {@link Float#NaN}. If the argument is {@link Float#NEGATIVE_INFINITY} - * then the return value is {@link Float#NEGATIVE_INFINITY}. + * Return the first half float which is immediately smaller than {@code v}. If the argument is + * {@link Float#NaN} then the return value is {@link Float#NaN}. If the argument is {@link + * Float#NEGATIVE_INFINITY} then the return value is {@link Float#NEGATIVE_INFINITY}. */ public static float nextDown(float v) { if (Float.isNaN(v) || v == Float.NEGATIVE_INFINITY) { @@ -193,12 +190,12 @@ public final class HalfFloatPoint extends Field { static void shortToSortableBytes(short value, byte[] result, int offset) { // Flip the sign bit, so negative shorts sort before positive shorts correctly: value ^= 0x8000; - result[offset] = (byte) (value >> 8); - result[offset+1] = (byte) value; + result[offset] = (byte) (value >> 8); + result[offset + 1] = (byte) value; } static short sortableBytesToShort(byte[] encoded, int offset) { - short x = (short) (((encoded[offset] & 0xFF) << 8) | (encoded[offset+1] & 0xFF)); + short x = (short) (((encoded[offset] & 0xFF) << 8) | (encoded[offset + 1] & 0xFF)); // Re-flip the sign bit to restore the original value: return (short) (x ^ 0x8000); } @@ -218,7 +215,14 @@ public final class HalfFloatPoint extends Field { /** Change the values of this field */ public void setFloatValues(float... 
point) { if (type.pointDimensionCount() != point.length) { - throw new IllegalArgumentException("this field (name=" + name + ") uses " + type.pointDimensionCount() + " dimensions; cannot change to (incoming) " + point.length + " dimensions"); + throw new IllegalArgumentException( + "this field (name=" + + name + + ") uses " + + type.pointDimensionCount() + + " dimensions; cannot change to (incoming) " + + point.length + + " dimensions"); } fieldsData = pack(point); } @@ -231,7 +235,12 @@ public final class HalfFloatPoint extends Field { @Override public Number numericValue() { if (type.pointDimensionCount() != 1) { - throw new IllegalStateException("this field (name=" + name + ") uses " + type.pointDimensionCount() + " dimensions; cannot convert to a single numeric value"); + throw new IllegalStateException( + "this field (name=" + + name + + ") uses " + + type.pointDimensionCount() + + " dimensions; cannot convert to a single numeric value"); } BytesRef bytes = (BytesRef) fieldsData; assert bytes.length == BYTES; @@ -254,12 +263,12 @@ public final class HalfFloatPoint extends Field { return new BytesRef(packed); } - /** Creates a new FloatPoint, indexing the - * provided N-dimensional float point. + /** + * Creates a new FloatPoint, indexing the provided N-dimensional float point. * - * @param name field name - * @param point float[] value - * @throws IllegalArgumentException if the field name or value is null. + * @param name field name + * @param point float[] value + * @throws IllegalArgumentException if the field name or value is null. */ public HalfFloatPoint(String name, float... point) { super(name, pack(point), getType(point.length)); @@ -300,12 +309,11 @@ public final class HalfFloatPoint extends Field { // static methods for generating queries /** - * Create a query for matching an exact half-float value. It will be rounded - * to the closest half-float if {@code value} cannot be represented accurately - * as a half-float. - *

- * <p>
- * This is for simple one-dimension points, for multidimensional points use
- * {@link #newRangeQuery(String, float[], float[])} instead.
+ * Create a query for matching an exact half-float value. It will be rounded to the closest
+ * half-float if {@code value} cannot be represented accurately as a half-float.
+ *
+ * <p>
    This is for simple one-dimension points, for multidimensional points use {@link + * #newRangeQuery(String, float[], float[])} instead. * * @param field field name. must not be {@code null}. * @param value half-float value @@ -317,19 +325,19 @@ public final class HalfFloatPoint extends Field { } /** - * Create a range query for half-float values. Bounds will be rounded to the - * closest half-float if they cannot be represented accurately as a - * half-float. - *

- * <p>
- * This is for simple one-dimension ranges, for multidimensional ranges use
- * {@link #newRangeQuery(String, float[], float[])} instead.
- * <p>
- * You can have half-open ranges (which are in fact </≤ or >/≥ queries)
- * by setting {@code lowerValue = Float.NEGATIVE_INFINITY} or {@code upperValue = Float.POSITIVE_INFINITY}.
- * <p>Ranges are inclusive. For exclusive ranges, pass {@code nextUp(lowerValue)}
- * or {@code nextDown(upperValue)}.
- * <p>
- * Range comparisons are consistent with {@link Float#compareTo(Float)}.
+ * Create a range query for half-float values. Bounds will be rounded to the closest half-float if
+ * they cannot be represented accurately as a half-float.
+ *
+ * <p>This is for simple one-dimension ranges, for multidimensional ranges use {@link
+ * #newRangeQuery(String, float[], float[])} instead.
+ *
+ * <p>You can have half-open ranges (which are in fact </≤ or >/≥ queries) by setting
+ * {@code lowerValue = Float.NEGATIVE_INFINITY} or {@code upperValue = Float.POSITIVE_INFINITY}.
+ *
+ * <p>Ranges are inclusive. For exclusive ranges, pass {@code nextUp(lowerValue)} or {@code
+ * nextDown(upperValue)}.
+ *
+ * <p>
    Range comparisons are consistent with {@link Float#compareTo(Float)}. * * @param field field name. must not be {@code null}. * @param lowerValue lower portion of the range (inclusive). @@ -338,31 +346,33 @@ public final class HalfFloatPoint extends Field { * @return a query matching documents within this range. */ public static Query newRangeQuery(String field, float lowerValue, float upperValue) { - return newRangeQuery(field, new float[] { lowerValue }, new float[] { upperValue }); + return newRangeQuery(field, new float[] {lowerValue}, new float[] {upperValue}); } /** - * Create a range query for n-dimensional half-float values. Bounds will be - * rounded to the closest half-float if they cannot be represented accurately - * as a half-float. - *

- * <p>
- * You can have half-open ranges (which are in fact </≤ or >/≥ queries)
- * by setting {@code lowerValue[i] = Float.NEGATIVE_INFINITY} or {@code upperValue[i] = Float.POSITIVE_INFINITY}.
- * <p>Ranges are inclusive. For exclusive ranges, pass {@code nextUp(lowerValue[i])}
- * or {@code nextDown(upperValue[i])}.
- * <p>
- * Range comparisons are consistent with {@link Float#compareTo(Float)}.
+ * Create a range query for n-dimensional half-float values. Bounds will be rounded to the closest
+ * half-float if they cannot be represented accurately as a half-float.
+ *
+ * <p>You can have half-open ranges (which are in fact </≤ or >/≥ queries) by setting
+ * {@code lowerValue[i] = Float.NEGATIVE_INFINITY} or {@code upperValue[i] =
+ * Float.POSITIVE_INFINITY}.
+ *
+ * <p>Ranges are inclusive. For exclusive ranges, pass {@code nextUp(lowerValue[i])} or {@code
+ * nextDown(upperValue[i])}.
+ *
+ * <p>
    Range comparisons are consistent with {@link Float#compareTo(Float)}. * * @param field field name. must not be {@code null}. * @param lowerValue lower portion of the range (inclusive). must not be {@code null}. * @param upperValue upper portion of the range (inclusive). must not be {@code null}. - * @throws IllegalArgumentException if {@code field} is null, if {@code lowerValue} is null, if {@code upperValue} is null, - * or if {@code lowerValue.length != upperValue.length} + * @throws IllegalArgumentException if {@code field} is null, if {@code lowerValue} is null, if + * {@code upperValue} is null, or if {@code lowerValue.length != upperValue.length} * @return a query matching documents within this range. */ public static Query newRangeQuery(String field, float[] lowerValue, float[] upperValue) { PointRangeQuery.checkArgs(field, lowerValue, upperValue); - return new PointRangeQuery(field, pack(lowerValue).bytes, pack(upperValue).bytes, lowerValue.length) { + return new PointRangeQuery( + field, pack(lowerValue).bytes, pack(upperValue).bytes, lowerValue.length) { @Override protected String toString(int dimension, byte[] value) { return Float.toString(decodeDimension(value, 0)); @@ -371,10 +381,9 @@ public final class HalfFloatPoint extends Field { } /** - * Create a query matching any of the specified 1D values. - * This is the points equivalent of {@code TermsQuery}. - * Values will be rounded to the closest half-float if they - * cannot be represented accurately as a half-float. + * Create a query matching any of the specified 1D values. This is the points equivalent of {@code + * TermsQuery}. Values will be rounded to the closest half-float if they cannot be represented + * accurately as a half-float. * * @param field field name. must not be {@code null}. * @param values all values to match @@ -387,22 +396,25 @@ public final class HalfFloatPoint extends Field { final BytesRef encoded = new BytesRef(new byte[BYTES]); - return new PointInSetQuery(field, 1, BYTES, - new PointInSetQuery.Stream() { + return new PointInSetQuery( + field, + 1, + BYTES, + new PointInSetQuery.Stream() { - int upto; + int upto; - @Override - public BytesRef next() { - if (upto == sortedValues.length) { - return null; - } else { - encodeDimension(sortedValues[upto], encoded.bytes, 0); - upto++; - return encoded; - } - } - }) { + @Override + public BytesRef next() { + if (upto == sortedValues.length) { + return null; + } else { + encodeDimension(sortedValues[upto], encoded.bytes, 0); + upto++; + return encoded; + } + } + }) { @Override protected String toString(byte[] value) { assert value.length == BYTES; @@ -412,7 +424,8 @@ public final class HalfFloatPoint extends Field { } /** - * Create a query matching any of the specified 1D values. This is the points equivalent of {@code TermsQuery}. + * Create a query matching any of the specified 1D values. This is the points equivalent of {@code + * TermsQuery}. * * @param field field name. must not be {@code null}. 
* @param values all values to match @@ -425,5 +438,4 @@ public final class HalfFloatPoint extends Field { } return newSetQuery(field, unboxed); } - } diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/IntPointMultiRangeBuilder.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/IntPointMultiRangeBuilder.java index 16ff530d86b..b1dd2ab44df 100644 --- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/IntPointMultiRangeBuilder.java +++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/IntPointMultiRangeBuilder.java @@ -17,11 +17,11 @@ package org.apache.lucene.sandbox.document; -import org.apache.lucene.sandbox.search.MultiRangeQuery; - import static org.apache.lucene.document.IntPoint.decodeDimension; import static org.apache.lucene.document.IntPoint.pack; +import org.apache.lucene.sandbox.search.MultiRangeQuery; + /** Builder for multi range queries for IntPoints */ public class IntPointMultiRangeBuilder extends MultiRangeQuery.Builder { public IntPointMultiRangeBuilder(String field, int numDims) { @@ -40,12 +40,14 @@ public class IntPointMultiRangeBuilder extends MultiRangeQuery.Builder { public void add(int[] lowerValue, int[] upperValue) { if (upperValue.length != numDims || lowerValue.length != numDims) { - throw new IllegalArgumentException("Passed in range does not conform to specified dimensions"); + throw new IllegalArgumentException( + "Passed in range does not conform to specified dimensions"); } for (int i = 0; i < numDims; i++) { if (upperValue[i] < lowerValue[i]) { - throw new IllegalArgumentException("Upper value of range should be greater than lower value of range"); + throw new IllegalArgumentException( + "Upper value of range should be greater than lower value of range"); } } add(pack(lowerValue).bytes, pack(upperValue).bytes); diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/LatLonBoundingBox.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/LatLonBoundingBox.java index c6202208f28..03b4c387c0d 100644 --- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/LatLonBoundingBox.java +++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/LatLonBoundingBox.java @@ -16,6 +16,11 @@ */ package org.apache.lucene.sandbox.document; +import static org.apache.lucene.geo.GeoEncodingUtils.decodeLatitude; +import static org.apache.lucene.geo.GeoEncodingUtils.decodeLongitude; +import static org.apache.lucene.geo.GeoEncodingUtils.encodeLatitude; +import static org.apache.lucene.geo.GeoEncodingUtils.encodeLongitude; + import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; import org.apache.lucene.document.LatLonPoint; @@ -24,33 +29,35 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; -import static org.apache.lucene.geo.GeoEncodingUtils.decodeLatitude; -import static org.apache.lucene.geo.GeoEncodingUtils.decodeLongitude; -import static org.apache.lucene.geo.GeoEncodingUtils.encodeLatitude; -import static org.apache.lucene.geo.GeoEncodingUtils.encodeLongitude; - /** * An indexed 2-Dimension Bounding Box field for the Geospatial Lat/Lon Coordinate system - *

- * <p>
- * This field indexes 2-dimension Latitude, Longitude based Geospatial Bounding Boxes. The bounding boxes are defined as
- * {@code minLat, minLon, maxLat, maxLon} where min/max lat,lon pairs using double floating point precision.
- * <p>
- * Multiple values for the same field in one document is supported.
 *
- * <p>
- * This field defines the following static factory methods for common search operations over double ranges:
+ *
+ * <p>This field indexes 2-dimension Latitude, Longitude based Geospatial Bounding Boxes. The
+ * bounding boxes are defined as {@code minLat, minLon, maxLat, maxLon} where min/max lat,lon pairs
+ * using double floating point precision.
+ *
+ * <p>Multiple values for the same field in one document is supported.
+ *
+ * <p>This field defines the following static factory methods for common search operations over
+ * double ranges:
+ *
  * <ul>
- *   <li>{@link #newIntersectsQuery newIntersectsQuery()} matches bounding boxes that intersect the defined search bounding box.
- *   <li>{@link #newWithinQuery newWithinQuery()} matches bounding boxes that are within the defined search bounding box.
- *   <li>{@link #newContainsQuery newContainsQuery()} matches bounding boxes that contain the defined search bounding box.
- *   <li>{@link #newCrossesQuery newCrosses()} matches bounding boxes that cross the defined search bounding box.
+ *   <li>{@link #newIntersectsQuery newIntersectsQuery()} matches bounding boxes that intersect the
+ *       defined search bounding box.
+ *   <li>{@link #newWithinQuery newWithinQuery()} matches bounding boxes that are within the defined
+ *       search bounding box.
+ *   <li>{@link #newContainsQuery newContainsQuery()} matches bounding boxes that contain the
+ *       defined search bounding box.
+ *   <li>{@link #newCrossesQuery newCrosses()} matches bounding boxes that cross the defined search
+ *       bounding box.
  * </ul>
 *
- * <p>
- * The following Field limitations and restrictions apply:
+ * <p>The following Field limitations and restrictions apply:
+ *
  * <ul>
  *   <li>Dateline wrapping is not supported.
- *   <li>Due to an encoding limitation Eastern and Western Hemisphere Bounding Boxes that share the dateline are not supported.
+ *   <li>Due to an encoding limitation Eastern and Western Hemisphere Bounding Boxes that share the
+ *       dateline are not supported.
  * </ul>
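For illustration, a minimal sketch of indexing and querying this field, assuming an existing IndexWriter writer and IndexSearcher searcher; the field name "deliveryArea" and all coordinates are hypothetical:

  import org.apache.lucene.document.Document;
  import org.apache.lucene.sandbox.document.LatLonBoundingBox;
  import org.apache.lucene.search.Query;
  import org.apache.lucene.search.TopDocs;

  Document doc = new Document();
  // Arguments are minLat, minLon, maxLat, maxLon, in degrees:
  doc.add(new LatLonBoundingBox("deliveryArea", 40.5, -74.3, 41.0, -73.6));
  writer.addDocument(doc);

  // Match stored boxes that intersect a search box:
  Query q = LatLonBoundingBox.newIntersectsQuery("deliveryArea", 40.0, -75.0, 42.0, -73.0);
  TopDocs hits = searcher.search(q, 10);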
    */ public class LatLonBoundingBox extends Field { @@ -66,8 +73,12 @@ public class LatLonBoundingBox extends Field { * @param maxLat maximum latitude value (in degrees); valid in [minLat : 90.0] * @param maxLon maximum longitude value (in degrees); valid in [minLon : 180.0] */ - public LatLonBoundingBox(String name, final double minLat, final double minLon, - final double maxLat, final double maxLon) { + public LatLonBoundingBox( + String name, + final double minLat, + final double minLon, + final double maxLat, + final double maxLon) { super(name, getType(2)); setRangeValues(minLat, minLon, maxLat, maxLon); } @@ -75,13 +86,14 @@ public class LatLonBoundingBox extends Field { /** set the field type */ static FieldType getType(int geoDimensions) { FieldType ft = new FieldType(); - ft.setDimensions(geoDimensions*2, BYTES); + ft.setDimensions(geoDimensions * 2, BYTES); ft.freeze(); return ft; } /** * Changes the values of the field + * * @param minLat minimum latitude value (in degrees); valid in [-90.0 : 90.0] * @param minLon minimum longitude value (in degrees); valid in [-180.0 : 180.0] * @param maxLat maximum latitude value (in degrees); valid in [minLat : 90.0] @@ -92,30 +104,34 @@ public class LatLonBoundingBox extends Field { checkArgs(minLat, minLon, maxLat, maxLon); final byte[] bytes; if (fieldsData == null) { - bytes = new byte[4*BYTES]; + bytes = new byte[4 * BYTES]; fieldsData = new BytesRef(bytes); } else { - bytes = ((BytesRef)fieldsData).bytes; + bytes = ((BytesRef) fieldsData).bytes; } encode(minLat, minLon, bytes, 0); encode(maxLat, maxLon, bytes, 2 * BYTES); } /** validate the two-dimension arguments */ - static void checkArgs(final double minLat, final double minLon, final double maxLat, final double maxLon) { + static void checkArgs( + final double minLat, final double minLon, final double maxLat, final double maxLon) { // dateline crossing not supported if (minLon > maxLon) { - throw new IllegalArgumentException("cannot have minLon [" + minLon + "] exceed maxLon [" + maxLon + "]."); + throw new IllegalArgumentException( + "cannot have minLon [" + minLon + "] exceed maxLon [" + maxLon + "]."); } // pole crossing not supported if (minLat > maxLat) { - throw new IllegalArgumentException("cannot have minLat [" + minLat + "] exceed maxLat [" + maxLat + "]."); + throw new IllegalArgumentException( + "cannot have minLat [" + minLat + "] exceed maxLat [" + maxLat + "]."); } } /** - * Create a new 2d query that finds all indexed 2d GeoBoundingBoxField values that intersect the defined - * 3d bounding ranges + * Create a new 2d query that finds all indexed 2d GeoBoundingBoxField values that intersect the + * defined 3d bounding ranges + * * @param field field name. 
must not be null * @param minLat minimum latitude value (in degrees); valid in [-90.0 : 90.0] * @param minLon minimum longitude value (in degrees); valid in [-180.0 : 180.0] @@ -123,14 +139,20 @@ public class LatLonBoundingBox extends Field { * @param maxLon maximum longitude value (in degrees); valid in [minLon : 180.0] * @return query for matching intersecting 2d bounding boxes */ - public static Query newIntersectsQuery(String field, final double minLat, final double minLon, - final double maxLat, final double maxLon) { - return newRangeQuery(field, minLat, minLon, maxLat, maxLon, RangeFieldQuery.QueryType.INTERSECTS); + public static Query newIntersectsQuery( + String field, + final double minLat, + final double minLon, + final double maxLat, + final double maxLon) { + return newRangeQuery( + field, minLat, minLon, maxLat, maxLon, RangeFieldQuery.QueryType.INTERSECTS); } /** - * Create a new 2d query that finds all indexed 2d GeoBoundingBoxField values that are within the defined - * 2d bounding box + * Create a new 2d query that finds all indexed 2d GeoBoundingBoxField values that are within the + * defined 2d bounding box + * * @param field field name. must not be null * @param minLat minimum latitude value (in degrees); valid in [-90.0 : 90.0] * @param minLon minimum longitude value (in degrees); valid in [-180.0 : 180.0] @@ -138,14 +160,19 @@ public class LatLonBoundingBox extends Field { * @param maxLon maximum longitude value (in degrees); valid in [minLon : 180.0] * @return query for matching 3d bounding boxes that are within the defined bounding box */ - public static Query newWithinQuery(String field, final double minLat, final double minLon, - final double maxLat, final double maxLon) { + public static Query newWithinQuery( + String field, + final double minLat, + final double minLon, + final double maxLat, + final double maxLon) { return newRangeQuery(field, minLat, minLon, maxLat, maxLon, RangeFieldQuery.QueryType.WITHIN); } /** - * Create a new 2d query that finds all indexed 2d GeoBoundingBoxField values that contain the defined - * 2d bounding box + * Create a new 2d query that finds all indexed 2d GeoBoundingBoxField values that contain the + * defined 2d bounding box + * * @param field field name. must not be null * @param minLat minimum latitude value (in degrees); valid in [-90.0 : 90.0] * @param minLon minimum longitude value (in degrees); valid in [-180.0 : 180.0] @@ -153,14 +180,19 @@ public class LatLonBoundingBox extends Field { * @param maxLon maximum longitude value (in degrees); valid in [minLon : 180.0] * @return query for matching 2d bounding boxes that contain the defined bounding box */ - public static Query newContainsQuery(String field, final double minLat, final double minLon, - final double maxLat, final double maxLon) { + public static Query newContainsQuery( + String field, + final double minLat, + final double minLon, + final double maxLat, + final double maxLon) { return newRangeQuery(field, minLat, minLon, maxLat, maxLon, RangeFieldQuery.QueryType.CONTAINS); } /** - * Create a new 2d query that finds all indexed 2d GeoBoundingBoxField values that cross the defined - * 3d bounding box + * Create a new 2d query that finds all indexed 2d GeoBoundingBoxField values that cross the + * defined 3d bounding box + * * @param field field name. 
must not be null * @param minLat minimum latitude value (in degrees); valid in [-90.0 : 90.0] * @param minLon minimum longitude value (in degrees); valid in [-180.0 : 180.0] @@ -168,18 +200,29 @@ public class LatLonBoundingBox extends Field { * @param maxLon maximum longitude value (in degrees); valid in [minLon : 180.0] * @return query for matching 2d bounding boxes that cross the defined bounding box */ - public static Query newCrossesQuery(String field, final double minLat, final double minLon, - final double maxLat, final double maxLon) { + public static Query newCrossesQuery( + String field, + final double minLat, + final double minLon, + final double maxLat, + final double maxLon) { return newRangeQuery(field, minLat, minLon, maxLat, maxLon, RangeFieldQuery.QueryType.CROSSES); } /** helper method to create a two-dimensional geospatial bounding box query */ - private static Query newRangeQuery(String field, final double minLat, final double minLon, - final double maxLat, final double maxLon, final RangeFieldQuery.QueryType queryType) { + private static Query newRangeQuery( + String field, + final double minLat, + final double minLon, + final double maxLat, + final double maxLon, + final RangeFieldQuery.QueryType queryType) { checkArgs(minLat, minLon, maxLat, maxLon); - return new RangeFieldQuery(field, encode(minLat, minLon, maxLat, maxLon), 2, queryType) { + return new RangeFieldQuery(field, encode(minLat, minLon, maxLat, maxLon), 2, queryType) { @Override - protected String toString(byte[] ranges, int dimension) { return LatLonBoundingBox.toString(ranges, dimension); } + protected String toString(byte[] ranges, int dimension) { + return LatLonBoundingBox.toString(ranges, dimension); + } }; } @@ -187,14 +230,14 @@ public class LatLonBoundingBox extends Field { static byte[] encode(double minLat, double minLon, double maxLat, double maxLon) { byte[] b = new byte[BYTES * 4]; encode(minLat, minLon, b, 0); - encode(maxLat, maxLon, b, BYTES*2); + encode(maxLat, maxLon, b, BYTES * 2); return b; } /** encodes a two-dimensional geopoint (lat, lon) into a byte array */ static void encode(double lat, double lon, byte[] result, int offset) { if (result == null) { - result = new byte[BYTES*4]; + result = new byte[BYTES * 4]; } NumericUtils.intToSortableBytes(encodeLatitude(lat), result, offset); NumericUtils.intToSortableBytes(encodeLongitude(lon), result, offset + BYTES); @@ -208,7 +251,7 @@ public class LatLonBoundingBox extends Field { sb.append(name); sb.append(':'); sb.append('['); - byte[] b = ((BytesRef)fieldsData).bytes; + byte[] b = ((BytesRef) fieldsData).bytes; sb.append(toString(b, 0)); sb.append(','); sb.append(toString(b, 1)); diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/LongPointMultiRangeBuilder.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/LongPointMultiRangeBuilder.java index 3167e5c0496..5a0229c8c11 100644 --- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/LongPointMultiRangeBuilder.java +++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/LongPointMultiRangeBuilder.java @@ -17,11 +17,11 @@ package org.apache.lucene.sandbox.document; -import org.apache.lucene.sandbox.search.MultiRangeQuery; - import static org.apache.lucene.document.LongPoint.decodeDimension; import static org.apache.lucene.document.LongPoint.pack; +import org.apache.lucene.sandbox.search.MultiRangeQuery; + /** Builder for multi range queries for LongPoints */ public class LongPointMultiRangeBuilder extends MultiRangeQuery.Builder { 
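  // Usage sketch (illustrative; the field name "timestamp" and the bounds are hypothetical,
  // and the inherited MultiRangeQuery.Builder#build() is assumed). The same pattern applies
  // to the Int/Float/Double builders above: add one [lower, upper] range per call, then
  // build() yields a query matching points that fall inside any of the added ranges.
  //
  //   LongPointMultiRangeBuilder builder = new LongPointMultiRangeBuilder("timestamp", 1);
  //   builder.add(new long[] {0L}, new long[] {99L});
  //   builder.add(new long[] {1000L}, new long[] {1999L});
  //   Query query = builder.build();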
public LongPointMultiRangeBuilder(String field, int numDims) { @@ -40,12 +40,14 @@ public class LongPointMultiRangeBuilder extends MultiRangeQuery.Builder { public void add(long[] lowerValue, long[] upperValue) { if (upperValue.length != numDims || lowerValue.length != numDims) { - throw new IllegalArgumentException("Passed in range does not conform to specified dimensions"); + throw new IllegalArgumentException( + "Passed in range does not conform to specified dimensions"); } for (int i = 0; i < numDims; i++) { if (upperValue[i] < lowerValue[i]) { - throw new IllegalArgumentException("Upper value of range should be greater than lower value of range"); + throw new IllegalArgumentException( + "Upper value of range should be greater than lower value of range"); } } add(pack(lowerValue).bytes, pack(upperValue).bytes); diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/package-info.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/package-info.java index c3e3a70d9b0..5b2a39e9f6d 100644 --- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/package-info.java +++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/document/package-info.java @@ -17,9 +17,12 @@ /** * This package contains several point types: + * *
 * <ul>
- *   <li>{@link org.apache.lucene.sandbox.document.BigIntegerPoint BigIntegerPoint} for 128-bit integers
- *   <li>{@link org.apache.lucene.document.LatLonPoint LatLonPoint} for latitude/longitude geospatial points
+ *   <li>{@link org.apache.lucene.sandbox.document.BigIntegerPoint BigIntegerPoint} for 128-bit
+ *       integers
+ *   <li>{@link org.apache.lucene.document.LatLonPoint LatLonPoint} for latitude/longitude
+ *       geospatial points
 * </ul>
    */ -package org.apache.lucene.sandbox.document; \ No newline at end of file +package org.apache.lucene.sandbox.document; diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java index b73bea51de1..a98a4793426 100644 --- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java +++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java @@ -22,7 +22,6 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.Objects; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; @@ -49,32 +48,30 @@ import org.apache.lucene.util.PriorityQueue; import org.apache.lucene.util.automaton.LevenshteinAutomata; /** - * Fuzzifies ALL terms provided as strings and then picks the best n differentiating terms. - * In effect this mixes the behaviour of FuzzyQuery and MoreLikeThis but with special consideration - * of fuzzy scoring factors. - * This generally produces good results for queries where users may provide details in a number of - * fields and have no knowledge of boolean query syntax and also want a degree of fuzzy matching and - * a fast query. - * - * For each source term the fuzzy variants are held in a BooleanQuery with no coord factor (because - * we are not looking for matches on multiple variants in any one doc). Additionally, a specialized - * TermQuery is used for variants and does not use that variant term's IDF because this would favour rarer - * terms eg misspellings. Instead, all variants use the same IDF ranking (the one for the source query - * term) and this is factored into the variant's boost. If the source query term does not exist in the - * index the average IDF of the variants is used. + * Fuzzifies ALL terms provided as strings and then picks the best n differentiating terms. In + * effect this mixes the behaviour of FuzzyQuery and MoreLikeThis but with special consideration of + * fuzzy scoring factors. This generally produces good results for queries where users may provide + * details in a number of fields and have no knowledge of boolean query syntax and also want a + * degree of fuzzy matching and a fast query. + * + *

    For each source term the fuzzy variants are held in a BooleanQuery with no coord factor + * (because we are not looking for matches on multiple variants in any one doc). Additionally, a + * specialized TermQuery is used for variants and does not use that variant term's IDF because this + * would favour rarer terms eg misspellings. Instead, all variants use the same IDF ranking (the one + * for the source query term) and this is factored into the variant's boost. If the source query + * term does not exist in the index the average IDF of the variants is used. */ -public class FuzzyLikeThisQuery extends Query -{ +public class FuzzyLikeThisQuery extends Query { // TODO: generalize this query (at least it should not reuse this static sim! // a better way might be to convert this into multitermquery rewrite methods. // the rewrite method can 'average' the TermStates's term statistics (docfreq,totalTermFreq) // provided to TermQuery, so that the general idea is agnostic to any scoring system... - static TFIDFSimilarity sim=new ClassicSimilarity(); - ArrayList fieldVals=new ArrayList<>(); + static TFIDFSimilarity sim = new ClassicSimilarity(); + ArrayList fieldVals = new ArrayList<>(); Analyzer analyzer; - int MAX_VARIANTS_PER_TERM=50; - boolean ignoreTF=false; + int MAX_VARIANTS_PER_TERM = 50; + boolean ignoreTF = false; private int maxNumTerms; @Override @@ -90,35 +87,32 @@ public class FuzzyLikeThisQuery extends Query @Override public boolean equals(Object other) { - return sameClassAs(other) && - equalsTo(getClass().cast(other)); + return sameClassAs(other) && equalsTo(getClass().cast(other)); } private boolean equalsTo(FuzzyLikeThisQuery other) { - return Objects.equals(analyzer, other.analyzer) && - Objects.equals(fieldVals, other.fieldVals) && - ignoreTF == other.ignoreTF && - maxNumTerms == other.maxNumTerms; + return Objects.equals(analyzer, other.analyzer) + && Objects.equals(fieldVals, other.fieldVals) + && ignoreTF == other.ignoreTF + && maxNumTerms == other.maxNumTerms; } /** - * - * @param maxNumTerms The total number of terms clauses that will appear once rewritten as a BooleanQuery + * @param maxNumTerms The total number of terms clauses that will appear once rewritten as a + * BooleanQuery */ - public FuzzyLikeThisQuery(int maxNumTerms, Analyzer analyzer) - { - this.analyzer=analyzer; + public FuzzyLikeThisQuery(int maxNumTerms, Analyzer analyzer) { + this.analyzer = analyzer; this.maxNumTerms = maxNumTerms; } - static class FieldVals - { + static class FieldVals { String queryString; String fieldName; int maxEdits; int prefixLength; - public FieldVals(String name, int maxEdits, int length, String queryString) - { + + public FieldVals(String name, int maxEdits, int length, String queryString) { fieldName = name; this.maxEdits = maxEdits; prefixLength = length; @@ -129,62 +123,71 @@ public class FuzzyLikeThisQuery extends Query public int hashCode() { final int prime = 31; int result = 1; - result = prime * result - + ((fieldName == null) ? 0 : fieldName.hashCode()); + result = prime * result + ((fieldName == null) ? 0 : fieldName.hashCode()); result = prime * result + maxEdits; result = prime * result + prefixLength; - result = prime * result - + ((queryString == null) ? 0 : queryString.hashCode()); + result = prime * result + ((queryString == null) ? 
0 : queryString.hashCode()); return result; } @Override public boolean equals(Object obj) { - if (this == obj) + if (this == obj) { return true; - if (obj == null) + } + if (obj == null) { return false; - if (getClass() != obj.getClass()) + } + if (getClass() != obj.getClass()) { return false; + } FieldVals other = (FieldVals) obj; if (fieldName == null) { - if (other.fieldName != null) + if (other.fieldName != null) { return false; - } else if (!fieldName.equals(other.fieldName)) - return false; + } + } else if (!fieldName.equals(other.fieldName)) return false; if (maxEdits != other.maxEdits) { return false; } - if (prefixLength != other.prefixLength) + if (prefixLength != other.prefixLength) { return false; + } if (queryString == null) { - if (other.queryString != null) + if (other.queryString != null) { return false; - } else if (!queryString.equals(other.queryString)) + } + } else if (!queryString.equals(other.queryString)) { return false; + } return true; } - - - } - + /** - * Adds user input for "fuzzification" - * @param queryString The string which will be parsed by the analyzer and for which fuzzy variants will be parsed - * @param minSimilarity The minimum similarity of the term variants; must be 0, 1 or 2 (see FuzzyTermsEnum) + * Adds user input for "fuzzification" + * + * @param queryString The string which will be parsed by the analyzer and for which fuzzy variants + * will be parsed + * @param minSimilarity The minimum similarity of the term variants; must be 0, 1 or 2 (see + * FuzzyTermsEnum) * @param prefixLength Length of required common prefix on variant terms (see FuzzyTermsEnum) */ - public void addTerms(String queryString, String fieldName,float minSimilarity, int prefixLength) - { + public void addTerms( + String queryString, String fieldName, float minSimilarity, int prefixLength) { int maxEdits = (int) minSimilarity; - if (maxEdits != minSimilarity || maxEdits < 0 || maxEdits > LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE) { - throw new IllegalArgumentException("minSimilarity must integer value between 0 and " + LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE + ", inclusive; got " + minSimilarity); + if (maxEdits != minSimilarity + || maxEdits < 0 + || maxEdits > LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE) { + throw new IllegalArgumentException( + "minSimilarity must integer value between 0 and " + + LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE + + ", inclusive; got " + + minSimilarity); } - fieldVals.add(new FieldVals(fieldName,maxEdits,prefixLength,queryString)); + fieldVals.add(new FieldVals(fieldName, maxEdits, prefixLength, queryString)); } - private void addTerms(IndexReader reader, FieldVals f, ScoreTermQueue q) throws IOException { if (f.queryString == null) return; final Terms terms = MultiTerms.getTerms(reader, f.fieldName); @@ -201,35 +204,42 @@ public class FuzzyLikeThisQuery extends Query String term = termAtt.toString(); if (!processedTerms.contains(term)) { processedTerms.add(term); - ScoreTermQueue variantsQ = new ScoreTermQueue(MAX_VARIANTS_PER_TERM); //maxNum variants considered for any one term + ScoreTermQueue variantsQ = + new ScoreTermQueue( + MAX_VARIANTS_PER_TERM); // maxNum variants considered for any one term float minScore = 0; Term startTerm = new Term(f.fieldName, term); - FuzzyTermsEnum fe = new FuzzyTermsEnum(terms, startTerm, f.maxEdits, f.prefixLength, true); - //store the df so all variants use same idf + FuzzyTermsEnum fe = + new FuzzyTermsEnum(terms, startTerm, f.maxEdits, f.prefixLength, true); + // store the df so all 
variants use same idf int df = reader.docFreq(startTerm); int numVariants = 0; int totalVariantDocFreqs = 0; BytesRef possibleMatch; - BoostAttribute boostAtt = - fe.attributes().addAttribute(BoostAttribute.class); + BoostAttribute boostAtt = fe.attributes().addAttribute(BoostAttribute.class); while ((possibleMatch = fe.next()) != null) { numVariants++; totalVariantDocFreqs += fe.docFreq(); float score = boostAtt.getBoost(); if (variantsQ.size() < MAX_VARIANTS_PER_TERM || score > minScore) { - ScoreTerm st = new ScoreTerm(new Term(startTerm.field(), BytesRef.deepCopyOf(possibleMatch)), score, startTerm); + ScoreTerm st = + new ScoreTerm( + new Term(startTerm.field(), BytesRef.deepCopyOf(possibleMatch)), + score, + startTerm); variantsQ.insertWithOverflow(st); minScore = variantsQ.top().score; // maintain minScore } - fe.setMaxNonCompetitiveBoost(variantsQ.size() >= MAX_VARIANTS_PER_TERM ? minScore : Float.NEGATIVE_INFINITY); + fe.setMaxNonCompetitiveBoost( + variantsQ.size() >= MAX_VARIANTS_PER_TERM ? minScore : Float.NEGATIVE_INFINITY); } if (numVariants > 0) { int avgDf = totalVariantDocFreqs / numVariants; - if (df == 0)//no direct match we can use as df for all variants - { - df = avgDf; //use avg df of all variants - } + if (df == 0) // no direct match we can use as df for all variants + { + df = avgDf; // use avg df of all variants + } // take the top variants (scored by edit distance) and reset the score // to include an IDF factor then add to the global queue for ranking @@ -274,116 +284,101 @@ public class FuzzyLikeThisQuery extends Query } @Override - public Query rewrite(IndexReader reader) throws IOException - { + public Query rewrite(IndexReader reader) throws IOException { ScoreTermQueue q = new ScoreTermQueue(maxNumTerms); - //load up the list of possible terms + // load up the list of possible terms for (FieldVals f : fieldVals) { addTerms(reader, f, q); } - + BooleanQuery.Builder bq = new BooleanQuery.Builder(); - - //create BooleanQueries to hold the variants for each token/field pair and ensure it + + // create BooleanQueries to hold the variants for each token/field pair and ensure it // has no coord factor - //Step 1: sort the termqueries by term/field - HashMap> variantQueries=new HashMap<>(); + // Step 1: sort the termqueries by term/field + HashMap> variantQueries = new HashMap<>(); int size = q.size(); - for(int i = 0; i < size; i++) - { - ScoreTerm st = q.pop(); - ArrayList l= variantQueries.get(st.fuzziedSourceTerm); - if(l==null) - { - l=new ArrayList<>(); - variantQueries.put(st.fuzziedSourceTerm,l); - } - l.add(st); + for (int i = 0; i < size; i++) { + ScoreTerm st = q.pop(); + ArrayList l = variantQueries.get(st.fuzziedSourceTerm); + if (l == null) { + l = new ArrayList<>(); + variantQueries.put(st.fuzziedSourceTerm, l); } - //Step 2: Organize the sorted termqueries into zero-coord scoring boolean queries - for (Iterator> iter = variantQueries.values().iterator(); iter.hasNext();) - { - ArrayList variants = iter.next(); - if(variants.size()==1) - { - //optimize where only one selected variant - ScoreTerm st= variants.get(0); - Query tq = newTermQuery(reader, st.term); - // set the boost to a mix of IDF and score - bq.add(new BoostQuery(tq, st.score), BooleanClause.Occur.SHOULD); - } - else - { - BooleanQuery.Builder termVariants=new BooleanQuery.Builder(); - for (Iterator iterator2 = variants.iterator(); iterator2 - .hasNext();) - { - ScoreTerm st = iterator2.next(); - // found a match - Query tq = newTermQuery(reader, st.term); - // set the boost using the 
ScoreTerm's score - termVariants.add(new BoostQuery(tq, st.score), BooleanClause.Occur.SHOULD); // add to query - } - bq.add(termVariants.build(), BooleanClause.Occur.SHOULD); // add to query - } + l.add(st); + } + // Step 2: Organize the sorted termqueries into zero-coord scoring boolean queries + for (Iterator> iter = variantQueries.values().iterator(); + iter.hasNext(); ) { + ArrayList variants = iter.next(); + if (variants.size() == 1) { + // optimize where only one selected variant + ScoreTerm st = variants.get(0); + Query tq = newTermQuery(reader, st.term); + // set the boost to a mix of IDF and score + bq.add(new BoostQuery(tq, st.score), BooleanClause.Occur.SHOULD); + } else { + BooleanQuery.Builder termVariants = new BooleanQuery.Builder(); + for (Iterator iterator2 = variants.iterator(); iterator2.hasNext(); ) { + ScoreTerm st = iterator2.next(); + // found a match + Query tq = newTermQuery(reader, st.term); + // set the boost using the ScoreTerm's score + termVariants.add( + new BoostQuery(tq, st.score), BooleanClause.Occur.SHOULD); // add to query + } + bq.add(termVariants.build(), BooleanClause.Occur.SHOULD); // add to query } - //TODO possible alternative step 3 - organize above booleans into a new layer of field-based + } + // TODO possible alternative step 3 - organize above booleans into a new layer of field-based // booleans with a minimum-should-match of NumFields-1? return bq.build(); } - //Holds info for a fuzzy term variant - initially score is set to edit distance (for ranking best + // Holds info for a fuzzy term variant - initially score is set to edit distance (for ranking best // term variants) then is reset with IDF for use in ranking against all other // terms/fields - private static class ScoreTerm{ + private static class ScoreTerm { public Term term; public float score; Term fuzziedSourceTerm; - - public ScoreTerm(Term term, float score, Term fuzziedSourceTerm){ + + public ScoreTerm(Term term, float score, Term fuzziedSourceTerm) { this.term = term; this.score = score; - this.fuzziedSourceTerm=fuzziedSourceTerm; + this.fuzziedSourceTerm = fuzziedSourceTerm; } } - - private static class ScoreTermQueue extends PriorityQueue { - public ScoreTermQueue(int size){ + + private static class ScoreTermQueue extends PriorityQueue { + public ScoreTermQueue(int size) { super(size); } - + /* (non-Javadoc) * @see org.apache.lucene.util.PriorityQueue#lessThan(java.lang.Object, java.lang.Object) */ @Override protected boolean lessThan(ScoreTerm termA, ScoreTerm termB) { - if (termA.score== termB.score) + if (termA.score == termB.score) { return termA.term.compareTo(termB.term) > 0; - else - return termA.score < termB.score; + } else return termA.score < termB.score; } - - } - + } + /* (non-Javadoc) * @see org.apache.lucene.search.Query#toString(java.lang.String) */ @Override - public String toString(String field) - { + public String toString(String field) { return null; } - - public boolean isIgnoreTF() - { + public boolean isIgnoreTF() { return ignoreTF; } - - public void setIgnoreTF(boolean ignoreTF) - { + public void setIgnoreTF(boolean ignoreTF) { this.ignoreTF = ignoreTF; } - } diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/package-info.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/package-info.java index 499403fad8c..1f67c5b2a44 100644 --- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/package-info.java +++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/package-info.java @@ -14,8 +14,6 @@ * See the 
License for the specific language governing permissions and * limitations under the License. */ - -/** - * Additional queries (some may have caveats or limitations) - */ + +/** Additional queries (some may have caveats or limitations) */ package org.apache.lucene.sandbox.queries; diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/BM25FQuery.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/BM25FQuery.java index 9befa7f38cc..827dc85be85 100644 --- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/BM25FQuery.java +++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/BM25FQuery.java @@ -26,7 +26,6 @@ import java.util.List; import java.util.Map; import java.util.Set; import java.util.TreeMap; - import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.PostingsEnum; @@ -62,39 +61,35 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.RamUsageEstimator; /** - * A {@link Query} that treats multiple fields as a single stream and scores - * terms as if you had indexed them as a single term in a single field. + * A {@link Query} that treats multiple fields as a single stream and scores terms as if you had + * indexed them as a single term in a single field. * - * For scoring purposes this query implements the BM25F's simple formula - * described in: - * http://www.staff.city.ac.uk/~sb317/papers/foundations_bm25_review.pdf + *

    For scoring purposes this query implements the BM25F's simple formula described in: + * http://www.staff.city.ac.uk/~sb317/papers/foundations_bm25_review.pdf * - * The per-field similarity is ignored but to be compatible each field must use - * a {@link Similarity} at index time that encodes norms the same way as - * {@link SimilarityBase#computeNorm}. + *

    The per-field similarity is ignored but to be compatible each field must use a {@link + * Similarity} at index time that encodes norms the same way as {@link SimilarityBase#computeNorm}. * * @lucene.experimental */ public final class BM25FQuery extends Query implements Accountable { - private static final long BASE_RAM_BYTES = RamUsageEstimator.shallowSizeOfInstance(BM25FQuery.class); + private static final long BASE_RAM_BYTES = + RamUsageEstimator.shallowSizeOfInstance(BM25FQuery.class); - /** - * A builder for {@link BM25FQuery}. - */ + /** A builder for {@link BM25FQuery}. */ public static class Builder { private final BM25Similarity similarity; private final Map fieldAndWeights = new HashMap<>(); private final Set termsSet = new HashSet<>(); - /** - * Default builder. - */ + /** Default builder. */ public Builder() { this.similarity = new BM25Similarity(); } /** * Builder with the supplied parameter values. + * * @param k1 Controls non-linear term frequency normalization (saturation). * @param b Controls to what degree document length normalizes tf values. */ @@ -104,6 +99,7 @@ public final class BM25FQuery extends Query implements Accountable { /** * Adds a field to this builder. + * * @param field The field name. */ public Builder addField(String field) { @@ -112,6 +108,7 @@ public final class BM25FQuery extends Query implements Accountable { /** * Adds a field to this builder. + * * @param field The field name. * @param weight The weight associated to this field. */ @@ -123,9 +120,7 @@ public final class BM25FQuery extends Query implements Accountable { return this; } - /** - * Adds a term to this builder. - */ + /** Adds a term to this builder. */ public Builder addTerm(BytesRef term) { if (termsSet.size() > IndexSearcher.getMaxClauseCount()) { throw new IndexSearcher.TooManyClauses(); @@ -134,9 +129,7 @@ public final class BM25FQuery extends Query implements Accountable { return this; } - /** - * Builds the {@link BM25FQuery}. - */ + /** Builds the {@link BM25FQuery}. 
*/ public BM25FQuery build() { int size = fieldAndWeights.size() * termsSet.size(); if (size > IndexSearcher.getMaxClauseCount()) { @@ -168,7 +161,10 @@ public final class BM25FQuery extends Query implements Accountable { private final long ramBytesUsed; - private BM25FQuery(BM25Similarity similarity, TreeMap fieldAndWeights, BytesRef[] terms) { + private BM25FQuery( + BM25Similarity similarity, + TreeMap fieldAndWeights, + BytesRef[] terms) { this.similarity = similarity; this.fieldAndWeights = fieldAndWeights; this.terms = terms; @@ -185,10 +181,11 @@ public final class BM25FQuery extends Query implements Accountable { } } - this.ramBytesUsed = BASE_RAM_BYTES + - RamUsageEstimator.sizeOfObject(fieldAndWeights) + - RamUsageEstimator.sizeOfObject(fieldTerms) + - RamUsageEstimator.sizeOfObject(terms); + this.ramBytesUsed = + BASE_RAM_BYTES + + RamUsageEstimator.sizeOfObject(fieldAndWeights) + + RamUsageEstimator.sizeOfObject(fieldTerms) + + RamUsageEstimator.sizeOfObject(terms); } public List getTerms() { @@ -228,8 +225,7 @@ public final class BM25FQuery extends Query implements Accountable { @Override public boolean equals(Object other) { - return sameClassAs(other) && - Arrays.equals(terms, ((BM25FQuery) other).terms); + return sameClassAs(other) && Arrays.equals(terms, ((BM25FQuery) other).terms); } @Override @@ -260,7 +256,8 @@ public final class BM25FQuery extends Query implements Accountable { @Override public void visit(QueryVisitor visitor) { - Term[] selectedTerms = Arrays.stream(fieldTerms).filter(t -> visitor.acceptField(t.field())).toArray(Term[]::new); + Term[] selectedTerms = + Arrays.stream(fieldTerms).filter(t -> visitor.acceptField(t.field())).toArray(Term[]::new); if (selectedTerms.length > 0) { QueryVisitor v = visitor.getSubVisitor(BooleanClause.Occur.SHOULD, this); v.consumeTerms(this, selectedTerms); @@ -277,7 +274,8 @@ public final class BM25FQuery extends Query implements Accountable { } @Override - public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) + throws IOException { if (scoreMode.needsScores()) { return new BM25FWeight(this, searcher, scoreMode, boost); } else { @@ -292,7 +290,8 @@ public final class BM25FQuery extends Query implements Accountable { private final TermStates termStates[]; private final Similarity.SimScorer simWeight; - BM25FWeight(Query query, IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + BM25FWeight(Query query, IndexSearcher searcher, ScoreMode scoreMode, float boost) + throws IOException { super(query); assert scoreMode.needsScores(); this.searcher = searcher; @@ -304,21 +303,24 @@ public final class BM25FQuery extends Query implements Accountable { TermStates ts = TermStates.build(searcher.getTopReaderContext(), fieldTerms[i], true); termStates[i] = ts; if (ts.docFreq() > 0) { - TermStatistics termStats = searcher.termStatistics(fieldTerms[i], ts.docFreq(), ts.totalTermFreq()); + TermStatistics termStats = + searcher.termStatistics(fieldTerms[i], ts.docFreq(), ts.totalTermFreq()); docFreq = Math.max(termStats.docFreq(), docFreq); totalTermFreq += (double) field.weight * termStats.totalTermFreq(); } } if (docFreq > 0) { CollectionStatistics pseudoCollectionStats = mergeCollectionStatistics(searcher); - TermStatistics pseudoTermStatistics = new TermStatistics(new BytesRef("pseudo_term"), docFreq, Math.max(1, totalTermFreq)); + TermStatistics pseudoTermStatistics = + new 
TermStatistics(new BytesRef("pseudo_term"), docFreq, Math.max(1, totalTermFreq)); this.simWeight = similarity.scorer(boost, pseudoCollectionStats, pseudoTermStatistics); } else { this.simWeight = null; } } - private CollectionStatistics mergeCollectionStatistics(IndexSearcher searcher) throws IOException { + private CollectionStatistics mergeCollectionStatistics(IndexSearcher searcher) + throws IOException { long maxDoc = searcher.getIndexReader().maxDoc(); long docCount = 0; long sumTotalTermFreq = 0; @@ -332,12 +334,14 @@ public final class BM25FQuery extends Query implements Accountable { } } - return new CollectionStatistics("pseudo_field", maxDoc, docCount, sumTotalTermFreq, sumDocFreq); + return new CollectionStatistics( + "pseudo_field", maxDoc, docCount, sumTotalTermFreq, sumDocFreq); } @Override public Matches matches(LeafReaderContext context, int doc) throws IOException { - Weight weight = searcher.rewrite(rewriteToBoolean()).createWeight(searcher, ScoreMode.COMPLETE, 1f); + Weight weight = + searcher.rewrite(rewriteToBoolean()).createWeight(searcher, ScoreMode.COMPLETE, 1f); return weight.matches(context, doc); } @@ -355,13 +359,19 @@ public final class BM25FQuery extends Query implements Accountable { freq = ((TermScorer) scorer).freq(); } final MultiNormsLeafSimScorer docScorer = - new MultiNormsLeafSimScorer(simWeight, context.reader(), fieldAndWeights.values(), true); + new MultiNormsLeafSimScorer( + simWeight, context.reader(), fieldAndWeights.values(), true); Explanation freqExplanation = Explanation.match(freq, "termFreq=" + freq); Explanation scoreExplanation = docScorer.explain(doc, freqExplanation); return Explanation.match( scoreExplanation.getValue(), - "weight(" + getQuery() + " in " + doc + ") [" - + similarity.getClass().getSimpleName() + "], result of:", + "weight(" + + getQuery() + + " in " + + doc + + ") [" + + similarity.getClass().getSimpleName() + + "], result of:", scoreExplanation); } } @@ -395,12 +405,15 @@ public final class BM25FQuery extends Query implements Accountable { } final MultiNormsLeafSimScorer scoringSimScorer = new MultiNormsLeafSimScorer(simWeight, context.reader(), fields, true); - LeafSimScorer nonScoringSimScorer = new LeafSimScorer(simWeight, context.reader(), "pseudo_field", false); + LeafSimScorer nonScoringSimScorer = + new LeafSimScorer(simWeight, context.reader(), "pseudo_field", false); // we use termscorers + disjunction as an impl detail DisiPriorityQueue queue = new DisiPriorityQueue(iterators.size()); for (int i = 0; i < iterators.size(); i++) { float weight = fields.get(i).weight; - queue.add(new WeightedDisiWrapper(new TermScorer(this, iterators.get(i), nonScoringSimScorer), weight)); + queue.add( + new WeightedDisiWrapper( + new TermScorer(this, iterators.get(i), nonScoringSimScorer), weight)); } // Even though it is called approximation, it is accurate since none of // the sub iterators are two-phase iterators. 
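A minimal usage sketch of the Builder reformatted above, assuming the two-argument (k1, b) constructor its Javadoc describes; the field names, the query term, and `searcher` are illustrative assumptions, not part of this patch:

    import org.apache.lucene.sandbox.search.BM25FQuery;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.TopDocs;
    import org.apache.lucene.util.BytesRef;

    // Treat "title" and "body" as one pseudo-field, weighting title matches double.
    Query query =
        new BM25FQuery.Builder(1.2f, 0.75f) // k1 and b, per the Builder Javadoc above
            .addField("title", 2.0f)
            .addField("body")
            .addTerm(new BytesRef("lucene"))
            .build();
    TopDocs hits = searcher.search(query, 10); // `searcher` is an assumed IndexSearcher
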
@@ -432,7 +445,11 @@ public final class BM25FQuery extends Query implements Accountable { private final DocIdSetIterator iterator; private final MultiNormsLeafSimScorer simScorer; - BM25FScorer(Weight weight, DisiPriorityQueue queue, DocIdSetIterator iterator, MultiNormsLeafSimScorer simScorer) { + BM25FScorer( + Weight weight, + DisiPriorityQueue queue, + DocIdSetIterator iterator, + MultiNormsLeafSimScorer simScorer) { super(weight); this.queue = queue; this.iterator = iterator; @@ -471,4 +488,4 @@ public final class BM25FQuery extends Query implements Accountable { return Float.POSITIVE_INFINITY; } } -} \ No newline at end of file +} diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/CoveringQuery.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/CoveringQuery.java index 76a08277faa..c9968989ed8 100644 --- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/CoveringQuery.java +++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/CoveringQuery.java @@ -22,7 +22,6 @@ import java.util.Collection; import java.util.List; import java.util.Objects; import java.util.stream.Collectors; - import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.BooleanClause; @@ -41,13 +40,15 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.RamUsageEstimator; -/** A {@link Query} that allows to have a configurable number or required - * matches per document. This is typically useful in order to build queries - * whose query terms must all appear in documents. - * @lucene.experimental +/** + * A {@link Query} that allows to have a configurable number or required matches per document. This + * is typically useful in order to build queries whose query terms must all appear in documents. + * + * @lucene.experimental */ public final class CoveringQuery extends Query implements Accountable { - private static final long BASE_RAM_BYTES = RamUsageEstimator.shallowSizeOfInstance(CoveringQuery.class); + private static final long BASE_RAM_BYTES = + RamUsageEstimator.shallowSizeOfInstance(CoveringQuery.class); private final Collection queries; private final LongValuesSource minimumNumberMatch; @@ -56,37 +57,41 @@ public final class CoveringQuery extends Query implements Accountable { /** * Sole constructor. + * * @param queries Sub queries to match. - * @param minimumNumberMatch Per-document long value that records how many queries - * should match. Values that are less than 1 are treated - * like 1: only documents that have at least one - * matching clause will be considered matches. Documents - * that do not have a value for minimumNumberMatch - * do not match. + * @param minimumNumberMatch Per-document long value that records how many queries should match. + * Values that are less than 1 are treated like 1: only documents that have at + * least one matching clause will be considered matches. Documents that do not have a value + * for minimumNumberMatch do not match. 
*/ public CoveringQuery(Collection queries, LongValuesSource minimumNumberMatch) { if (queries.size() > IndexSearcher.getMaxClauseCount()) { throw new IndexSearcher.TooManyClauses(); } if (minimumNumberMatch.needsScores()) { - throw new IllegalArgumentException("The minimum number of matches may not depend on the score."); + throw new IllegalArgumentException( + "The minimum number of matches may not depend on the score."); } this.queries = new Multiset<>(); this.queries.addAll(queries); this.minimumNumberMatch = Objects.requireNonNull(minimumNumberMatch); this.hashCode = computeHashCode(); - this.ramBytesUsed = BASE_RAM_BYTES + - RamUsageEstimator.sizeOfObject(this.queries, RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED); + this.ramBytesUsed = + BASE_RAM_BYTES + + RamUsageEstimator.sizeOfObject( + this.queries, RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED); } @Override public String toString(String field) { - String queriesToString = queries.stream() - .map(q -> q.toString(field)) - .sorted() - .collect(Collectors.joining(", ")); - return "CoveringQuery(queries=[" + queriesToString + "], minimumNumberMatch=" + minimumNumberMatch + ")"; + String queriesToString = + queries.stream().map(q -> q.toString(field)).sorted().collect(Collectors.joining(", ")); + return "CoveringQuery(queries=[" + + queriesToString + + "], minimumNumberMatch=" + + minimumNumberMatch + + ")"; } @Override @@ -141,7 +146,8 @@ public final class CoveringQuery extends Query implements Accountable { } @Override - public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) + throws IOException { final List weights = new ArrayList<>(queries.size()); for (Query query : queries) { weights.add(searcher.createWeight(query, scoreMode, boost)); @@ -201,9 +207,13 @@ public final class CoveringQuery extends Query implements Accountable { subExpls.add(subExpl); } if (freq >= minimumNumberMatch) { - return Explanation.match((float) score, freq + " matches for " + minimumNumberMatch + " required matches, sum of:", subExpls); + return Explanation.match( + (float) score, + freq + " matches for " + minimumNumberMatch + " required matches, sum of:", + subExpls); } else { - return Explanation.noMatch(freq + " matches for " + minimumNumberMatch + " required matches", subExpls); + return Explanation.noMatch( + freq + " matches for " + minimumNumberMatch + " required matches", subExpls); } } @@ -219,14 +229,13 @@ public final class CoveringQuery extends Query implements Accountable { if (scorers.isEmpty()) { return null; } - return new CoveringScorer(this, scorers, minimumNumberMatch.getValues(context, null), context.reader().maxDoc()); + return new CoveringScorer( + this, scorers, minimumNumberMatch.getValues(context, null), context.reader().maxDoc()); } @Override public boolean isCacheable(LeafReaderContext ctx) { return minimumNumberMatch.isCacheable(ctx); } - } - } diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/CoveringScorer.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/CoveringScorer.java index cb15599c2bc..49081d2412c 100644 --- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/CoveringScorer.java +++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/CoveringScorer.java @@ -16,6 +16,10 @@ */ package org.apache.lucene.sandbox.search; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; 
import org.apache.lucene.search.DisiPriorityQueue; import org.apache.lucene.search.DisiWrapper; import org.apache.lucene.search.DocIdSetIterator; @@ -24,11 +28,6 @@ import org.apache.lucene.search.Scorer; import org.apache.lucene.search.TwoPhaseIterator; import org.apache.lucene.search.Weight; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; - /** A {@link Scorer} whose number of matches is per-document. */ final class CoveringScorer extends Scorer { @@ -37,7 +36,7 @@ final class CoveringScorer extends Scorer { final LongValues minMatchValues; boolean matches; // if true then the doc matches, otherwise we don't know and need to check - int doc; // current doc ID + int doc; // current doc ID DisiWrapper topList; // list of matches int freq; // number of scorers on the desired doc ID long minMatch; // current required number of matches @@ -74,106 +73,106 @@ final class CoveringScorer extends Scorer { return matchingChildren; } - private final DocIdSetIterator approximation = new DocIdSetIterator() { + private final DocIdSetIterator approximation = + new DocIdSetIterator() { - @Override - public int docID() { - return doc; - } + @Override + public int docID() { + return doc; + } - @Override - public int nextDoc() throws IOException { - return advance(docID() + 1); - } + @Override + public int nextDoc() throws IOException { + return advance(docID() + 1); + } - @Override - public int advance(int target) throws IOException { - // reset state - matches = false; - topList = null; + @Override + public int advance(int target) throws IOException { + // reset state + matches = false; + topList = null; - doc = target; - setMinMatch(); + doc = target; + setMinMatch(); - DisiWrapper top = subScorers.top(); - int numMatches = 0; - int maxPotentialMatches = numScorers; - while (top.doc < target) { - if (maxPotentialMatches < minMatch) { - // No need to keep trying to advance to `target` since no match is possible. - if (target >= maxDoc - 1) { - doc = NO_MORE_DOCS; - } else { - doc = target + 1; + DisiWrapper top = subScorers.top(); + int numMatches = 0; + int maxPotentialMatches = numScorers; + while (top.doc < target) { + if (maxPotentialMatches < minMatch) { + // No need to keep trying to advance to `target` since no match is possible. 
+ if (target >= maxDoc - 1) { + doc = NO_MORE_DOCS; + } else { + doc = target + 1; + } + setMinMatch(); + return doc; + } + top.doc = top.iterator.advance(target); + boolean match = top.doc == target; + top = subScorers.updateTop(); + if (match) { + numMatches++; + if (numMatches >= minMatch) { + // success, no need to check other iterators + matches = true; + return doc; + } + } else { + maxPotentialMatches--; + } } + + doc = top.doc; setMinMatch(); return doc; } - top.doc = top.iterator.advance(target); - boolean match = top.doc == target; - top = subScorers.updateTop(); - if (match) { - numMatches++; - if (numMatches >= minMatch) { - // success, no need to check other iterators - matches = true; - return doc; + + private void setMinMatch() throws IOException { + if (doc >= maxDoc) { + // advanceExact may not be called on out-of-range doc ids + minMatch = 1; + } else if (minMatchValues.advanceExact(doc)) { + // values < 1 are treated as 1: we require at least one match + minMatch = Math.max(1, minMatchValues.longValue()); + } else { + // this will make sure the document does not match + minMatch = Long.MAX_VALUE; } - } else { - maxPotentialMatches--; } - } - doc = top.doc; - setMinMatch(); - return doc; - } + @Override + public long cost() { + return maxDoc; + } + }; - private void setMinMatch() throws IOException { - if (doc >= maxDoc) { - // advanceExact may not be called on out-of-range doc ids - minMatch = 1; - } else if (minMatchValues.advanceExact(doc)) { - // values < 1 are treated as 1: we require at least one match - minMatch = Math.max(1, minMatchValues.longValue()); - } else { - // this will make sure the document does not match - minMatch = Long.MAX_VALUE; - } - } + private final TwoPhaseIterator twoPhase = + new TwoPhaseIterator(approximation) { - @Override - public long cost() { - return maxDoc; - } + @Override + public boolean matches() throws IOException { + if (matches) { + return true; + } + if (topList == null) { + advanceAll(doc); + } + if (subScorers.top().doc != doc) { + assert subScorers.top().doc > doc; + return false; + } + setTopListAndFreq(); + assert topList.doc == doc; + return matches = freq >= minMatch; + } - }; - - private final TwoPhaseIterator twoPhase = new TwoPhaseIterator(approximation) { - - @Override - public boolean matches() throws IOException { - if (matches) { - return true; - } - if (topList == null) { - advanceAll(doc); - } - if (subScorers.top().doc != doc) { - assert subScorers.top().doc > doc; - return false; - } - setTopListAndFreq(); - assert topList.doc == doc; - return matches = freq >= minMatch; - } - - @Override - public float matchCost() { - return numScorers; - } - - }; + @Override + public float matchCost() { + return numScorers; + } + }; @Override public DocIdSetIterator iterator() { @@ -229,5 +228,4 @@ final class CoveringScorer extends Scorer { public int docID() { return doc; } - } diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/DocValuesNumbersQuery.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/DocValuesNumbersQuery.java index 0b7243dd5ff..726c08b1aae 100644 --- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/DocValuesNumbersQuery.java +++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/DocValuesNumbersQuery.java @@ -22,7 +22,6 @@ import java.util.Collection; import java.util.HashSet; import java.util.Objects; import java.util.Set; - import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; import 
org.apache.lucene.index.DocValues; @@ -41,21 +40,18 @@ import org.apache.lucene.util.Accountable; import org.apache.lucene.util.RamUsageEstimator; /** - * Like {@link DocValuesTermsQuery}, but this query only - * runs on a long {@link NumericDocValuesField} or a - * {@link SortedNumericDocValuesField}, matching - * all documents whose value in the specified field is - * contained in the provided set of long values. + * Like {@link DocValuesTermsQuery}, but this query only runs on a long {@link + * NumericDocValuesField} or a {@link SortedNumericDocValuesField}, matching all documents whose + * value in the specified field is contained in the provided set of long values. * - *

    - * NOTE: be very careful using this query: it is - * typically much slower than using {@code TermsQuery}, - * but in certain specialized cases may be faster. + *

    NOTE: be very careful using this query: it is typically much slower than using {@code + * TermsQuery}, but in certain specialized cases may be faster. * * @lucene.experimental */ public class DocValuesNumbersQuery extends Query implements Accountable { - private static final long BASE_RAM_BYTES = RamUsageEstimator.shallowSizeOfInstance(DocValuesNumbersQuery.class); + private static final long BASE_RAM_BYTES = + RamUsageEstimator.shallowSizeOfInstance(DocValuesNumbersQuery.class); private final String field; private final LongHashSet numbers; @@ -76,13 +72,11 @@ public class DocValuesNumbersQuery extends Query implements Accountable { @Override public boolean equals(Object other) { - return sameClassAs(other) && - equalsTo(getClass().cast(other)); + return sameClassAs(other) && equalsTo(getClass().cast(other)); } private boolean equalsTo(DocValuesNumbersQuery other) { - return field.equals(other.field) && - numbers.equals(other.numbers); + return field.equals(other.field) && numbers.equals(other.numbers); } @Override @@ -107,53 +101,52 @@ public class DocValuesNumbersQuery extends Query implements Accountable { @Override public String toString(String defaultField) { - return new StringBuilder() - .append(field) - .append(": ") - .append(numbers.toString()) - .toString(); + return new StringBuilder().append(field).append(": ").append(numbers.toString()).toString(); } @Override public long ramBytesUsed() { - return BASE_RAM_BYTES + - RamUsageEstimator.sizeOfObject(field) + - RamUsageEstimator.sizeOfObject(numbers); + return BASE_RAM_BYTES + + RamUsageEstimator.sizeOfObject(field) + + RamUsageEstimator.sizeOfObject(numbers); } @Override - public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) + throws IOException { return new ConstantScoreWeight(this, boost) { @Override public Scorer scorer(LeafReaderContext context) throws IOException { final SortedNumericDocValues values = DocValues.getSortedNumeric(context.reader(), field); - return new ConstantScoreScorer(this, score(), scoreMode, new TwoPhaseIterator(values) { + return new ConstantScoreScorer( + this, + score(), + scoreMode, + new TwoPhaseIterator(values) { - @Override - public boolean matches() throws IOException { - int count = values.docValueCount(); - for(int i=0;i - * This is the same functionality as TermsQuery (from - * queries/), but because of drastically different - * implementations, they also have different performance - * characteristics, as described below. + *

    This is the same functionality as TermsQuery (from queries/), but because of drastically + * different implementations, they also have different performance characteristics, as described + * below. * - *

    - * NOTE: be very careful using this query: it is - * typically much slower than using {@code TermsQuery}, - * but in certain specialized cases may be faster. + *

    NOTE: be very careful using this query: it is typically much slower than using {@code + * TermsQuery}, but in certain specialized cases may be faster. * - *

    - * With each search, this query translates the specified - * set of Terms into a private {@link LongBitSet} keyed by - * term number per unique {@link IndexReader} (normally one - * reader per segment). Then, during matching, the term - * number for each docID is retrieved from the cache and - * then checked for inclusion using the {@link LongBitSet}. - * Since all testing is done using RAM resident data - * structures, performance should be very fast, most likely - * fast enough to not require further caching of the - * DocIdSet for each possible combination of terms. - * However, because docIDs are simply scanned linearly, an - * index with a great many small documents may find this - * linear scan too costly. + *

    With each search, this query translates the specified set of Terms into a private {@link + * LongBitSet} keyed by term number per unique {@link IndexReader} (normally one reader per + * segment). Then, during matching, the term number for each docID is retrieved from the cache and + * then checked for inclusion using the {@link LongBitSet}. Since all testing is done using RAM + * resident data structures, performance should be very fast, most likely fast enough to not require + * further caching of the DocIdSet for each possible combination of terms. However, because docIDs + * are simply scanned linearly, an index with a great many small documents may find this linear scan + * too costly. * - *

    - * In contrast, TermsQuery builds up an {@link FixedBitSet}, - * keyed by docID, every time it's created, by enumerating - * through all matching docs using {@link org.apache.lucene.index.PostingsEnum} to seek - * and scan through each term's docID list. While there is - * no linear scan of all docIDs, besides the allocation of - * the underlying array in the {@link FixedBitSet}, this - * approach requires a number of "disk seeks" in proportion - * to the number of terms, which can be exceptionally costly - * when there are cache misses in the OS's IO cache. + *

    In contrast, TermsQuery builds up an {@link FixedBitSet}, keyed by docID, every time it's + * created, by enumerating through all matching docs using {@link + * org.apache.lucene.index.PostingsEnum} to seek and scan through each term's docID list. While + * there is no linear scan of all docIDs, besides the allocation of the underlying array in the + * {@link FixedBitSet}, this approach requires a number of "disk seeks" in proportion to the number + * of terms, which can be exceptionally costly when there are cache misses in the OS's IO cache. * - *

    - * Generally, this filter will be slower on the first - * invocation for a given field, but subsequent invocations, - * even if you change the allowed set of Terms, should be - * faster than TermsQuery, especially as the number of - * Terms being matched increases. If you are matching only - * a very small number of terms, and those terms in turn - * match a very small number of documents, TermsQuery may - * perform faster. + *

    Generally, this filter will be slower on the first invocation for a given field, but + * subsequent invocations, even if you change the allowed set of Terms, should be faster than + * TermsQuery, especially as the number of Terms being matched increases. If you are matching only a + * very small number of terms, and those terms in turn match a very small number of documents, + * TermsQuery may perform faster. * - *

    - * Which query is best is very application dependent. + *

    Which query is best is very application dependent. * * @lucene.experimental */ public class DocValuesTermsQuery extends Query implements Accountable { - private static final long BASE_RAM_BYTES = RamUsageEstimator.shallowSizeOfInstance(DocValuesTermsQuery.class); + private static final long BASE_RAM_BYTES = + RamUsageEstimator.shallowSizeOfInstance(DocValuesTermsQuery.class); private final String field; private final PrefixCodedTerms termData; @@ -131,28 +111,29 @@ public class DocValuesTermsQuery extends Query implements Accountable { } public DocValuesTermsQuery(String field, String... terms) { - this(field, new AbstractList() { - @Override - public BytesRef get(int index) { - return new BytesRef(terms[index]); - } - @Override - public int size() { - return terms.length; - } - }); + this( + field, + new AbstractList() { + @Override + public BytesRef get(int index) { + return new BytesRef(terms[index]); + } + + @Override + public int size() { + return terms.length; + } + }); } @Override public boolean equals(Object other) { - return sameClassAs(other) && - equalsTo(getClass().cast(other)); + return sameClassAs(other) && equalsTo(getClass().cast(other)); } private boolean equalsTo(DocValuesTermsQuery other) { // termData might be heavy to compare so check the hash code first - return termDataHashCode == other.termDataHashCode && - termData.equals(other.termData); + return termDataHashCode == other.termDataHashCode && termData.equals(other.termData); } @Override @@ -176,25 +157,21 @@ public class DocValuesTermsQuery extends Query implements Accountable { return builder.toString(); } - /** - * @return the name of the field searched by this query. - */ + /** @return the name of the field searched by this query. */ public String getField() { return field; } - /** - * @return the terms looked up by this query, prefix-encoded. - */ + /** @return the terms looked up by this query, prefix-encoded. 
*/ public PrefixCodedTerms getTerms() { return termData; } @Override public long ramBytesUsed() { - return BASE_RAM_BYTES + - RamUsageEstimator.sizeOfObject(field) + - RamUsageEstimator.sizeOfObject(termData); + return BASE_RAM_BYTES + + RamUsageEstimator.sizeOfObject(field) + + RamUsageEstimator.sizeOfObject(termData); } @Override @@ -205,7 +182,8 @@ public class DocValuesTermsQuery extends Query implements Accountable { } @Override - public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) + throws IOException { return new ConstantScoreWeight(this, boost) { @Override @@ -224,32 +202,35 @@ public class DocValuesTermsQuery extends Query implements Accountable { if (matchesAtLeastOneTerm == false) { return null; } - return new ConstantScoreScorer(this, score(), scoreMode, new TwoPhaseIterator(values) { + return new ConstantScoreScorer( + this, + score(), + scoreMode, + new TwoPhaseIterator(values) { - @Override - public boolean matches() throws IOException { - for (long ord = values.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = values.nextOrd()) { - if (bits.get(ord)) { - return true; + @Override + public boolean matches() throws IOException { + for (long ord = values.nextOrd(); + ord != SortedSetDocValues.NO_MORE_ORDS; + ord = values.nextOrd()) { + if (bits.get(ord)) { + return true; + } + } + return false; } - } - return false; - } - @Override - public float matchCost() { - return 3; // lookup in a bitset - } - - }); + @Override + public float matchCost() { + return 3; // lookup in a bitset + } + }); } @Override public boolean isCacheable(LeafReaderContext ctx) { return DocValues.isCacheable(ctx, field); } - }; } - } diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/IndexSortSortedNumericDocValuesRangeQuery.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/IndexSortSortedNumericDocValuesRangeQuery.java index 14382d881bd..a43debdfa4c 100644 --- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/IndexSortSortedNumericDocValuesRangeQuery.java +++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/IndexSortSortedNumericDocValuesRangeQuery.java @@ -18,7 +18,6 @@ package org.apache.lucene.sandbox.search; import java.io.IOException; import java.util.Objects; - import org.apache.lucene.index.DocValues; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; @@ -40,24 +39,25 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.Weight; /** - * A range query that can take advantage of the fact that the index is sorted to speed up - * execution. If the index is sorted on the same field as the query, it performs binary - * search on the field's numeric doc values to find the documents at the lower and upper - * ends of the range. + * A range query that can take advantage of the fact that the index is sorted to speed up execution. + * If the index is sorted on the same field as the query, it performs binary search on the field's + * numeric doc values to find the documents at the lower and upper ends of the range. + * + *

    This optimized execution strategy is only used if the following conditions hold: * - * This optimized execution strategy is only used if the following conditions hold: *

 * <ul>
- *   <li> The index is sorted, and its primary sort is on the same field as the query.
- *   <li> The query field has either {@link SortedNumericDocValues} or {@link NumericDocValues}.
- *   <li> The segments must have at most one field value per document (otherwise we cannot easily
- *        determine the matching document IDs through a binary search).
+ *   <li>The index is sorted, and its primary sort is on the same field as the query.
+ *   <li>The query field has either {@link SortedNumericDocValues} or {@link NumericDocValues}.
+ *   <li>The segments must have at most one field value per document (otherwise we cannot easily
+ *       determine the matching document IDs through a binary search).
 * </ul>
 *
 * If any of these conditions isn't met, the search is delegated to {@code fallbackQuery}.
 *
- * This fallback must be an equivalent range query -- it should produce the same documents and give
- * constant scores. As an example, an {@link IndexSortSortedNumericDocValuesRangeQuery} might be
- * constructed as follows:
+ * <p>This fallback must be an equivalent range query -- it should produce the same documents and
+ * give constant scores. As an example, an {@link IndexSortSortedNumericDocValuesRangeQuery} might
+ * be constructed as follows:
+ *
 * <pre class="prettyprint">
 *   String field = "field";
 *   long lowerValue = 0, upperValue = 10;
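The Javadoc example above is cut off at the hunk boundary. Based on the surrounding documentation, a plausible completion is sketched below; the choice of SortedNumericDocValuesField.newSlowRangeQuery as the fallback is an assumption, not something this hunk shows:

    // Hypothetical completion of the truncated snippet; `field`, `lowerValue`,
    // and `upperValue` are the variables declared in the Javadoc example.
    Query fallbackQuery =
        SortedNumericDocValuesField.newSlowRangeQuery(field, lowerValue, upperValue);
    Query rangeQuery =
        new IndexSortSortedNumericDocValuesRangeQuery(
            field, lowerValue, upperValue, fallbackQuery);
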
    @@ -82,11 +82,9 @@ public class IndexSortSortedNumericDocValuesRangeQuery extends Query {
        * @param lowerValue The lower end of the range (inclusive).
   * @param upperValue The upper end of the range (inclusive).
        * @param fallbackQuery A query to fall back to if the optimization cannot be applied.
    -      */
    -  public IndexSortSortedNumericDocValuesRangeQuery(String field,
    -                                                   long lowerValue,
    -                                                   long upperValue,
    -                                                   Query fallbackQuery) {
    +   */
    +  public IndexSortSortedNumericDocValuesRangeQuery(
    +      String field, long lowerValue, long upperValue, Query fallbackQuery) {
         this.field = Objects.requireNonNull(field);
         this.lowerValue = lowerValue;
         this.upperValue = upperValue;
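The first condition listed earlier (the index sort matches the query field) is established at index time. A minimal sketch, assuming an existing Analyzer and Directory and a long-valued sorted field named "field":

    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.search.Sort;
    import org.apache.lucene.search.SortField;
    import org.apache.lucene.search.SortedNumericSortField;

    // Index-time setup; `analyzer` and `directory` are assumed to exist.
    IndexWriterConfig iwc = new IndexWriterConfig(analyzer);
    iwc.setIndexSort(new Sort(new SortedNumericSortField("field", SortField.Type.LONG)));
    IndexWriter writer = new IndexWriter(directory, iwc);
    // Each document then stores the value in doc values,
    // e.g. with new SortedNumericDocValuesField("field", 5L).
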
    @@ -102,10 +100,10 @@ public class IndexSortSortedNumericDocValuesRangeQuery extends Query {
         if (this == o) return true;
         if (o == null || getClass() != o.getClass()) return false;
         IndexSortSortedNumericDocValuesRangeQuery that = (IndexSortSortedNumericDocValuesRangeQuery) o;
    -    return lowerValue == that.lowerValue &&
    -        upperValue == that.upperValue &&
    -        Objects.equals(field, that.field) &&
    -        Objects.equals(fallbackQuery, that.fallbackQuery);
    +    return lowerValue == that.lowerValue
    +        && upperValue == that.upperValue
    +        && Objects.equals(field, that.field)
    +        && Objects.equals(fallbackQuery, that.fallbackQuery);
       }
     
       @Override
    @@ -127,8 +125,7 @@ public class IndexSortSortedNumericDocValuesRangeQuery extends Query {
         if (this.field.equals(field) == false) {
           b.append(this.field).append(":");
         }
    -    return b
    -        .append("[")
    +    return b.append("[")
             .append(lowerValue)
             .append(" TO ")
             .append(upperValue)
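For reference, the string assembled above renders as below; the field prefix is emitted only
when the field passed to toString differs from the query's own field. Values are illustrative,
with fallbackQuery as in the earlier example:

    Query q = new IndexSortSortedNumericDocValuesRangeQuery("field", 0, 10, fallbackQuery);
    q.toString("otherField"); // -> "field:[0 TO 10]"
    q.toString("field");      // -> "[0 TO 10]"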
    @@ -152,13 +149,15 @@ public class IndexSortSortedNumericDocValuesRangeQuery extends Query {
       }
     
       @Override
    -  public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
    +  public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost)
    +      throws IOException {
         Weight fallbackWeight = fallbackQuery.createWeight(searcher, scoreMode, boost);
     
         return new ConstantScoreWeight(this, boost) {
           @Override
           public Scorer scorer(LeafReaderContext context) throws IOException {
    -        SortedNumericDocValues sortedNumericValues = DocValues.getSortedNumeric(context.reader(), field);
    +        SortedNumericDocValues sortedNumericValues =
    +            DocValues.getSortedNumeric(context.reader(), field);
             NumericDocValues numericValues = DocValues.unwrapSingleton(sortedNumericValues);
     
             if (numericValues != null) {
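A rough sketch -- not this patch's code -- of the gating this scorer performs before taking the
optimized path: the segment's primary index sort must be on the query field, otherwise the
scorer delegates to the fallback weight created above:

    Sort indexSort = context.reader().getMetaData().getSort();
    if (indexSort != null
        && indexSort.getSort().length > 0
        && field.equals(indexSort.getSort()[0].getField())) {
      // eligible: binary-search the doc values for the range endpoints
    } else {
      return fallbackWeight.scorer(context); // conditions not met: delegate
    }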
    @@ -185,20 +184,20 @@ public class IndexSortSortedNumericDocValuesRangeQuery extends Query {
       }
     
       /**
    -   * Computes the document IDs that lie within the range [lowerValue, upperValue] by
    -   * performing binary search on the field's doc values.
    +   * Computes the document IDs that lie within the range [lowerValue, upperValue] by performing
    +   * binary search on the field's doc values.
        *
    -   * Because doc values only allow forward iteration, we need to reload the field comparator
    +   * 

<p>Because doc values only allow forward iteration, we need to reload the field comparator
   * every time the binary search accesses an earlier element.
   *
-   * We must also account for missing values when performing the binary search. For this
-   * reason, we load the {@link FieldComparator} instead of checking the docvalues directly.
-   * The returned {@link DocIdSetIterator} makes sure to wrap the original docvalues to skip
-   * over documents with no value.
+   * <p>
    We must also account for missing values when performing the binary search. For this reason, + * we load the {@link FieldComparator} instead of checking the docvalues directly. The returned + * {@link DocIdSetIterator} makes sure to wrap the original docvalues to skip over documents with + * no value. */ - private DocIdSetIterator getDocIdSetIterator(SortField sortField, - LeafReaderContext context, - DocIdSetIterator delegate) throws IOException { + private DocIdSetIterator getDocIdSetIterator( + SortField sortField, LeafReaderContext context, DocIdSetIterator delegate) + throws IOException { long lower = sortField.getReverse() ? upperValue : lowerValue; long upper = sortField.getReverse() ? lowerValue : upperValue; int maxDoc = context.reader().maxDoc(); @@ -240,16 +239,13 @@ public class IndexSortSortedNumericDocValuesRangeQuery extends Query { return new BoundedDocSetIdIterator(firstDocIdInclusive, lastDocIdExclusive, delegate); } - /** - * Compares the given document's value with a stored reference value. - */ + /** Compares the given document's value with a stored reference value. */ private interface ValueComparator { int compare(int docID) throws IOException; } - private static ValueComparator loadComparator(SortField sortField, - long topValue, - LeafReaderContext context) throws IOException { + private static ValueComparator loadComparator( + SortField sortField, long topValue, LeafReaderContext context) throws IOException { @SuppressWarnings("unchecked") FieldComparator fieldComparator = (FieldComparator) sortField.getComparator(1, 0); fieldComparator.setTopValue(topValue); @@ -264,8 +260,8 @@ public class IndexSortSortedNumericDocValuesRangeQuery extends Query { } /** - * A doc ID set iterator that wraps a delegate iterator and only returns doc IDs in - * the range [firstDocInclusive, lastDoc). + * A doc ID set iterator that wraps a delegate iterator and only returns doc IDs in the range + * [firstDocInclusive, lastDoc). 
*/ private static class BoundedDocSetIdIterator extends DocIdSetIterator { private final int firstDoc; @@ -274,9 +270,7 @@ public class IndexSortSortedNumericDocValuesRangeQuery extends Query { private int docID = -1; - BoundedDocSetIdIterator(int firstDoc, - int lastDoc, - DocIdSetIterator delegate) { + BoundedDocSetIdIterator(int firstDoc, int lastDoc, DocIdSetIterator delegate) { this.firstDoc = firstDoc; this.lastDoc = lastDoc; this.delegate = delegate; diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/LargeNumHitsTopDocsCollector.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/LargeNumHitsTopDocsCollector.java index ff5788a56f3..e0f8ff6acff 100644 --- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/LargeNumHitsTopDocsCollector.java +++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/LargeNumHitsTopDocsCollector.java @@ -17,12 +17,13 @@ package org.apache.lucene.sandbox.search; +import static org.apache.lucene.search.TopDocsCollector.EMPTY_TOPDOCS; + import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.List; - import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Collector; import org.apache.lucene.search.HitQueue; @@ -34,14 +35,10 @@ import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopScoreDocCollector; import org.apache.lucene.search.TotalHits; -import static org.apache.lucene.search.TopDocsCollector.EMPTY_TOPDOCS; - /** - * Optimized collector for large number of hits. - * The collector maintains an ArrayList of hits until it accumulates - * the requested number of hits. Post that, it builds a Priority Queue - * and starts filtering further hits based on the minimum competitive - * score. + * Optimized collector for large number of hits. The collector maintains an ArrayList of hits until + * it accumulates the requested number of hits. Post that, it builds a Priority Queue and starts + * filtering further hits based on the minimum competitive score. */ public final class LargeNumHitsTopDocsCollector implements Collector { private final int requestedHitCount; @@ -127,8 +124,8 @@ public final class LargeNumHitsTopDocsCollector implements Collector { } /** - * Populates the results array with the ScoreDoc instances. This can be - * overridden in case a different ScoreDoc type should be returned. + * Populates the results array with the ScoreDoc instances. This can be overridden in case a + * different ScoreDoc type should be returned. */ protected void populateResults(ScoreDoc[] results, int howMany) { if (pq != null) { @@ -141,8 +138,11 @@ public final class LargeNumHitsTopDocsCollector implements Collector { // Total number of hits collected were less than requestedHitCount assert totalHits < requestedHitCount; - Collections.sort(hits, Comparator.comparing((ScoreDoc scoreDoc) -> - scoreDoc.score).reversed().thenComparing(scoreDoc -> scoreDoc.doc)); + Collections.sort( + hits, + Comparator.comparing((ScoreDoc scoreDoc) -> scoreDoc.score) + .reversed() + .thenComparing(scoreDoc -> scoreDoc.doc)); for (int i = 0; i < howMany; i++) { results[i] = hits.get(i); @@ -150,13 +150,14 @@ public final class LargeNumHitsTopDocsCollector implements Collector { } /** - * Returns a {@link TopDocs} instance containing the given results. If - * results is null it means there are no results to return, - * either because there were 0 calls to collect() or because the arguments to - * topDocs were invalid. 
+ * Returns a {@link TopDocs} instance containing the given results. If results is + * null it means there are no results to return, either because there were 0 calls to collect() or + * because the arguments to topDocs were invalid. */ protected TopDocs newTopDocs(ScoreDoc[] results) { - return results == null ? EMPTY_TOPDOCS : new TopDocs(new TotalHits(totalHits, TotalHits.Relation.EQUAL_TO), results); + return results == null + ? EMPTY_TOPDOCS + : new TopDocs(new TotalHits(totalHits, TotalHits.Relation.EQUAL_TO), results); } /** Returns the top docs that were collected by this collector. */ diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/LatLonPointPrototypeQueries.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/LatLonPointPrototypeQueries.java index cbb1698e562..ee98488249b 100644 --- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/LatLonPointPrototypeQueries.java +++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/LatLonPointPrototypeQueries.java @@ -19,7 +19,6 @@ package org.apache.lucene.sandbox.search; import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.lucene.codecs.lucene86.Lucene86PointsFormat; import org.apache.lucene.document.LatLonDocValuesField; import org.apache.lucene.document.LatLonPoint; @@ -39,41 +38,46 @@ import org.apache.lucene.util.bkd.BKDReader; /** * Holder class for prototype sandboxed queries * - * When the query graduates from sandbox, these static calls should be - * placed in {@link LatLonPoint} + *

<p>When the query graduates from sandbox, these static calls should be placed in {@link
+ * LatLonPoint}
 *
 * @lucene.experimental
 */
public class LatLonPointPrototypeQueries {

-  // no instance
-  private LatLonPointPrototypeQueries() {
-  }
+  // no instance
+  private LatLonPointPrototypeQueries() {}

  /**
-   * Finds the {@code n} nearest indexed points to the provided point, according to Haversine distance.
-   * <p>
-   * This is functionally equivalent to running {@link MatchAllDocsQuery} with a {@link LatLonDocValuesField#newDistanceSort},
-   * but is far more efficient since it takes advantage of properties the indexed BKD tree. Currently this
-   * only works with {@link Lucene86PointsFormat} (used by the default codec). Multi-valued fields are
-   * currently not de-duplicated, so if a document had multiple instances of the specified field that
-   * make it into the top n, that document will appear more than once.
-   * <p>
-   * Documents are ordered by ascending distance from the location. The value returned in {@link FieldDoc} for
-   * the hits contains a Double instance with the distance in meters.
+   * Finds the {@code n} nearest indexed points to the provided point, according to Haversine
+   * distance.
+   *
+   * <p>This is functionally equivalent to running {@link MatchAllDocsQuery} with a {@link
+   * LatLonDocValuesField#newDistanceSort}, but is far more efficient since it takes advantage of
+   * properties of the indexed BKD tree. Currently this only works with {@link
+   * Lucene86PointsFormat} (used by the default codec). Multi-valued fields are currently not
+   * de-duplicated, so if a document had multiple instances of the specified field that make it
+   * into the top n, that document will appear more than once.
+   *
+   * <p>
    Documents are ordered by ascending distance from the location. The value returned in {@link + * FieldDoc} for the hits contains a Double instance with the distance in meters. * * @param searcher IndexSearcher to find nearest points from. * @param field field name. must not be null. * @param latitude latitude at the center: must be within standard +/-90 coordinate bounds. * @param longitude longitude at the center: must be within standard +/-180 coordinate bounds. * @param n the number of nearest neighbors to retrieve. - * @return TopFieldDocs containing documents ordered by distance, where the field value for each {@link FieldDoc} is the distance in meters - * @throws IllegalArgumentException if the underlying PointValues is not a {@code Lucene60PointsReader} (this is a current limitation), or - * if {@code field} or {@code searcher} is null, or if {@code latitude}, {@code longitude} or {@code n} are out-of-bounds + * @return TopFieldDocs containing documents ordered by distance, where the field value for each + * {@link FieldDoc} is the distance in meters + * @throws IllegalArgumentException if the underlying PointValues is not a {@code + * Lucene60PointsReader} (this is a current limitation), or if {@code field} or {@code + * searcher} is null, or if {@code latitude}, {@code longitude} or {@code n} are out-of-bounds * @throws IOException if an IOException occurs while finding the points. */ // TODO: what about multi-valued documents? what happens? - public static TopFieldDocs nearest(IndexSearcher searcher, String field, double latitude, double longitude, int n) throws IOException { + public static TopFieldDocs nearest( + IndexSearcher searcher, String field, double latitude, double longitude, int n) + throws IOException { GeoUtils.checkLatitude(latitude); GeoUtils.checkLongitude(longitude); if (n < 1) { @@ -89,11 +93,12 @@ public class LatLonPointPrototypeQueries { List docBases = new ArrayList<>(); List liveDocs = new ArrayList<>(); int totalHits = 0; - for(LeafReaderContext leaf : searcher.getIndexReader().leaves()) { + for (LeafReaderContext leaf : searcher.getIndexReader().leaves()) { PointValues points = leaf.reader().getPointValues(field); if (points != null) { if (points instanceof BKDReader == false) { - throw new IllegalArgumentException("can only run on Lucene60PointsReader points implementation, but got " + points); + throw new IllegalArgumentException( + "can only run on Lucene60PointsReader points implementation, but got " + points); } totalHits += points.getDocCount(); BKDReader reader = (BKDReader) points; @@ -105,11 +110,12 @@ public class LatLonPointPrototypeQueries { } } - NearestNeighbor.NearestHit[] hits = NearestNeighbor.nearest(latitude, longitude, readers, liveDocs, docBases, n); + NearestNeighbor.NearestHit[] hits = + NearestNeighbor.nearest(latitude, longitude, readers, liveDocs, docBases, n); // Convert to TopFieldDocs: ScoreDoc[] scoreDocs = new ScoreDoc[hits.length]; - for(int i=0;i implements Accountable { - private static final long BASE_RAM_BYTES = RamUsageEstimator.shallowSizeOfInstance(LongHashSet.class); + private static final long BASE_RAM_BYTES = + RamUsageEstimator.shallowSizeOfInstance(LongHashSet.class); private static final long MISSING = Long.MIN_VALUE; @@ -119,8 +119,7 @@ final class LongHashSet extends AbstractSet implements Accountable { @Override public long ramBytesUsed() { - return BASE_RAM_BYTES + - RamUsageEstimator.sizeOfObject(table); + return BASE_RAM_BYTES + RamUsageEstimator.sizeOfObject(table); } @Override @@ -158,8 +157,6 @@ 
final class LongHashSet extends AbstractSet implements Accountable { hasNext = false; return value; } - }; } - } diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/MultiNormsLeafSimScorer.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/MultiNormsLeafSimScorer.java index d17ac05ce85..9fa8f190365 100644 --- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/MultiNormsLeafSimScorer.java +++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/MultiNormsLeafSimScorer.java @@ -16,12 +16,13 @@ */ package org.apache.lucene.sandbox.search; +import static org.apache.lucene.sandbox.search.BM25FQuery.FieldAndWeight; + import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.Objects; - import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.search.Explanation; @@ -29,15 +30,9 @@ import org.apache.lucene.search.LeafSimScorer; import org.apache.lucene.search.similarities.Similarity.SimScorer; import org.apache.lucene.util.SmallFloat; -import static org.apache.lucene.sandbox.search.BM25FQuery.FieldAndWeight; - -/** - * Copy of {@link LeafSimScorer} that sums document's norms from multiple fields. - */ +/** Copy of {@link LeafSimScorer} that sums document's norms from multiple fields. */ final class MultiNormsLeafSimScorer { - /** - * Cache of decoded norms. - */ + /** Cache of decoded norms. */ private static final float[] LENGTH_TABLE = new float[256]; static { @@ -49,11 +44,13 @@ final class MultiNormsLeafSimScorer { private final SimScorer scorer; private final NumericDocValues norms; - /** - * Sole constructor: Score documents of {@code reader} with {@code scorer}. - * - */ - MultiNormsLeafSimScorer(SimScorer scorer, LeafReader reader, Collection normFields, boolean needsScores) throws IOException { + /** Sole constructor: Score documents of {@code reader} with {@code scorer}. */ + MultiNormsLeafSimScorer( + SimScorer scorer, + LeafReader reader, + Collection normFields, + boolean needsScores) + throws IOException { this.scorer = Objects.requireNonNull(scorer); if (needsScores) { final List normsList = new ArrayList<>(); @@ -92,16 +89,22 @@ final class MultiNormsLeafSimScorer { } } - /** Score the provided document assuming the given term document frequency. - * This method must be called on non-decreasing sequences of doc ids. - * @see SimScorer#score(float, long) */ + /** + * Score the provided document assuming the given term document frequency. This method must be + * called on non-decreasing sequences of doc ids. + * + * @see SimScorer#score(float, long) + */ public float score(int doc, float freq) throws IOException { return scorer.score(freq, getNormValue(doc)); } - /** Explain the score for the provided document assuming the given term document frequency. - * This method must be called on non-decreasing sequences of doc ids. - * @see SimScorer#explain(Explanation, long) */ + /** + * Explain the score for the provided document assuming the given term document frequency. This + * method must be called on non-decreasing sequences of doc ids. 
+ * + * @see SimScorer#explain(Explanation, long) + */ public Explanation explain(int doc, Explanation freqExpl) throws IOException { return scorer.explain(freqExpl, getNormValue(doc)); } @@ -128,7 +131,8 @@ final class MultiNormsLeafSimScorer { for (int i = 0; i < normsArr.length; i++) { boolean found = normsArr[i].advanceExact(target); assert found; - normValue += weightArr[i] * LENGTH_TABLE[Byte.toUnsignedInt((byte) normsArr[i].longValue())]; + normValue += + weightArr[i] * LENGTH_TABLE[Byte.toUnsignedInt((byte) normsArr[i].longValue())]; } current = SmallFloat.intToByte4(Math.round(normValue)); return true; diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/MultiRangeQuery.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/MultiRangeQuery.java index 531f6e0adcc..ebc6b258669 100644 --- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/MultiRangeQuery.java +++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/MultiRangeQuery.java @@ -22,7 +22,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Objects; - import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.PointValues; @@ -40,15 +39,14 @@ import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.DocIdSetBuilder; /** - * Abstract class for range queries involving multiple ranges against physical points such as {@code IntPoints} - * All ranges are logically ORed together - * TODO: Add capability for handling overlapping ranges at rewrite time + * Abstract class for range queries involving multiple ranges against physical points such as {@code + * IntPoints} All ranges are logically ORed together TODO: Add capability for handling overlapping + * ranges at rewrite time + * * @lucene.experimental */ public abstract class MultiRangeQuery extends Query { - /** - * Representation of a single clause in a MultiRangeQuery - */ + /** Representation of a single clause in a MultiRangeQuery */ public static class RangeClause { byte[] lowerValue; byte[] upperValue; @@ -60,7 +58,7 @@ public abstract class MultiRangeQuery extends Query { } /** A builder for multirange queries. */ - public static abstract class Builder { + public abstract static class Builder { protected final String field; protected final int bytesPerDim; @@ -84,29 +82,29 @@ public abstract class MultiRangeQuery extends Query { this.numDims = numDims; } - /** - * Add a new clause to this {@link Builder}. - */ + /** Add a new clause to this {@link Builder}. */ public Builder add(RangeClause clause) { clauses.add(clause); return this; } - /** - * Add a new clause to this {@link Builder}. - */ + /** Add a new clause to this {@link Builder}. */ public Builder add(byte[] lowerValue, byte[] upperValue) { checkArgs(lowerValue, upperValue); return add(new RangeClause(lowerValue, upperValue)); } - /** Create a new {@link MultiRangeQuery} based on the parameters that have - * been set on this builder. */ + /** + * Create a new {@link MultiRangeQuery} based on the parameters that have been set on this + * builder. + */ public abstract MultiRangeQuery build(); /** * Check preconditions for all factory methods - * @throws IllegalArgumentException if {@code field}, {@code lowerPoint} or {@code upperPoint} are null. + * + * @throws IllegalArgumentException if {@code field}, {@code lowerPoint} or {@code upperPoint} + * are null. 
*/ private void checkArgs(Object lowerPoint, Object upperPoint) { if (lowerPoint == null) { @@ -129,7 +127,8 @@ public abstract class MultiRangeQuery extends Query { * @param rangeClauses Range Clauses for this query * @param numDims number of dimensions. */ - protected MultiRangeQuery(String field, int numDims, int bytesPerDim, List rangeClauses) { + protected MultiRangeQuery( + String field, int numDims, int bytesPerDim, List rangeClauses) { this.field = field; this.numDims = numDims; this.bytesPerDim = bytesPerDim; @@ -147,7 +146,8 @@ public abstract class MultiRangeQuery extends Query { * TODO: Organize ranges similar to how EdgeTree does, to avoid linear scan of ranges */ @Override - public final Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + public final Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) + throws IOException { // We don't use RandomAccessWeight here: it's no good to approximate with "match all docs". // This is an inverted structure and should be used in the first pass: @@ -175,8 +175,22 @@ public abstract class MultiRangeQuery extends Query { for (RangeClause rangeClause : rangeClauses) { for (int dim = 0; dim < numDims; dim++) { int offset = dim * bytesPerDim; - if ((Arrays.compareUnsigned(packedValue, offset, offset + bytesPerDim, rangeClause.lowerValue, offset, offset + bytesPerDim) >= 0) && - (Arrays.compareUnsigned(packedValue, offset, offset + bytesPerDim, rangeClause.upperValue, offset, offset + bytesPerDim) <= 0)) { + if ((Arrays.compareUnsigned( + packedValue, + offset, + offset + bytesPerDim, + rangeClause.lowerValue, + offset, + offset + bytesPerDim) + >= 0) + && (Arrays.compareUnsigned( + packedValue, + offset, + offset + bytesPerDim, + rangeClause.upperValue, + offset, + offset + bytesPerDim) + <= 0)) { // Doc is in-bounds. Add and short circuit adder.add(docID); return; @@ -192,22 +206,51 @@ public abstract class MultiRangeQuery extends Query { boolean crosses = false; /** - * CROSSES and INSIDE take priority over OUTSIDE. How we calculate the position is: - * 1) If any range sees the point as inside, return INSIDE. - * 2) If no range sees the point as inside and atleast one range sees the point as CROSSES, return CROSSES - * 3) If none of the above, return OUTSIDE + * CROSSES and INSIDE take priority over OUTSIDE. How we calculate the position is: 1) + * If any range sees the point as inside, return INSIDE. 
2) If no range sees the point + * as inside and atleast one range sees the point as CROSSES, return CROSSES 3) If none + * of the above, return OUTSIDE */ for (RangeClause rangeClause : rangeClauses) { for (int dim = 0; dim < numDims; dim++) { int offset = dim * bytesPerDim; - if ((Arrays.compareUnsigned(minPackedValue, offset, offset + bytesPerDim, rangeClause.lowerValue, offset, offset + bytesPerDim) >= 0) && - (Arrays.compareUnsigned(maxPackedValue, offset, offset + bytesPerDim, rangeClause.upperValue, offset, offset + bytesPerDim) <= 0)) { + if ((Arrays.compareUnsigned( + minPackedValue, + offset, + offset + bytesPerDim, + rangeClause.lowerValue, + offset, + offset + bytesPerDim) + >= 0) + && (Arrays.compareUnsigned( + maxPackedValue, + offset, + offset + bytesPerDim, + rangeClause.upperValue, + offset, + offset + bytesPerDim) + <= 0)) { return PointValues.Relation.CELL_INSIDE_QUERY; } - crosses |= Arrays.compareUnsigned(minPackedValue, offset, offset + bytesPerDim, rangeClause.lowerValue, offset, offset + bytesPerDim) < 0 || - Arrays.compareUnsigned(maxPackedValue, offset, offset + bytesPerDim, rangeClause.upperValue, offset, offset + bytesPerDim) > 0; + crosses |= + Arrays.compareUnsigned( + minPackedValue, + offset, + offset + bytesPerDim, + rangeClause.lowerValue, + offset, + offset + bytesPerDim) + < 0 + || Arrays.compareUnsigned( + maxPackedValue, + offset, + offset + bytesPerDim, + rangeClause.upperValue, + offset, + offset + bytesPerDim) + > 0; } } @@ -231,10 +274,22 @@ public abstract class MultiRangeQuery extends Query { } if (values.getNumIndexDimensions() != numDims) { - throw new IllegalArgumentException("field=\"" + field + "\" was indexed with numIndexDimensions=" + values.getNumIndexDimensions() + " but this query has numDims=" + numDims); + throw new IllegalArgumentException( + "field=\"" + + field + + "\" was indexed with numIndexDimensions=" + + values.getNumIndexDimensions() + + " but this query has numDims=" + + numDims); } if (bytesPerDim != values.getBytesPerDimension()) { - throw new IllegalArgumentException("field=\"" + field + "\" was indexed with bytesPerDim=" + values.getBytesPerDimension() + " but this query has bytesPerDim=" + bytesPerDim); + throw new IllegalArgumentException( + "field=\"" + + field + + "\" was indexed with bytesPerDim=" + + values.getBytesPerDimension() + + " but this query has bytesPerDim=" + + bytesPerDim); } boolean allDocsMatch; @@ -245,8 +300,22 @@ public abstract class MultiRangeQuery extends Query { for (RangeClause rangeClause : rangeClauses) { for (int i = 0; i < numDims; ++i) { int offset = i * bytesPerDim; - if (Arrays.compareUnsigned(rangeClause.lowerValue, offset, offset + bytesPerDim, fieldPackedLower, offset, offset + bytesPerDim) > 0 - || Arrays.compareUnsigned(rangeClause.upperValue, offset, offset + bytesPerDim, fieldPackedUpper, offset, offset + bytesPerDim) < 0) { + if (Arrays.compareUnsigned( + rangeClause.lowerValue, + offset, + offset + bytesPerDim, + fieldPackedLower, + offset, + offset + bytesPerDim) + > 0 + || Arrays.compareUnsigned( + rangeClause.upperValue, + offset, + offset + bytesPerDim, + fieldPackedUpper, + offset, + offset + bytesPerDim) + < 0) { allDocsMatch = false; break; } @@ -262,7 +331,8 @@ public abstract class MultiRangeQuery extends Query { return new ScorerSupplier() { @Override public Scorer get(long leadCost) { - return new ConstantScoreScorer(weight, score(), scoreMode, DocIdSetIterator.all(reader.maxDoc())); + return new ConstantScoreScorer( + weight, score(), scoreMode, 
DocIdSetIterator.all(reader.maxDoc())); } @Override @@ -310,7 +380,6 @@ public abstract class MultiRangeQuery extends Query { public boolean isCacheable(LeafReaderContext ctx) { return true; } - }; } @@ -341,15 +410,14 @@ public abstract class MultiRangeQuery extends Query { @Override public final boolean equals(Object o) { - return sameClassAs(o) && - equalsTo(getClass().cast(o)); + return sameClassAs(o) && equalsTo(getClass().cast(o)); } private boolean equalsTo(MultiRangeQuery other) { - return Objects.equals(field, other.field) && - numDims == other.numDims && - bytesPerDim == other.bytesPerDim && - rangeClauses.equals(other.rangeClauses); + return Objects.equals(field, other.field) + && numDims == other.numDims + && bytesPerDim == other.bytesPerDim + && rangeClauses.equals(other.rangeClauses); } @Override @@ -375,9 +443,17 @@ public abstract class MultiRangeQuery extends Query { int startOffset = bytesPerDim * i; sb.append('['); - sb.append(toString(i, ArrayUtil.copyOfSubArray(rangeClause.lowerValue, startOffset, startOffset + bytesPerDim))); + sb.append( + toString( + i, + ArrayUtil.copyOfSubArray( + rangeClause.lowerValue, startOffset, startOffset + bytesPerDim))); sb.append(" TO "); - sb.append(toString(i, ArrayUtil.copyOfSubArray(rangeClause.upperValue, startOffset, startOffset + bytesPerDim))); + sb.append( + toString( + i, + ArrayUtil.copyOfSubArray( + rangeClause.upperValue, startOffset, startOffset + bytesPerDim))); sb.append(']'); } sb.append('}'); @@ -388,8 +464,8 @@ public abstract class MultiRangeQuery extends Query { } /** - * Returns a string of a single value in a human-readable format for debugging. - * This is used by {@link #toString()}. + * Returns a string of a single value in a human-readable format for debugging. This is used by + * {@link #toString()}. * * @param dimension dimension of the particular value * @param value single value, never null diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/NearestNeighbor.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/NearestNeighbor.java index a1a59e18922..e3b040586ac 100644 --- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/NearestNeighbor.java +++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/NearestNeighbor.java @@ -16,12 +16,14 @@ */ package org.apache.lucene.sandbox.search; +import static org.apache.lucene.geo.GeoEncodingUtils.decodeLatitude; +import static org.apache.lucene.geo.GeoEncodingUtils.decodeLongitude; + import java.io.IOException; import java.util.ArrayList; import java.util.Comparator; import java.util.List; import java.util.PriorityQueue; - import org.apache.lucene.geo.Rectangle; import org.apache.lucene.index.PointValues.IntersectVisitor; import org.apache.lucene.index.PointValues.Relation; @@ -32,9 +34,6 @@ import org.apache.lucene.util.bkd.BKDReader; import org.apache.lucene.util.bkd.BKDReader.IndexTree; import org.apache.lucene.util.bkd.BKDReader.IntersectState; -import static org.apache.lucene.geo.GeoEncodingUtils.decodeLatitude; -import static org.apache.lucene.geo.GeoEncodingUtils.decodeLongitude; - /** * KNN search on top of 2D lat/lon indexed points. * @@ -49,13 +48,18 @@ class NearestNeighbor { final IndexTree index; /** - * The closest distance from a point in this cell to the query point, computed as a sort key through - * {@link SloppyMath#haversinSortKey}. Note that this is an approximation to the closest distance, - * and there could be a point in the cell that is closer. 
+ * The closest distance from a point in this cell to the query point, computed as a sort key + * through {@link SloppyMath#haversinSortKey}. Note that this is an approximation to the closest + * distance, and there could be a point in the cell that is closer. */ final double distanceSortKey; - public Cell(IndexTree index, int readerIndex, byte[] minPacked, byte[] maxPacked, double distanceSortKey) { + public Cell( + IndexTree index, + int readerIndex, + byte[] minPacked, + byte[] maxPacked, + double distanceSortKey) { this.index = index; this.readerIndex = readerIndex; this.minPacked = minPacked.clone(); @@ -73,7 +77,23 @@ class NearestNeighbor { double minLon = decodeLongitude(minPacked, Integer.BYTES); double maxLat = decodeLatitude(maxPacked, 0); double maxLon = decodeLongitude(maxPacked, Integer.BYTES); - return "Cell(readerIndex=" + readerIndex + " nodeID=" + index.getNodeID() + " isLeaf=" + index.isLeafNode() + " lat=" + minLat + " TO " + maxLat + ", lon=" + minLon + " TO " + maxLon + "; distanceSortKey=" + distanceSortKey + ")"; + return "Cell(readerIndex=" + + readerIndex + + " nodeID=" + + index.getNodeID() + + " isLeaf=" + + index.isLeafNode() + + " lat=" + + minLat + + " TO " + + maxLat + + ", lon=" + + minLon + + " TO " + + maxLon + + "; distanceSortKey=" + + distanceSortKey + + ")"; } } @@ -95,7 +115,8 @@ class NearestNeighbor { // second set of longitude ranges to check (for cross-dateline case) private double minLon2 = Double.POSITIVE_INFINITY; - public NearestVisitor(PriorityQueue hitQueue, int topN, double pointLat, double pointLon) { + public NearestVisitor( + PriorityQueue hitQueue, int topN, double pointLat, double pointLon) { this.hitQueue = hitQueue; this.topN = topN; this.pointLat = pointLat; @@ -110,9 +131,10 @@ class NearestNeighbor { private void maybeUpdateBBox() { if (setBottomCounter < 1024 || (setBottomCounter & 0x3F) == 0x3F) { NearestHit hit = hitQueue.peek(); - Rectangle box = Rectangle.fromPointDistance(pointLat, pointLon, - SloppyMath.haversinMeters(hit.distanceSortKey)); - //System.out.println(" update bbox to " + box); + Rectangle box = + Rectangle.fromPointDistance( + pointLat, pointLon, SloppyMath.haversinMeters(hit.distanceSortKey)); + // System.out.println(" update bbox to " + box); minLat = box.minLat; maxLat = box.maxLat; if (box.crossesDateline()) { @@ -133,7 +155,7 @@ class NearestNeighbor { @Override public void visit(int docID, byte[] packedValue) { - //System.out.println("visit docID=" + docID + " liveDocs=" + curLiveDocs); + // System.out.println("visit docID=" + docID + " liveDocs=" + curLiveDocs); if (curLiveDocs != null && curLiveDocs.get(docID) == false) { return; @@ -150,33 +172,37 @@ class NearestNeighbor { return; } - // Use the haversin sort key when comparing hits, as it is faster to compute than the true distance. - double distanceSortKey = SloppyMath.haversinSortKey(pointLat, pointLon, docLatitude, docLongitude); + // Use the haversin sort key when comparing hits, as it is faster to compute than the true + // distance. 
+ double distanceSortKey = + SloppyMath.haversinSortKey(pointLat, pointLon, docLatitude, docLongitude); - //System.out.println(" visit docID=" + docID + " distanceSortKey=" + distanceSortKey + " docLat=" + docLatitude + " docLon=" + docLongitude); + // System.out.println(" visit docID=" + docID + " distanceSortKey=" + distanceSortKey + " + // docLat=" + docLatitude + " docLon=" + docLongitude); int fullDocID = curDocBase + docID; if (hitQueue.size() == topN) { // queue already full NearestHit hit = hitQueue.peek(); - //System.out.println(" bottom distanceSortKey=" + hit.distanceSortKey); + // System.out.println(" bottom distanceSortKey=" + hit.distanceSortKey); // we don't collect docs in order here, so we must also test the tie-break case ourselves: - if (distanceSortKey < hit.distanceSortKey || (distanceSortKey == hit.distanceSortKey && fullDocID < hit.docID)) { + if (distanceSortKey < hit.distanceSortKey + || (distanceSortKey == hit.distanceSortKey && fullDocID < hit.docID)) { hitQueue.poll(); hit.docID = fullDocID; hit.distanceSortKey = distanceSortKey; hitQueue.offer(hit); - //System.out.println(" ** keep2, now bottom=" + hit); + // System.out.println(" ** keep2, now bottom=" + hit); maybeUpdateBBox(); } - + } else { NearestHit hit = new NearestHit(); hit.docID = fullDocID; hit.distanceSortKey = distanceSortKey; hitQueue.offer(hit); - //System.out.println(" ** keep1, now bottom=" + hit); + // System.out.println(" ** keep1, now bottom=" + hit); } } @@ -187,7 +213,9 @@ class NearestNeighbor { double cellMaxLat = decodeLatitude(maxPackedValue, 0); double cellMaxLon = decodeLongitude(maxPackedValue, Integer.BYTES); - if (cellMaxLat < minLat || maxLat < cellMinLat || ((cellMaxLon < minLon || maxLon < cellMinLon) && cellMaxLon < minLon2)) { + if (cellMaxLat < minLat + || maxLat < cellMinLat + || ((cellMaxLon < minLon || maxLon < cellMinLon) && cellMaxLon < minLon2)) { // this cell is outside our search bbox; don't bother exploring any more return Relation.CELL_OUTSIDE_QUERY; } @@ -200,7 +228,8 @@ class NearestNeighbor { public int docID; /** - * The distance from the hit to the query point, computed as a sort key through {@link SloppyMath#haversinSortKey}. + * The distance from the hit to the query point, computed as a sort key through {@link + * SloppyMath#haversinSortKey}. */ public double distanceSortKey; @@ -210,27 +239,39 @@ class NearestNeighbor { } } - // TODO: can we somehow share more with, or simply directly use, the LatLonPointDistanceComparator? It's really doing the same thing as + // TODO: can we somehow share more with, or simply directly use, the + // LatLonPointDistanceComparator? It's really doing the same thing as // our hitQueue... 
- public static NearestHit[] nearest(double pointLat, double pointLon, List readers, List liveDocs, List docBases, final int n) throws IOException { + public static NearestHit[] nearest( + double pointLat, + double pointLon, + List readers, + List liveDocs, + List docBases, + final int n) + throws IOException { - //System.out.println("NEAREST: readers=" + readers + " liveDocs=" + liveDocs + " pointLat=" + pointLat + " pointLon=" + pointLon); + // System.out.println("NEAREST: readers=" + readers + " liveDocs=" + liveDocs + " pointLat=" + + // pointLat + " pointLon=" + pointLon); // Holds closest collected points seen so far: // TODO: if we used lucene's PQ we could just updateTop instead of poll/offer: - final PriorityQueue hitQueue = new PriorityQueue<>(n, new Comparator() { - @Override - public int compare(NearestHit a, NearestHit b) { - // sort by opposite distanceSortKey natural order - int cmp = Double.compare(a.distanceSortKey, b.distanceSortKey); - if (cmp != 0) { - return -cmp; - } + final PriorityQueue hitQueue = + new PriorityQueue<>( + n, + new Comparator() { + @Override + public int compare(NearestHit a, NearestHit b) { + // sort by opposite distanceSortKey natural order + int cmp = Double.compare(a.distanceSortKey, b.distanceSortKey); + if (cmp != 0) { + return -cmp; + } - // tie-break by higher docID: - return b.docID - a.docID; - } - }); + // tie-break by higher docID: + return b.docID - a.docID; + } + }); // Holds all cells, sorted by closest to the point: PriorityQueue cellQueue = new PriorityQueue<>(); @@ -240,68 +281,99 @@ class NearestNeighbor { // Add root cell for each reader into the queue: int bytesPerDim = -1; - - for(int i=0;i 0) { Cell cell = cellQueue.poll(); - //System.out.println(" visit " + cell); + // System.out.println(" visit " + cell); - // TODO: if we replace approxBestDistance with actualBestDistance, we can put an opto here to break once this "best" cell is fully outside of the hitQueue bottom's radius: + // TODO: if we replace approxBestDistance with actualBestDistance, we can put an opto here to + // break once this "best" cell is fully outside of the hitQueue bottom's radius: BKDReader reader = readers.get(cell.readerIndex); if (cell.index.isLeafNode()) { - //System.out.println(" leaf"); + // System.out.println(" leaf"); // Leaf block: visit all points and possibly collect them: visitor.curDocBase = docBases.get(cell.readerIndex); visitor.curLiveDocs = liveDocs.get(cell.readerIndex); reader.visitLeafBlockValues(cell.index, states.get(cell.readerIndex)); - //System.out.println(" now " + hitQueue.size() + " hits"); + // System.out.println(" now " + hitQueue.size() + " hits"); } else { - //System.out.println(" non-leaf"); + // System.out.println(" non-leaf"); // Non-leaf block: split into two cells and put them back into the queue: if (visitor.compare(cell.minPacked, cell.maxPacked) == Relation.CELL_OUTSIDE_QUERY) { continue; } - + BytesRef splitValue = BytesRef.deepCopyOf(cell.index.getSplitDimValue()); int splitDim = cell.index.getSplitDim(); - + // we must clone the index so that we we can recurse left and right "concurrently": IndexTree newIndex = cell.index.clone(); byte[] splitPackedValue = cell.maxPacked.clone(); - System.arraycopy(splitValue.bytes, splitValue.offset, splitPackedValue, splitDim*bytesPerDim, bytesPerDim); + System.arraycopy( + splitValue.bytes, + splitValue.offset, + splitPackedValue, + splitDim * bytesPerDim, + bytesPerDim); cell.index.pushLeft(); - cellQueue.offer(new Cell(cell.index, cell.readerIndex, cell.minPacked, 
splitPackedValue, - approxBestDistance(cell.minPacked, splitPackedValue, pointLat, pointLon))); + cellQueue.offer( + new Cell( + cell.index, + cell.readerIndex, + cell.minPacked, + splitPackedValue, + approxBestDistance(cell.minPacked, splitPackedValue, pointLat, pointLon))); splitPackedValue = cell.minPacked.clone(); - System.arraycopy(splitValue.bytes, splitValue.offset, splitPackedValue, splitDim*bytesPerDim, bytesPerDim); + System.arraycopy( + splitValue.bytes, + splitValue.offset, + splitPackedValue, + splitDim * bytesPerDim, + bytesPerDim); newIndex.pushRight(); - cellQueue.offer(new Cell(newIndex, cell.readerIndex, splitPackedValue, cell.maxPacked, - approxBestDistance(splitPackedValue, cell.maxPacked, pointLat, pointLon))); + cellQueue.offer( + new Cell( + newIndex, + cell.readerIndex, + splitPackedValue, + cell.maxPacked, + approxBestDistance(splitPackedValue, cell.maxPacked, pointLat, pointLon))); } } NearestHit[] hits = new NearestHit[hitQueue.size()]; - int downTo = hitQueue.size()-1; + int downTo = hitQueue.size() - 1; while (hitQueue.size() != 0) { hits[downTo] = hitQueue.poll(); downTo--; @@ -311,7 +383,8 @@ class NearestNeighbor { } // NOTE: incoming args never cross the dateline, since they are a BKD cell - private static double approxBestDistance(byte[] minPackedValue, byte[] maxPackedValue, double pointLat, double pointLon) { + private static double approxBestDistance( + byte[] minPackedValue, byte[] maxPackedValue, double pointLat, double pointLon) { double minLat = decodeLatitude(minPackedValue, 0); double minLon = decodeLongitude(minPackedValue, Integer.BYTES); double maxLat = decodeLatitude(maxPackedValue, 0); @@ -320,10 +393,18 @@ class NearestNeighbor { } // NOTE: incoming args never cross the dateline, since they are a BKD cell - private static double approxBestDistance(double minLat, double maxLat, double minLon, double maxLon, double pointLat, double pointLon) { - - // TODO: can we make this the trueBestDistance? I.e., minimum distance between the point and ANY point on the box? we can speed things - // up if so, but not enrolling any BKD cell whose true best distance is > bottom of the current hit queue + private static double approxBestDistance( + double minLat, + double maxLat, + double minLon, + double maxLon, + double pointLat, + double pointLon) { + + // TODO: can we make this the trueBestDistance? I.e., minimum distance between the point and + // ANY point on the box? we can speed things + // up if so, but not enrolling any BKD cell whose true best distance is > bottom of the current + // hit queue if (pointLat >= minLat && pointLat <= maxLat && pointLon >= minLon && pointLon <= maxLon) { // point is inside the cell! 
@@ -336,5 +417,4 @@ class NearestNeighbor { double d4 = SloppyMath.haversinSortKey(pointLat, pointLon, maxLat, minLon); return Math.min(Math.min(d1, d2), Math.min(d3, d4)); } - } diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/PhraseWildcardQuery.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/PhraseWildcardQuery.java index 5fdac5bf42f..3743caab377 100644 --- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/PhraseWildcardQuery.java +++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/PhraseWildcardQuery.java @@ -26,7 +26,6 @@ import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; - import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.PostingsEnum; @@ -60,33 +59,35 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.mutable.MutableValueBool; /** - * A generalized version of {@link PhraseQuery}, built with one or more {@link MultiTermQuery} - * that provides term expansions for multi-terms (one of the expanded terms must match). - *

- * Its main advantage is to control the total number of expansions across all {@link MultiTermQuery}
- * and across all segments.
- * <p>
- * Use the {@link Builder} to build a {@link PhraseWildcardQuery}.
- * <p>
- * This query is similar to {@link MultiPhraseQuery}, but it handles, controls and optimizes the
+ * A generalized version of {@link PhraseQuery}, built with one or more {@link MultiTermQuery} that
+ * provides term expansions for multi-terms (one of the expanded terms must match).
+ *
+ * <p>Its main advantage is to control the total number of expansions across all {@link
+ * MultiTermQuery} and across all segments.
+ *
+ * <p>Use the {@link Builder} to build a {@link PhraseWildcardQuery}.
+ *
+ * <p>This query is similar to {@link MultiPhraseQuery}, but it handles, controls and optimizes the
 * multi-term expansions.
- * <p>
- * This query is equivalent to building an ordered {@link org.apache.lucene.search.spans.SpanNearQuery}
- * with a list of {@link org.apache.lucene.search.spans.SpanTermQuery} and
- * {@link org.apache.lucene.search.spans.SpanMultiTermQueryWrapper}.
- * But it optimizes the multi-term expansions and the segment accesses.
- * It first resolves the single-terms to early stop if some does not match. Then
- * it expands each multi-term sequentially, stopping immediately if one does not
- * match. It detects the segments that do not match to skip them for the next
- * expansions. This often avoid expanding the other multi-terms on some or
- * even all segments. And finally it controls the total number of expansions.
- * <p>
- * Immutable.
+ *
+ * <p>This query is equivalent to building an ordered {@link
+ * org.apache.lucene.search.spans.SpanNearQuery} with a list of {@link
+ * org.apache.lucene.search.spans.SpanTermQuery} and {@link
+ * org.apache.lucene.search.spans.SpanMultiTermQueryWrapper}. But it optimizes the multi-term
+ * expansions and the segment accesses. It first resolves the single-terms to early stop if some
+ * does not match. Then it expands each multi-term sequentially, stopping immediately if one does
+ * not match. It detects the segments that do not match to skip them for the next expansions. This
+ * often avoids expanding the other multi-terms on some or even all segments. And finally it
+ * controls the total number of expansions.
+ *
+ * <p>
    Immutable. + * * @lucene.experimental */ public class PhraseWildcardQuery extends Query { - protected static final Query NO_MATCH_QUERY = new MatchNoDocsQuery("Empty " + PhraseWildcardQuery.class.getSimpleName()); + protected static final Query NO_MATCH_QUERY = + new MatchNoDocsQuery("Empty " + PhraseWildcardQuery.class.getSimpleName()); protected final String field; protected final List phraseTerms; @@ -134,7 +135,8 @@ public class PhraseWildcardQuery extends Query { } @Override - public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) + throws IOException { IndexReader reader = searcher.getIndexReader(); // Build a list of segments ordered by terms size (number of terms). @@ -184,7 +186,14 @@ public class PhraseWildcardQuery extends Query { // Consider the remaining expansions allowed for all remaining multi-terms. // Divide it evenly to get the expansion limit for the current multi-term. int maxExpansionsForTerm = remainingExpansions / remainingMultiTerms; - int numExpansions = phraseTerm.collectTermData(this, searcher, sizeSortedSegments, remainingMultiTerms, maxExpansionsForTerm, termsData); + int numExpansions = + phraseTerm.collectTermData( + this, + searcher, + sizeSortedSegments, + remainingMultiTerms, + maxExpansionsForTerm, + termsData); assert numExpansions >= 0 && numExpansions <= maxExpansionsForTerm; if (numExpansions == 0) { // Early stop here because the multi-term does not match in any segment. @@ -200,16 +209,14 @@ public class PhraseWildcardQuery extends Query { assert remainingMultiTerms == 0; assert remainingExpansions >= 0; -// TestCounters.get().printTestCounters(termsData); + // TestCounters.get().printTestCounters(termsData); - return termsData.areAllTermsMatching() ? - createPhraseWeight(searcher, scoreMode, boost, termsData) + return termsData.areAllTermsMatching() + ? createPhraseWeight(searcher, scoreMode, boost, termsData) : noMatchWeight(); } - /** - * Creates new {@link TermsData}. - */ + /** Creates new {@link TermsData}. 
*/ protected TermsData createTermsData(int numSegments) { return new TermsData(phraseTerms.size(), numSegments); } @@ -233,8 +240,9 @@ public class PhraseWildcardQuery extends Query { }; } - PhraseWeight createPhraseWeight(IndexSearcher searcher, ScoreMode scoreMode, - float boost, TermsData termsData) throws IOException { + PhraseWeight createPhraseWeight( + IndexSearcher searcher, ScoreMode scoreMode, float boost, TermsData termsData) + throws IOException { return new PhraseWeight(this, field, searcher, scoreMode) { @Override @@ -242,14 +250,18 @@ public class PhraseWildcardQuery extends Query { if (termsData.termStatsList.isEmpty()) { return null; } - return searcher.getSimilarity().scorer( - boost, - searcher.collectionStatistics(field), - termsData.termStatsList.toArray(new TermStatistics[0])); + return searcher + .getSimilarity() + .scorer( + boost, + searcher.collectionStatistics(field), + termsData.termStatsList.toArray(new TermStatistics[0])); } @Override - protected PhraseMatcher getPhraseMatcher(LeafReaderContext leafReaderContext, Similarity.SimScorer scorer, boolean exposeOffsets) throws IOException { + protected PhraseMatcher getPhraseMatcher( + LeafReaderContext leafReaderContext, Similarity.SimScorer scorer, boolean exposeOffsets) + throws IOException { Terms fieldTerms = leafReaderContext.reader().terms(field); if (fieldTerms == null) { return null; @@ -257,13 +269,15 @@ public class PhraseWildcardQuery extends Query { TermsEnum termsEnum = fieldTerms.iterator(); float totalMatchCost = 0; - PhraseQuery.PostingsAndFreq[] postingsFreqs = new PhraseQuery.PostingsAndFreq[phraseTerms.size()]; + PhraseQuery.PostingsAndFreq[] postingsFreqs = + new PhraseQuery.PostingsAndFreq[phraseTerms.size()]; for (int termPosition = 0; termPosition < postingsFreqs.length; termPosition++) { TermData termData = termsData.getTermData(termPosition); assert termData != null; List termStates = termData.getTermStatesForSegment(leafReaderContext); if (termStates == null) { - // If the current phrase term does not match in the segment, then the phrase cannot match on the segment. + // If the current phrase term does not match in the segment, then the phrase cannot + // match on the segment. // So early stop by returning a null scorer. return null; } @@ -272,16 +286,26 @@ public class PhraseWildcardQuery extends Query { List postingsEnums = new ArrayList<>(termStates.size()); for (TermBytesTermState termBytesTermState : termStates) { termsEnum.seekExact(termBytesTermState.termBytes, termBytesTermState.termState); - postingsEnums.add(termsEnum.postings(null, exposeOffsets ? PostingsEnum.ALL : PostingsEnum.POSITIONS)); + postingsEnums.add( + termsEnum.postings( + null, exposeOffsets ? PostingsEnum.ALL : PostingsEnum.POSITIONS)); totalMatchCost += PhraseQuery.termPositionsCost(termsEnum); } PostingsEnum unionPostingsEnum; if (postingsEnums.size() == 1) { unionPostingsEnum = postingsEnums.get(0); } else { - unionPostingsEnum = exposeOffsets ? new MultiPhraseQuery.UnionFullPostingsEnum(postingsEnums) : new MultiPhraseQuery.UnionPostingsEnum(postingsEnums); + unionPostingsEnum = + exposeOffsets + ? 
new MultiPhraseQuery.UnionFullPostingsEnum(postingsEnums) + : new MultiPhraseQuery.UnionPostingsEnum(postingsEnums); } - postingsFreqs[termPosition] = new PhraseQuery.PostingsAndFreq(unionPostingsEnum, new SlowImpactsEnum(unionPostingsEnum), termPosition, termData.terms); + postingsFreqs[termPosition] = + new PhraseQuery.PostingsAndFreq( + unionPostingsEnum, + new SlowImpactsEnum(unionPostingsEnum), + termPosition, + termData.terms); } if (slop == 0) { @@ -289,7 +313,8 @@ public class PhraseWildcardQuery extends Query { ArrayUtil.timSort(postingsFreqs); return new ExactPhraseMatcher(postingsFreqs, scoreMode, scorer, totalMatchCost); } else { - return new SloppyPhraseMatcher(postingsFreqs, slop, scoreMode, scorer, totalMatchCost, exposeOffsets); + return new SloppyPhraseMatcher( + postingsFreqs, slop, scoreMode, scorer, totalMatchCost, exposeOffsets); } } }; @@ -337,8 +362,7 @@ public class PhraseWildcardQuery extends Query { } /** - * Collects the {@link TermState} and {@link TermStatistics} for a single-term - * without expansion. + * Collects the {@link TermState} and {@link TermStatistics} for a single-term without expansion. * * @param termsData receives the collected data. */ @@ -346,7 +370,8 @@ public class PhraseWildcardQuery extends Query { SingleTerm singleTerm, IndexSearcher searcher, List segments, - TermsData termsData) throws IOException { + TermsData termsData) + throws IOException { TermData termData = termsData.getOrCreateTermData(singleTerm.termPosition); Term term = singleTerm.term; termData.terms.add(term); @@ -366,7 +391,9 @@ public class PhraseWildcardQuery extends Query { if (termState != null) { termMatchesInSegment = true; numMatches++; - termData.setTermStatesForSegment(leafReaderContext, Collections.singletonList(new TermBytesTermState(term.bytes(), termState))); + termData.setTermStatesForSegment( + leafReaderContext, + Collections.singletonList(new TermBytesTermState(term.bytes(), termState))); } } if (!termMatchesInSegment && shouldOptimizeSegments()) { @@ -377,18 +404,18 @@ public class PhraseWildcardQuery extends Query { } // Collect the term stats across all segments. if (termStates.docFreq() > 0) { - termsData.termStatsList.add(searcher.termStatistics(term, termStates.docFreq(), termStates.totalTermFreq())); + termsData.termStatsList.add( + searcher.termStatistics(term, termStates.docFreq(), termStates.totalTermFreq())); } return numMatches; } /** - * Collects the {@link TermState} and {@link TermStatistics} for a multi-term - * with expansion. + * Collects the {@link TermState} and {@link TermStatistics} for a multi-term with expansion. * - * @param remainingMultiTerms the number of remaining multi-terms to process, - * including the current one, excluding the multi-terms already processed. - * @param termsData receives the collected data. + * @param remainingMultiTerms the number of remaining multi-terms to process, including the + * current one, excluding the multi-terms already processed. + * @param termsData receives the collected data. */ protected int collectMultiTermData( MultiTerm multiTerm, @@ -396,7 +423,8 @@ public class PhraseWildcardQuery extends Query { List segments, int remainingMultiTerms, // Unused here but leveraged by extending classes. 
int maxExpansionsForTerm, - TermsData termsData) throws IOException { + TermsData termsData) + throws IOException { TermData termData = termsData.getOrCreateTermData(multiTerm.termPosition); Map termStatsMap = createTermStatsMap(multiTerm); int numExpansions = 0; @@ -407,8 +435,13 @@ public class PhraseWildcardQuery extends Query { LeafReaderContext leafReaderContext = segmentIterator.next(); int remainingExpansions = maxExpansionsForTerm - numExpansions; assert remainingExpansions >= 0; - List termStates = collectMultiTermDataForSegment( - multiTerm, leafReaderContext, remainingExpansions, shouldStopSegmentIteration, termStatsMap); + List termStates = + collectMultiTermDataForSegment( + multiTerm, + leafReaderContext, + remainingExpansions, + shouldStopSegmentIteration, + termStatsMap); if (!termStates.isEmpty()) { assert termStates.size() <= remainingExpansions; @@ -432,29 +465,28 @@ public class PhraseWildcardQuery extends Query { return segmentOptimizationEnabled; } - /** - * Creates a {@link TermStats} map for a {@link MultiTerm}. - */ - protected Map createTermStatsMap(MultiTerm multiTerm) { // multiTerm param can be used by sub-classes. + /** Creates a {@link TermStats} map for a {@link MultiTerm}. */ + protected Map createTermStatsMap( + MultiTerm multiTerm) { // multiTerm param can be used by sub-classes. return new HashMap<>(); } /** - * Collects the {@link TermState} list and {@link TermStatistics} for a multi-term - * on a specific index segment. + * Collects the {@link TermState} list and {@link TermStatistics} for a multi-term on a specific + * index segment. * - * @param remainingExpansions the number of remaining expansions allowed - * for the segment. - * @param shouldStopSegmentIteration to be set to true to stop the segment - * iteration calling this method repeatedly. - * @param termStatsMap receives the collected {@link TermStats} across all segments. + * @param remainingExpansions the number of remaining expansions allowed for the segment. + * @param shouldStopSegmentIteration to be set to true to stop the segment iteration calling this + * method repeatedly. + * @param termStatsMap receives the collected {@link TermStats} across all segments. */ protected List collectMultiTermDataForSegment( MultiTerm multiTerm, LeafReaderContext leafReaderContext, int remainingExpansions, MutableValueBool shouldStopSegmentIteration, - Map termStatsMap) throws IOException { + Map termStatsMap) + throws IOException { TermsEnum termsEnum = createTermsEnum(multiTerm, leafReaderContext); if (termsEnum == null) { return Collections.emptyList(); @@ -488,7 +520,8 @@ public class PhraseWildcardQuery extends Query { * * @return null if there is no term for this query field in the segment. */ - protected TermsEnum createTermsEnum(MultiTerm multiTerm, LeafReaderContext leafReaderContext) throws IOException { + protected TermsEnum createTermsEnum(MultiTerm multiTerm, LeafReaderContext leafReaderContext) + throws IOException { Terms terms = leafReaderContext.reader().terms(field); if (terms == null) { return null; @@ -503,36 +536,41 @@ public class PhraseWildcardQuery extends Query { * Collect the term stats across all segments. * * @param termStatsMap input map of already collected {@link TermStats}. - * @param termsData receives the {@link TermStatistics} computed for all {@link TermStats}. - * @param termData receives all the collected {@link Term}. + * @param termsData receives the {@link TermStatistics} computed for all {@link TermStats}. 
+ * @param termData receives all the collected {@link Term}. */ protected void collectMultiTermStats( IndexSearcher searcher, Map termStatsMap, TermsData termsData, - TermData termData) throws IOException { + TermData termData) + throws IOException { // Collect term stats across all segments. - // Collect stats the same way MultiPhraseQuery.MultiPhraseWeight constructor does, for all terms and all segments. + // Collect stats the same way MultiPhraseQuery.MultiPhraseWeight constructor does, for all terms + // and all segments. for (Map.Entry termStatsEntry : termStatsMap.entrySet()) { Term term = new Term(field, termStatsEntry.getKey()); termData.terms.add(term); TermStats termStats = termStatsEntry.getValue(); if (termStats.docFreq > 0) { - termsData.termStatsList.add(searcher.termStatistics(term, termStats.docFreq, termStats.totalTermFreq)); + termsData.termStatsList.add( + searcher.termStatistics(term, termStats.docFreq, termStats.totalTermFreq)); } } } protected void checkTermsHavePositions(Terms terms) { if (!terms.hasPositions()) { - throw new IllegalStateException("field \"" + field + "\" was indexed without position data;" + - " cannot run " + PhraseWildcardQuery.class.getSimpleName()); + throw new IllegalStateException( + "field \"" + + field + + "\" was indexed without position data;" + + " cannot run " + + PhraseWildcardQuery.class.getSimpleName()); } } - /** - * Builds a {@link PhraseWildcardQuery}. - */ + /** Builds a {@link PhraseWildcardQuery}. */ public static class Builder { protected final String field; @@ -542,28 +580,30 @@ public class PhraseWildcardQuery extends Query { protected final boolean segmentOptimizationEnabled; /** - * @param field The query field. - * @param maxMultiTermExpansions The maximum number of expansions across all multi-terms and across all segments. - * It counts expansions for each segments individually, that allows optimizations per - * segment and unused expansions are credited to next segments. This is different from - * {@link MultiPhraseQuery} and {@link org.apache.lucene.search.spans.SpanMultiTermQueryWrapper} - * which have an expansion limit per multi-term. + * @param field The query field. + * @param maxMultiTermExpansions The maximum number of expansions across all multi-terms and + * across all segments. It counts expansions for each segments individually, that allows + * optimizations per segment and unused expansions are credited to next segments. This is + * different from {@link MultiPhraseQuery} and {@link + * org.apache.lucene.search.spans.SpanMultiTermQueryWrapper} which have an expansion limit + * per multi-term. */ public Builder(String field, int maxMultiTermExpansions) { this(field, maxMultiTermExpansions, true); } /** - * @param field The query field. - * @param maxMultiTermExpansions The maximum number of expansions across all multi-terms and across all segments. - * It counts expansions for each segments individually, that allows optimizations per - * segment and unused expansions are credited to next segments. This is different from - * {@link MultiPhraseQuery} and {@link org.apache.lucene.search.spans.SpanMultiTermQueryWrapper} - * which have an expansion limit per multi-term. - * @param segmentOptimizationEnabled Whether to enable the segment optimization which consists in ignoring a segment - * for further analysis as soon as a term is not present inside it. This optimizes - * the query execution performance but changes the scoring. The result ranking is - * preserved. + * @param field The query field. 
+ * @param maxMultiTermExpansions The maximum number of expansions across all multi-terms and + * across all segments. It counts expansions for each segments individually, that allows + * optimizations per segment and unused expansions are credited to next segments. This is + * different from {@link MultiPhraseQuery} and {@link + * org.apache.lucene.search.spans.SpanMultiTermQueryWrapper} which have an expansion limit + * per multi-term. + * @param segmentOptimizationEnabled Whether to enable the segment optimization which consists + * in ignoring a segment for further analysis as soon as a term is not present inside it. + * This optimizes the query execution performance but changes the scoring. The result + * ranking is preserved. */ public Builder(String field, int maxMultiTermExpansions, boolean segmentOptimizationEnabled) { this.field = field; @@ -572,44 +612,49 @@ public class PhraseWildcardQuery extends Query { phraseTerms = new ArrayList<>(); } - /** - * Adds a single term at the next position in the phrase. - */ + /** Adds a single term at the next position in the phrase. */ public Builder addTerm(BytesRef termBytes) { return addTerm(new Term(field, termBytes)); } - /** - * Adds a single term at the next position in the phrase. - */ + /** Adds a single term at the next position in the phrase. */ public Builder addTerm(Term term) { if (!term.field().equals(field)) { - throw new IllegalArgumentException(term.getClass().getSimpleName() - + " field \"" + term.field() + "\" cannot be different from the " - + PhraseWildcardQuery.class.getSimpleName() + " field \"" + field + "\""); + throw new IllegalArgumentException( + term.getClass().getSimpleName() + + " field \"" + + term.field() + + "\" cannot be different from the " + + PhraseWildcardQuery.class.getSimpleName() + + " field \"" + + field + + "\""); } phraseTerms.add(new SingleTerm(term, phraseTerms.size())); return this; } /** - * Adds a multi-term at the next position in the phrase. - * Any of the terms returned by the provided {@link MultiTermQuery} enumeration - * may match (expansion as a disjunction). + * Adds a multi-term at the next position in the phrase. Any of the terms returned by the + * provided {@link MultiTermQuery} enumeration may match (expansion as a disjunction). */ public Builder addMultiTerm(MultiTermQuery multiTermQuery) { if (!multiTermQuery.getField().equals(field)) { - throw new IllegalArgumentException(multiTermQuery.getClass().getSimpleName() - + " field \"" + multiTermQuery.getField() + "\" cannot be different from the " - + PhraseWildcardQuery.class.getSimpleName() + " field \"" + field + "\""); + throw new IllegalArgumentException( + multiTermQuery.getClass().getSimpleName() + + " field \"" + + multiTermQuery.getField() + + "\" cannot be different from the " + + PhraseWildcardQuery.class.getSimpleName() + + " field \"" + + field + + "\""); } phraseTerms.add(new MultiTerm(multiTermQuery, phraseTerms.size())); return this; } - /** - * Sets the phrase slop. - */ + /** Sets the phrase slop. */ public Builder setSlop(int slop) { if (slop < 0) { throw new IllegalArgumentException("slop value cannot be negative"); @@ -618,18 +663,17 @@ public class PhraseWildcardQuery extends Query { return this; } - /** - * Builds a {@link PhraseWildcardQuery}. - */ + /** Builds a {@link PhraseWildcardQuery}. 
*/ public PhraseWildcardQuery build() { - return new PhraseWildcardQuery(field, phraseTerms, slop, maxMultiTermExpansions, segmentOptimizationEnabled); + return new PhraseWildcardQuery( + field, phraseTerms, slop, maxMultiTermExpansions, segmentOptimizationEnabled); } } /** - * All {@link PhraseTerm} are light and immutable. They do not hold query - * processing data such as {@link TermsData}. That way, the {@link PhraseWildcardQuery} - * is immutable and light itself and can be used safely as a key of the query cache. + * All {@link PhraseTerm} are light and immutable. They do not hold query processing data such as + * {@link TermsData}. That way, the {@link PhraseWildcardQuery} is immutable and light itself and + * can be used safely as a key of the query cache. */ protected abstract static class PhraseTerm { @@ -644,16 +688,17 @@ public class PhraseWildcardQuery extends Query { protected abstract Query getQuery(); /** - * Collects {@link TermState} and {@link TermStatistics} for the term without expansion. - * It must be called only if {@link #hasExpansions()} returns false. - * Simplified version of {@code #collectTermData(PhraseWildcardQuery, IndexSearcher, List, int, int, TermsData)} - * with less arguments. This method throws {@link UnsupportedOperationException} if not overridden. + * Collects {@link TermState} and {@link TermStatistics} for the term without expansion. It must + * be called only if {@link #hasExpansions()} returns false. Simplified version of {@code + * #collectTermData(PhraseWildcardQuery, IndexSearcher, List, int, int, TermsData)} with less + * arguments. This method throws {@link UnsupportedOperationException} if not overridden. */ protected int collectTermData( PhraseWildcardQuery query, IndexSearcher searcher, List segments, - TermsData termsData) throws IOException { + TermsData termsData) + throws IOException { throw new UnsupportedOperationException(); } @@ -661,8 +706,8 @@ public class PhraseWildcardQuery extends Query { * Collects {@link TermState} and {@link TermStatistics} for the term (potentially expanded). * * @param termsData {@link TermsData} to update with the collected terms and stats. - * @return The number of expansions or matches in all segments; or 0 if this term - * does not match in any segment, in this case the phrase query can immediately stop. + * @return The number of expansions or matches in all segments; or 0 if this term does not match + * in any segment, in this case the phrase query can immediately stop. */ protected abstract int collectTermData( PhraseWildcardQuery query, @@ -670,7 +715,8 @@ public class PhraseWildcardQuery extends Query { List segments, int remainingMultiTerms, int maxExpansionsForTerm, - TermsData termsData) throws IOException; + TermsData termsData) + throws IOException; protected abstract void toString(StringBuilder builder); @@ -681,9 +727,7 @@ public class PhraseWildcardQuery extends Query { public abstract int hashCode(); } - /** - * Phrase term with no expansion. - */ + /** Phrase term with no expansion. 
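Putting the Builder together: the sketch below assembles a phrase with one fixed term and one wildcard position. The field name, terms, expansion budget, and the searcher are illustrative assumptions, not part of this change; Term, PrefixQuery, and TopDocs are the usual core classes.

    PhraseWildcardQuery query =
        new PhraseWildcardQuery.Builder("body", 100) // 100 = budget shared across multi-terms and segments
            .addTerm(new Term("body", "quick"))
            .addMultiTerm(new PrefixQuery(new Term("body", "fo"))) // expands to e.g. "fox"
            .setSlop(1)
            .build();
    TopDocs hits = searcher.search(query, 10); // searcher: an assumed IndexSearcher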
*/ protected static class SingleTerm extends PhraseTerm { protected final Term term; @@ -708,7 +752,8 @@ public class PhraseWildcardQuery extends Query { PhraseWildcardQuery query, IndexSearcher searcher, List segments, - TermsData termsData) throws IOException { + TermsData termsData) + throws IOException { return collectTermData(query, searcher, segments, 0, 0, termsData); } @@ -719,7 +764,8 @@ public class PhraseWildcardQuery extends Query { List segments, int remainingMultiTerms, int maxExpansionsForTerm, - TermsData termsData) throws IOException { + TermsData termsData) + throws IOException { return query.collectSingleTermData(this, searcher, segments, termsData); } @@ -743,9 +789,7 @@ public class PhraseWildcardQuery extends Query { } } - /** - * Phrase term with expansions. - */ + /** Phrase term with expansions. */ protected static class MultiTerm extends PhraseTerm { protected final MultiTermQuery query; @@ -772,8 +816,10 @@ public class PhraseWildcardQuery extends Query { List segments, int remainingMultiTerms, int maxExpansionsForTerm, - TermsData termsData) throws IOException { - return query.collectMultiTermData(this, searcher, segments, remainingMultiTerms, maxExpansionsForTerm, termsData); + TermsData termsData) + throws IOException { + return query.collectMultiTermData( + this, searcher, segments, remainingMultiTerms, maxExpansionsForTerm, termsData); } @Override @@ -797,8 +843,8 @@ public class PhraseWildcardQuery extends Query { } /** - * Holds the {@link TermState} and {@link TermStatistics} for all the matched - * and collected {@link Term}, for all phrase terms, for all segments. + * Holds the {@link TermState} and {@link TermStatistics} for all the matched and collected {@link + * Term}, for all phrase terms, for all segments. */ protected static class TermsData { @@ -841,10 +887,13 @@ public class PhraseWildcardQuery extends Query { builder.append(", termDataPerPosition=").append(Arrays.asList(termDataPerPosition)); builder.append(", termsStatsList=["); for (TermStatistics termStatistics : termStatsList) { - builder.append("{") + builder + .append("{") .append(termStatistics.term().utf8ToString()) - .append(", ").append(termStatistics.docFreq()) - .append(", ").append(termStatistics.totalTermFreq()) + .append(", ") + .append(termStatistics.docFreq()) + .append(", ") + .append(termStatistics.totalTermFreq()) .append("}"); } builder.append("]"); @@ -854,8 +903,8 @@ public class PhraseWildcardQuery extends Query { } /** - * Holds the {@link TermState} for all the collected {@link Term}, - * for a specific phrase term, for all segments. + * Holds the {@link TermState} for all the collected {@link Term}, for a specific phrase term, for + * all segments. */ protected static class TermData { @@ -870,11 +919,10 @@ public class PhraseWildcardQuery extends Query { terms = new ArrayList<>(); } - /** - * Sets the collected list of {@link TermBytesTermState} for the given segment. - */ + /** Sets the collected list of {@link TermBytesTermState} for the given segment. 
*/ @SuppressWarnings("unchecked") - protected void setTermStatesForSegment(LeafReaderContext leafReaderContext, List termStates) { + protected void setTermStatesForSegment( + LeafReaderContext leafReaderContext, List termStates) { if (termStatesPerSegment == null) { termStatesPerSegment = (List[]) new List[numSegments]; termsData.numTermsMatching++; @@ -883,11 +931,13 @@ public class PhraseWildcardQuery extends Query { } /** - * @return The collected list of {@link TermBytesTermState} for the given segment; - * or null if this phrase term does not match in the given segment. + * @return The collected list of {@link TermBytesTermState} for the given segment; or null if + * this phrase term does not match in the given segment. */ - protected List getTermStatesForSegment(LeafReaderContext leafReaderContext) { - assert termStatesPerSegment != null : "No TermState for any segment; the query should have been stopped before"; + protected List getTermStatesForSegment( + LeafReaderContext leafReaderContext) { + assert termStatesPerSegment != null + : "No TermState for any segment; the query should have been stopped before"; return termStatesPerSegment[leafReaderContext.ord]; } @@ -907,9 +957,7 @@ public class PhraseWildcardQuery extends Query { } } - /** - * Holds a pair of term bytes - term state. - */ + /** Holds a pair of term bytes - term state. */ public static class TermBytesTermState { protected final BytesRef termBytes; @@ -926,9 +974,7 @@ public class PhraseWildcardQuery extends Query { } } - /** - * Accumulates the doc freq and total term freq. - */ + /** Accumulates the doc freq and total term freq. */ public static class TermStats { protected final BytesRef termBytes; @@ -955,11 +1001,10 @@ public class PhraseWildcardQuery extends Query { /** * Compares segments based of the number of terms they contain. - *

    - * This is used to sort segments incrementally by number of terms. This - * way the first segment to search is the smallest, so a term has the lowest - * probability to match in this segment. And if the term does not match, - * we credit unused expansions when searching the other next segments. + * + *

    This is used to sort segments incrementally by number of terms. This way the first segment + * to search is the smallest, so a term has the lowest probability to match in this segment. And + * if the term does not match, we credit unused expansions when searching the other next segments. */ protected class SegmentTermsSizeComparator implements Comparator { @@ -974,7 +1019,8 @@ public class PhraseWildcardQuery extends Query { } } - protected List createTermsSizeSortedCopyOf(List segments) throws IOException { + protected List createTermsSizeSortedCopyOf(List segments) + throws IOException { List copy = new ArrayList<>(segments); try { copy.sort(this); @@ -993,9 +1039,7 @@ public class PhraseWildcardQuery extends Query { } } - /** - * Test counters incremented when assertions are enabled. Used only when testing. - */ + /** Test counters incremented when assertions are enabled. Used only when testing. */ protected static class TestCounters { private static final TestCounters SINGLETON = new TestCounters(); @@ -1050,14 +1094,14 @@ public class PhraseWildcardQuery extends Query { queryEarlyStopCount = 0; } -// protected void printTestCounters(TermsData termsData) { -// System.out.println("singleTermAnalysisCount=" + singleTermAnalysisCount); -// System.out.println("multiTermAnalysisCount=" + multiTermAnalysisCount); -// System.out.println("expansionCount=" + expansionCount); -// System.out.println("segmentUseCount=" + segmentUseCount); -// System.out.println("segmentSkipCount=" + segmentSkipCount); -// System.out.println("queryEarlyStopCount=" + queryEarlyStopCount); -// System.out.println(termsData); -// } + // protected void printTestCounters(TermsData termsData) { + // System.out.println("singleTermAnalysisCount=" + singleTermAnalysisCount); + // System.out.println("multiTermAnalysisCount=" + multiTermAnalysisCount); + // System.out.println("expansionCount=" + expansionCount); + // System.out.println("segmentUseCount=" + segmentUseCount); + // System.out.println("segmentSkipCount=" + segmentSkipCount); + // System.out.println("queryEarlyStopCount=" + queryEarlyStopCount); + // System.out.println(termsData); + // } } -} \ No newline at end of file +} diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/TermAutomatonQuery.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/TermAutomatonQuery.java index 754b99c5ed1..be8688ee425 100644 --- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/TermAutomatonQuery.java +++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/TermAutomatonQuery.java @@ -16,12 +16,13 @@ */ package org.apache.lucene.sandbox.search; +import static org.apache.lucene.util.automaton.Operations.DEFAULT_MAX_DETERMINIZED_STATES; + import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; - import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.LeafReaderContext; @@ -55,8 +56,6 @@ import org.apache.lucene.util.automaton.Automaton; import org.apache.lucene.util.automaton.Operations; import org.apache.lucene.util.automaton.Transition; -import static org.apache.lucene.util.automaton.Operations.DEFAULT_MAX_DETERMINIZED_STATES; - // TODO // - compare perf to PhraseQuery exact and sloppy // - optimize: find terms that are in fact MUST (because all paths @@ -64,32 +63,31 @@ import static org.apache.lucene.util.automaton.Operations.DEFAULT_MAX_DETERMINIZ // - if we ever store posLength in the index, it 
would be easy[ish] // to take it into account here -/** A proximity query that lets you express an automaton, whose - * transitions are terms, to match documents. This is a generalization - * of other proximity queries like {@link PhraseQuery}, {@link - * MultiPhraseQuery} and {@link SpanNearQuery}. It is likely - * slow, since it visits any document having any of the terms (i.e. it - * acts like a disjunction, not a conjunction like {@link - * PhraseQuery}), and then it must merge-sort all positions within each - * document to test whether/how many times the automaton matches. +/** + * A proximity query that lets you express an automaton, whose transitions are terms, to match + * documents. This is a generalization of other proximity queries like {@link PhraseQuery}, {@link + * MultiPhraseQuery} and {@link SpanNearQuery}. It is likely slow, since it visits any document + * having any of the terms (i.e. it acts like a disjunction, not a conjunction like {@link + * PhraseQuery}), and then it must merge-sort all positions within each document to test whether/how + * many times the automaton matches. * - *

    After creating the query, use {@link #createState}, {@link - * #setAccept}, {@link #addTransition} and {@link #addAnyTransition} to - * build up the automaton. Once you are done, call {@link #finish} and - * then execute the query. + *

    After creating the query, use {@link #createState}, {@link #setAccept}, {@link #addTransition} + * and {@link #addAnyTransition} to build up the automaton. Once you are done, call {@link #finish} + * and then execute the query. * - *

    This code is very new and likely has exciting bugs! + *

    This code is very new and likely has exciting bugs! * - * @lucene.experimental */ - + * @lucene.experimental + */ public class TermAutomatonQuery extends Query implements Accountable { - private static final long BASE_RAM_BYTES = RamUsageEstimator.shallowSizeOfInstance(TermAutomatonQuery.class); + private static final long BASE_RAM_BYTES = + RamUsageEstimator.shallowSizeOfInstance(TermAutomatonQuery.class); private final String field; private final Automaton.Builder builder; Automaton det; - private final Map termToID = new HashMap<>(); - private final Map idToTerm = new HashMap<>(); + private final Map termToID = new HashMap<>(); + private final Map idToTerm = new HashMap<>(); private int anyTermID = -1; public TermAutomatonQuery(String field) { @@ -132,9 +130,10 @@ public class TermAutomatonQuery extends Query implements Accountable { /** * Call this once you are done adding states/transitions. - * @param maxDeterminizedStates Maximum number of states created when - * determinizing the automaton. Higher numbers allow this operation to - * consume more memory but allow more complex automatons. + * + * @param maxDeterminizedStates Maximum number of states created when determinizing the automaton. + * Higher numbers allow this operation to consume more memory but allow more complex + * automatons. */ public void finish(int maxDeterminizedStates) { Automaton automaton = builder.finish(); @@ -152,7 +151,7 @@ public class TermAutomatonQuery extends Query implements Accountable { // Make sure there are no leading or trailing ANY: int count = automaton.initTransition(0, t); - for(int i=0;i= t.min && anyTermID <= t.max) { throw new IllegalStateException("automaton cannot lead with an ANY transition"); @@ -160,9 +159,9 @@ public class TermAutomatonQuery extends Query implements Accountable { } int numStates = automaton.getNumStates(); - for(int i=0;i= t.min && anyTermID <= t.max) { throw new IllegalStateException("automaton cannot end with an ANY transition"); @@ -175,20 +174,20 @@ public class TermAutomatonQuery extends Query implements Accountable { // We have to carefully translate these transitions so automaton // realizes they also match all other terms: Automaton newAutomaton = new Automaton(); - for(int i=0;i termStates = new HashMap<>(); + Map termStates = new HashMap<>(); - for (Map.Entry ent : termToID.entrySet()) { + for (Map.Entry ent : termToID.entrySet()) { if (ent.getKey() != null) { - termStates.put(ent.getValue(), TermStates.build(context, new Term(field, ent.getKey()), scoreMode.needsScores())); + termStates.put( + ent.getValue(), + TermStates.build(context, new Term(field, ent.getKey()), scoreMode.needsScores())); } } @@ -256,8 +257,7 @@ public class TermAutomatonQuery extends Query implements Accountable { /** Returns true iff o is equal to this. 
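As a usage sketch of the state-machine API described above, the following builds an automaton accepting "fast fox" or "quick fox"; the field and terms are illustrative assumptions:

    TermAutomatonQuery q = new TermAutomatonQuery("body");
    int s0 = q.createState();
    int s1 = q.createState();
    int s2 = q.createState();
    q.setAccept(s2, true);
    q.addTransition(s0, s1, "fast");
    q.addTransition(s0, s1, "quick");
    q.addTransition(s1, s2, "fox");
    q.finish(); // determinizes the automaton; must be called before executing the query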
*/ @Override public boolean equals(Object other) { - return sameClassAs(other) && - equalsTo(getClass().cast(other)); + return sameClassAs(other) && equalsTo(getClass().cast(other)); } private static boolean checkFinished(TermAutomatonQuery q) { @@ -268,9 +268,7 @@ public class TermAutomatonQuery extends Query implements Accountable { } private boolean equalsTo(TermAutomatonQuery other) { - return checkFinished(this) && - checkFinished(other) && - other == this; + return checkFinished(this) && checkFinished(other) && other == this; } @Override @@ -284,16 +282,18 @@ public class TermAutomatonQuery extends Query implements Accountable { @Override public long ramBytesUsed() { - return BASE_RAM_BYTES + - RamUsageEstimator.sizeOfObject(builder) + - RamUsageEstimator.sizeOfObject(det) + - RamUsageEstimator.sizeOfObject(field) + - RamUsageEstimator.sizeOfObject(idToTerm) + - RamUsageEstimator.sizeOfObject(termToID); + return BASE_RAM_BYTES + + RamUsageEstimator.sizeOfObject(builder) + + RamUsageEstimator.sizeOfObject(det) + + RamUsageEstimator.sizeOfObject(field) + + RamUsageEstimator.sizeOfObject(idToTerm) + + RamUsageEstimator.sizeOfObject(termToID); } - /** Returns the dot (graphviz) representation of this automaton. - * This is extremely useful for visualizing the automaton. */ + /** + * Returns the dot (graphviz) representation of this automaton. This is extremely useful for + * visualizing the automaton. + */ public String toDot() { // TODO: refactor & share with Automaton.toDot! @@ -308,7 +308,7 @@ public class TermAutomatonQuery extends Query implements Accountable { } Transition t = new Transition(); - for(int state=0;state= t.min; - for(int j=t.min;j<=t.max;j++) { + for (int j = t.min; j <= t.max; j++) { b.append(" "); b.append(state); b.append(" -> "); @@ -361,22 +361,29 @@ public class TermAutomatonQuery extends Query implements Accountable { final class TermAutomatonWeight extends Weight { final Automaton automaton; - private final Map termStates; + private final Map termStates; private final Similarity.SimScorer stats; private final Similarity similarity; - public TermAutomatonWeight(Automaton automaton, IndexSearcher searcher, Map termStates, float boost) throws IOException { + public TermAutomatonWeight( + Automaton automaton, + IndexSearcher searcher, + Map termStates, + float boost) + throws IOException { super(TermAutomatonQuery.this); this.automaton = automaton; this.termStates = termStates; this.similarity = searcher.getSimilarity(); List allTermStats = new ArrayList<>(); - for(Map.Entry ent : idToTerm.entrySet()) { + for (Map.Entry ent : idToTerm.entrySet()) { Integer termID = ent.getKey(); if (ent.getValue() != null) { TermStates ts = termStates.get(termID); if (ts.docFreq() > 0) { - allTermStats.add(searcher.termStatistics(new Term(field, ent.getValue()), ts.docFreq(), ts.totalTermFreq())); + allTermStats.add( + searcher.termStatistics( + new Term(field, ent.getValue()), ts.docFreq(), ts.totalTermFreq())); } } } @@ -384,8 +391,11 @@ public class TermAutomatonQuery extends Query implements Accountable { if (allTermStats.isEmpty()) { stats = null; // no terms matched at all, will not use sim } else { - stats = similarity.scorer(boost, searcher.collectionStatistics(field), - allTermStats.toArray(new TermStatistics[allTermStats.size()])); + stats = + similarity.scorer( + boost, + searcher.collectionStatistics(field), + allTermStats.toArray(new TermStatistics[allTermStats.size()])); } } @@ -401,21 +411,29 @@ public class TermAutomatonQuery extends Query implements Accountable { 
EnumAndScorer[] enums = new EnumAndScorer[idToTerm.size()]; boolean any = false; - for(Map.Entry ent : termStates.entrySet()) { + for (Map.Entry ent : termStates.entrySet()) { TermStates termStates = ent.getValue(); - assert termStates.wasBuiltFor(ReaderUtil.getTopLevelContext(context)) : "The top-reader used to create Weight is not the same as the current reader's top-reader (" + ReaderUtil.getTopLevelContext(context); + assert termStates.wasBuiltFor(ReaderUtil.getTopLevelContext(context)) + : "The top-reader used to create Weight is not the same as the current reader's top-reader (" + + ReaderUtil.getTopLevelContext(context); BytesRef term = idToTerm.get(ent.getKey()); TermState state = termStates.get(context); if (state != null) { TermsEnum termsEnum = context.reader().terms(field).iterator(); termsEnum.seekExact(term, state); - enums[ent.getKey()] = new EnumAndScorer(ent.getKey(), termsEnum.postings(null, PostingsEnum.POSITIONS)); + enums[ent.getKey()] = + new EnumAndScorer(ent.getKey(), termsEnum.postings(null, PostingsEnum.POSITIONS)); any = true; } } if (any) { - return new TermAutomatonScorer(this, enums, anyTermID, idToTerm, new LeafSimScorer(stats, context.reader(), field, true)); + return new TermAutomatonScorer( + this, + enums, + anyTermID, + idToTerm, + new LeafSimScorer(stats, context.reader(), field, true)); } else { return null; } @@ -443,9 +461,11 @@ public class TermAutomatonQuery extends Query implements Accountable { return new TermQuery(new Term(field, idToTerm.get(single.ints[single.offset]))); } - // TODO: can PhraseQuery really handle multiple terms at the same position? If so, why do we even have MultiPhraseQuery? - - // Try for either PhraseQuery or MultiPhraseQuery, which only works when the automaton is a sausage: + // TODO: can PhraseQuery really handle multiple terms at the same position? If so, why do we + // even have MultiPhraseQuery? + + // Try for either PhraseQuery or MultiPhraseQuery, which only works when the automaton is a + // sausage: MultiPhraseQuery.Builder mpq = new MultiPhraseQuery.Builder(); PhraseQuery.Builder pq = new PhraseQuery.Builder(); @@ -469,7 +489,7 @@ public class TermAutomatonQuery extends Query implements Accountable { int dest = -1; List terms = new ArrayList<>(); boolean matchesAny = false; - for(int i=0;i= t.min && anyTermID <= t.max; if (matchesAny == false) { - for(int termID=t.min;termID<=t.max;termID++) { + for (int termID = t.min; termID <= t.max; termID++) { terms.add(new Term(field, idToTerm.get(termID))); } } @@ -506,8 +526,9 @@ public class TermAutomatonQuery extends Query implements Accountable { } else if (mpq != null) { return mpq.build(); } - - // TODO: we could maybe also rewrite to union of PhraseQuery (pull all finite strings) if it's "worth it"? + + // TODO: we could maybe also rewrite to union of PhraseQuery (pull all finite strings) if it's + // "worth it"? 
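    // For reference, a minimal "sausage" that this rewrite collapses into a
    // MultiPhraseQuery could be built directly as below; the field and terms are
    // illustrative assumptions, not part of this change:
    //
    //   MultiPhraseQuery.Builder builder = new MultiPhraseQuery.Builder();
    //   builder.add(new Term[] {new Term("body", "fast"), new Term("body", "quick")}); // position 0
    //   builder.add(new Term("body", "fox")); // position 1
    //   Query equivalent = builder.build();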
return this; } diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/TermAutomatonScorer.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/TermAutomatonScorer.java index 586bc7c68cf..8567ce0747a 100644 --- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/TermAutomatonScorer.java +++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/TermAutomatonScorer.java @@ -18,7 +18,6 @@ package org.apache.lucene.sandbox.search; import java.io.IOException; import java.util.Map; - import org.apache.lucene.sandbox.search.TermAutomatonQuery.EnumAndScorer; import org.apache.lucene.sandbox.search.TermAutomatonQuery.TermAutomatonWeight; import org.apache.lucene.search.DocIdSetIterator; @@ -38,7 +37,7 @@ class TermAutomatonScorer extends Scorer { private final PriorityQueue docIDQueue; private final PriorityQueue posQueue; private final RunAutomaton runAutomaton; - private final Map idToTerm; + private final Map idToTerm; // We reuse this array to check for matches starting from an initial // position; we increase posShift every time we move to a new possible @@ -58,9 +57,15 @@ class TermAutomatonScorer extends Scorer { private int docID = -1; private int freq; - public TermAutomatonScorer(TermAutomatonWeight weight, EnumAndScorer[] subs, int anyTermID, Map idToTerm, LeafSimScorer docScorer) throws IOException { + public TermAutomatonScorer( + TermAutomatonWeight weight, + EnumAndScorer[] subs, + int anyTermID, + Map idToTerm, + LeafSimScorer docScorer) + throws IOException { super(weight); - //System.out.println(" automaton:\n" + weight.automaton.toDot()); + // System.out.println(" automaton:\n" + weight.automaton.toDot()); this.runAutomaton = new TermRunAutomaton(weight.automaton, subs.length); this.docScorer = docScorer; this.idToTerm = idToTerm; @@ -70,13 +75,13 @@ class TermAutomatonScorer extends Scorer { this.anyTermID = anyTermID; this.subsOnDoc = new EnumAndScorer[subs.length]; this.positions = new PosState[4]; - for(int i=0;i { public DocIDQueue(int maxSize) { super(maxSize); @@ -98,8 +102,7 @@ class TermAutomatonScorer extends Scorer { } } - /** Sorts by position so we can visit all scorers on one doc, by - * position. */ + /** Sorts by position so we can visit all scorers on one doc, by position. 
*/ private static class PositionQueue extends PriorityQueue { public PositionQueue(int maxSize) { super(maxSize); @@ -124,7 +127,7 @@ class TermAutomatonScorer extends Scorer { /** Pushes all previously pop'd enums back into the docIDQueue */ private void pushCurrentDoc() { - for(int i=0;i docID; while (true) { - //System.out.println(" doNext: cycle"); + // System.out.println(" doNext: cycle"); popCurrentDoc(); - //System.out.println(" docID=" + docID); + // System.out.println(" docID=" + docID); if (docID == NO_MORE_DOCS) { return docID; } @@ -200,10 +203,10 @@ class TermAutomatonScorer extends Scorer { if (freq > 0) { return docID; } - for(int i=0;i payloads = psu.getPayloadsForQuery(new TermQuery(new Term(FIELD, "rr"))); - if(VERBOSE) { + if (VERBOSE) { System.out.println("Num payloads:" + payloads.size()); - for (final byte [] bytes : payloads) { + for (final byte[] bytes : payloads) { System.out.println(new String(bytes, StandardCharsets.UTF_8)); } } @@ -72,7 +74,7 @@ public class TestPayloadSpanUtil extends LuceneTestCase { directory.close(); } - final static class PayloadAnalyzer extends Analyzer { + static final class PayloadAnalyzer extends Analyzer { @Override public TokenStreamComponents createComponents(String fieldName) { @@ -108,9 +110,9 @@ public class TestPayloadSpanUtil extends LuceneTestCase { if (!nopayload.contains(token)) { if (entities.contains(token)) { - payloadAtt.setPayload(new BytesRef(token + ":Entity:"+ pos )); + payloadAtt.setPayload(new BytesRef(token + ":Entity:" + pos)); } else { - payloadAtt.setPayload(new BytesRef(token + ":Noise:" + pos )); + payloadAtt.setPayload(new BytesRef(token + ":Noise:" + pos)); } } pos += posIncrAtt.getPositionIncrement(); @@ -125,5 +127,4 @@ public class TestPayloadSpanUtil extends LuceneTestCase { this.pos = 0; } } - } diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/codecs/idversion/StringAndPayloadField.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/codecs/idversion/StringAndPayloadField.java index 36779d8642a..7c6bd35d6f3 100644 --- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/codecs/idversion/StringAndPayloadField.java +++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/codecs/idversion/StringAndPayloadField.java @@ -65,7 +65,7 @@ class StringAndPayloadField extends Field { private boolean used = false; private String value = null; private BytesRef payload; - + /** Sets the string value. 
*/ void setValue(String value, BytesRef payload) { this.value = value; @@ -96,5 +96,3 @@ class StringAndPayloadField extends Field { } } } - - diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/codecs/idversion/TestIDVersionPostingsFormat.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/codecs/idversion/TestIDVersionPostingsFormat.java index 487d796edf0..683f329be30 100644 --- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/codecs/idversion/TestIDVersionPostingsFormat.java +++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/codecs/idversion/TestIDVersionPostingsFormat.java @@ -27,12 +27,10 @@ import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicLong; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenFilter; import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.sandbox.codecs.idversion.StringAndPayloadField.SingleTokenWithPayloadTokenStream; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; @@ -46,6 +44,7 @@ import org.apache.lucene.index.PostingsEnum; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.index.TieredMergePolicy; +import org.apache.lucene.sandbox.codecs.idversion.StringAndPayloadField.SingleTokenWithPayloadTokenStream; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.LiveFieldValues; import org.apache.lucene.search.SearcherFactory; @@ -56,9 +55,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; -/** - * Basic tests for IDVersionPostingsFormat - */ +/** Basic tests for IDVersionPostingsFormat */ // Cannot extend BasePostingsFormatTestCase because this PF is not // general (it requires payloads, only allows 1 doc per term, etc.) 
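The tests below exercise version-aware primary-key lookup. As a compact sketch of that flow, using the PerThreadVersionPKLookup helper this test relies on (the reader and the id are assumed):

    PerThreadVersionPKLookup lookup = new PerThreadVersionPKLookup(reader, "id");
    int docID = lookup.lookup(new BytesRef("id0"), /* minVersion= */ -1L);
    if (docID != -1) {
      long version = lookup.getVersion(); // the version indexed with "id0"
    }
    // lookup(...) returns -1 when the id is absent or its indexed version is below
    // the requested minimum; the concurrency test below uses exactly this to decide
    // whether a competing update has already superseded the incoming one.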
public class TestIDVersionPostingsFormat extends LuceneTestCase { @@ -75,7 +72,8 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase { doc.add(makeIDField("id1", 110)); w.addDocument(doc); IndexReader r = w.getReader(); - IDVersionSegmentTermsEnum termsEnum = (IDVersionSegmentTermsEnum) r.leaves().get(0).reader().terms("id").iterator(); + IDVersionSegmentTermsEnum termsEnum = + (IDVersionSegmentTermsEnum) r.leaves().get(0).reader().terms("id").iterator(); assertTrue(termsEnum.seekExact(new BytesRef("id0"), 50)); assertTrue(termsEnum.seekExact(new BytesRef("id0"), 100)); assertFalse(termsEnum.seekExact(new BytesRef("id0"), 101)); @@ -95,90 +93,104 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase { private IDSource getRandomIDs() { IDSource ids; switch (random().nextInt(6)) { - case 0: - // random simple - if (VERBOSE) { - System.out.println("TEST: use random simple ids"); - } - ids = new IDSource() { - @Override - public String next() { - return TestUtil.randomSimpleString(random()); - } - }; - break; - case 1: - // random realistic unicode - if (VERBOSE) { - System.out.println("TEST: use random realistic unicode ids"); - } - ids = new IDSource() { - @Override - public String next() { - return TestUtil.randomRealisticUnicodeString(random()); - } - }; - break; - case 2: - // sequential - if (VERBOSE) { - System.out.println("TEST: use seuquential ids"); - } - ids = new IDSource() { - int upto; - @Override - public String next() { - return Integer.toString(upto++); - } - }; - break; - case 3: - // zero-pad sequential - if (VERBOSE) { - System.out.println("TEST: use zero-pad seuquential ids"); - } - ids = new IDSource() { - final int radix = TestUtil.nextInt(random(), Character.MIN_RADIX, Character.MAX_RADIX); - final String zeroPad = String.format(Locale.ROOT, "%0" + TestUtil.nextInt(random(), 5, 20) + "d", 0); - int upto; - @Override - public String next() { - String s = Integer.toString(upto++); - return zeroPad.substring(zeroPad.length() - s.length()) + s; - } - }; - break; - case 4: - // random long - if (VERBOSE) { - System.out.println("TEST: use random long ids"); - } - ids = new IDSource() { - final int radix = TestUtil.nextInt(random(), Character.MIN_RADIX, Character.MAX_RADIX); - int upto; - @Override - public String next() { - return Long.toString(random().nextLong() & 0x3ffffffffffffffL, radix); - } - }; - break; - case 5: - // zero-pad random long - if (VERBOSE) { - System.out.println("TEST: use zero-pad random long ids"); - } - ids = new IDSource() { - final int radix = TestUtil.nextInt(random(), Character.MIN_RADIX, Character.MAX_RADIX); - final String zeroPad = String.format(Locale.ROOT, "%015d", 0); - int upto; - @Override - public String next() { - return Long.toString(random().nextLong() & 0x3ffffffffffffffL, radix); - } - }; - break; - default: - throw new AssertionError(); + case 0: + // random simple + if (VERBOSE) { + System.out.println("TEST: use random simple ids"); + } + ids = + new IDSource() { + @Override + public String next() { + return TestUtil.randomSimpleString(random()); + } + }; + break; + case 1: + // random realistic unicode + if (VERBOSE) { + System.out.println("TEST: use random realistic unicode ids"); + } + ids = + new IDSource() { + @Override + public String next() { + return TestUtil.randomRealisticUnicodeString(random()); + } + }; + break; + case 2: + // sequential + if (VERBOSE) { + System.out.println("TEST: use seuquential ids"); + } + ids = + new IDSource() { + int upto; + + @Override + public String next() { + 
return Integer.toString(upto++); + } + }; + break; + case 3: + // zero-pad sequential + if (VERBOSE) { + System.out.println("TEST: use zero-pad seuquential ids"); + } + ids = + new IDSource() { + final int radix = + TestUtil.nextInt(random(), Character.MIN_RADIX, Character.MAX_RADIX); + final String zeroPad = + String.format(Locale.ROOT, "%0" + TestUtil.nextInt(random(), 5, 20) + "d", 0); + int upto; + + @Override + public String next() { + String s = Integer.toString(upto++); + return zeroPad.substring(zeroPad.length() - s.length()) + s; + } + }; + break; + case 4: + // random long + if (VERBOSE) { + System.out.println("TEST: use random long ids"); + } + ids = + new IDSource() { + final int radix = + TestUtil.nextInt(random(), Character.MIN_RADIX, Character.MAX_RADIX); + int upto; + + @Override + public String next() { + return Long.toString(random().nextLong() & 0x3ffffffffffffffL, radix); + } + }; + break; + case 5: + // zero-pad random long + if (VERBOSE) { + System.out.println("TEST: use zero-pad random long ids"); + } + ids = + new IDSource() { + final int radix = + TestUtil.nextInt(random(), Character.MIN_RADIX, Character.MAX_RADIX); + final String zeroPad = String.format(Locale.ROOT, "%015d", 0); + int upto; + + @Override + public String next() { + return Long.toString(random().nextLong() & 0x3ffffffffffffffL, radix); + } + }; + break; + default: + throw new AssertionError(); } return ids; @@ -190,12 +202,14 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random())); int minItemsInBlock = TestUtil.nextInt(random(), 2, 50); - int maxItemsInBlock = 2*(minItemsInBlock-1) + random().nextInt(50); - iwc.setCodec(TestUtil.alwaysPostingsFormat(new IDVersionPostingsFormat(minItemsInBlock, maxItemsInBlock))); + int maxItemsInBlock = 2 * (minItemsInBlock - 1) + random().nextInt(50); + iwc.setCodec( + TestUtil.alwaysPostingsFormat( + new IDVersionPostingsFormat(minItemsInBlock, maxItemsInBlock))); RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc, false); - //IndexWriter w = new IndexWriter(dir, iwc); + // IndexWriter w = new IndexWriter(dir, iwc); int numDocs = atLeast(1000); - Map idValues = new HashMap(); + Map idValues = new HashMap(); int docUpto = 0; if (VERBOSE) { System.out.println("TEST: numDocs=" + numDocs); @@ -261,19 +275,19 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase { } w.deleteDocuments(new Term("id", idValue)); idValues.remove(idValue); - } + } } docUpto++; } IndexReader r = w.getReader(); - //IndexReader r = DirectoryReader.open(w); + // IndexReader r = DirectoryReader.open(w); PerThreadVersionPKLookup lookup = new PerThreadVersionPKLookup(r, "id"); - List> idValuesList = new ArrayList<>(idValues.entrySet()); + List> idValuesList = new ArrayList<>(idValues.entrySet()); int iters = numDocs * 5; - for(int iter=0;iter { - w.addDocument(duplicate); - w.commit(false); - }); + expectThrows( + IllegalArgumentException.class, + () -> { + w.addDocument(duplicate); + w.commit(false); + }); w.close(); dir.close(); @@ -382,12 +406,13 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase { iwc.setMergePolicy(new TieredMergePolicy()); MergeScheduler ms = iwc.getMergeScheduler(); if (ms instanceof ConcurrentMergeScheduler) { - iwc.setMergeScheduler(new ConcurrentMergeScheduler() { - @Override - protected void handleMergeException(Throwable exc) { - assertTrue(exc instanceof IllegalArgumentException); - } - }); + 
iwc.setMergeScheduler( + new ConcurrentMergeScheduler() { + @Override + protected void handleMergeException(Throwable exc) { + assertTrue(exc instanceof IllegalArgumentException); + } + }); } IndexWriter w = new IndexWriter(dir, iwc); Document doc = new Document(); @@ -449,25 +474,28 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase { Directory dir = newDirectory(); // MockAnalyzer minus maybePayload else it sometimes stuffs in an 8-byte payload! - Analyzer a = new Analyzer() { - @Override - public TokenStreamComponents createComponents(String fieldName) { - MockTokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, true, 100); - tokenizer.setEnableChecks(true); - MockTokenFilter filt = new MockTokenFilter(tokenizer, MockTokenFilter.EMPTY_STOPSET); - return new TokenStreamComponents(tokenizer, filt); - } - }; + Analyzer a = + new Analyzer() { + @Override + public TokenStreamComponents createComponents(String fieldName) { + MockTokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, true, 100); + tokenizer.setEnableChecks(true); + MockTokenFilter filt = new MockTokenFilter(tokenizer, MockTokenFilter.EMPTY_STOPSET); + return new TokenStreamComponents(tokenizer, filt); + } + }; IndexWriterConfig iwc = newIndexWriterConfig(a); iwc.setCodec(TestUtil.alwaysPostingsFormat(new IDVersionPostingsFormat())); RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc, false); Document doc = new Document(); doc.add(newTextField("id", "id", Field.Store.NO)); - expectThrows(IllegalArgumentException.class, () -> { - w.addDocument(doc); - w.commit(false); - }); - + expectThrows( + IllegalArgumentException.class, + () -> { + w.addDocument(doc); + w.commit(false); + }); + w.close(); dir.close(); } @@ -479,12 +507,13 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase { RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc, false); Document doc = new Document(); doc.add(newStringField("id", "id", Field.Store.NO)); - expectThrows(IllegalArgumentException.class, () -> { - w.addDocument(doc); - w.commit(false); - }); + expectThrows( + IllegalArgumentException.class, + () -> { + w.addDocument(doc); + w.commit(false); + }); - w.close(); dir.close(); } @@ -496,11 +525,13 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase { RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc, false); Document doc = new Document(); doc.add(new StringAndPayloadField("id", "id", new BytesRef("foo"))); - expectThrows(IllegalArgumentException.class, () -> { - w.addDocument(doc); - w.commit(false); - }); - + expectThrows( + IllegalArgumentException.class, + () -> { + w.addDocument(doc); + w.commit(false); + }); + w.close(); dir.close(); } @@ -523,7 +554,8 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase { dir.close(); } - // LUCENE-5693: because CheckIndex cross-checks term vectors with postings even for deleted docs, and because our PF only indexes the + // LUCENE-5693: because CheckIndex cross-checks term vectors with postings even for deleted docs, + // and because our PF only indexes the // non-deleted documents on flush, CheckIndex will see this as corruption: public void testCannotIndexTermVectors() throws Exception { Directory dir = newDirectory(); @@ -541,11 +573,13 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase { ts.setValue("foo", payload); Field field = new Field("id", ts, ft); doc.add(new Field("id", ts, ft)); - expectThrows(IllegalArgumentException.class, () -> { - w.addDocument(doc); - 
w.commit(false); - fail("didn't hit expected exception"); - }); + expectThrows( + IllegalArgumentException.class, + () -> { + w.addDocument(doc); + w.commit(false); + fail("didn't hit expected exception"); + }); w.close(); dir.close(); @@ -559,10 +593,12 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase { Document doc = new Document(); doc.add(makeIDField("id", 17)); doc.add(makeIDField("id", 17)); - expectThrows(IllegalArgumentException.class, () -> { - w.addDocument(doc); - w.commit(false); - }); + expectThrows( + IllegalArgumentException.class, + () -> { + w.addDocument(doc); + w.commit(false); + }); w.close(); dir.close(); @@ -575,14 +611,32 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase { RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc, false); Document doc = new Document(); // -1 - doc.add(new StringAndPayloadField("id", "id", new BytesRef(new byte[] {(byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff}))); - expectThrows(IllegalArgumentException.class, () -> { - w.addDocument(doc); - w.commit(false); - }); - expectThrows(AlreadyClosedException.class, () -> { - w.addDocument(doc); - }); + doc.add( + new StringAndPayloadField( + "id", + "id", + new BytesRef( + new byte[] { + (byte) 0xff, + (byte) 0xff, + (byte) 0xff, + (byte) 0xff, + (byte) 0xff, + (byte) 0xff, + (byte) 0xff, + (byte) 0xff + }))); + expectThrows( + IllegalArgumentException.class, + () -> { + w.addDocument(doc); + w.commit(false); + }); + expectThrows( + AlreadyClosedException.class, + () -> { + w.addDocument(doc); + }); dir.close(); } @@ -593,19 +647,38 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase { RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc, false); Document doc = new Document(); // Long.MAX_VALUE: - doc.add(new StringAndPayloadField("id", "id", new BytesRef(new byte[] {(byte)0x7f, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff}))); - expectThrows(IllegalArgumentException.class, () -> { - w.addDocument(doc); - w.commit(false); - }); - expectThrows(AlreadyClosedException.class, () -> { - w.addDocument(doc); - }); + doc.add( + new StringAndPayloadField( + "id", + "id", + new BytesRef( + new byte[] { + (byte) 0x7f, + (byte) 0xff, + (byte) 0xff, + (byte) 0xff, + (byte) 0xff, + (byte) 0xff, + (byte) 0xff, + (byte) 0xff + }))); + expectThrows( + IllegalArgumentException.class, + () -> { + w.addDocument(doc); + w.commit(false); + }); + expectThrows( + AlreadyClosedException.class, + () -> { + w.addDocument(doc); + }); dir.close(); } - // Simulates optimistic concurrency in a distributed indexing app and confirms the latest version always wins: + // Simulates optimistic concurrency in a distributed indexing app and confirms the latest version + // always wins: public void testGlobalVersions() throws Exception { Directory dir = newDirectory(); IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random())); @@ -624,7 +697,7 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase { final String[] ids = idsSeen.toArray(new String[numIDs]); final Object[] locks = new Object[ids.length]; - for(int i=0;i versionValues = new LiveFieldValues(mgr, missingValue) { - @Override - protected Long lookupFromSearcher(IndexSearcher s, String id) { - // TODO: would be cleaner if we could do our PerThreadLookup here instead of "up above": - // We always return missing: the caller then does a lookup against the current reader - return missingValue; - } - 
}; + final LiveFieldValues versionValues = + new LiveFieldValues(mgr, missingValue) { + @Override + protected Long lookupFromSearcher(IndexSearcher s, String id) { + // TODO: would be cleaner if we could do our PerThreadLookup here instead of "up above": + // We always return missing: the caller then does a lookup against the current reader + return missingValue; + } + }; // Maps to the version the id was lasted indexed with: - final Map truth = new ConcurrentHashMap<>(); + final Map truth = new ConcurrentHashMap<>(); final CountDownLatch startingGun = new CountDownLatch(1); @@ -665,118 +739,161 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase { // Run for .5 sec in normal tests, else 60 seconds for nightly: final long stopTime = System.currentTimeMillis() + (TEST_NIGHTLY ? 60000 : 500); - for(int i=0;i= newVersion); + + boolean doIndex; + if (currentVersion == missingValue) { + if (VERBOSE) { + System.out.println( + Thread.currentThread().getName() + ": id not in RT cache"); + } + int otherDocID = lookup.lookup(new BytesRef(id), newVersion + 1); + if (otherDocID == -1) { + if (VERBOSE) { + System.out.println( + Thread.currentThread().getName() + + ": id not in index, or version is <= newVersion; will index"); + } + doIndex = true; + } else { + if (VERBOSE) { + System.out.println( + Thread.currentThread().getName() + + ": id is in index with version=" + + lookup.getVersion() + + "; will not index"); + } + doIndex = false; + if (truthVersion.longValue() != lookup.getVersion()) { + System.out.println(Thread.currentThread() + ": now fail0!"); + } + assertEquals(truthVersion.longValue(), lookup.getVersion()); + } + } else { + if (VERBOSE) { + System.out.println( + Thread.currentThread().getName() + + ": id is in RT cache: currentVersion=" + + currentVersion); + } + doIndex = newVersion > currentVersion; + } + + if (doIndex) { + if (VERBOSE) { + System.out.println(Thread.currentThread().getName() + ": now index"); + } + boolean passes = + truthVersion == null || truthVersion.longValue() <= newVersion; + if (passes == false) { + System.out.println(Thread.currentThread() + ": now fail!"); + } + assertTrue(passes); + Document doc = new Document(); + doc.add(makeIDField(id, newVersion)); + w.updateDocument(new Term("id", id), doc); + truth.put(id, newVersion); + versionValues.add(id, newVersion); + } else { + if (VERBOSE) { + System.out.println(Thread.currentThread().getName() + ": skip index"); + } + assertNotNull(truthVersion); + assertTrue(truthVersion.longValue() >= newVersion); + } + } finally { + mgr.release(s); } - } finally { - mgr.release(s); } } } - } - }; + }; threads[i].start(); } @@ -797,7 +914,7 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase { } // Verify final index against truth: - for(int i=0;i<2;i++) { + for (int i = 0; i < 2; i++) { mgr.maybeRefresh(); IndexSearcher s = mgr.acquire(); try { @@ -811,7 +928,7 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase { } */ PerThreadVersionPKLookup lookup = new PerThreadVersionPKLookup(r, "id"); - for(Map.Entry ent : truth.entrySet()) { + for (Map.Entry ent : truth.entrySet()) { assertTrue(lookup.lookup(new BytesRef(ent.getKey()), -1L) != -1); assertEquals(ent.getValue().longValue(), lookup.getVersion()); } diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/document/TestBigIntegerPoint.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/document/TestBigIntegerPoint.java index 1963dca37a0..d3d2adc2c27 100644 --- 
a/lucene/sandbox/src/test/org/apache/lucene/sandbox/document/TestBigIntegerPoint.java +++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/document/TestBigIntegerPoint.java @@ -17,11 +17,9 @@ package org.apache.lucene.sandbox.document; import java.math.BigInteger; - import org.apache.lucene.document.Document; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; -import org.apache.lucene.sandbox.document.BigIntegerPoint; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; @@ -40,21 +38,26 @@ public class TestBigIntegerPoint extends LuceneTestCase { BigInteger large = BigInteger.valueOf(Long.MAX_VALUE).multiply(BigInteger.valueOf(64)); document.add(new BigIntegerPoint("field", large)); writer.addDocument(document); - + // search and verify we found our doc IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); assertEquals(1, searcher.count(BigIntegerPoint.newExactQuery("field", large))); - assertEquals(1, searcher.count(BigIntegerPoint.newRangeQuery("field", large.subtract(BigInteger.ONE), large.add(BigInteger.ONE)))); + assertEquals( + 1, + searcher.count( + BigIntegerPoint.newRangeQuery( + "field", large.subtract(BigInteger.ONE), large.add(BigInteger.ONE)))); assertEquals(1, searcher.count(BigIntegerPoint.newSetQuery("field", large))); - assertEquals(0, searcher.count(BigIntegerPoint.newSetQuery("field", large.subtract(BigInteger.ONE)))); + assertEquals( + 0, searcher.count(BigIntegerPoint.newSetQuery("field", large.subtract(BigInteger.ONE)))); assertEquals(0, searcher.count(BigIntegerPoint.newSetQuery("field"))); reader.close(); writer.close(); dir.close(); } - + /** Add a negative 1D point and search for it */ public void testNegative() throws Exception { Directory dir = newDirectory(); @@ -62,38 +65,56 @@ public class TestBigIntegerPoint extends LuceneTestCase { // add a doc with a large biginteger value Document document = new Document(); - BigInteger negative = BigInteger.valueOf(Long.MAX_VALUE).multiply(BigInteger.valueOf(64)).negate(); + BigInteger negative = + BigInteger.valueOf(Long.MAX_VALUE).multiply(BigInteger.valueOf(64)).negate(); document.add(new BigIntegerPoint("field", negative)); writer.addDocument(document); - + // search and verify we found our doc IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); assertEquals(1, searcher.count(BigIntegerPoint.newExactQuery("field", negative))); - assertEquals(1, searcher.count(BigIntegerPoint.newRangeQuery("field", negative.subtract(BigInteger.ONE), negative.add(BigInteger.ONE)))); + assertEquals( + 1, + searcher.count( + BigIntegerPoint.newRangeQuery( + "field", negative.subtract(BigInteger.ONE), negative.add(BigInteger.ONE)))); reader.close(); writer.close(); dir.close(); } - + /** Test if we add a too-large value */ public void testTooLarge() throws Exception { BigInteger tooLarge = BigInteger.ONE.shiftLeft(128); - IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> { - new BigIntegerPoint("field", tooLarge); - }); + IllegalArgumentException expected = + expectThrows( + IllegalArgumentException.class, + () -> { + new BigIntegerPoint("field", tooLarge); + }); assertTrue(expected.getMessage().contains("requires more than 16 bytes storage")); } - + public void testToString() throws Exception { - assertEquals("BigIntegerPoint ", new BigIntegerPoint("field", BigInteger.ONE).toString()); - 
assertEquals("BigIntegerPoint ", new BigIntegerPoint("field", BigInteger.ONE, BigInteger.valueOf(-2)).toString()); - assertEquals("field:[1 TO 1]", BigIntegerPoint.newExactQuery("field", BigInteger.ONE).toString()); - assertEquals("field:[1 TO 17]", BigIntegerPoint.newRangeQuery("field", BigInteger.ONE, BigInteger.valueOf(17)).toString()); - assertEquals("field:[1 TO 17],[0 TO 42]", BigIntegerPoint.newRangeQuery("field", - new BigInteger[] {BigInteger.ONE, BigInteger.ZERO}, - new BigInteger[] {BigInteger.valueOf(17), BigInteger.valueOf(42)}).toString()); + assertEquals( + "BigIntegerPoint ", new BigIntegerPoint("field", BigInteger.ONE).toString()); + assertEquals( + "BigIntegerPoint ", + new BigIntegerPoint("field", BigInteger.ONE, BigInteger.valueOf(-2)).toString()); + assertEquals( + "field:[1 TO 1]", BigIntegerPoint.newExactQuery("field", BigInteger.ONE).toString()); + assertEquals( + "field:[1 TO 17]", + BigIntegerPoint.newRangeQuery("field", BigInteger.ONE, BigInteger.valueOf(17)).toString()); + assertEquals( + "field:[1 TO 17],[0 TO 42]", + BigIntegerPoint.newRangeQuery( + "field", + new BigInteger[] {BigInteger.ONE, BigInteger.ZERO}, + new BigInteger[] {BigInteger.valueOf(17), BigInteger.valueOf(42)}) + .toString()); assertEquals("field:{1}", BigIntegerPoint.newSetQuery("field", BigInteger.ONE).toString()); } @@ -103,8 +124,12 @@ public class TestBigIntegerPoint extends LuceneTestCase { q2 = BigIntegerPoint.newRangeQuery("a", BigInteger.valueOf(0), BigInteger.valueOf(1000)); assertEquals(q1, q2); assertEquals(q1.hashCode(), q2.hashCode()); - assertFalse(q1.equals(BigIntegerPoint.newRangeQuery("a", BigInteger.valueOf(1), BigInteger.valueOf(1000)))); - assertFalse(q1.equals(BigIntegerPoint.newRangeQuery("b", BigInteger.valueOf(0), BigInteger.valueOf(1000)))); + assertFalse( + q1.equals( + BigIntegerPoint.newRangeQuery("a", BigInteger.valueOf(1), BigInteger.valueOf(1000)))); + assertFalse( + q1.equals( + BigIntegerPoint.newRangeQuery("b", BigInteger.valueOf(0), BigInteger.valueOf(1000)))); q1 = BigIntegerPoint.newExactQuery("a", BigInteger.valueOf(1000)); q2 = BigIntegerPoint.newExactQuery("a", BigInteger.valueOf(1000)); @@ -112,10 +137,17 @@ public class TestBigIntegerPoint extends LuceneTestCase { assertEquals(q1.hashCode(), q2.hashCode()); assertFalse(q1.equals(BigIntegerPoint.newExactQuery("a", BigInteger.valueOf(1)))); - q1 = BigIntegerPoint.newSetQuery("a", BigInteger.valueOf(0), BigInteger.valueOf(1000), BigInteger.valueOf(17)); - q2 = BigIntegerPoint.newSetQuery("a", BigInteger.valueOf(17), BigInteger.valueOf(0), BigInteger.valueOf(1000)); + q1 = + BigIntegerPoint.newSetQuery( + "a", BigInteger.valueOf(0), BigInteger.valueOf(1000), BigInteger.valueOf(17)); + q2 = + BigIntegerPoint.newSetQuery( + "a", BigInteger.valueOf(17), BigInteger.valueOf(0), BigInteger.valueOf(1000)); assertEquals(q1, q2); assertEquals(q1.hashCode(), q2.hashCode()); - assertFalse(q1.equals(BigIntegerPoint.newSetQuery("a", BigInteger.valueOf(1), BigInteger.valueOf(17), BigInteger.valueOf(1000)))); - } + assertFalse( + q1.equals( + BigIntegerPoint.newSetQuery( + "a", BigInteger.valueOf(1), BigInteger.valueOf(17), BigInteger.valueOf(1000)))); + } } diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/document/TestDoubleRangeField.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/document/TestDoubleRangeField.java index 37b54b9afb9..d4c365cc794 100644 --- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/document/TestDoubleRangeField.java +++ 
b/lucene/sandbox/src/test/org/apache/lucene/sandbox/document/TestDoubleRangeField.java @@ -20,9 +20,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.DoubleRange; import org.apache.lucene.util.LuceneTestCase; -/** - * Random testing for RangeField type. - **/ +/** Random testing for RangeField type. */ public class TestDoubleRangeField extends LuceneTestCase { private static final String FIELD_NAME = "rangeField"; @@ -31,12 +29,18 @@ public class TestDoubleRangeField extends LuceneTestCase { Document doc = new Document(); IllegalArgumentException expected; - expected = expectThrows(IllegalArgumentException.class, () -> - doc.add(new DoubleRange(FIELD_NAME, new double[] {Double.NaN}, new double[] {5}))); + expected = + expectThrows( + IllegalArgumentException.class, + () -> + doc.add(new DoubleRange(FIELD_NAME, new double[] {Double.NaN}, new double[] {5}))); assertTrue(expected.getMessage().contains("invalid min value")); - expected = expectThrows(IllegalArgumentException.class, () -> - doc.add(new DoubleRange(FIELD_NAME, new double[] {5}, new double[] {Double.NaN}))); + expected = + expectThrows( + IllegalArgumentException.class, + () -> + doc.add(new DoubleRange(FIELD_NAME, new double[] {5}, new double[] {Double.NaN}))); assertTrue(expected.getMessage().contains("invalid max value")); } @@ -44,8 +48,10 @@ public class TestDoubleRangeField extends LuceneTestCase { public void testUnevenArrays() { Document doc = new Document(); IllegalArgumentException expected; - expected = expectThrows(IllegalArgumentException.class, () -> - doc.add(new DoubleRange(FIELD_NAME, new double[] {5, 6}, new double[] {5}))); + expected = + expectThrows( + IllegalArgumentException.class, + () -> doc.add(new DoubleRange(FIELD_NAME, new double[] {5, 6}, new double[] {5}))); assertTrue(expected.getMessage().contains("min/max ranges must agree")); } @@ -53,8 +59,12 @@ public class TestDoubleRangeField extends LuceneTestCase { public void testOversizeDimensions() { Document doc = new Document(); IllegalArgumentException expected; - expected = expectThrows(IllegalArgumentException.class, () -> - doc.add(new DoubleRange(FIELD_NAME, new double[] {1, 2, 3, 4, 5}, new double[] {5}))); + expected = + expectThrows( + IllegalArgumentException.class, + () -> + doc.add( + new DoubleRange(FIELD_NAME, new double[] {1, 2, 3, 4, 5}, new double[] {5}))); assertTrue(expected.getMessage().contains("does not support greater than 4 dimensions")); } @@ -62,8 +72,10 @@ public class TestDoubleRangeField extends LuceneTestCase { public void testMinGreaterThanMax() { Document doc = new Document(); IllegalArgumentException expected; - expected = expectThrows(IllegalArgumentException.class, () -> - doc.add(new DoubleRange(FIELD_NAME, new double[] {3, 4}, new double[] {1, 2}))); + expected = + expectThrows( + IllegalArgumentException.class, + () -> doc.add(new DoubleRange(FIELD_NAME, new double[] {3, 4}, new double[] {1, 2}))); assertTrue(expected.getMessage().contains("is greater than max value")); } } diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/document/TestFloatPointNearestNeighbor.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/document/TestFloatPointNearestNeighbor.java index 1c97785bdfb..26dd8f48fc6 100644 --- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/document/TestFloatPointNearestNeighbor.java +++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/document/TestFloatPointNearestNeighbor.java @@ -17,7 +17,6 @@ package org.apache.lucene.sandbox.document; import 
java.util.Arrays; - import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.FloatPoint; @@ -30,9 +29,9 @@ import org.apache.lucene.index.PointValues; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.SerialMergeScheduler; import org.apache.lucene.index.Term; +import org.apache.lucene.sandbox.search.LatLonPointPrototypeQueries; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.sandbox.search.LatLonPointPrototypeQueries; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; @@ -54,15 +53,18 @@ public class TestFloatPointNearestNeighbor extends LuceneTestCase { w.addDocument(doc); DirectoryReader r = w.getReader(); - // can't wrap because we require Lucene60PointsFormat directly but e.g. ParallelReader wraps with its own points impl: + // can't wrap because we require Lucene60PointsFormat directly but e.g. ParallelReader wraps + // with its own points impl: IndexSearcher s = newSearcher(r, false); - FieldDoc hit = (FieldDoc) FloatPointNearestNeighbor.nearest(s, "point", 1, 40.0f, 50.0f).scoreDocs[0]; + FieldDoc hit = + (FieldDoc) FloatPointNearestNeighbor.nearest(s, "point", 1, 40.0f, 50.0f).scoreDocs[0]; assertEquals("0", r.document(hit.doc).getField("id").stringValue()); r.close(); w.deleteDocuments(new Term("id", "0")); r = w.getReader(); - // can't wrap because we require Lucene60PointsFormat directly but e.g. ParallelReader wraps with its own points impl: + // can't wrap because we require Lucene60PointsFormat directly but e.g. ParallelReader wraps + // with its own points impl: s = newSearcher(r, false); hit = (FieldDoc) LatLonPointPrototypeQueries.nearest(s, "point", 40.0, 50.0, 1).scoreDocs[0]; assertEquals("1", r.document(hit.doc).getField("id").stringValue()); @@ -84,18 +86,22 @@ public class TestFloatPointNearestNeighbor extends LuceneTestCase { w.addDocument(doc); DirectoryReader r = w.getReader(); - // can't wrap because we require Lucene60PointsFormat directly but e.g. ParallelReader wraps with its own points impl: + // can't wrap because we require Lucene60PointsFormat directly but e.g. ParallelReader wraps + // with its own points impl: IndexSearcher s = newSearcher(r, false); - FieldDoc hit = (FieldDoc)FloatPointNearestNeighbor.nearest(s, "point", 1, 40.0f, 50.0f).scoreDocs[0]; + FieldDoc hit = + (FieldDoc) FloatPointNearestNeighbor.nearest(s, "point", 1, 40.0f, 50.0f).scoreDocs[0]; assertEquals("0", r.document(hit.doc).getField("id").stringValue()); r.close(); w.deleteDocuments(new Term("id", "0")); w.deleteDocuments(new Term("id", "1")); r = w.getReader(); - // can't wrap because we require Lucene60PointsFormat directly but e.g. ParallelReader wraps with its own points impl: + // can't wrap because we require Lucene60PointsFormat directly but e.g. ParallelReader wraps + // with its own points impl: s = newSearcher(r, false); - assertEquals(0, FloatPointNearestNeighbor.nearest(s, "point", 1, 40.0f, 50.0f).scoreDocs.length); + assertEquals( + 0, FloatPointNearestNeighbor.nearest(s, "point", 1, 40.0f, 50.0f).scoreDocs.length); r.close(); w.close(); dir.close(); @@ -114,8 +120,11 @@ public class TestFloatPointNearestNeighbor extends LuceneTestCase { w.addDocument(doc); DirectoryReader r = DirectoryReader.open(w); - // can't wrap because we require Lucene60PointsFormat directly but e.g. 
ParallelReader wraps with its own points impl: - ScoreDoc[] hits = FloatPointNearestNeighbor.nearest(newSearcher(r, false), "point", 2, 45.0f, 50.0f).scoreDocs; + // can't wrap because we require Lucene60PointsFormat directly but e.g. ParallelReader wraps + // with its own points impl: + ScoreDoc[] hits = + FloatPointNearestNeighbor.nearest(newSearcher(r, false), "point", 2, 45.0f, 50.0f) + .scoreDocs; assertEquals("0", r.document(hits[0].doc).getField("id").stringValue()); assertEquals("1", r.document(hits[1].doc).getField("id").stringValue()); @@ -128,8 +137,13 @@ public class TestFloatPointNearestNeighbor extends LuceneTestCase { Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir, getIndexWriterConfig()); DirectoryReader r = w.getReader(); - // can't wrap because we require Lucene60PointsFormat directly but e.g. ParallelReader wraps with its own points impl: - assertEquals(0, FloatPointNearestNeighbor.nearest(newSearcher(r, false), "point", 1, 40.0f, 50.0f).scoreDocs.length); + // can't wrap because we require Lucene60PointsFormat directly but e.g. ParallelReader wraps + // with its own points impl: + assertEquals( + 0, + FloatPointNearestNeighbor.nearest(newSearcher(r, false), "point", 1, 40.0f, 50.0f) + .scoreDocs + .length); r.close(); w.close(); dir.close(); @@ -150,8 +164,8 @@ public class TestFloatPointNearestNeighbor extends LuceneTestCase { int dims = TestUtil.nextInt(random(), 1, PointValues.MAX_INDEX_DIMENSIONS); float[][] values = new float[numPoints][dims]; - for (int id = 0 ; id < numPoints ; ++id) { - for (int dim = 0 ; dim < dims ; ++dim) { + for (int id = 0; id < numPoints; ++id) { + for (int dim = 0; dim < dims; ++dim) { Float f = Float.NaN; while (f.isNaN()) { f = Float.intBitsToFloat(random().nextInt()); @@ -172,15 +186,16 @@ public class TestFloatPointNearestNeighbor extends LuceneTestCase { if (VERBOSE) { System.out.println("TEST: reader=" + r); } - // can't wrap because we require Lucene60PointsFormat directly but e.g. ParallelReader wraps with its own points impl: + // can't wrap because we require Lucene60PointsFormat directly but e.g. ParallelReader wraps + // with its own points impl: IndexSearcher s = newSearcher(r, false); int iters = atLeast(100); - for (int iter = 0 ; iter < iters ; ++iter) { + for (int iter = 0; iter < iters; ++iter) { if (VERBOSE) { System.out.println("\nTEST: iter=" + iter); } float[] origin = new float[dims]; - for (int dim = 0 ; dim < dims ; ++dim) { + for (int dim = 0; dim < dims; ++dim) { Float f = Float.NaN; while (f.isNaN()) { f = Float.intBitsToFloat(random().nextInt()); @@ -189,18 +204,21 @@ public class TestFloatPointNearestNeighbor extends LuceneTestCase { } // dumb brute force search to get the expected result: - FloatPointNearestNeighbor.NearestHit[] expectedHits = new FloatPointNearestNeighbor.NearestHit[numPoints]; - for (int id = 0 ; id < numPoints ; ++id) { + FloatPointNearestNeighbor.NearestHit[] expectedHits = + new FloatPointNearestNeighbor.NearestHit[numPoints]; + for (int id = 0; id < numPoints; ++id) { FloatPointNearestNeighbor.NearestHit hit = new FloatPointNearestNeighbor.NearestHit(); hit.distanceSquared = euclideanDistanceSquared(origin, values[id]); hit.docID = id; expectedHits[id] = hit; } - Arrays.sort(expectedHits, (a, b) -> { - int cmp = Double.compare(a.distanceSquared, b.distanceSquared); - return cmp != 0 ? 
cmp : a.docID - b.docID; // tie break by smaller id - }); + Arrays.sort( + expectedHits, + (a, b) -> { + int cmp = Double.compare(a.distanceSquared, b.distanceSquared); + return cmp != 0 ? cmp : a.docID - b.docID; // tie break by smaller id + }); int topK = TestUtil.nextInt(random(), 1, numPoints); @@ -212,22 +230,34 @@ public class TestFloatPointNearestNeighbor extends LuceneTestCase { assertEquals("fewer than expected hits: ", topK, hits.length); if (VERBOSE) { - for (int i = 0 ; i < topK ; ++i) { + for (int i = 0; i < topK; ++i) { FloatPointNearestNeighbor.NearestHit expected = expectedHits[i]; - FieldDoc actual = (FieldDoc)hits[i]; + FieldDoc actual = (FieldDoc) hits[i]; Document actualDoc = r.document(actual.doc); System.out.println("hit " + i); - System.out.println(" expected id=" + expected.docID + " " + Arrays.toString(values[expected.docID]) - + " distance=" + (float)Math.sqrt(expected.distanceSquared) + " distanceSquared=" + expected.distanceSquared); - System.out.println(" actual id=" + actualDoc.getField("id") + " distance=" + actual.fields[0]); + System.out.println( + " expected id=" + + expected.docID + + " " + + Arrays.toString(values[expected.docID]) + + " distance=" + + (float) Math.sqrt(expected.distanceSquared) + + " distanceSquared=" + + expected.distanceSquared); + System.out.println( + " actual id=" + actualDoc.getField("id") + " distance=" + actual.fields[0]); } } - for (int i = 0 ; i < topK ; ++i) { + for (int i = 0; i < topK; ++i) { FloatPointNearestNeighbor.NearestHit expected = expectedHits[i]; - FieldDoc actual = (FieldDoc)hits[i]; + FieldDoc actual = (FieldDoc) hits[i]; assertEquals("hit " + i + ":", expected.docID, actual.doc); - assertEquals("hit " + i + ":", (float)Math.sqrt(expected.distanceSquared), (Float)actual.fields[0], 0.000001); + assertEquals( + "hit " + i + ":", + (float) Math.sqrt(expected.distanceSquared), + (Float) actual.fields[0], + 0.000001); } } @@ -238,8 +268,8 @@ public class TestFloatPointNearestNeighbor extends LuceneTestCase { private static double euclideanDistanceSquared(float[] a, float[] b) { double sumOfSquaredDifferences = 0.0d; - for (int d = 0 ; d < a.length ; ++d) { - double diff = (double)a[d] - (double)b[d]; + for (int d = 0; d < a.length; ++d) { + double diff = (double) a[d] - (double) b[d]; sumOfSquaredDifferences += diff * diff; } return sumOfSquaredDifferences; diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/document/TestHalfFloatPoint.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/document/TestHalfFloatPoint.java index 09e3fcb5685..d4212e0f08c 100644 --- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/document/TestHalfFloatPoint.java +++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/document/TestHalfFloatPoint.java @@ -17,11 +17,9 @@ package org.apache.lucene.sandbox.document; import java.util.Arrays; - import org.apache.lucene.document.Document; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; -import org.apache.lucene.sandbox.document.HalfFloatPoint; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.store.Directory; import org.apache.lucene.util.ArrayUtil; @@ -40,7 +38,7 @@ public class TestHalfFloatPoint extends LuceneTestCase { public void testHalfFloatConversion() { assertEquals(0, HalfFloatPoint.halfFloatToShortBits(0f)); - assertEquals((short)(1 << 15), HalfFloatPoint.halfFloatToShortBits(-0f)); + assertEquals((short) (1 << 15), HalfFloatPoint.halfFloatToShortBits(-0f)); assertEquals(0, 
HalfFloatPoint.halfFloatToShortBits(Float.MIN_VALUE)); // rounded to zero testHalfFloat("0011110000000000", 1); @@ -48,7 +46,8 @@ public class TestHalfFloatPoint extends LuceneTestCase { testHalfFloat("1100000000000000", -2); testHalfFloat("0111101111111111", 65504); // max value testHalfFloat("0000010000000000", (float) Math.pow(2, -14)); // minimum positive normal - testHalfFloat("0000001111111111", (float) (Math.pow(2, -14) - Math.pow(2, -24))); // maximum subnormal + testHalfFloat( + "0000001111111111", (float) (Math.pow(2, -14) - Math.pow(2, -24))); // maximum subnormal testHalfFloat("0000000000000001", (float) Math.pow(2, -24)); // minimum positive subnormal testHalfFloat("0000000000000000", 0f); testHalfFloat("1000000000000000", -0f); @@ -99,7 +98,9 @@ public class TestHalfFloatPoint extends LuceneTestCase { int floatBits = random().nextInt(); f = Float.intBitsToFloat(floatBits); } else { - f = (float) ((2 * random().nextFloat() - 1) * Math.pow(2, TestUtil.nextInt(random(), -16, 16))); + f = + (float) + ((2 * random().nextFloat() - 1) * Math.pow(2, TestUtil.nextInt(random(), -16, 16))); } float rounded = HalfFloatPoint.shortBitsToHalfFloat(HalfFloatPoint.halfFloatToShortBits(f)); if (Float.isFinite(f) == false) { @@ -122,7 +123,8 @@ public class TestHalfFloatPoint extends LuceneTestCase { if (f - values[index - 1] < closest - f) { closest = values[index - 1]; } else if (f - values[index - 1] == closest - f - && Integer.numberOfTrailingZeros(Float.floatToIntBits(values[index - 1])) > Integer.numberOfTrailingZeros(Float.floatToIntBits(closest))) { + && Integer.numberOfTrailingZeros(Float.floatToIntBits(values[index - 1])) + > Integer.numberOfTrailingZeros(Float.floatToIntBits(closest))) { // in case of tie, round to even closest = values[index - 1]; } @@ -162,7 +164,10 @@ public class TestHalfFloatPoint extends LuceneTestCase { HalfFloatPoint.shortToSortableBytes((short) (i - 1), previous, 0); byte[] current = new byte[HalfFloatPoint.BYTES]; HalfFloatPoint.shortToSortableBytes((short) i, current, 0); - assertTrue(Arrays.compareUnsigned(previous, 0, HalfFloatPoint.BYTES, current, 0, HalfFloatPoint.BYTES) < 0); + assertTrue( + Arrays.compareUnsigned( + previous, 0, HalfFloatPoint.BYTES, current, 0, HalfFloatPoint.BYTES) + < 0); assertEquals(i, HalfFloatPoint.sortableBytesToShort(current, 0)); } } @@ -209,12 +214,18 @@ public class TestHalfFloatPoint extends LuceneTestCase { // search and verify we found our doc IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); - assertEquals(1, searcher.count(HalfFloatPoint.newRangeQuery("field", - new float[]{0, -5}, new float[]{1.25f, -1}))); - assertEquals(0, searcher.count(HalfFloatPoint.newRangeQuery("field", - new float[]{0, 0}, new float[]{2, 2}))); - assertEquals(0, searcher.count(HalfFloatPoint.newRangeQuery("field", - new float[]{-10, -10}, new float[]{1, 2}))); + assertEquals( + 1, + searcher.count( + HalfFloatPoint.newRangeQuery("field", new float[] {0, -5}, new float[] {1.25f, -1}))); + assertEquals( + 0, + searcher.count( + HalfFloatPoint.newRangeQuery("field", new float[] {0, 0}, new float[] {2, 2}))); + assertEquals( + 0, + searcher.count( + HalfFloatPoint.newRangeQuery("field", new float[] {-10, -10}, new float[] {1, 2}))); reader.close(); writer.close(); @@ -229,7 +240,8 @@ public class TestHalfFloatPoint extends LuceneTestCase { assertEquals(HalfFloatPoint.shortBitsToHalfFloat((short) 1), HalfFloatPoint.nextUp(0f), 0f); // values that cannot be exactly represented as a half float 
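
For context on the conversions these half-float hunks exercise: encoding rounds an arbitrary float to the nearest representable 16-bit half float, and decoding then re-encoding is a fixed point. A minimal illustrative sketch (hypothetical demo class, not from the patch; it is declared in the same package as HalfFloatPoint, as the test is, so it compiles even if the helpers are package-private):

package org.apache.lucene.sandbox.document;

/** Hypothetical demo, not part of the patch: the half-float round trip under test. */
public class HalfFloatRoundTripDemo {
  public static void main(String[] args) {
    float f = 0.1f; // has no exact 16-bit half-float representation
    short bits = HalfFloatPoint.halfFloatToShortBits(f); // encode, rounding to nearest half float
    float rounded = HalfFloatPoint.shortBitsToHalfFloat(bits); // decode the rounded value
    // Re-encoding the rounded value yields the same bits (run with -ea to enable the assert):
    assert bits == HalfFloatPoint.halfFloatToShortBits(rounded);
    System.out.println(f + " -> 0x" + Integer.toHexString(bits & 0xFFFF) + " -> " + rounded);
  }
}
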
assertEquals(HalfFloatPoint.nextUp(0f), HalfFloatPoint.nextUp(Float.MIN_VALUE), 0f); - assertEquals(Float.floatToIntBits(-0f), Float.floatToIntBits(HalfFloatPoint.nextUp(-Float.MIN_VALUE))); + assertEquals( + Float.floatToIntBits(-0f), Float.floatToIntBits(HalfFloatPoint.nextUp(-Float.MIN_VALUE))); assertEquals(Float.floatToIntBits(0f), Float.floatToIntBits(HalfFloatPoint.nextUp(-0f))); } @@ -239,7 +251,8 @@ public class TestHalfFloatPoint extends LuceneTestCase { assertEquals(65504, HalfFloatPoint.nextDown(Float.POSITIVE_INFINITY), 0f); assertEquals(Float.floatToIntBits(-0f), Float.floatToIntBits(HalfFloatPoint.nextDown(0f))); // values that cannot be exactly represented as a half float - assertEquals(Float.floatToIntBits(0f), Float.floatToIntBits(HalfFloatPoint.nextDown(Float.MIN_VALUE))); + assertEquals( + Float.floatToIntBits(0f), Float.floatToIntBits(HalfFloatPoint.nextDown(Float.MIN_VALUE))); assertEquals(HalfFloatPoint.nextDown(-0f), HalfFloatPoint.nextDown(-Float.MIN_VALUE), 0f); assertEquals(Float.floatToIntBits(-0f), Float.floatToIntBits(HalfFloatPoint.nextDown(+0f))); } diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestFuzzyLikeThisQuery.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestFuzzyLikeThisQuery.java index 2dbd1ee5005..35adbce0757 100644 --- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestFuzzyLikeThisQuery.java +++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestFuzzyLikeThisQuery.java @@ -18,7 +18,6 @@ package org.apache.lucene.sandbox.queries; import java.io.IOException; import java.util.HashSet; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; @@ -47,9 +46,13 @@ public class TestFuzzyLikeThisQuery extends LuceneTestCase { analyzer = new MockAnalyzer(random()); directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random(), directory, newIndexWriterConfig(analyzer).setMergePolicy(newLogMergePolicy())); + RandomIndexWriter writer = + new RandomIndexWriter( + random(), + directory, + newIndexWriterConfig(analyzer).setMergePolicy(newLogMergePolicy())); - //Add series of docs with misspelt names + // Add series of docs with misspelt names addDoc(writer, "jonathon smythe", "1"); addDoc(writer, "jonathan smith", "2"); addDoc(writer, "johnathon smyth", "3"); @@ -74,8 +77,7 @@ public class TestFuzzyLikeThisQuery extends LuceneTestCase { writer.addDocument(doc); } - - //Tests that idf ranking is not favouring rare mis-spellings over a strong edit-distance match + // Tests that idf ranking is not favouring rare mis-spellings over a strong edit-distance match public void testClosestEditDistanceMatchComesFirst() throws Throwable { FuzzyLikeThisQuery flt = new FuzzyLikeThisQuery(10, analyzer); flt.addTerms("smith", "name", 2, 1); @@ -92,7 +94,7 @@ public class TestFuzzyLikeThisQuery extends LuceneTestCase { assertEquals("Should match most similar not most rare variant", "2", doc.get("id")); } - //Test multiple input words are having variants produced + // Test multiple input words are having variants produced public void testMultiWord() throws Throwable { FuzzyLikeThisQuery flt = new FuzzyLikeThisQuery(10, analyzer); flt.addTerms("jonathin smoth", "name", 2, 1); @@ -107,7 +109,7 @@ public class TestFuzzyLikeThisQuery extends LuceneTestCase { Document doc = searcher.doc(sd[0].doc); assertEquals("Should match most similar when using 2 words", "2", doc.get("id")); } - + // LUCENE-4809 public 
void testNonExistingField() throws Throwable { FuzzyLikeThisQuery flt = new FuzzyLikeThisQuery(10, analyzer); @@ -126,8 +128,7 @@ public class TestFuzzyLikeThisQuery extends LuceneTestCase { assertEquals("Should match most similar when using 2 words", "2", doc.get("id")); } - - //Test bug found when first query word does not match anything + // Test bug found when first query word does not match anything public void testNoMatchFirstWordBug() throws Throwable { FuzzyLikeThisQuery flt = new FuzzyLikeThisQuery(10, analyzer); flt.addTerms("fernando smith", "name", 2, 1); @@ -148,7 +149,6 @@ public class TestFuzzyLikeThisQuery extends LuceneTestCase { fltq1.addTerms("javi", "subject", 2, 2); FuzzyLikeThisQuery fltq2 = new FuzzyLikeThisQuery(10, analyzer); fltq2.addTerms("javi", "subject", 2, 2); - assertEquals("FuzzyLikeThisQuery with same attributes is not equal", fltq1, - fltq2); + assertEquals("FuzzyLikeThisQuery with same attributes is not equal", fltq1, fltq2); } } diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/LongHashSetTests.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/LongHashSetTests.java index c388d745bee..5401a1012fb 100644 --- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/LongHashSetTests.java +++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/LongHashSetTests.java @@ -21,8 +21,6 @@ import java.util.HashSet; import java.util.Set; import java.util.stream.Collectors; import java.util.stream.LongStream; - -import org.apache.lucene.sandbox.search.LongHashSet; import org.apache.lucene.util.LuceneTestCase; public class LongHashSetTests extends LuceneTestCase { @@ -91,11 +89,14 @@ public class LongHashSetTests extends LuceneTestCase { } } if (values.length > 0 && random().nextBoolean()) { - values[values.length/2] = Long.MIN_VALUE; + values[values.length / 2] = Long.MIN_VALUE; } - Set set1 = LongStream.of(values).mapToObj(Long::valueOf).collect(Collectors.toCollection(HashSet::new)); + Set set1 = + LongStream.of(values) + .mapToObj(Long::valueOf) + .collect(Collectors.toCollection(HashSet::new)); LongHashSet set2 = new LongHashSet(values); assertEquals(set1, set2); } } -} \ No newline at end of file +} diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestBM25FQuery.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestBM25FQuery.java index 96bfc93d352..0f96fcd8fc7 100644 --- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestBM25FQuery.java +++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestBM25FQuery.java @@ -16,6 +16,7 @@ */ package org.apache.lucene.sandbox.search; +import java.io.IOException; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field.Store; @@ -44,8 +45,6 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; -import java.io.IOException; - public class TestBM25FQuery extends LuceneTestCase { public void testInvalid() { BM25FQuery.Builder builder = new BM25FQuery.Builder(); @@ -68,10 +67,12 @@ public class TestBM25FQuery extends LuceneTestCase { assertEquals(actual, new TermQuery(new Term("field", "foo"))); builder.addTerm(new BytesRef("bar")); actual = searcher.rewrite(builder.build()); - assertEquals(actual, new SynonymQuery.Builder("field") - .addTerm(new Term("field", "foo")) - .addTerm(new Term("field", "bar")) - .build()); + assertEquals( + actual, + new SynonymQuery.Builder("field") + 
.addTerm(new Term("field", "foo")) + .addTerm(new Term("field", "bar")) + .build()); builder.addField("another_field", 1f); Query query = builder.build(); actual = searcher.rewrite(query); @@ -107,12 +108,15 @@ public class TestBM25FQuery extends LuceneTestCase { IndexReader reader = w.getReader(); IndexSearcher searcher = newSearcher(reader); - BM25FQuery query = new BM25FQuery.Builder() - .addField("f", 1f) - .addField("g", 1f) - .addTerm(new BytesRef("a")) - .build(); - TopScoreDocCollector collector = TopScoreDocCollector.create(Math.min(reader.numDocs(), Integer.MAX_VALUE), null, Integer.MAX_VALUE); + BM25FQuery query = + new BM25FQuery.Builder() + .addField("f", 1f) + .addField("g", 1f) + .addTerm(new BytesRef("a")) + .build(); + TopScoreDocCollector collector = + TopScoreDocCollector.create( + Math.min(reader.numDocs(), Integer.MAX_VALUE), null, Integer.MAX_VALUE); searcher.search(query, collector); TopDocs topDocs = collector.topDocs(); assertEquals(new TotalHits(11, TotalHits.Relation.EQUAL_TO), topDocs.totalHits); @@ -137,7 +141,7 @@ public class TestBM25FQuery extends LuceneTestCase { if (random().nextBoolean()) { doc.add(new TextField("a", "baz", Store.NO)); doc.add(new TextField("b", "baz", Store.NO)); - for (int k = 0; k < boost1+boost2; k++) { + for (int k = 0; k < boost1 + boost2; k++) { doc.add(new TextField("ab", "baz", Store.NO)); } w.addDocument(doc); @@ -160,18 +164,21 @@ public class TestBM25FQuery extends LuceneTestCase { IndexReader reader = w.getReader(); IndexSearcher searcher = newSearcher(reader); searcher.setSimilarity(new BM25Similarity()); - BM25FQuery query = new BM25FQuery.Builder() - .addField("a", (float) boost1) - .addField("b", (float) boost2) - .addTerm(new BytesRef("foo")) - .addTerm(new BytesRef("foo")) - .build(); + BM25FQuery query = + new BM25FQuery.Builder() + .addField("a", (float) boost1) + .addField("b", (float) boost2) + .addTerm(new BytesRef("foo")) + .addTerm(new BytesRef("foo")) + .build(); - TopScoreDocCollector bm25FCollector = TopScoreDocCollector.create(numMatch, null, Integer.MAX_VALUE); + TopScoreDocCollector bm25FCollector = + TopScoreDocCollector.create(numMatch, null, Integer.MAX_VALUE); searcher.search(query, bm25FCollector); TopDocs bm25FTopDocs = bm25FCollector.topDocs(); assertEquals(numMatch, bm25FTopDocs.totalHits.value); - TopScoreDocCollector collector = TopScoreDocCollector.create(reader.numDocs(), null, Integer.MAX_VALUE); + TopScoreDocCollector collector = + TopScoreDocCollector.create(reader.numDocs(), null, Integer.MAX_VALUE); searcher.search(new TermQuery(new Term("ab", "foo")), collector); TopDocs topDocs = collector.topDocs(); CheckHits.checkEqual(query, topDocs.scoreDocs, bm25FTopDocs.scoreDocs); @@ -190,20 +197,21 @@ public class TestBM25FQuery extends LuceneTestCase { String queryString = "foo"; Document doc = new Document(); - //both fields must contain tokens that match the query string "foo" + // both fields must contain tokens that match the query string "foo" doc.add(new TextField("f", "foo", Store.NO)); doc.add(new TextField("g", "foo baz", Store.NO)); w.addDocument(doc); IndexReader reader = w.getReader(); IndexSearcher searcher = newSearcher(reader); - BM25FQuery query = new BM25FQuery.Builder() + BM25FQuery query = + new BM25FQuery.Builder() .addField("f") .addField("g") .addTerm(new BytesRef(queryString)) .build(); TopDocs topDocs = searcher.search(query, 10); - CheckHits.checkDocIds("queried docs do not match", new int[]{0}, topDocs.scoreDocs); + CheckHits.checkDocIds("queried docs do not match", new 
int[] {0}, topDocs.scoreDocs); reader.close(); w.close(); @@ -231,14 +239,15 @@ public class TestBM25FQuery extends LuceneTestCase { IndexReader reader = w.getReader(); IndexSearcher searcher = newSearcher(reader); - BM25FQuery query = new BM25FQuery.Builder() + BM25FQuery query = + new BM25FQuery.Builder() .addField("f") .addField("g") .addTerm(new BytesRef(queryString)) .build(); TopDocs topDocs = searcher.search(query, 10); - //Return doc1 ahead of doc0 since its tf is higher - CheckHits.checkDocIds("queried docs do not match", new int[]{1,0}, topDocs.scoreDocs); + // Return doc1 ahead of doc0 since its tf is higher + CheckHits.checkDocIds("queried docs do not match", new int[] {1, 0}, topDocs.scoreDocs); reader.close(); w.close(); @@ -252,7 +261,8 @@ public class TestBM25FQuery extends LuceneTestCase { } @Override - public SimScorer scorer(float boost, CollectionStatistics collectionStats, TermStatistics... termStats) { + public SimScorer scorer( + float boost, CollectionStatistics collectionStats, TermStatistics... termStats) { return new BM25Similarity().scorer(boost, collectionStats, termStats); } } diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestCoveringQuery.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestCoveringQuery.java index 4fcc54f28ca..c16685b73fd 100644 --- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestCoveringQuery.java +++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestCoveringQuery.java @@ -21,7 +21,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; - import org.apache.lucene.document.Document; import org.apache.lucene.document.Field.Store; import org.apache.lucene.document.NumericDocValuesField; @@ -31,7 +30,6 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.MultiReader; import org.apache.lucene.index.Term; -import org.apache.lucene.sandbox.search.CoveringQuery; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.IndexSearcher; @@ -59,7 +57,8 @@ public class TestCoveringQuery extends LuceneTestCase { QueryUtils.checkEqual(q1, q3); // values source matters - CoveringQuery q4 = new CoveringQuery(Arrays.asList(tq2, tq1), LongValuesSource.fromLongField("other_field")); + CoveringQuery q4 = + new CoveringQuery(Arrays.asList(tq2, tq1), LongValuesSource.fromLongField("other_field")); QueryUtils.checkUnequal(q1, q4); // duplicates matter @@ -87,8 +86,10 @@ public class TestCoveringQuery extends LuceneTestCase { TermQuery tq2 = new TermQuery(new Term("foo", "quux")); LongValuesSource vs = LongValuesSource.fromIntField("field"); CoveringQuery q = new CoveringQuery(Arrays.asList(tq1, tq2), vs); - assertEquals("CoveringQuery(queries=[foo:bar, foo:quux], minimumNumberMatch=long(field))", q.toString()); - assertEquals("CoveringQuery(queries=[bar, quux], minimumNumberMatch=long(field))", q.toString("foo")); + assertEquals( + "CoveringQuery(queries=[foo:bar, foo:quux], minimumNumberMatch=long(field))", q.toString()); + assertEquals( + "CoveringQuery(queries=[bar, quux], minimumNumberMatch=long(field))", q.toString("foo")); } public void testRandom() throws IOException { @@ -140,25 +141,23 @@ public class TestCoveringQuery extends LuceneTestCase { QueryUtils.check(random(), q, searcher); for (int i = 1; i < 4; ++i) { - BooleanQuery.Builder builder = new BooleanQuery.Builder() - 
.setMinimumNumberShouldMatch(i); + BooleanQuery.Builder builder = new BooleanQuery.Builder().setMinimumNumberShouldMatch(i); for (Query query : queries) { builder.add(query, Occur.SHOULD); } Query q1 = builder.build(); Query q2 = new CoveringQuery(queries, LongValuesSource.constant(i)); - assertEquals( - searcher.count(q1), - searcher.count(q2)); + assertEquals(searcher.count(q1), searcher.count(q2)); } - Query filtered = new BooleanQuery.Builder() - .add(q, Occur.MUST) - .add(new TermQuery(new Term("field", "A")), Occur.MUST) - .build(); + Query filtered = + new BooleanQuery.Builder() + .add(q, Occur.MUST) + .add(new TermQuery(new Term("field", "A")), Occur.MUST) + .build(); QueryUtils.check(random(), filtered, searcher); } - + r.close(); dir.close(); } diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestDocValuesNumbersQuery.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestDocValuesNumbersQuery.java index 8f13f03f5aa..969c8e8054e 100644 --- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestDocValuesNumbersQuery.java +++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestDocValuesNumbersQuery.java @@ -16,6 +16,11 @@ */ package org.apache.lucene.sandbox.search; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field.Store; import org.apache.lucene.document.NumericDocValuesField; @@ -24,7 +29,6 @@ import org.apache.lucene.document.StringField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; -import org.apache.lucene.sandbox.search.DocValuesNumbersQuery; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; @@ -39,19 +43,19 @@ import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.Set; - public class TestDocValuesNumbersQuery extends LuceneTestCase { public void testEquals() { - assertEquals(new DocValuesNumbersQuery("field", 17L, 42L), new DocValuesNumbersQuery("field", 17L, 42L)); - assertEquals(new DocValuesNumbersQuery("field", 17L, 42L, 32416190071L), new DocValuesNumbersQuery("field", 17L, 32416190071L, 42L)); - assertFalse(new DocValuesNumbersQuery("field", 42L).equals(new DocValuesNumbersQuery("field2", 42L))); - assertFalse(new DocValuesNumbersQuery("field", 17L, 42L).equals(new DocValuesNumbersQuery("field", 17L, 32416190071L))); + assertEquals( + new DocValuesNumbersQuery("field", 17L, 42L), new DocValuesNumbersQuery("field", 17L, 42L)); + assertEquals( + new DocValuesNumbersQuery("field", 17L, 42L, 32416190071L), + new DocValuesNumbersQuery("field", 17L, 32416190071L, 42L)); + assertFalse( + new DocValuesNumbersQuery("field", 42L).equals(new DocValuesNumbersQuery("field2", 42L))); + assertFalse( + new DocValuesNumbersQuery("field", 17L, 42L) + .equals(new DocValuesNumbersQuery("field", 17L, 32416190071L))); } public void testDuelTermsQuery() throws IOException { @@ -71,7 +75,7 @@ public class TestDocValuesNumbersQuery extends LuceneTestCase { doc.add(new StringField("text", number.toString(), Store.NO)); doc.add(new NumericDocValuesField("long", number)); doc.add(new 
SortedNumericDocValuesField("twolongs", number)); - doc.add(new SortedNumericDocValuesField("twolongs", number*2)); + doc.add(new SortedNumericDocValuesField("twolongs", number * 2)); iw.addDocument(doc); } if (numNumbers > 1 && random().nextBoolean()) { @@ -90,13 +94,14 @@ public class TestDocValuesNumbersQuery extends LuceneTestCase { for (int i = 0; i < 100; ++i) { final float boost = random().nextFloat() * 10; - final int numQueryNumbers = TestUtil.nextInt(random(), 1, 1 << TestUtil.nextInt(random(), 1, 8)); + final int numQueryNumbers = + TestUtil.nextInt(random(), 1, 1 << TestUtil.nextInt(random(), 1, 8)); Set queryNumbers = new HashSet<>(); Set queryNumbersX2 = new HashSet<>(); for (int j = 0; j < numQueryNumbers; ++j) { Long number = allNumbers.get(random().nextInt(allNumbers.size())); queryNumbers.add(number); - queryNumbersX2.add(2*number); + queryNumbersX2.add(2 * number); } final BooleanQuery.Builder bq = new BooleanQuery.Builder(); for (Long number : queryNumbers) { @@ -153,7 +158,8 @@ public class TestDocValuesNumbersQuery extends LuceneTestCase { for (int i = 0; i < 100; ++i) { final float boost = random().nextFloat() * 10; - final int numQueryNumbers = TestUtil.nextInt(random(), 1, 1 << TestUtil.nextInt(random(), 1, 8)); + final int numQueryNumbers = + TestUtil.nextInt(random(), 1, 1 << TestUtil.nextInt(random(), 1, 8)); Set queryNumbers = new HashSet<>(); for (int j = 0; j < numQueryNumbers; ++j) { queryNumbers.add(allNumbers.get(random().nextInt(allNumbers.size()))); @@ -181,7 +187,8 @@ public class TestDocValuesNumbersQuery extends LuceneTestCase { } } - private void assertSameMatches(IndexSearcher searcher, Query q1, Query q2, boolean scores) throws IOException { + private void assertSameMatches(IndexSearcher searcher, Query q1, Query q2, boolean scores) + throws IOException { final int maxDoc = searcher.getIndexReader().maxDoc(); final TopDocs td1 = searcher.search(q1, maxDoc, scores ? Sort.RELEVANCE : Sort.INDEXORDER); final TopDocs td2 = searcher.search(q2, maxDoc, scores ? 
Sort.RELEVANCE : Sort.INDEXORDER); diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestDocValuesTermsQuery.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestDocValuesTermsQuery.java index 9cfbcc25037..24a5faca865 100644 --- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestDocValuesTermsQuery.java +++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestDocValuesTermsQuery.java @@ -19,7 +19,6 @@ package org.apache.lucene.sandbox.search; import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.lucene.document.Document; import org.apache.lucene.document.Field.Store; import org.apache.lucene.document.SortedDocValuesField; @@ -27,7 +26,6 @@ import org.apache.lucene.document.StringField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; -import org.apache.lucene.sandbox.search.DocValuesTermsQuery; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; @@ -47,10 +45,14 @@ public class TestDocValuesTermsQuery extends LuceneTestCase { public void testEquals() { assertEquals(new DocValuesTermsQuery("foo", "bar"), new DocValuesTermsQuery("foo", "bar")); - assertEquals(new DocValuesTermsQuery("foo", "bar"), new DocValuesTermsQuery("foo", "bar", "bar")); - assertEquals(new DocValuesTermsQuery("foo", "bar", "baz"), new DocValuesTermsQuery("foo", "baz", "bar")); - assertFalse(new DocValuesTermsQuery("foo", "bar").equals(new DocValuesTermsQuery("foo2", "bar"))); - assertFalse(new DocValuesTermsQuery("foo", "bar").equals(new DocValuesTermsQuery("foo", "baz"))); + assertEquals( + new DocValuesTermsQuery("foo", "bar"), new DocValuesTermsQuery("foo", "bar", "bar")); + assertEquals( + new DocValuesTermsQuery("foo", "bar", "baz"), new DocValuesTermsQuery("foo", "baz", "bar")); + assertFalse( + new DocValuesTermsQuery("foo", "bar").equals(new DocValuesTermsQuery("foo2", "bar"))); + assertFalse( + new DocValuesTermsQuery("foo", "bar").equals(new DocValuesTermsQuery("foo", "baz"))); } public void testDuelTermsQuery() throws IOException { @@ -88,7 +90,8 @@ public class TestDocValuesTermsQuery extends LuceneTestCase { for (int i = 0; i < 100; ++i) { final float boost = random().nextFloat() * 10; - final int numQueryTerms = TestUtil.nextInt(random(), 1, 1 << TestUtil.nextInt(random(), 1, 8)); + final int numQueryTerms = + TestUtil.nextInt(random(), 1, 1 << TestUtil.nextInt(random(), 1, 8)); List queryTerms = new ArrayList<>(); for (int j = 0; j < numQueryTerms; ++j) { queryTerms.add(allTerms.get(random().nextInt(allTerms.size()))); @@ -102,7 +105,8 @@ public class TestDocValuesTermsQuery extends LuceneTestCase { for (Term term : queryTerms) { bytesTerms.add(term.text()); } - final Query q2 = new BoostQuery(new DocValuesTermsQuery("f", bytesTerms.toArray(new String[0])), boost); + final Query q2 = + new BoostQuery(new DocValuesTermsQuery("f", bytesTerms.toArray(new String[0])), boost); assertSameMatches(searcher, q1, q2, true); } @@ -146,7 +150,8 @@ public class TestDocValuesTermsQuery extends LuceneTestCase { for (int i = 0; i < 100; ++i) { final float boost = random().nextFloat() * 10; - final int numQueryTerms = TestUtil.nextInt(random(), 1, 1 << TestUtil.nextInt(random(), 1, 8)); + final int numQueryTerms = + TestUtil.nextInt(random(), 1, 1 << TestUtil.nextInt(random(), 1, 8)); List queryTerms = new ArrayList<>(); for (int j = 0; j < 
numQueryTerms; ++j) { queryTerms.add(allTerms.get(random().nextInt(allTerms.size()))); @@ -160,7 +165,8 @@ public class TestDocValuesTermsQuery extends LuceneTestCase { for (Term term : queryTerms) { bytesTerms.add(term.text()); } - final Query q2 = new BoostQuery(new DocValuesTermsQuery("f", bytesTerms.toArray(new String[0])), boost); + final Query q2 = + new BoostQuery(new DocValuesTermsQuery("f", bytesTerms.toArray(new String[0])), boost); BooleanQuery.Builder bq1 = new BooleanQuery.Builder(); bq1.add(q1, Occur.MUST); @@ -178,7 +184,8 @@ public class TestDocValuesTermsQuery extends LuceneTestCase { } } - private void assertSameMatches(IndexSearcher searcher, Query q1, Query q2, boolean scores) throws IOException { + private void assertSameMatches(IndexSearcher searcher, Query q1, Query q2, boolean scores) + throws IOException { final int maxDoc = searcher.getIndexReader().maxDoc(); final TopDocs td1 = searcher.search(q1, maxDoc, scores ? Sort.RELEVANCE : Sort.INDEXORDER); final TopDocs td2 = searcher.search(q2, maxDoc, scores ? Sort.RELEVANCE : Sort.INDEXORDER); diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestFieldCacheTermsFilter.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestFieldCacheTermsFilter.java index 6a9663eb8dd..548fd3cc445 100644 --- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestFieldCacheTermsFilter.java +++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestFieldCacheTermsFilter.java @@ -16,20 +16,18 @@ */ package org.apache.lucene.sandbox.search; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.SortedDocValuesField; -import org.apache.lucene.sandbox.search.DocValuesTermsQuery; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.ScoreDoc; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.document.Document; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.RandomIndexWriter; -import org.apache.lucene.store.Directory; - import java.util.ArrayList; import java.util.List; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.SortedDocValuesField; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.LuceneTestCase; /** * A basic unit test for FieldCacheTermsFilter @@ -43,7 +41,7 @@ public class TestFieldCacheTermsFilter extends LuceneTestCase { RandomIndexWriter w = new RandomIndexWriter(random(), rd); for (int i = 0; i < 100; i++) { Document doc = new Document(); - int term = i * 10; //terms are units of 10; + int term = i * 10; // terms are units of 10; doc.add(newStringField(fieldName, "" + term, Field.Store.YES)); doc.add(new SortedDocValuesField(fieldName, new BytesRef("" + term))); w.addDocument(doc); @@ -57,18 +55,24 @@ public class TestFieldCacheTermsFilter extends LuceneTestCase { List terms = new ArrayList<>(); terms.add("5"); - results = searcher.search(new DocValuesTermsQuery(fieldName, terms.toArray(new String[0])), numDocs).scoreDocs; + results = + searcher.search(new DocValuesTermsQuery(fieldName, terms.toArray(new String[0])), numDocs) + .scoreDocs; assertEquals("Must match nothing", 0, results.length); terms = new ArrayList<>(); 
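
Both TestDocValuesTermsQuery (the "duel" above) and TestFieldCacheTermsFilter (this hunk) drive DocValuesTermsQuery, which matches documents whose doc value for a field equals any of the supplied terms; the duel asserts it matches exactly the documents matched by an equivalent TermQuery disjunction over the inverted index. A minimal sketch of that equivalence (hypothetical helper class, not from the patch; it assumes each document indexes the same value as both a StringField and a SortedDocValuesField, as these tests do):

import org.apache.lucene.index.Term;
import org.apache.lucene.sandbox.search.DocValuesTermsQuery;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

/** Hypothetical helper, not part of the patch: two queries that must match the same docs. */
public class DuelSketch {
  // Disjunction over the inverted index: can score and skip via postings.
  static Query invertedIndexVersion(String field, String... terms) {
    BooleanQuery.Builder bq = new BooleanQuery.Builder();
    for (String t : terms) {
      bq.add(new TermQuery(new Term(field, t)), Occur.SHOULD);
    }
    return bq.build();
  }

  // Same matches, but read from column-stride doc values; mainly useful as a slow filter.
  static Query docValuesVersion(String field, String... terms) {
    return new DocValuesTermsQuery(field, terms);
  }
}
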
terms.add("10"); - results = searcher.search(new DocValuesTermsQuery(fieldName, terms.toArray(new String[0])), numDocs).scoreDocs; + results = + searcher.search(new DocValuesTermsQuery(fieldName, terms.toArray(new String[0])), numDocs) + .scoreDocs; assertEquals("Must match 1", 1, results.length); terms = new ArrayList<>(); terms.add("10"); terms.add("20"); - results = searcher.search(new DocValuesTermsQuery(fieldName, terms.toArray(new String[0])), numDocs).scoreDocs; + results = + searcher.search(new DocValuesTermsQuery(fieldName, terms.toArray(new String[0])), numDocs) + .scoreDocs; assertEquals("Must match 2", 2, results.length); reader.close(); diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestIndexSortSortedNumericDocValuesRangeQuery.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestIndexSortSortedNumericDocValuesRangeQuery.java index 015549a2c11..885e589b0e1 100644 --- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestIndexSortSortedNumericDocValuesRangeQuery.java +++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestIndexSortSortedNumericDocValuesRangeQuery.java @@ -16,8 +16,9 @@ */ package org.apache.lucene.sandbox.search; -import java.io.IOException; +import static org.hamcrest.CoreMatchers.instanceOf; +import java.io.IOException; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.LongPoint; @@ -28,7 +29,6 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.RandomIndexWriter; -import org.apache.lucene.sandbox.search.IndexSortSortedNumericDocValuesRangeQuery; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.IndexSearcher; @@ -47,8 +47,6 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; -import static org.hamcrest.CoreMatchers.instanceOf; - public class TestIndexSortSortedNumericDocValuesRangeQuery extends LuceneTestCase { public void testSameHitsAsPointRangeQuery() throws IOException { @@ -83,8 +81,10 @@ public class TestIndexSortSortedNumericDocValuesRangeQuery extends LuceneTestCas iw.close(); for (int i = 0; i < 100; ++i) { - final long min = random().nextBoolean() ? Long.MIN_VALUE : TestUtil.nextLong(random(), -100, 10000); - final long max = random().nextBoolean() ? Long.MAX_VALUE : TestUtil.nextLong(random(), -100, 10000); + final long min = + random().nextBoolean() ? Long.MIN_VALUE : TestUtil.nextLong(random(), -100, 10000); + final long max = + random().nextBoolean() ? Long.MAX_VALUE : TestUtil.nextLong(random(), -100, 10000); final Query q1 = LongPoint.newRangeQuery("idx", min, max); final Query q2 = createQuery("dv", min, max); assertSameHits(searcher, q1, q2, false); @@ -95,7 +95,8 @@ public class TestIndexSortSortedNumericDocValuesRangeQuery extends LuceneTestCas } } - private void assertSameHits(IndexSearcher searcher, Query q1, Query q2, boolean scores) throws IOException { + private void assertSameHits(IndexSearcher searcher, Query q1, Query q2, boolean scores) + throws IOException { final int maxDoc = searcher.getIndexReader().maxDoc(); final TopDocs td1 = searcher.search(q1, maxDoc, scores ? Sort.RELEVANCE : Sort.INDEXORDER); final TopDocs td2 = searcher.search(q2, maxDoc, scores ? 
Sort.RELEVANCE : Sort.INDEXORDER); @@ -122,11 +123,17 @@ public class TestIndexSortSortedNumericDocValuesRangeQuery extends LuceneTestCas assertEquals("[3 TO 5]", q1.toString("foo")); assertEquals("foo:[3 TO 5]", q1.toString("bar")); - Query q2 = SortedSetDocValuesField.newSlowRangeQuery("foo", new BytesRef("bar"), new BytesRef("baz"), true, true); + Query q2 = + SortedSetDocValuesField.newSlowRangeQuery( + "foo", new BytesRef("bar"), new BytesRef("baz"), true, true); assertEquals("foo:[[62 61 72] TO [62 61 7a]]", q2.toString()); - q2 = SortedSetDocValuesField.newSlowRangeQuery("foo", new BytesRef("bar"), new BytesRef("baz"), false, true); + q2 = + SortedSetDocValuesField.newSlowRangeQuery( + "foo", new BytesRef("bar"), new BytesRef("baz"), false, true); assertEquals("foo:{[62 61 72] TO [62 61 7a]]", q2.toString()); - q2 = SortedSetDocValuesField.newSlowRangeQuery("foo", new BytesRef("bar"), new BytesRef("baz"), false, false); + q2 = + SortedSetDocValuesField.newSlowRangeQuery( + "foo", new BytesRef("bar"), new BytesRef("baz"), false, false); assertEquals("foo:{[62 61 72] TO [62 61 7a]}", q2.toString()); q2 = SortedSetDocValuesField.newSlowRangeQuery("foo", new BytesRef("bar"), null, true, true); assertEquals("foo:[[62 61 72] TO *}", q2.toString()); @@ -265,7 +272,7 @@ public class TestIndexSortSortedNumericDocValuesRangeQuery extends LuceneTestCas testIndexSortDocValuesWithSingleValue(true); } - private void testIndexSortDocValuesWithSingleValue(boolean reverse) throws IOException{ + private void testIndexSortDocValuesWithSingleValue(boolean reverse) throws IOException { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random())); @@ -364,7 +371,8 @@ public class TestIndexSortSortedNumericDocValuesRangeQuery extends LuceneTestCas assertNotEquals(query, rewrittenQuery); assertThat(rewrittenQuery, instanceOf(IndexSortSortedNumericDocValuesRangeQuery.class)); - IndexSortSortedNumericDocValuesRangeQuery rangeQuery = (IndexSortSortedNumericDocValuesRangeQuery) rewrittenQuery; + IndexSortSortedNumericDocValuesRangeQuery rangeQuery = + (IndexSortSortedNumericDocValuesRangeQuery) rewrittenQuery; assertEquals(new MatchNoDocsQuery(), rangeQuery.getFallbackQuery()); writer.close(); @@ -372,9 +380,7 @@ public class TestIndexSortSortedNumericDocValuesRangeQuery extends LuceneTestCas dir.close(); } - /** - * Test that the index sort optimization not activated if there is no index sort. - */ + /** Test that the index sort optimization not activated if there is no index sort. */ public void testNoIndexSort() throws Exception { Directory dir = newDirectory(); @@ -387,10 +393,7 @@ public class TestIndexSortSortedNumericDocValuesRangeQuery extends LuceneTestCas dir.close(); } - /** - * Test that the index sort optimization is not activated when the sort is - * on the wrong field. - */ + /** Test that the index sort optimization is not activated when the sort is on the wrong field. */ public void testIndexSortOnWrongField() throws Exception { Directory dir = newDirectory(); @@ -408,8 +411,8 @@ public class TestIndexSortSortedNumericDocValuesRangeQuery extends LuceneTestCas } /** - * Test that the index sort optimization is not activated when some documents - * have multiple values. + * Test that the index sort optimization is not activated when some documents have multiple + * values. 
*/ public void testMultiDocValues() throws Exception { Directory dir = newDirectory(); @@ -454,7 +457,9 @@ public class TestIndexSortSortedNumericDocValuesRangeQuery extends LuceneTestCas } private Query createQuery(String field, long lowerValue, long upperValue) { - Query fallbackQuery = SortedNumericDocValuesField.newSlowRangeQuery(field, lowerValue, upperValue); - return new IndexSortSortedNumericDocValuesRangeQuery(field, lowerValue, upperValue, fallbackQuery); + Query fallbackQuery = + SortedNumericDocValuesField.newSlowRangeQuery(field, lowerValue, upperValue); + return new IndexSortSortedNumericDocValuesRangeQuery( + field, lowerValue, upperValue, fallbackQuery); } } diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestLargeNumHitsTopDocsCollector.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestLargeNumHitsTopDocsCollector.java index 12e2d705cb5..3dcd17057c4 100644 --- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestLargeNumHitsTopDocsCollector.java +++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestLargeNumHitsTopDocsCollector.java @@ -18,13 +18,11 @@ package org.apache.lucene.sandbox.search; import java.io.IOException; - import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; -import org.apache.lucene.sandbox.search.LargeNumHitsTopDocsCollector; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.CheckHits; @@ -41,10 +39,11 @@ import org.apache.lucene.util.LuceneTestCase; public class TestLargeNumHitsTopDocsCollector extends LuceneTestCase { private Directory dir; private IndexReader reader; - private final Query testQuery = new BooleanQuery.Builder() - .add(new TermQuery(new Term("field", "5")), BooleanClause.Occur.SHOULD) - .add(new MatchAllDocsQuery(), BooleanClause.Occur.SHOULD) - .build(); + private final Query testQuery = + new BooleanQuery.Builder() + .add(new TermQuery(new Term("field", "5")), BooleanClause.Occur.SHOULD) + .add(new MatchAllDocsQuery(), BooleanClause.Occur.SHOULD) + .build(); @Override public void setUp() throws Exception { @@ -68,6 +67,7 @@ public class TestLargeNumHitsTopDocsCollector extends LuceneTestCase { dir = null; super.tearDown(); } + public void testRequestMoreHitsThanCollected() throws Exception { runNumHits(150); } @@ -83,16 +83,20 @@ public class TestLargeNumHitsTopDocsCollector extends LuceneTestCase { public void testIllegalArguments() throws IOException { IndexSearcher searcher = newSearcher(reader); LargeNumHitsTopDocsCollector largeCollector = new LargeNumHitsTopDocsCollector(15); - TopScoreDocCollector regularCollector = TopScoreDocCollector.create(15, null, Integer.MAX_VALUE); + TopScoreDocCollector regularCollector = + TopScoreDocCollector.create(15, null, Integer.MAX_VALUE); searcher.search(testQuery, largeCollector); searcher.search(testQuery, regularCollector); assertEquals(largeCollector.totalHits, regularCollector.getTotalHits()); - IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> { - largeCollector.topDocs(350_000); - }); + IllegalArgumentException expected = + expectThrows( + IllegalArgumentException.class, + () -> { + largeCollector.topDocs(350_000); + }); assertTrue(expected.getMessage().contains("Incorrect number of hits requested")); } @@ -100,7 +104,8 @@ public class 
TestLargeNumHitsTopDocsCollector extends LuceneTestCase { public void testNoPQBuild() throws IOException { IndexSearcher searcher = newSearcher(reader); LargeNumHitsTopDocsCollector largeCollector = new LargeNumHitsTopDocsCollector(250_000); - TopScoreDocCollector regularCollector = TopScoreDocCollector.create(250_000, null, Integer.MAX_VALUE); + TopScoreDocCollector regularCollector = + TopScoreDocCollector.create(250_000, null, Integer.MAX_VALUE); searcher.search(testQuery, largeCollector); searcher.search(testQuery, regularCollector); @@ -114,7 +119,8 @@ public class TestLargeNumHitsTopDocsCollector extends LuceneTestCase { public void testPQBuild() throws IOException { IndexSearcher searcher = newSearcher(reader); LargeNumHitsTopDocsCollector largeCollector = new LargeNumHitsTopDocsCollector(50); - TopScoreDocCollector regularCollector = TopScoreDocCollector.create(50, null, Integer.MAX_VALUE); + TopScoreDocCollector regularCollector = + TopScoreDocCollector.create(50, null, Integer.MAX_VALUE); searcher.search(testQuery, largeCollector); searcher.search(testQuery, regularCollector); @@ -128,7 +134,8 @@ public class TestLargeNumHitsTopDocsCollector extends LuceneTestCase { public void testNoPQHitsOrder() throws IOException { IndexSearcher searcher = newSearcher(reader); LargeNumHitsTopDocsCollector largeCollector = new LargeNumHitsTopDocsCollector(250_000); - TopScoreDocCollector regularCollector = TopScoreDocCollector.create(250_000, null, Integer.MAX_VALUE); + TopScoreDocCollector regularCollector = + TopScoreDocCollector.create(250_000, null, Integer.MAX_VALUE); searcher.search(testQuery, largeCollector); searcher.search(testQuery, regularCollector); @@ -152,7 +159,8 @@ public class TestLargeNumHitsTopDocsCollector extends LuceneTestCase { private void runNumHits(int numHits) throws IOException { IndexSearcher searcher = newSearcher(reader); LargeNumHitsTopDocsCollector largeCollector = new LargeNumHitsTopDocsCollector(numHits); - TopScoreDocCollector regularCollector = TopScoreDocCollector.create(numHits, null, Integer.MAX_VALUE); + TopScoreDocCollector regularCollector = + TopScoreDocCollector.create(numHits, null, Integer.MAX_VALUE); searcher.search(testQuery, largeCollector); searcher.search(testQuery, regularCollector); diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestLatLonBoundingBoxQueries.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestLatLonBoundingBoxQueries.java index ade7a769a0a..d66bbd95b32 100644 --- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestLatLonBoundingBoxQueries.java +++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestLatLonBoundingBoxQueries.java @@ -16,22 +16,22 @@ */ package org.apache.lucene.sandbox.search; -import org.apache.lucene.document.Document; -import org.apache.lucene.sandbox.document.LatLonBoundingBox; -import org.apache.lucene.geo.GeoTestUtil; -import org.apache.lucene.geo.Rectangle; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.RandomIndexWriter; -import org.apache.lucene.search.BaseRangeFieldQueryTestCase; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.Query; -import org.apache.lucene.store.Directory; - import static org.apache.lucene.geo.GeoEncodingUtils.decodeLatitude; import static org.apache.lucene.geo.GeoEncodingUtils.decodeLongitude; import static org.apache.lucene.geo.GeoEncodingUtils.encodeLatitude; import static org.apache.lucene.geo.GeoEncodingUtils.encodeLongitude; +import 
org.apache.lucene.document.Document; +import org.apache.lucene.geo.GeoTestUtil; +import org.apache.lucene.geo.Rectangle; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.sandbox.document.LatLonBoundingBox; +import org.apache.lucene.search.BaseRangeFieldQueryTestCase; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.store.Directory; + /** Random testing for GeoBoundingBoxField type. */ public class TestLatLonBoundingBoxQueries extends BaseRangeFieldQueryTestCase { private static final String FIELD_NAME = "geoBoundingBoxField"; @@ -44,7 +44,7 @@ public class TestLatLonBoundingBoxQueries extends BaseRangeFieldQueryTestCase { @Override protected void addRange(Document doc, Range r) { - GeoBBox b = (GeoBBox)r; + GeoBBox b = (GeoBBox) r; doc.add(new LatLonBoundingBox(FIELD_NAME, b.minLat, b.minLon, b.maxLat, b.maxLon)); } @@ -65,7 +65,8 @@ public class TestLatLonBoundingBoxQueries extends BaseRangeFieldQueryTestCase { // intersects (contains, crosses) document = new Document(); - document.add(new LatLonBoundingBox(FIELD_NAME, -10.282592503353953d, -1d, 1d, 14.096488952636719d)); + document.add( + new LatLonBoundingBox(FIELD_NAME, -10.282592503353953d, -1d, 1d, 14.096488952636719d)); writer.addDocument(document); // intersects (crosses) @@ -86,14 +87,26 @@ public class TestLatLonBoundingBoxQueries extends BaseRangeFieldQueryTestCase { // search IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); - assertEquals(5, searcher.count(LatLonBoundingBox.newIntersectsQuery(FIELD_NAME, - -10.282592503353953d, 0.0d, 0.0d, 14.096488952636719d))); - assertEquals(1, searcher.count(LatLonBoundingBox.newWithinQuery(FIELD_NAME, - -10.282592503353953d, 0.0d, 0.0d, 14.096488952636719d))); - assertEquals(1, searcher.count(LatLonBoundingBox.newContainsQuery(FIELD_NAME, - -10.282592503353953d, 0.0d, 0.0d, 14.096488952636719d))); - assertEquals(4, searcher.count(LatLonBoundingBox.newCrossesQuery(FIELD_NAME, - -10.282592503353953d, 0.0d, 0.0d, 14.096488952636719d))); + assertEquals( + 5, + searcher.count( + LatLonBoundingBox.newIntersectsQuery( + FIELD_NAME, -10.282592503353953d, 0.0d, 0.0d, 14.096488952636719d))); + assertEquals( + 1, + searcher.count( + LatLonBoundingBox.newWithinQuery( + FIELD_NAME, -10.282592503353953d, 0.0d, 0.0d, 14.096488952636719d))); + assertEquals( + 1, + searcher.count( + LatLonBoundingBox.newContainsQuery( + FIELD_NAME, -10.282592503353953d, 0.0d, 0.0d, 14.096488952636719d))); + assertEquals( + 4, + searcher.count( + LatLonBoundingBox.newCrossesQuery( + FIELD_NAME, -10.282592503353953d, 0.0d, 0.0d, 14.096488952636719d))); reader.close(); writer.close(); @@ -102,7 +115,8 @@ public class TestLatLonBoundingBoxQueries extends BaseRangeFieldQueryTestCase { public void testToString() { LatLonBoundingBox field = new LatLonBoundingBox(FIELD_NAME, -20d, -180d, 20d, -100d); - String expected = "LatLonBoundingBox "; + String expected = + "LatLonBoundingBox "; assertEquals(expected, field.toString()); } @@ -119,25 +133,25 @@ public class TestLatLonBoundingBoxQueries extends BaseRangeFieldQueryTestCase { @Override protected Query newIntersectsQuery(Range r) { - GeoBBox b = (GeoBBox)r; + GeoBBox b = (GeoBBox) r; return LatLonBoundingBox.newIntersectsQuery(FIELD_NAME, b.minLat, b.minLon, b.maxLat, b.maxLon); } @Override protected Query newContainsQuery(Range r) { - GeoBBox b = (GeoBBox)r; + GeoBBox b = (GeoBBox) r; return 
LatLonBoundingBox.newContainsQuery(FIELD_NAME, b.minLat, b.minLon, b.maxLat, b.maxLon); } @Override protected Query newWithinQuery(Range r) { - GeoBBox b = (GeoBBox)r; + GeoBBox b = (GeoBBox) r; return LatLonBoundingBox.newWithinQuery(FIELD_NAME, b.minLat, b.minLon, b.maxLat, b.maxLon); } @Override protected Query newCrossesQuery(Range r) { - GeoBBox b = (GeoBBox)r; + GeoBBox b = (GeoBBox) r; return LatLonBoundingBox.newCrossesQuery(FIELD_NAME, b.minLat, b.minLon, b.maxLat, b.maxLon); } @@ -153,16 +167,16 @@ public class TestLatLonBoundingBoxQueries extends BaseRangeFieldQueryTestCase { maxLat = quantizeLat(box.maxLat); maxLon = quantizeLon(box.maxLon); -// minLat = quantizeLat(Math.min(box.minLat, box.maxLat)); -// minLon = quantizeLon(Math.max(box.minLat, box.maxLat)); -// maxLat = quantizeLat(box.maxLat); -// maxLon = quantizeLon(box.maxLon); + // minLat = quantizeLat(Math.min(box.minLat, box.maxLat)); + // minLon = quantizeLon(Math.max(box.minLat, box.maxLat)); + // maxLat = quantizeLat(box.maxLat); + // maxLon = quantizeLon(box.maxLon); -// if (maxLon == -180d) { -// // index and search handle this fine, but the test validator -// // struggles when maxLon == -180; so lets correct -// maxLon = 180d; -// } + // if (maxLon == -180d) { + // // index and search handle this fine, but the test validator + // // struggles when maxLon == -180; so lets correct + // maxLon = 180d; + // } } protected static double quantizeLat(double lat) { @@ -191,9 +205,9 @@ public class TestLatLonBoundingBoxQueries extends BaseRangeFieldQueryTestCase { @Override protected void setMin(int dim, Object val) { if (dim == 0) { - setMinLat((Double)val); + setMinLat((Double) val); } else if (dim == 1) { - setMinLon((Double)val); + setMinLon((Double) val); } else { throw new IndexOutOfBoundsException("dimension " + dim + " is greater than " + dimension); } @@ -248,9 +262,9 @@ public class TestLatLonBoundingBoxQueries extends BaseRangeFieldQueryTestCase { @Override protected void setMax(int dim, Object val) { if (dim == 0) { - setMaxLat((Double)val); + setMaxLat((Double) val); } else if (dim == 1) { - setMaxLon((Double)val); + setMaxLon((Double) val); } else { throw new IndexOutOfBoundsException("dimension " + dim + " is greater than " + dimension); } @@ -258,7 +272,7 @@ public class TestLatLonBoundingBoxQueries extends BaseRangeFieldQueryTestCase { @Override protected boolean isEqual(Range other) { - GeoBBox o = (GeoBBox)other; + GeoBBox o = (GeoBBox) other; if (this.dimension != o.dimension) return false; if (this.minLat != o.minLat) return false; if (this.minLon != o.minLon) return false; @@ -269,7 +283,7 @@ public class TestLatLonBoundingBoxQueries extends BaseRangeFieldQueryTestCase { @Override protected boolean isDisjoint(Range other) { - GeoBBox o = (GeoBBox)other; + GeoBBox o = (GeoBBox) other; if (minLat > o.maxLat || maxLat < o.minLat) return true; if (minLon > o.maxLon || maxLon < o.minLon) return true; return false; @@ -277,13 +291,13 @@ public class TestLatLonBoundingBoxQueries extends BaseRangeFieldQueryTestCase { @Override protected boolean isWithin(Range other) { - GeoBBox o = (GeoBBox)other; + GeoBBox o = (GeoBBox) other; return o.contains(this); } @Override protected boolean contains(Range other) { - GeoBBox o = (GeoBBox)other; + GeoBBox o = (GeoBBox) other; if (minLat > o.minLat || maxLat < o.maxLat) return false; if (minLon > o.minLon || maxLon < o.maxLon) return false; return true; diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestMultiRangeQueries.java 
b/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestMultiRangeQueries.java index a73d6d2d0ec..e7498ff2ac7 100644 --- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestMultiRangeQueries.java +++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestMultiRangeQueries.java @@ -18,18 +18,17 @@ package org.apache.lucene.sandbox.search; import java.io.IOException; - import org.apache.lucene.document.Document; import org.apache.lucene.document.DoublePoint; -import org.apache.lucene.sandbox.document.DoublePointMultiRangeBuilder; import org.apache.lucene.document.FloatPoint; -import org.apache.lucene.sandbox.document.FloatPointMultiRangeBuilder; import org.apache.lucene.document.IntPoint; -import org.apache.lucene.sandbox.document.IntPointMultiRangeBuilder; import org.apache.lucene.document.LongPoint; -import org.apache.lucene.sandbox.document.LongPointMultiRangeBuilder; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.sandbox.document.DoublePointMultiRangeBuilder; +import org.apache.lucene.sandbox.document.FloatPointMultiRangeBuilder; +import org.apache.lucene.sandbox.document.IntPointMultiRangeBuilder; +import org.apache.lucene.sandbox.document.LongPointMultiRangeBuilder; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; @@ -54,7 +53,7 @@ public class TestMultiRangeQueries extends LuceneTestCase { IndexSearcher searcher = new IndexSearcher(reader); searcher.setQueryCache(null); DoublePointMultiRangeBuilder builder = new DoublePointMultiRangeBuilder("point", numDims); - for (int j = 0;j < numVals; j++) { + for (int j = 0; j < numVals; j++) { double[] lowerBound = new double[numDims]; double[] upperBound = new double[numDims]; for (int i = 0; i < numDims; ++i) { @@ -89,7 +88,7 @@ public class TestMultiRangeQueries extends LuceneTestCase { iw.commit(); // One range matches - double[] firstLowerRange= {111.3, 294.2, 502.8}; + double[] firstLowerRange = {111.3, 294.2, 502.8}; double[] firstUpperRange = {117.3, 301.4, 514.5}; double[] secondLowerRange = {15.3, 4.5, 415.7}; @@ -109,7 +108,7 @@ public class TestMultiRangeQueries extends LuceneTestCase { assertEquals(searcher.count(query), 1); // Both ranges match - double[] firstMatchingLowerRange= {111.3, 294.2, 502.4}; + double[] firstMatchingLowerRange = {111.3, 294.2, 502.4}; double[] firstMatchingUpperRange = {117.6, 301.8, 514.2}; double[] secondMatchingLowerRange = {212.4, 512.3, 415.7}; @@ -140,7 +139,7 @@ public class TestMultiRangeQueries extends LuceneTestCase { assertEquals(searcher.count(query), 0); // Lower point is equal to a point - double[] firstEqualLowerRange= {112.4, 296.2, 512.7}; + double[] firstEqualLowerRange = {112.4, 296.2, 512.7}; double[] firstEqualUpperRange = {117.6, 301.8, 514.2}; double[] secondEqualLowerRange = {219.3, 514.7, 624.2}; @@ -175,7 +174,7 @@ public class TestMultiRangeQueries extends LuceneTestCase { IndexSearcher searcher = new IndexSearcher(reader); searcher.setQueryCache(null); LongPointMultiRangeBuilder builder = new LongPointMultiRangeBuilder("point", numDims); - for (int j = 0;j < numVals; j++) { + for (int j = 0; j < numVals; j++) { long[] lowerBound = new long[numDims]; long[] upperBound = new long[numDims]; for (int i = 0; i < numDims; ++i) { @@ -210,7 +209,7 @@ public class TestMultiRangeQueries extends LuceneTestCase { iw.commit(); // One range matches - long[] firstLowerRange= {111, 294, 502}; + long[] firstLowerRange = 
{111, 294, 502}; long[] firstUpperRange = {117, 301, 514}; long[] secondLowerRange = {15, 4, 415}; @@ -230,13 +229,12 @@ public class TestMultiRangeQueries extends LuceneTestCase { assertEquals(searcher.count(query), 1); // Both ranges match - long[] firstMatchingLowerRange= {111, 294, 502}; + long[] firstMatchingLowerRange = {111, 294, 502}; long[] firstMatchingUpperRange = {117, 301, 514}; long[] secondMatchingLowerRange = {212, 512, 415}; long[] secondMatchingUpperRange = {228, 538, 647}; - LongPointMultiRangeBuilder builder2 = new LongPointMultiRangeBuilder("point", 3); builder2.add(firstMatchingLowerRange, firstMatchingUpperRange); @@ -262,7 +260,7 @@ public class TestMultiRangeQueries extends LuceneTestCase { assertEquals(searcher.count(query), 0); // Lower point is equal to a point - long[] firstEqualsLowerPoint= {112, 296, 512}; + long[] firstEqualsLowerPoint = {112, 296, 512}; long[] firstEqualsUpperPoint = {219, 514, 624}; long[] secondEqualsLowerPoint = {11246, 19388, 21248}; @@ -297,7 +295,7 @@ public class TestMultiRangeQueries extends LuceneTestCase { IndexSearcher searcher = new IndexSearcher(reader); searcher.setQueryCache(null); FloatPointMultiRangeBuilder builder = new FloatPointMultiRangeBuilder("point", numDims); - for (int j = 0;j < numVals; j++) { + for (int j = 0; j < numVals; j++) { float[] lowerBound = new float[numDims]; float[] upperBound = new float[numDims]; for (int i = 0; i < numDims; ++i) { @@ -332,7 +330,7 @@ public class TestMultiRangeQueries extends LuceneTestCase { iw.commit(); // One range matches - float[] firstLowerRange= {111.3f, 294.7f, 502.1f}; + float[] firstLowerRange = {111.3f, 294.7f, 502.1f}; float[] firstUpperRange = {117.2f, 301.6f, 514.3f}; float[] secondLowerRange = {15.2f, 4.3f, 415.2f}; @@ -352,7 +350,7 @@ public class TestMultiRangeQueries extends LuceneTestCase { assertEquals(searcher.count(query), 1); // Both ranges match - float[] firstMatchingLowerRange= {111f, 294f, 502f}; + float[] firstMatchingLowerRange = {111f, 294f, 502f}; float[] firstMatchingUpperRange = {117f, 301f, 514f}; float[] secondMatchingLowerRange = {212f, 512f, 415f}; @@ -383,7 +381,7 @@ public class TestMultiRangeQueries extends LuceneTestCase { assertEquals(searcher.count(query), 0); // Lower point is equal to a point - float[] firstEqualsLowerPoint= {112.4f, 296.3f, 512.1f}; + float[] firstEqualsLowerPoint = {112.4f, 296.3f, 512.1f}; float[] firstEqualsUpperPoint = {117.3f, 299.4f, 519.3f}; float[] secondEqualsLowerPoint = {219.7f, 514.2f, 624.6f}; @@ -418,7 +416,7 @@ public class TestMultiRangeQueries extends LuceneTestCase { IndexSearcher searcher = new IndexSearcher(reader); searcher.setQueryCache(null); IntPointMultiRangeBuilder builder = new IntPointMultiRangeBuilder("point", numDims); - for (int j = 0;j < numVals; j++) { + for (int j = 0; j < numVals; j++) { int[] lowerBound = new int[numDims]; int[] upperBound = new int[numDims]; for (int i = 0; i < numDims; ++i) { @@ -453,7 +451,7 @@ public class TestMultiRangeQueries extends LuceneTestCase { iw.commit(); // One range matches - int[] firstLowerRange= {111, 294, 502}; + int[] firstLowerRange = {111, 294, 502}; int[] firstUpperRange = {117, 301, 514}; int[] secondLowerRange = {15, 4, 415}; @@ -473,13 +471,12 @@ public class TestMultiRangeQueries extends LuceneTestCase { assertEquals(searcher.count(query), 1); // Both ranges match - int[] firstMatchingLowerRange= {111, 294, 502}; + int[] firstMatchingLowerRange = {111, 294, 502}; int[] firstMatchingUpperRange = {117, 301, 514}; int[] 
secondMatchingLowerRange = {212, 512, 415}; int[] secondMatchingUpperRange = {228, 538, 647}; - IntPointMultiRangeBuilder builder2 = new IntPointMultiRangeBuilder("point", 3); builder2.add(firstMatchingLowerRange, firstMatchingUpperRange); @@ -505,7 +502,7 @@ public class TestMultiRangeQueries extends LuceneTestCase { assertEquals(searcher.count(query), 0); // None match - int[] firstEqualsPointLower= {112, 296, 512}; + int[] firstEqualsPointLower = {112, 296, 512}; int[] firstEqualsPointUpper = {117, 299, 517}; int[] secondEqualsPointLower = {219, 514, 624}; @@ -525,7 +522,7 @@ public class TestMultiRangeQueries extends LuceneTestCase { } public void testToString() { - double[] firstDoubleLowerRange= {111, 294.3, 502.4}; + double[] firstDoubleLowerRange = {111, 294.3, 502.4}; double[] firstDoubleUpperRange = {117.3, 301.8, 514.3}; double[] secondDoubleLowerRange = {15.3, 412.8, 415.1}; @@ -538,10 +535,11 @@ public class TestMultiRangeQueries extends LuceneTestCase { Query query = stringTestbuilder.build(); - assertEquals("point:{[111.0 TO 117.3],[294.3 TO 301.8],[502.4 TO 514.3]},{[15.3 TO 200.4],[412.8 TO 567.4],[415.1 TO 642.2]}", + assertEquals( + "point:{[111.0 TO 117.3],[294.3 TO 301.8],[502.4 TO 514.3]},{[15.3 TO 200.4],[412.8 TO 567.4],[415.1 TO 642.2]}", query.toString()); - long[] firstLongLowerRange= {111, 294, 502}; + long[] firstLongLowerRange = {111, 294, 502}; long[] firstLongUpperRange = {117, 301, 514}; long[] secondLongLowerRange = {15, 412, 415}; @@ -554,26 +552,29 @@ public class TestMultiRangeQueries extends LuceneTestCase { query = stringLongTestbuilder.build(); - assertEquals("point:{[111 TO 117],[294 TO 301],[502 TO 514]},{[15 TO 200],[412 TO 567],[415 TO 642]}", + assertEquals( + "point:{[111 TO 117],[294 TO 301],[502 TO 514]},{[15 TO 200],[412 TO 567],[415 TO 642]}", query.toString()); - float[] firstFloatLowerRange= {111.3f, 294.4f, 502.2f}; + float[] firstFloatLowerRange = {111.3f, 294.4f, 502.2f}; float[] firstFloatUpperRange = {117.7f, 301.2f, 514.4f}; float[] secondFloatLowerRange = {15.3f, 412.2f, 415.9f}; float[] secondFloatUpperRange = {200.2f, 567.4f, 642.3f}; - FloatPointMultiRangeBuilder stringFloatTestbuilder = new FloatPointMultiRangeBuilder("point", 3); + FloatPointMultiRangeBuilder stringFloatTestbuilder = + new FloatPointMultiRangeBuilder("point", 3); stringFloatTestbuilder.add(firstFloatLowerRange, firstFloatUpperRange); stringFloatTestbuilder.add(secondFloatLowerRange, secondFloatUpperRange); query = stringFloatTestbuilder.build(); - assertEquals("point:{[111.3 TO 117.7],[294.4 TO 301.2],[502.2 TO 514.4]},{[15.3 TO 200.2],[412.2 TO 567.4],[415.9 TO 642.3]}", + assertEquals( + "point:{[111.3 TO 117.7],[294.4 TO 301.2],[502.2 TO 514.4]},{[15.3 TO 200.2],[412.2 TO 567.4],[415.9 TO 642.3]}", query.toString()); - int[] firstIntLowerRange= {111, 294, 502}; + int[] firstIntLowerRange = {111, 294, 502}; int[] firstIntUpperRange = {117, 301, 514}; int[] secondIntLowerRange = {15, 412, 415}; @@ -586,7 +587,8 @@ public class TestMultiRangeQueries extends LuceneTestCase { query = stringIntTestbuilder.build(); - assertEquals("point:{[111 TO 117],[294 TO 301],[502 TO 514]},{[15 TO 200],[412 TO 567],[415 TO 642]}", + assertEquals( + "point:{[111 TO 117],[294 TO 301],[502 TO 514]},{[15 TO 200],[412 TO 567],[415 TO 642]}", query.toString()); } } diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestNearest.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestNearest.java index f64c15d7826..2a16dc91818 100644 --- 
a/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestNearest.java +++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestNearest.java @@ -18,7 +18,6 @@ package org.apache.lucene.sandbox.search; import java.util.Arrays; import java.util.Comparator; - import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.LatLonDocValuesField; @@ -33,7 +32,6 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.SerialMergeScheduler; import org.apache.lucene.index.Term; -import org.apache.lucene.sandbox.search.LatLonPointPrototypeQueries; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; @@ -44,7 +42,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.SloppyMath; import org.apache.lucene.util.TestUtil; - + public class TestNearest extends LuceneTestCase { public void testNearestNeighborWithDeletedDocs() throws Exception { @@ -61,15 +59,18 @@ public class TestNearest extends LuceneTestCase { w.addDocument(doc); DirectoryReader r = w.getReader(); - // can't wrap because we require Lucene60PointsFormat directly but e.g. ParallelReader wraps with its own points impl: + // can't wrap because we require Lucene60PointsFormat directly but e.g. ParallelReader wraps + // with its own points impl: IndexSearcher s = newSearcher(r, false); - FieldDoc hit = (FieldDoc) LatLonPointPrototypeQueries.nearest(s, "point", 40.0, 50.0, 1).scoreDocs[0]; + FieldDoc hit = + (FieldDoc) LatLonPointPrototypeQueries.nearest(s, "point", 40.0, 50.0, 1).scoreDocs[0]; assertEquals("0", r.document(hit.doc).getField("id").stringValue()); r.close(); w.deleteDocuments(new Term("id", "0")); r = w.getReader(); - // can't wrap because we require Lucene60PointsFormat directly but e.g. ParallelReader wraps with its own points impl: + // can't wrap because we require Lucene60PointsFormat directly but e.g. ParallelReader wraps + // with its own points impl: s = newSearcher(r, false); hit = (FieldDoc) LatLonPointPrototypeQueries.nearest(s, "point", 40.0, 50.0, 1).scoreDocs[0]; assertEquals("1", r.document(hit.doc).getField("id").stringValue()); @@ -91,18 +92,22 @@ public class TestNearest extends LuceneTestCase { w.addDocument(doc); DirectoryReader r = w.getReader(); - // can't wrap because we require Lucene60PointsFormat directly but e.g. ParallelReader wraps with its own points impl: + // can't wrap because we require Lucene60PointsFormat directly but e.g. ParallelReader wraps + // with its own points impl: IndexSearcher s = newSearcher(r, false); - FieldDoc hit = (FieldDoc) LatLonPointPrototypeQueries.nearest(s, "point", 40.0, 50.0, 1).scoreDocs[0]; + FieldDoc hit = + (FieldDoc) LatLonPointPrototypeQueries.nearest(s, "point", 40.0, 50.0, 1).scoreDocs[0]; assertEquals("0", r.document(hit.doc).getField("id").stringValue()); r.close(); w.deleteDocuments(new Term("id", "0")); w.deleteDocuments(new Term("id", "1")); r = w.getReader(); - // can't wrap because we require Lucene60PointsFormat directly but e.g. ParallelReader wraps with its own points impl: + // can't wrap because we require Lucene60PointsFormat directly but e.g. 
ParallelReader wraps + // with its own points impl: s = newSearcher(r, false); - assertEquals(0, LatLonPointPrototypeQueries.nearest(s, "point", 40.0, 50.0, 1).scoreDocs.length); + assertEquals( + 0, LatLonPointPrototypeQueries.nearest(s, "point", 40.0, 50.0, 1).scoreDocs.length); r.close(); w.close(); dir.close(); @@ -121,8 +126,11 @@ public class TestNearest extends LuceneTestCase { w.addDocument(doc); DirectoryReader r = DirectoryReader.open(w); - // can't wrap because we require Lucene60PointsFormat directly but e.g. ParallelReader wraps with its own points impl: - ScoreDoc[] hits = LatLonPointPrototypeQueries.nearest(newSearcher(r, false), "point", 45.0, 50.0, 2).scoreDocs; + // can't wrap because we require Lucene60PointsFormat directly but e.g. ParallelReader wraps + // with its own points impl: + ScoreDoc[] hits = + LatLonPointPrototypeQueries.nearest(newSearcher(r, false), "point", 45.0, 50.0, 2) + .scoreDocs; assertEquals("0", r.document(hits[0].doc).getField("id").stringValue()); assertEquals("1", r.document(hits[1].doc).getField("id").stringValue()); @@ -135,8 +143,13 @@ public class TestNearest extends LuceneTestCase { Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir, getIndexWriterConfig()); DirectoryReader r = w.getReader(); - // can't wrap because we require Lucene60PointsFormat directly but e.g. ParallelReader wraps with its own points impl: - assertEquals(0, LatLonPointPrototypeQueries.nearest(newSearcher(r, false), "point", 40.0, 50.0, 1).scoreDocs.length); + // can't wrap because we require Lucene60PointsFormat directly but e.g. ParallelReader wraps + // with its own points impl: + assertEquals( + 0, + LatLonPointPrototypeQueries.nearest(newSearcher(r, false), "point", 40.0, 50.0, 1) + .scoreDocs + .length); r.close(); w.close(); dir.close(); @@ -151,7 +164,7 @@ public class TestNearest extends LuceneTestCase { } public void testNearestNeighborRandom() throws Exception { - + int numPoints = atLeast(1000); Directory dir; if (numPoints > 100000) { @@ -166,7 +179,7 @@ public class TestNearest extends LuceneTestCase { iwc.setMergePolicy(newLogMergePolicy()); iwc.setMergeScheduler(new SerialMergeScheduler()); RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); - for(int id=0;id - * The main goal of this class is to verify that {@link PhraseWildcardQuery} - * has the same ranking and same scoring than both {@link MultiPhraseQuery} - * and {@link SpanNearQuery}. - *
<p>
- * Note that the ranking and scoring are equal if the segment optimization
- * is disabled, otherwise it may change the score, but the ranking is most
- * often the same.
+ *
+ * <p>The main goal of this class is to verify that {@link PhraseWildcardQuery} has the same ranking
+ * and same scoring than both {@link MultiPhraseQuery} and {@link SpanNearQuery}.
+ *
+ * <p>
    Note that the ranking and scoring are equal if the segment optimization is disabled, otherwise + * it may change the score, but the ranking is most often the same. */ public class TestPhraseWildcardQuery extends LuceneTestCase { @@ -81,10 +77,14 @@ public class TestPhraseWildcardQuery extends LuceneTestCase { public void setUp() throws Exception { super.setUp(); directory = newDirectory(); - RandomIndexWriter iw = new RandomIndexWriter(random(), directory, - newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE)); // do not accidentally merge - // the two segments we create - // here + RandomIndexWriter iw = + new RandomIndexWriter( + random(), + directory, + newIndexWriterConfig() + .setMergePolicy(NoMergePolicy.INSTANCE)); // do not accidentally merge + // the two segments we create + // here iw.setDoRandomForceMerge(false); // Keep the segments separated. addSegments(iw); reader = iw.getReader(); @@ -109,10 +109,11 @@ public class TestPhraseWildcardQuery extends LuceneTestCase { searchAndCheckResults(field(1), 100, "e*", "b*"); assertCounters().singleTermAnalysis(0).multiTermAnalysis(2).segmentUse(4).segmentSkip(0); - expectDifferentScoreForSpanNearQueryWithMultiTermSubset(() -> { - searchAndCheckResults(field(2), 100, "tim*", "t*"); - assertCounters().singleTermAnalysis(0).multiTermAnalysis(2).segmentUse(2).segmentSkip(1); - }); + expectDifferentScoreForSpanNearQueryWithMultiTermSubset( + () -> { + searchAndCheckResults(field(2), 100, "tim*", "t*"); + assertCounters().singleTermAnalysis(0).multiTermAnalysis(2).segmentUse(2).segmentSkip(1); + }); } public void testThreeMultiTerms() throws Exception { @@ -122,10 +123,11 @@ public class TestPhraseWildcardQuery extends LuceneTestCase { searchAndCheckResults(field(0), 100, "t?e", "u*", "e*"); assertCounters().singleTermAnalysis(0).multiTermAnalysis(3).segmentUse(4).segmentSkip(1); - expectDifferentScoreForSpanNearQueryWithMultiTermSubset(() -> { - searchAndCheckResults(field(0), 100, "t?e", "b*", "b*"); - assertCounters().singleTermAnalysis(0).multiTermAnalysis(3).segmentUse(4).segmentSkip(1); - }); + expectDifferentScoreForSpanNearQueryWithMultiTermSubset( + () -> { + searchAndCheckResults(field(0), 100, "t?e", "b*", "b*"); + assertCounters().singleTermAnalysis(0).multiTermAnalysis(3).segmentUse(4).segmentSkip(1); + }); } public void testOneSingleTermTwoMultiTerms() throws Exception { @@ -181,7 +183,8 @@ public class TestPhraseWildcardQuery extends LuceneTestCase { assertCounters().multiTermAnalysis(2).segmentUse(4).segmentSkip(0).queryEarlyStop(0); searchAndCheckResults(field(0), 100, 0, true, "t?e", "b*", "e*"); - // "t?e" matches only in the first segment. This term adds 2 segment accesses and 1 segment skip. + // "t?e" matches only in the first segment. This term adds 2 segment accesses and 1 segment + // skip. // The other multi-terms match in the first segment. Each one adds 1 segment access. // So expecting 3 segment accesses and 1 segment skips. 
assertCounters().multiTermAnalysis(3).segmentUse(4).segmentSkip(1).queryEarlyStop(0); @@ -189,46 +192,77 @@ public class TestPhraseWildcardQuery extends LuceneTestCase { searchAndCheckResults(field(0), 100, 0, true, "t?e", "blind", "e*"); assertCounters().multiTermAnalysis(1).segmentUse(3).segmentSkip(2).queryEarlyStop(1); - expectDifferentScoreForSpanNearQueryWithMultiTermSubset(() -> { - searchAndCheckResults(field(2), 100, 0, true, "tim*", "t*"); - assertCounters().multiTermAnalysis(2).segmentUse(2).segmentSkip(1).queryEarlyStop(0); - }); + expectDifferentScoreForSpanNearQueryWithMultiTermSubset( + () -> { + searchAndCheckResults(field(2), 100, 0, true, "tim*", "t*"); + assertCounters().multiTermAnalysis(2).segmentUse(2).segmentSkip(1).queryEarlyStop(0); + }); } public void testMultiplePhraseWildcards() throws Exception { - searchAndCheckResultsMultiplePhraseWildcards(new String[]{field(1), field(0), field(3)}, 100, 0, new String[][]{ - new String[]{"e*", "b*"}, - new String[]{"t?e", "utopia"} - }); - searchAndCheckResultsMultiplePhraseWildcards(new String[]{field(1), field(0), field(3)}, 100, 0, new String[][]{ - new String[]{"e*", "b*"}, - new String[]{"d*", "b*"} - }); - searchAndCheckResultsMultiplePhraseWildcards(new String[]{field(1), field(0), field(3)}, 100, 0, new String[][]{ - new String[]{"e*", "b*"}, - new String[]{"t?e", "utopia"}, - new String[]{"d*", "b*"} - }); - expectDifferentScoreForSpanNearQueryWithMultiTermSubset(() -> - searchAndCheckResultsMultiplePhraseWildcards(new String[]{field(1), field(0), field(3)}, 100, 0, new String[][]{ - new String[]{"e*", "b*"}, - new String[]{"b*", "b*"} - })); - expectDifferentScoreForSpanNearQueryWithMultiTermSubset(() -> - searchAndCheckResultsMultiplePhraseWildcards(new String[]{field(1), field(0), field(3)}, 100, 0, new String[][]{ - new String[]{"e*", "b*"}, - new String[]{"b*", "b*"}, - new String[]{"t?e", "utopia"} - })); - searchAndCheckResultsMultiplePhraseWildcards(new String[]{field(1), field(0), field(3)}, 100, 0, new String[][]{ - new String[]{"e*", "b*"}, - new String[]{"e*", "b*"} - }); - searchAndCheckResultsMultiplePhraseWildcards(new String[]{field(1), field(0), field(3)}, 100, 0, new String[][]{ - new String[]{"e*", "b*"}, - new String[]{"t?e", "utopia"}, - new String[]{"e*", "b*"} - }); + searchAndCheckResultsMultiplePhraseWildcards( + new String[] {field(1), field(0), field(3)}, + 100, + 0, + new String[][] { + new String[] {"e*", "b*"}, + new String[] {"t?e", "utopia"} + }); + searchAndCheckResultsMultiplePhraseWildcards( + new String[] {field(1), field(0), field(3)}, + 100, + 0, + new String[][] { + new String[] {"e*", "b*"}, + new String[] {"d*", "b*"} + }); + searchAndCheckResultsMultiplePhraseWildcards( + new String[] {field(1), field(0), field(3)}, + 100, + 0, + new String[][] { + new String[] {"e*", "b*"}, + new String[] {"t?e", "utopia"}, + new String[] {"d*", "b*"} + }); + expectDifferentScoreForSpanNearQueryWithMultiTermSubset( + () -> + searchAndCheckResultsMultiplePhraseWildcards( + new String[] {field(1), field(0), field(3)}, + 100, + 0, + new String[][] { + new String[] {"e*", "b*"}, + new String[] {"b*", "b*"} + })); + expectDifferentScoreForSpanNearQueryWithMultiTermSubset( + () -> + searchAndCheckResultsMultiplePhraseWildcards( + new String[] {field(1), field(0), field(3)}, + 100, + 0, + new String[][] { + new String[] {"e*", "b*"}, + new String[] {"b*", "b*"}, + new String[] {"t?e", "utopia"} + })); + searchAndCheckResultsMultiplePhraseWildcards( + new String[] {field(1), field(0), field(3)}, + 
100, + 0, + new String[][] { + new String[] {"e*", "b*"}, + new String[] {"e*", "b*"} + }); + searchAndCheckResultsMultiplePhraseWildcards( + new String[] {field(1), field(0), field(3)}, + 100, + 0, + new String[][] { + new String[] {"e*", "b*"}, + new String[] {"t?e", "utopia"}, + new String[] {"e*", "b*"} + }); } public void testToString() { @@ -249,7 +283,8 @@ public class TestPhraseWildcardQuery extends LuceneTestCase { for (ScoreDoc scoreDoc : searcher.search(testQuery, MAX_DOCS).scoreDocs) { Explanation explanation = searcher.explain(testQuery, scoreDoc.doc); assertTrue(explanation.getValue().doubleValue() > 0); - assertTrue("Unexpected explanation \"" + explanation.getDescription() + "\"", + assertTrue( + "Unexpected explanation \"" + explanation.getDescription() + "\"", explanation.getDescription().startsWith("weight(phraseWildcard(title:\"t?e b* b*\")")); } @@ -279,11 +314,12 @@ public class TestPhraseWildcardQuery extends LuceneTestCase { } /** - * With two similar multi-terms which expansions are subsets (e.g. "tim*" and "t*"), - * we expect {@link PhraseWildcardQuery} and {@link MultiPhraseQuery} to - * have the same scores, but {@link SpanNearQuery} scores are different. + * With two similar multi-terms which expansions are subsets (e.g. "tim*" and "t*"), we expect + * {@link PhraseWildcardQuery} and {@link MultiPhraseQuery} to have the same scores, but {@link + * SpanNearQuery} scores are different. */ - protected void expectDifferentScoreForSpanNearQueryWithMultiTermSubset(RunnableWithIOException runnable) throws IOException { + protected void expectDifferentScoreForSpanNearQueryWithMultiTermSubset( + RunnableWithIOException runnable) throws IOException { try { differentScoreExpectedForSpanNearQuery = true; runnable.run(); @@ -293,18 +329,24 @@ public class TestPhraseWildcardQuery extends LuceneTestCase { } /** - * Compares {@link PhraseWildcardQuery} to both {@link MultiPhraseQuery} - * and {@link SpanNearQuery}. + * Compares {@link PhraseWildcardQuery} to both {@link MultiPhraseQuery} and {@link + * SpanNearQuery}. */ - protected void searchAndCheckResults(String field, int maxExpansions, String... terms) throws IOException { + protected void searchAndCheckResults(String field, int maxExpansions, String... terms) + throws IOException { for (int slop = 0; slop <= 1; slop++) { searchAndCheckResults(field, maxExpansions, slop, false, terms); searchAndCheckResults(field, maxExpansions, slop, true, terms); } } - protected void searchAndCheckResults(String field, int maxExpansions, int slop, - boolean segmentOptimizationEnabled, String... terms) throws IOException { + protected void searchAndCheckResults( + String field, + int maxExpansions, + int slop, + boolean segmentOptimizationEnabled, + String... 
terms) + throws IOException { searchAndCheckSameResults( phraseWildcardQuery(field, maxExpansions, slop, segmentOptimizationEnabled, terms), multiPhraseQuery(field, maxExpansions, slop, terms), @@ -312,44 +354,64 @@ public class TestPhraseWildcardQuery extends LuceneTestCase { segmentOptimizationEnabled); } - protected void searchAndCheckResultsMultiplePhraseWildcards(String[] fields, int maxExpansions, - int slop, String[][] multiPhraseTerms) throws IOException { - searchAndCheckResultsMultiplePhraseWildcards(fields, maxExpansions, slop, false, multiPhraseTerms); - searchAndCheckResultsMultiplePhraseWildcards(fields, maxExpansions, slop, true, multiPhraseTerms); + protected void searchAndCheckResultsMultiplePhraseWildcards( + String[] fields, int maxExpansions, int slop, String[][] multiPhraseTerms) + throws IOException { + searchAndCheckResultsMultiplePhraseWildcards( + fields, maxExpansions, slop, false, multiPhraseTerms); + searchAndCheckResultsMultiplePhraseWildcards( + fields, maxExpansions, slop, true, multiPhraseTerms); } - protected void searchAndCheckResultsMultiplePhraseWildcards(String[] fields, int maxExpansions, int slop, - boolean segmentOptimizationEnabled, String[][] multiPhraseTerms) throws IOException { + protected void searchAndCheckResultsMultiplePhraseWildcards( + String[] fields, + int maxExpansions, + int slop, + boolean segmentOptimizationEnabled, + String[][] multiPhraseTerms) + throws IOException { BooleanQuery.Builder phraseWildcardQueryBuilder = new BooleanQuery.Builder(); BooleanQuery.Builder multiPhraseQueryBuilder = new BooleanQuery.Builder(); BooleanQuery.Builder spanNearQueryBuilder = new BooleanQuery.Builder(); for (String[] terms : multiPhraseTerms) { - BooleanClause.Occur occur = random().nextBoolean() ? BooleanClause.Occur.MUST : BooleanClause.Occur.SHOULD; - phraseWildcardQueryBuilder.add(disMaxQuery(phraseWildcardQueries(fields, maxExpansions, slop, segmentOptimizationEnabled, terms)), occur); - multiPhraseQueryBuilder.add(disMaxQuery(multiPhraseQueries(fields, maxExpansions, slop, terms)), occur); + BooleanClause.Occur occur = + random().nextBoolean() ? BooleanClause.Occur.MUST : BooleanClause.Occur.SHOULD; + phraseWildcardQueryBuilder.add( + disMaxQuery( + phraseWildcardQueries( + fields, maxExpansions, slop, segmentOptimizationEnabled, terms)), + occur); + multiPhraseQueryBuilder.add( + disMaxQuery(multiPhraseQueries(fields, maxExpansions, slop, terms)), occur); spanNearQueryBuilder.add(disMaxQuery(spanNearQueries(fields, slop, terms)), occur); } searchAndCheckSameResults( phraseWildcardQueryBuilder.build(), multiPhraseQueryBuilder.build(), spanNearQueryBuilder.build(), - segmentOptimizationEnabled - ); + segmentOptimizationEnabled); } protected Query disMaxQuery(Query... disjuncts) { return new DisjunctionMaxQuery(Arrays.asList(disjuncts), 0.1f); } - protected Query[] phraseWildcardQueries(String[] fields, int maxExpansions, int slop, boolean segmentOptimizationEnabled, String... terms) { + protected Query[] phraseWildcardQueries( + String[] fields, + int maxExpansions, + int slop, + boolean segmentOptimizationEnabled, + String... terms) { Query[] queries = new Query[fields.length]; for (int i = 0; i < fields.length; i++) { - queries[i] = phraseWildcardQuery(fields[i], maxExpansions, slop, segmentOptimizationEnabled, terms); + queries[i] = + phraseWildcardQuery(fields[i], maxExpansions, slop, segmentOptimizationEnabled, terms); } return queries; } - protected Query[] multiPhraseQueries(String[] fields, int maxExpansions, int slop, String... 
terms) throws IOException { + protected Query[] multiPhraseQueries( + String[] fields, int maxExpansions, int slop, String... terms) throws IOException { Query[] queries = new Query[fields.length]; for (int i = 0; i < fields.length; i++) { queries[i] = multiPhraseQuery(fields[i], maxExpansions, slop, terms); @@ -365,7 +427,12 @@ public class TestPhraseWildcardQuery extends LuceneTestCase { return queries; } - protected void searchAndCheckSameResults(Query testQuery, Query multiPhraseQuery, Query spanNearQuery, boolean segmentOptimizationEnabled) throws IOException { + protected void searchAndCheckSameResults( + Query testQuery, + Query multiPhraseQuery, + Query spanNearQuery, + boolean segmentOptimizationEnabled) + throws IOException { // Search and compare results with MultiPhraseQuery. // Do not compare the scores if the segment optimization is enabled because // it changes the score (but not the result ranking). @@ -379,33 +446,55 @@ public class TestPhraseWildcardQuery extends LuceneTestCase { searchAndCheckSameResults(testQuery, spanNearQuery, sameScoreExpected); } - protected void searchAndCheckSameResults(Query testQuery, Query referenceQuery, - boolean compareScores) throws IOException { + protected void searchAndCheckSameResults( + Query testQuery, Query referenceQuery, boolean compareScores) throws IOException { ScoreDoc[] testResults = searcher.search(testQuery, MAX_DOCS).scoreDocs; ScoreDoc[] referenceResults = searcher.search(referenceQuery, MAX_DOCS).scoreDocs; - assertEquals("Number of results differ when comparing to " + referenceQuery.getClass().getSimpleName(), - referenceResults.length, testResults.length); + assertEquals( + "Number of results differ when comparing to " + referenceQuery.getClass().getSimpleName(), + referenceResults.length, + testResults.length); if (compareScores) { for (int i = 0; i < testResults.length; i++) { ScoreDoc testResult = testResults[i]; ScoreDoc referenceResult = referenceResults[i]; - assertTrue("Result " + i + " differ when comparing to " + referenceQuery.getClass().getSimpleName() - + "\ntestResults=" + Arrays.toString(testResults) + "\nreferenceResults=" + Arrays.toString(referenceResults), + assertTrue( + "Result " + + i + + " differ when comparing to " + + referenceQuery.getClass().getSimpleName() + + "\ntestResults=" + + Arrays.toString(testResults) + + "\nreferenceResults=" + + Arrays.toString(referenceResults), equals(testResult, referenceResult)); } } else { - Set testResultDocIds = Arrays.stream(testResults).map(scoreDoc -> scoreDoc.doc).collect(Collectors.toSet()); - Set referenceResultDocIds = Arrays.stream(referenceResults).map(scoreDoc -> scoreDoc.doc).collect(Collectors.toSet()); - assertEquals("Results differ when comparing to " + referenceQuery.getClass().getSimpleName() - + " ignoring score\ntestResults=" + Arrays.toString(testResults) + "\nreferenceResults=" + Arrays.toString(referenceResults), - referenceResultDocIds, testResultDocIds); + Set testResultDocIds = + Arrays.stream(testResults).map(scoreDoc -> scoreDoc.doc).collect(Collectors.toSet()); + Set referenceResultDocIds = + Arrays.stream(referenceResults).map(scoreDoc -> scoreDoc.doc).collect(Collectors.toSet()); + assertEquals( + "Results differ when comparing to " + + referenceQuery.getClass().getSimpleName() + + " ignoring score\ntestResults=" + + Arrays.toString(testResults) + + "\nreferenceResults=" + + Arrays.toString(referenceResults), + referenceResultDocIds, + testResultDocIds); } } - protected PhraseWildcardQuery phraseWildcardQuery(String field, int 
maxExpansions, - int slop, boolean segmentOptimizationEnabled, String... terms) { - PhraseWildcardQuery.Builder builder = createPhraseWildcardQueryBuilder(field, maxExpansions, segmentOptimizationEnabled) - .setSlop(slop); + protected PhraseWildcardQuery phraseWildcardQuery( + String field, + int maxExpansions, + int slop, + boolean segmentOptimizationEnabled, + String... terms) { + PhraseWildcardQuery.Builder builder = + createPhraseWildcardQueryBuilder(field, maxExpansions, segmentOptimizationEnabled) + .setSlop(slop); for (String term : terms) { if (term.contains("*") || term.contains("?")) { builder.addMultiTerm(new WildcardQuery(new Term(field, term))); @@ -425,16 +514,17 @@ public class TestPhraseWildcardQuery extends LuceneTestCase { SpanQuery[] spanQueries = new SpanQuery[terms.length]; for (int i = 0; i < terms.length; i++) { String term = terms[i]; - spanQueries[i] = term.contains("*") || term.contains("?") ? - new SpanMultiTermQueryWrapper<>(new WildcardQuery(new Term(field, term))) - : new SpanTermQuery(new Term(field, term)); + spanQueries[i] = + term.contains("*") || term.contains("?") + ? new SpanMultiTermQueryWrapper<>(new WildcardQuery(new Term(field, term))) + : new SpanTermQuery(new Term(field, term)); } return new SpanNearQuery(spanQueries, slop, true); } - protected MultiPhraseQuery multiPhraseQuery(String field, int maxExpansions, int slop, String... terms) throws IOException { - MultiPhraseQuery.Builder builder = new MultiPhraseQuery.Builder() - .setSlop(slop); + protected MultiPhraseQuery multiPhraseQuery( + String field, int maxExpansions, int slop, String... terms) throws IOException { + MultiPhraseQuery.Builder builder = new MultiPhraseQuery.Builder().setSlop(slop); for (String term : terms) { if (term.contains("*") || term.contains("?")) { Term[] expansions = expandMultiTerm(field, term, maxExpansions); @@ -450,7 +540,8 @@ public class TestPhraseWildcardQuery extends LuceneTestCase { return builder.build(); } - protected Term[] expandMultiTerm(String field, String term, int maxExpansions) throws IOException { + protected Term[] expandMultiTerm(String field, String term, int maxExpansions) + throws IOException { if (maxExpansions == 0) { return new Term[0]; } @@ -480,41 +571,30 @@ public class TestPhraseWildcardQuery extends LuceneTestCase { protected void addSegments(RandomIndexWriter iw) throws IOException { // First segment. - addDocs(iw, + addDocs( + iw, doc( field(field(0), "time conversion"), field(field(1), "eric hawk"), - field(field(2), "time travel") - ), + field(field(2), "time travel")), doc( field(field(0), "the blinking books"), field(field(1), "donald ever"), - field(field(2), "time travel") - ), + field(field(2), "time travel")), doc( field(field(0), "the utopia experiment"), field(field(1), "dylan brief"), field(field(2), "utopia"), - field(field(3), "travelling to utopiapolis") - ) - ); + field(field(3), "travelling to utopiapolis"))); iw.commit(); // Second segment. // No field(2). 
- addDocs(iw, - doc( - field(field(0), "serene evasion"), - field(field(1), "eric brown") - ), - doc( - field(field(0), "my blind experiment"), - field(field(1), "eric bright") - ), - doc( - field(field(3), "two times travel") - ) - ); + addDocs( + iw, + doc(field(field(0), "serene evasion"), field(field(1), "eric brown")), + doc(field(field(0), "my blind experiment"), field(field(1), "eric bright")), + doc(field(field(3), "two times travel"))); iw.commit(); } @@ -551,19 +631,35 @@ public class TestPhraseWildcardQuery extends LuceneTestCase { return reader.leaves().size() == 2 ? new AssertCounters() : AssertCounters.NO_OP; } - /** - * Fluent API to assert {@link TestCounters}. - */ + /** Fluent API to assert {@link TestCounters}. */ static class AssertCounters { - static final AssertCounters NO_OP = new AssertCounters() { - AssertCounters singleTermAnalysis(int c) {return this;} - AssertCounters multiTermAnalysis(int c) {return this;} - AssertCounters segmentUse(int c) {return this;} - AssertCounters segmentSkip(int c) {return this;} - AssertCounters queryEarlyStop(int c) {return this;} - AssertCounters expansion(int c) {return this;} - }; + static final AssertCounters NO_OP = + new AssertCounters() { + AssertCounters singleTermAnalysis(int c) { + return this; + } + + AssertCounters multiTermAnalysis(int c) { + return this; + } + + AssertCounters segmentUse(int c) { + return this; + } + + AssertCounters segmentSkip(int c) { + return this; + } + + AssertCounters queryEarlyStop(int c) { + return this; + } + + AssertCounters expansion(int c) { + return this; + } + }; AssertCounters singleTermAnalysis(int expectedCount) { assertEquals(expectedCount, TestCounters.get().singleTermAnalysisCount); diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestTermAutomatonQuery.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestTermAutomatonQuery.java index 5778d960a3c..3c2c83b6f8e 100644 --- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestTermAutomatonQuery.java +++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestTermAutomatonQuery.java @@ -25,7 +25,6 @@ import java.util.Locale; import java.util.Objects; import java.util.Random; import java.util.Set; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.CannedTokenStream; import org.apache.lucene.analysis.MockTokenFilter; @@ -43,8 +42,6 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; -import org.apache.lucene.sandbox.search.TermAutomatonQuery; -import org.apache.lucene.sandbox.search.TokenStreamToTermAutomatonQuery; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; @@ -228,7 +225,7 @@ public class TestTermAutomatonQuery extends LuceneTestCase { q.finish(); // System.out.println("DOT:\n" + q.toDot()); - + assertEquals(4, s.search(q, 1).totalHits.value); w.close(); @@ -263,14 +260,16 @@ public class TestTermAutomatonQuery extends LuceneTestCase { IndexReader r = w.getReader(); IndexSearcher s = newSearcher(r); - TokenStream ts = new CannedTokenStream(new Token[] { - token("fast", 1, 1), - token("speedy", 0, 1), - token("wi", 1, 1), - token("wifi", 0, 2), - token("fi", 1, 1), - token("network", 1, 1) - }); + TokenStream ts = + new CannedTokenStream( + new Token[] { + token("fast", 1, 1), + token("speedy", 0, 1), + token("wi", 1, 1), 
+ token("wifi", 0, 2), + token("fi", 1, 1), + token("network", 1, 1) + }); TermAutomatonQuery q = new TokenStreamToTermAutomatonQuery().toQuery("field", ts); // System.out.println("DOT: " + q.toDot()); @@ -321,9 +320,11 @@ public class TestTermAutomatonQuery extends LuceneTestCase { q.setAccept(s2, true); q.addAnyTransition(s0, s1); q.addTransition(s1, s2, "b"); - expectThrows(IllegalStateException.class, () -> { - q.finish(); - }); + expectThrows( + IllegalStateException.class, + () -> { + q.finish(); + }); } public void testInvalidTrailWithAny() throws Exception { @@ -334,11 +335,13 @@ public class TestTermAutomatonQuery extends LuceneTestCase { q.setAccept(s2, true); q.addTransition(s0, s1, "b"); q.addAnyTransition(s1, s2); - expectThrows(IllegalStateException.class, () -> { - q.finish(); - }); + expectThrows( + IllegalStateException.class, + () -> { + q.finish(); + }); } - + public void testAnyFromTokenStream() throws Exception { Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir); @@ -362,13 +365,15 @@ public class TestTermAutomatonQuery extends LuceneTestCase { IndexReader r = w.getReader(); IndexSearcher s = newSearcher(r); - TokenStream ts = new CannedTokenStream(new Token[] { - token("comes", 1, 1), - token("comes", 0, 2), - token("*", 1, 1), - token("sun", 1, 1), - token("moon", 0, 1) - }); + TokenStream ts = + new CannedTokenStream( + new Token[] { + token("comes", 1, 1), + token("comes", 0, 2), + token("*", 1, 1), + token("sun", 1, 1), + token("moon", 0, 1) + }); TermAutomatonQuery q = new TokenStreamToTermAutomatonQuery().toQuery("field", ts); // System.out.println("DOT: " + q.toDot()); @@ -389,7 +394,8 @@ public class TestTermAutomatonQuery extends LuceneTestCase { private static class RandomSynonymFilter extends TokenFilter { private boolean synNext; private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); - private final PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class); + private final PositionIncrementAttribute posIncAtt = + addAttribute(PositionIncrementAttribute.class); public RandomSynonymFilter(TokenFilter in) { super(in); @@ -402,7 +408,7 @@ public class TestTermAutomatonQuery extends LuceneTestCase { clearAttributes(); restoreState(state); posIncAtt.setPositionIncrement(0); - termAtt.append(""+((char) 97 + random().nextInt(3))); + termAtt.append("" + ((char) 97 + random().nextInt(3))); synNext = false; return true; } @@ -429,32 +435,33 @@ public class TestTermAutomatonQuery extends LuceneTestCase { Directory dir = newDirectory(); // Adds occasional random synonyms: - Analyzer analyzer = new Analyzer() { - @Override - public TokenStreamComponents createComponents(String fieldName) { - MockTokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, true, 100); - tokenizer.setEnableChecks(true); - TokenFilter filt = new MockTokenFilter(tokenizer, MockTokenFilter.EMPTY_STOPSET); - filt = new RandomSynonymFilter(filt); - return new TokenStreamComponents(tokenizer, filt); - } - }; + Analyzer analyzer = + new Analyzer() { + @Override + public TokenStreamComponents createComponents(String fieldName) { + MockTokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, true, 100); + tokenizer.setEnableChecks(true); + TokenFilter filt = new MockTokenFilter(tokenizer, MockTokenFilter.EMPTY_STOPSET); + filt = new RandomSynonymFilter(filt); + return new TokenStreamComponents(tokenizer, filt); + } + }; IndexWriterConfig iwc = newIndexWriterConfig(analyzer); 
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); - for(int i=0;i " + contents); } @@ -466,11 +473,10 @@ public class TestTermAutomatonQuery extends LuceneTestCase { IndexSearcher s = newSearcher(r); // Used to match ANY using MultiPhraseQuery: - Term[] allTerms = new Term[] {new Term("field", "a"), - new Term("field", "b"), - new Term("field", "c")}; + Term[] allTerms = + new Term[] {new Term("field", "a"), new Term("field", "b"), new Term("field", "c")}; int numIters = atLeast(1000); - for(int iter=0;iter strings = new HashSet<>(); - for(int i=0;i 0 && j < numTokens-1 && random().nextInt(5) == 3) { + for (int j = 0; j < numTokens; j++) { + if (j > 0 && j < numTokens - 1 && random().nextInt(5) == 3) { sb.append('*'); } else { sb.append((char) (97 + random().nextInt(3))); @@ -490,11 +496,11 @@ public class TestTermAutomatonQuery extends LuceneTestCase { } String string = sb.toString(); MultiPhraseQuery.Builder mpqb = new MultiPhraseQuery.Builder(); - for(int j=0;j toDocIDs(IndexSearcher s, TopDocs hits) throws IOException { Set result = new HashSet<>(); - for(ScoreDoc hit : hits.scoreDocs) { + for (ScoreDoc hit : hits.scoreDocs) { result.add(s.doc(hit.doc).get("id")); } return result; @@ -602,20 +604,22 @@ public class TestTermAutomatonQuery extends LuceneTestCase { } @Override - public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) + throws IOException { return new ConstantScoreWeight(this, boost) { @Override public Scorer scorer(LeafReaderContext context) throws IOException { int maxDoc = context.reader().maxDoc(); FixedBitSet bits = new FixedBitSet(maxDoc); Random random = new Random(seed ^ context.docBase); - for(int docID=0;docID - * Different implementations will support different features. A strategy should - * document these common elements: + * The SpatialStrategy encapsulates an approach to indexing and searching based on shapes. + * + *
+ * <p>Different implementations will support different features. A strategy should document these
+ * common elements:
+ *
- * <ul>
- * <li>Can it index more than one shape per field?</li>
- * <li>What types of shapes can be indexed?</li>
- * <li>What types of query shapes can be used?</li>
- * <li>What types of query operations are supported?
- * This might vary per shape.</li>
- * <li>Does it use some type of cache? When?</li>
- * </ul>
+ * <ul>
+ *   <li>Can it index more than one shape per field?
+ *   <li>What types of shapes can be indexed?
+ *   <li>What types of query shapes can be used?
+ *   <li>What types of query operations are supported? This might vary per shape.
+ *   <li>Does it use some type of cache? When?
+ * </ul>
+ *
- * If a strategy only supports certain shapes at index or query time, then in
- * general it will throw an exception if given an incompatible one. It will not
- * be coerced into compatibility.
- * <p>
- * Note that a SpatialStrategy is not involved with the Lucene stored field
- * values of shapes, which is immaterial to indexing and search.
- * <p>
- * Thread-safe.
- * <p>
- * This API is marked as experimental, however it is quite stable.
+ * If a strategy only supports certain shapes at index or query time, then in general it will throw
+ * an exception if given an incompatible one. It will not be coerced into compatibility.
+ *
+ * <p>Note that a SpatialStrategy is not involved with the Lucene stored field values of shapes,
+ * which is immaterial to indexing and search.
+ *
+ * <p>Thread-safe.
+ *
+ * <p>
    This API is marked as experimental, however it is quite stable. * * @lucene.experimental */ @@ -58,12 +57,9 @@ public abstract class SpatialStrategy { protected final SpatialContext ctx; private final String fieldName; - /** - * Constructs the spatial strategy with its mandatory arguments. - */ + /** Constructs the spatial strategy with its mandatory arguments. */ public SpatialStrategy(SpatialContext ctx, String fieldName) { - if (ctx == null) - throw new IllegalArgumentException("ctx is required"); + if (ctx == null) throw new IllegalArgumentException("ctx is required"); this.ctx = ctx; if (fieldName == null || fieldName.length() == 0) throw new IllegalArgumentException("fieldName is required"); @@ -75,8 +71,8 @@ public abstract class SpatialStrategy { } /** - * The name of the field or the prefix of them if there are multiple - * fields needed internally. + * The name of the field or the prefix of them if there are multiple fields needed internally. + * * @return Not null. */ public String getFieldName() { @@ -84,15 +80,17 @@ public abstract class SpatialStrategy { } /** - * Returns the IndexableField(s) from the {@code shape} that are to be - * added to the {@link org.apache.lucene.document.Document}. These fields - * are expected to be marked as indexed and not stored. - *

- * Note: If you want to store the shape as a string for retrieval in
- * search results, you could add it like this:
+ * Returns the IndexableField(s) from the {@code shape} that are to be added to the {@link
+ * org.apache.lucene.document.Document}. These fields are expected to be marked as indexed and not
+ * stored.
+ *
+ * <p>Note: If you want to store the shape as a string for retrieval in search results, you
+ * could add it like this:
+ *
 * <pre>document.add(new StoredField(fieldName,ctx.toString(shape)));</pre>
    - * The particular string representation used doesn't matter to the Strategy - * since it doesn't use it. + * + * The particular string representation used doesn't matter to the Strategy since it doesn't use + * it. * * @return Not null nor will it have null elements. * @throws UnsupportedOperationException if given a shape incompatible with the strategy @@ -100,51 +98,54 @@ public abstract class SpatialStrategy { public abstract Field[] createIndexableFields(Shape shape); /** - * See {@link #makeDistanceValueSource(org.locationtech.spatial4j.shape.Point, double)} called with - * a multiplier of 1.0 (i.e. units of degrees). + * See {@link #makeDistanceValueSource(org.locationtech.spatial4j.shape.Point, double)} called + * with a multiplier of 1.0 (i.e. units of degrees). */ public DoubleValuesSource makeDistanceValueSource(Point queryPoint) { return makeDistanceValueSource(queryPoint, 1.0); } /** - * Make a ValueSource returning the distance between the center of the - * indexed shape and {@code queryPoint}. If there are multiple indexed shapes - * then the closest one is chosen. The result is multiplied by {@code multiplier}, which - * conveniently is used to get the desired units. + * Make a ValueSource returning the distance between the center of the indexed shape and {@code + * queryPoint}. If there are multiple indexed shapes then the closest one is chosen. The result is + * multiplied by {@code multiplier}, which conveniently is used to get the desired units. */ public abstract DoubleValuesSource makeDistanceValueSource(Point queryPoint, double multiplier); /** - * Make a Query based principally on {@link org.apache.lucene.spatial.query.SpatialOperation} - * and {@link Shape} from the supplied {@code args}. It should be constant scoring of 1. + * Make a Query based principally on {@link org.apache.lucene.spatial.query.SpatialOperation} and + * {@link Shape} from the supplied {@code args}. It should be constant scoring of 1. * - * @throws UnsupportedOperationException If the strategy does not support the shape in {@code args} - * @throws org.apache.lucene.spatial.query.UnsupportedSpatialOperation If the strategy does not support the {@link - * org.apache.lucene.spatial.query.SpatialOperation} in {@code args}. + * @throws UnsupportedOperationException If the strategy does not support the shape in {@code + * args} + * @throws org.apache.lucene.spatial.query.UnsupportedSpatialOperation If the strategy does not + * support the {@link org.apache.lucene.spatial.query.SpatialOperation} in {@code args}. */ public abstract Query makeQuery(SpatialArgs args); /** - * Returns a ValueSource with values ranging from 1 to 0, depending inversely - * on the distance from {@link #makeDistanceValueSource(org.locationtech.spatial4j.shape.Point,double)}. - * The formula is {@code zScaling/(d + zScaling)} where 'd' is the distance and 'zScaling' is - * one tenth the distance to the farthest edge from the center. Thus the - * scores will be 1 for indexed points at the center of the query shape and as - * low as ~0.1 at its furthest edges. + * Returns a ValueSource with values ranging from 1 to 0, depending inversely on the distance from + * {@link #makeDistanceValueSource(org.locationtech.spatial4j.shape.Point,double)}. The formula is + * {@code zScaling/(d + zScaling)} where 'd' is the distance and 'zScaling' is one tenth the + * distance to the farthest edge from the center. Thus the scores will be 1 for indexed points at + * the center of the query shape and as low as ~0.1 at its furthest edges. 
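+ *
+ * <p>(Editorial worked example, not part of the original javadoc: if the farthest edge lies 100
+ * units from the center, then zScaling = 10, so a point at the center scores 10/(0 + 10) = 1.0,
+ * a point 10 units away scores 10/(10 + 10) = 0.5, and a point on the far edge scores
+ * 10/(100 + 10) = ~0.09, i.e. the "~0.1" mentioned above.)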
*/ public final DoubleValuesSource makeRecipDistanceValueSource(Shape queryShape) { Rectangle bbox = queryShape.getBoundingBox(); - double diagonalDist = ctx.getDistCalc().distance( - ctx.getShapeFactory().pointXY(bbox.getMinX(), bbox.getMinY()), bbox.getMaxX(), bbox.getMaxY()); + double diagonalDist = + ctx.getDistCalc() + .distance( + ctx.getShapeFactory().pointXY(bbox.getMinX(), bbox.getMinY()), + bbox.getMaxX(), + bbox.getMaxY()); double distToEdge = diagonalDist * 0.5; - float c = (float)distToEdge * 0.1f;//one tenth + float c = (float) distToEdge * 0.1f; // one tenth DoubleValuesSource distance = makeDistanceValueSource(queryShape.getCenter(), 1.0); return new ReciprocalDoubleValuesSource(c, distance); } @Override public String toString() { - return getClass().getSimpleName()+" field:"+fieldName+" ctx="+ctx; + return getClass().getSimpleName() + " field:" + fieldName + " ctx=" + ctx; } } diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/bbox/BBoxOverlapRatioValueSource.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/bbox/BBoxOverlapRatioValueSource.java index e83279e0c1e..31dcc9003a2 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/bbox/BBoxOverlapRatioValueSource.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/bbox/BBoxOverlapRatioValueSource.java @@ -17,69 +17,82 @@ package org.apache.lucene.spatial.bbox; import java.util.concurrent.atomic.AtomicReference; - import org.apache.lucene.search.Explanation; import org.apache.lucene.spatial.ShapeValuesSource; import org.locationtech.spatial4j.shape.Rectangle; /** - * The algorithm is implemented as envelope on envelope (rect on rect) overlays rather than - * complex polygon on complex polygon overlays. - *

- * Spatial relevance scoring algorithm:
- * <DL>
- *   <DT>queryArea</DT> <DD>the area of the input query envelope</DD>
- *   <DT>targetArea</DT> <DD>the area of the target envelope (per Lucene document)</DD>
- *   <DT>intersectionArea</DT> <DD>the area of the intersection between the query and target envelopes</DD>
- *   <DT>queryTargetProportion</DT> <DD>A 0-1 factor that divides the score proportion between query and target.
- *   0.5 is evenly.</DD>
- *   <DT>queryRatio</DT> <DD>intersectionArea / queryArea; (see note)</DD>
- *   <DT>targetRatio</DT> <DD>intersectionArea / targetArea; (see note)</DD>
- *   <DT>queryFactor</DT> <DD>queryRatio * queryTargetProportion;</DD>
- *   <DT>targetFactor</DT> <DD>targetRatio * (1 - queryTargetProportion);</DD>
- *   <DT>score</DT> <DD>queryFactor + targetFactor;</DD>
- * </DL>
- * Additionally, note that an optional minimum side length {@code minSideLength} may be used whenever an
- * area is calculated (queryArea, targetArea, intersectionArea). This allows for points or horizontal/vertical lines
- * to be used as the query shape and in such case the descending order should have smallest boxes up front. Without
- * this, a point or line query shape typically scores everything with the same value since there is 0 area.
- * <p>
- * Note: The actual computation of queryRatio and targetRatio is more complicated so that it considers
- * points and lines. Lines have the ratio of overlap, and points are either 1.0 or 0.0 depending on whether
- * it intersects or not.
- * <p>
- * Originally based on Geoportal's
- * SpatialRankingValueSource but modified quite a bit. GeoPortal's algorithm will yield a score of 0
- * if either a line or point is compared, and it doesn't output a 0-1 normalized score (it multiplies the factors),
- * and it doesn't support minSideLength, and it had dateline bugs.
+ * The algorithm is implemented as envelope on envelope (rect on rect) overlays rather than complex
+ * polygon on complex polygon overlays.
+ *
+ * <p>Spatial relevance scoring algorithm:
+ *
+ * <dl>
+ *   <dt>queryArea
+ *   <dd>the area of the input query envelope
+ *   <dt>targetArea
+ *   <dd>the area of the target envelope (per Lucene document)
+ *   <dt>intersectionArea
+ *   <dd>the area of the intersection between the query and target envelopes
+ *   <dt>queryTargetProportion
+ *   <dd>A 0-1 factor that divides the score proportion between query and target. 0.5 is evenly.
+ *   <dt>queryRatio
+ *   <dd>intersectionArea / queryArea; (see note)
+ *   <dt>targetRatio
+ *   <dd>intersectionArea / targetArea; (see note)
+ *   <dt>queryFactor
+ *   <dd>queryRatio * queryTargetProportion;
+ *   <dt>targetFactor
+ *   <dd>targetRatio * (1 - queryTargetProportion);
+ *   <dt>score
+ *   <dd>queryFactor + targetFactor;
+ * </dl>
+ *
+ * Additionally, note that an optional minimum side length {@code minSideLength} may be used
+ * whenever an area is calculated (queryArea, targetArea, intersectionArea). This allows for points
+ * or horizontal/vertical lines to be used as the query shape and in such case the descending order
+ * should have smallest boxes up front. Without this, a point or line query shape typically scores
+ * everything with the same value since there is 0 area.
+ *
+ * <p>Note: The actual computation of queryRatio and targetRatio is more complicated so that it
+ * considers points and lines. Lines have the ratio of overlap, and points are either 1.0 or 0.0
+ * depending on whether it intersects or not.
+ *
+ * <p>
    Originally based on Geoportal's + * SpatialRankingValueSource but modified quite a bit. GeoPortal's algorithm will yield a score + * of 0 if either a line or point is compared, and it doesn't output a 0-1 normalized score (it + * multiplies the factors), and it doesn't support minSideLength, and it had dateline bugs. * * @lucene.experimental */ public class BBoxOverlapRatioValueSource extends BBoxSimilarityValueSource { - private final boolean isGeo;//-180/+180 degrees (not part of identity; attached to parent strategy/field) + // -180/+180 degrees (not part of identity; attached to parent strategy/field) + private final boolean isGeo; private final Rectangle queryExtent; - private final double queryArea;//not part of identity + private final double queryArea; // not part of identity private final double minSideLength; private final double queryTargetProportion; - //TODO option to compute geodetic area + // TODO option to compute geodetic area /** - * * @param rectValueSource mandatory; source of rectangles * @param isGeo True if ctx.isGeo() and thus dateline issues should be attended to * @param queryExtent mandatory; the query rectangle * @param queryTargetProportion see class javadocs. Between 0 and 1. * @param minSideLength see class javadocs. 0.0 will effectively disable. */ - public BBoxOverlapRatioValueSource(ShapeValuesSource rectValueSource, boolean isGeo, Rectangle queryExtent, - double queryTargetProportion, double minSideLength) { + public BBoxOverlapRatioValueSource( + ShapeValuesSource rectValueSource, + boolean isGeo, + Rectangle queryExtent, + double queryTargetProportion, + double minSideLength) { super(rectValueSource); this.isGeo = isGeo; this.minSideLength = minSideLength; @@ -91,8 +104,10 @@ public class BBoxOverlapRatioValueSource extends BBoxSimilarityValueSource { throw new IllegalArgumentException("queryTargetProportion must be >= 0 and <= 1"); } - /** Construct with 75% weighting towards target (roughly GeoPortal's default), geo degrees assumed, no - * minimum side length. */ + /** + * Construct with 75% weighting towards target (roughly GeoPortal's default), geo degrees assumed, + * no minimum side length. + */ public BBoxOverlapRatioValueSource(ShapeValuesSource rectValueSource, Rectangle queryExtent) { this(rectValueSource, true, queryExtent, 0.25, 0.0); } @@ -137,7 +152,7 @@ public class BBoxOverlapRatioValueSource extends BBoxSimilarityValueSource { if (exp != null) { exp.set(Explanation.noMatch("No intersection")); } - return 0;//no intersection + return 0; // no intersection } // calculate "width": the intersection width between two boxes. 
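Editorial aside between hunks, not part of the patch: a worked instance of the formulas from the
class javadoc above, with made-up boxes and the 0.25 queryTargetProportion used by the two-argument
convenience constructor shown above.

  // query  box: x in [0,10],  y in [0,10]  -> queryArea  = 100
  // target box: x in [5,15],  y in [0,10]  -> targetArea = 100
  // intersection: x in [5,10], y in [0,10] -> intersectionArea = 50
  double queryRatio = 50.0 / 100.0;               // 0.5
  double targetRatio = 50.0 / 100.0;              // 0.5
  double queryFactor = queryRatio * 0.25;         // 0.125
  double targetFactor = targetRatio * (1 - 0.25); // 0.375
  double score = queryFactor + targetFactor;      // 0.5, normalized to [0,1]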
@@ -146,45 +161,44 @@ public class BBoxOverlapRatioValueSource extends BBoxSimilarityValueSource { Rectangle a = queryExtent; Rectangle b = target; if (a.getCrossesDateLine() == b.getCrossesDateLine()) { - //both either cross or don't + // both either cross or don't double left = Math.max(a.getMinX(), b.getMinX()); double right = Math.min(a.getMaxX(), b.getMaxX()); - if (!a.getCrossesDateLine()) {//both don't + if (!a.getCrossesDateLine()) { // both don't if (left <= right) { width = right - left; - } else if (isGeo && (Math.abs(a.getMinX()) == 180 || Math.abs(a.getMaxX()) == 180) + } else if (isGeo + && (Math.abs(a.getMinX()) == 180 || Math.abs(a.getMaxX()) == 180) && (Math.abs(b.getMinX()) == 180 || Math.abs(b.getMaxX()) == 180)) { - width = 0;//both adjacent to dateline + width = 0; // both adjacent to dateline } else { if (exp != null) { exp.set(Explanation.noMatch("No intersection")); } - return 0;//no intersection + return 0; // no intersection } - } else {//both cross + } else { // both cross width = right - left + 360; } } else { - if (!a.getCrossesDateLine()) {//then flip + if (!a.getCrossesDateLine()) { // then flip a = target; b = queryExtent; } - //a crosses, b doesn't + // a crosses, b doesn't double qryWestLeft = Math.max(a.getMinX(), b.getMinX()); double qryWestRight = b.getMaxX(); - if (qryWestLeft < qryWestRight) - width += qryWestRight - qryWestLeft; + if (qryWestLeft < qryWestRight) width += qryWestRight - qryWestLeft; double qryEastLeft = b.getMinX(); double qryEastRight = Math.min(a.getMaxX(), b.getMaxX()); - if (qryEastLeft < qryEastRight) - width += qryEastRight - qryEastLeft; + if (qryEastLeft < qryEastRight) width += qryEastRight - qryEastLeft; if (qryWestLeft > qryWestRight && qryEastLeft > qryEastRight) { if (exp != null) { exp.set(Explanation.noMatch("No intersection")); } - return 0;//no intersection + return 0; // no intersection } } } @@ -194,12 +208,12 @@ public class BBoxOverlapRatioValueSource extends BBoxSimilarityValueSource { double queryRatio; if (queryArea > 0) { queryRatio = intersectionArea / queryArea; - } else if (queryExtent.getHeight() > 0) {//vert line + } else if (queryExtent.getHeight() > 0) { // vert line queryRatio = height / queryExtent.getHeight(); - } else if (queryExtent.getWidth() > 0) {//horiz line + } else if (queryExtent.getWidth() > 0) { // horiz line queryRatio = width / queryExtent.getWidth(); } else { - queryRatio = queryExtent.relate(target).intersects() ? 1 : 0;//could be optimized + queryRatio = queryExtent.relate(target).intersects() ? 1 : 0; // could be optimized } double targetArea = calcArea(target.getWidth(), target.getHeight()); @@ -207,12 +221,12 @@ public class BBoxOverlapRatioValueSource extends BBoxSimilarityValueSource { double targetRatio; if (targetArea > 0) { targetRatio = intersectionArea / targetArea; - } else if (target.getHeight() > 0) {//vert line + } else if (target.getHeight() > 0) { // vert line targetRatio = height / target.getHeight(); - } else if (target.getWidth() > 0) {//horiz line + } else if (target.getWidth() > 0) { // horiz line targetRatio = width / target.getWidth(); } else { - targetRatio = target.relate(queryExtent).intersects() ? 1 : 0;//could be optimized + targetRatio = target.relate(queryExtent).intersects() ? 
1 : 0; // could be optimized } assert queryRatio >= 0 && queryRatio <= 1 : queryRatio; assert targetRatio >= 0 && targetRatio <= 1 : targetRatio; @@ -223,20 +237,28 @@ public class BBoxOverlapRatioValueSource extends BBoxSimilarityValueSource { double targetFactor = targetRatio * (1.0 - queryTargetProportion); double score = queryFactor + targetFactor; - if (exp!=null) { - String minSideDesc = minSideLength > 0.0 ? " (minSide="+minSideLength+")" : ""; - exp.set(Explanation.match((float) score, - this.getClass().getSimpleName()+": queryFactor + targetFactor", - Explanation.match((float)intersectionArea, "IntersectionArea" + minSideDesc, - Explanation.match((float)width, "width"), - Explanation.match((float)height, "height"), - Explanation.match((float)queryTargetProportion, "queryTargetProportion")), - Explanation.match((float)queryFactor, "queryFactor", - Explanation.match((float)targetRatio, "ratio"), - Explanation.match((float)queryArea, "area of " + queryExtent + minSideDesc)), - Explanation.match((float)targetFactor, "targetFactor", - Explanation.match((float)targetRatio, "ratio"), - Explanation.match((float)targetArea, "area of " + target + minSideDesc)))); + if (exp != null) { + String minSideDesc = minSideLength > 0.0 ? " (minSide=" + minSideLength + ")" : ""; + exp.set( + Explanation.match( + (float) score, + this.getClass().getSimpleName() + ": queryFactor + targetFactor", + Explanation.match( + (float) intersectionArea, + "IntersectionArea" + minSideDesc, + Explanation.match((float) width, "width"), + Explanation.match((float) height, "height"), + Explanation.match((float) queryTargetProportion, "queryTargetProportion")), + Explanation.match( + (float) queryFactor, + "queryFactor", + Explanation.match((float) targetRatio, "ratio"), + Explanation.match((float) queryArea, "area of " + queryExtent + minSideDesc)), + Explanation.match( + (float) targetFactor, + "targetFactor", + Explanation.match((float) targetRatio, "ratio"), + Explanation.match((float) targetArea, "area of " + target + minSideDesc)))); } return score; @@ -246,5 +268,4 @@ public class BBoxOverlapRatioValueSource extends BBoxSimilarityValueSource { private double calcArea(double width, double height) { return Math.max(minSideLength, width) * Math.max(minSideLength, height); } - } diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/bbox/BBoxSimilarityValueSource.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/bbox/BBoxSimilarityValueSource.java index d6597c67224..03fa53f0898 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/bbox/BBoxSimilarityValueSource.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/bbox/BBoxSimilarityValueSource.java @@ -18,7 +18,6 @@ package org.apache.lucene.spatial.bbox; import java.io.IOException; import java.util.concurrent.atomic.AtomicReference; - import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DoubleValues; import org.apache.lucene.search.DoubleValuesSource; @@ -29,12 +28,10 @@ import org.apache.lucene.spatial.ShapeValuesSource; import org.locationtech.spatial4j.shape.Rectangle; /** - * A base class for calculating a spatial relevance rank per document from a provided - * {@link ShapeValuesSource} returning a {@link - * org.locationtech.spatial4j.shape.Rectangle} per-document. - *

- * Implementers: remember to implement equals and hashCode if you have
- * fields!
+ * A base class for calculating a spatial relevance rank per document from a provided {@link
+ * ShapeValuesSource} returning a {@link org.locationtech.spatial4j.shape.Rectangle} per-document.
+ *
+ * <p>
    Implementers: remember to implement equals and hashCode if you have fields! * * @lucene.experimental */ @@ -53,32 +50,42 @@ public abstract class BBoxSimilarityValueSource extends DoubleValuesSource { @Override public String toString() { - return getClass().getSimpleName()+"(" + bboxValueSource.toString() + "," + similarityDescription() + ")"; + return getClass().getSimpleName() + + "(" + + bboxValueSource.toString() + + "," + + similarityDescription() + + ")"; } - /** A comma-separated list of configurable items of the subclass to put into {@link #toString()}. */ + /** + * A comma-separated list of configurable items of the subclass to put into {@link #toString()}. + */ protected abstract String similarityDescription(); @Override - public DoubleValues getValues(LeafReaderContext readerContext, DoubleValues scores) throws IOException { + public DoubleValues getValues(LeafReaderContext readerContext, DoubleValues scores) + throws IOException { final ShapeValues shapeValues = bboxValueSource.getValues(readerContext); - return DoubleValues.withDefault(new DoubleValues() { - @Override - public double doubleValue() throws IOException { - return score((Rectangle) shapeValues.value(), null); - } - - @Override - public boolean advanceExact(int doc) throws IOException { - return shapeValues.advanceExact(doc); - } - }, 0); + return DoubleValues.withDefault( + new DoubleValues() { + @Override + public double doubleValue() throws IOException { + return score((Rectangle) shapeValues.value(), null); + } + @Override + public boolean advanceExact(int doc) throws IOException { + return shapeValues.advanceExact(doc); + } + }, + 0); } /** * Return a relevancy score. If {@code exp} is provided then diagnostic information is added. + * * @param rect The indexed rectangle; not null. * @param exp Optional diagnostic holder. * @return a score. 
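Editorial aside, not part of the patch: a minimal sketch of a concrete subclass, assuming
(consistently with the calls above) that the two abstract members are similarityDescription() and
score(Rectangle, AtomicReference&lt;Explanation&gt;). The class name and scoring rule are invented.

  // Scores each document by the planar area of its indexed bounding box; illustration only.
  class AreaSimilarityValueSource extends BBoxSimilarityValueSource {
    AreaSimilarityValueSource(ShapeValuesSource bboxValueSource) {
      super(bboxValueSource);
    }

    @Override
    protected String similarityDescription() {
      return "area"; // appears in toString()
    }

    @Override
    protected double score(Rectangle rect, AtomicReference<Explanation> exp) {
      double area = rect.getWidth() * rect.getHeight();
      if (exp != null) {
        exp.set(Explanation.match((float) area, "area of " + rect));
      }
      return area; // no extra fields, so the base equals/hashCode suffice
    }
  }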
@@ -88,7 +95,7 @@ public abstract class BBoxSimilarityValueSource extends DoubleValuesSource { @Override public boolean equals(Object o) { if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false;//same class + if (o == null || getClass() != o.getClass()) return false; // same class BBoxSimilarityValueSource that = (BBoxSimilarityValueSource) o; @@ -103,8 +110,13 @@ public abstract class BBoxSimilarityValueSource extends DoubleValuesSource { } @Override - public Explanation explain(LeafReaderContext ctx, int docId, Explanation scoreExplanation) throws IOException { - DoubleValues dv = getValues(ctx, DoubleValuesSource.constant(scoreExplanation.getValue().doubleValue()).getValues(ctx, null)); + public Explanation explain(LeafReaderContext ctx, int docId, Explanation scoreExplanation) + throws IOException { + DoubleValues dv = + getValues( + ctx, + DoubleValuesSource.constant(scoreExplanation.getValue().doubleValue()) + .getValues(ctx, null)); if (dv.advanceExact(docId)) { AtomicReference explanation = new AtomicReference<>(); final ShapeValues shapeValues = bboxValueSource.getValues(ctx); diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/bbox/BBoxStrategy.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/bbox/BBoxStrategy.java index 5029d0e1b0f..87db68a9fa1 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/bbox/BBoxStrategy.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/bbox/BBoxStrategy.java @@ -42,51 +42,48 @@ import org.locationtech.spatial4j.shape.Point; import org.locationtech.spatial4j.shape.Rectangle; import org.locationtech.spatial4j.shape.Shape; - /** - * A SpatialStrategy for indexing and searching Rectangles by storing its - * coordinates in numeric fields. It supports all {@link SpatialOperation}s and - * has a custom overlap relevancy. It is based on GeoPortal's SpatialClauseAdapter. - *

- * Characteristics:
+ *
+ * <p>Characteristics:
+ *
- * <ul>
- * <li>Only indexes Rectangles; just one per field value. Other shapes can be provided
- * and the bounding box will be used.</li>
- * <li>Can query only by a Rectangle. Providing other shapes is an error.</li>
- * <li>Supports most {@link SpatialOperation}s but not Overlaps.</li>
- * <li>Uses the DocValues API for any sorting / relevancy.</li>
- * </ul>
+ * <ul>
+ *   <li>Only indexes Rectangles; just one per field value. Other shapes can be provided and the
+ *       bounding box will be used.
+ *   <li>Can query only by a Rectangle. Providing other shapes is an error.
+ *   <li>Supports most {@link SpatialOperation}s but not Overlaps.
+ *   <li>Uses the DocValues API for any sorting / relevancy.
+ * </ul>
- * <p>
- * Implementation:
- * <p>
- * This uses 4 double fields for minX, maxX, minY, maxY
- * and a boolean to mark a dateline cross. Depending on the particular {@link
- * SpatialOperation}s, there are a variety of range queries on {@link DoublePoint}s to be
- * done.
- * The {@link #makeOverlapRatioValueSource(org.locationtech.spatial4j.shape.Rectangle, double)}
- * works by calculating the query bbox overlap percentage against the indexed
- * shape overlap percentage. The indexed shape's coordinates are retrieved from
- * {@link org.apache.lucene.index.LeafReader#getNumericDocValues}.
+ *
+ * <p>Implementation:
+ *
+ * <p>
    This uses 4 double fields for minX, maxX, minY, maxY and a boolean to mark a dateline cross. + * Depending on the particular {@link SpatialOperation}s, there are a variety of range queries on + * {@link DoublePoint}s to be done. The {@link + * #makeOverlapRatioValueSource(org.locationtech.spatial4j.shape.Rectangle, double)} works by + * calculating the query bbox overlap percentage against the indexed shape overlap percentage. The + * indexed shape's coordinates are retrieved from {@link + * org.apache.lucene.index.LeafReader#getNumericDocValues}. * * @lucene.experimental */ public class BBoxStrategy extends SpatialStrategy { - // note: we use a FieldType to articulate the options we want on the field. We don't use it as-is with a Field, we + // note: we use a FieldType to articulate the options we want on the field. We don't use it as-is + // with a Field, we // create more than one Field. - /** - * pointValues, docValues, and nothing else. - */ + /** pointValues, docValues, and nothing else. */ public static FieldType DEFAULT_FIELDTYPE; static { // Default: pointValues + docValues FieldType type = new FieldType(); - type.setDimensions(1, Double.BYTES);//pointValues (assume Double) - type.setDocValuesType(DocValuesType.NUMERIC);//docValues + type.setDimensions(1, Double.BYTES); // pointValues (assume Double) + type.setDocValuesType(DocValuesType.NUMERIC); // docValues type.setStored(false); type.freeze(); DEFAULT_FIELDTYPE = type; @@ -96,7 +93,7 @@ public class BBoxStrategy extends SpatialStrategy { public static final String SUFFIX_MAXX = "__maxX"; public static final String SUFFIX_MINY = "__minY"; public static final String SUFFIX_MAXY = "__maxY"; - public static final String SUFFIX_XDL = "__xdl"; + public static final String SUFFIX_XDL = "__xdl"; /* * The Bounding Box gets stored as four fields for x/y min/max and a flag @@ -109,7 +106,8 @@ public class BBoxStrategy extends SpatialStrategy { final String field_maxY; final String field_xdl; // crosses dateline - private final FieldType optionsFieldType;//from constructor; aggregate field type used to express all options + // from constructor; aggregate field type used to express all options + private final FieldType optionsFieldType; private final int fieldsLen; private final boolean hasStored; private final boolean hasDocVals; @@ -117,17 +115,17 @@ public class BBoxStrategy extends SpatialStrategy { private final FieldType xdlFieldType; /** - * Creates a new {@link BBoxStrategy} instance that uses {@link DoublePoint} and {@link DoublePoint#newRangeQuery} + * Creates a new {@link BBoxStrategy} instance that uses {@link DoublePoint} and {@link + * DoublePoint#newRangeQuery} */ public static BBoxStrategy newInstance(SpatialContext ctx, String fieldNamePrefix) { return new BBoxStrategy(ctx, fieldNamePrefix, DEFAULT_FIELDTYPE); } /** - * Creates this strategy. - * {@code fieldType} is used to customize the indexing options of the 4 number fields, and to a lesser degree the XDL - * field too. Search requires pointValues (or legacy numerics), and relevancy requires docValues. If these features - * aren't needed then disable them. + * Creates this strategy. {@code fieldType} is used to customize the indexing options of the 4 + * number fields, and to a lesser degree the XDL field too. Search requires pointValues (or legacy + * numerics), and relevancy requires docValues. If these features aren't needed then disable them. 
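+ *
+ * <p>(Editorial usage sketch, not part of the patch; {@code ctx}, {@code shape} and
+ * {@code queryBox} are assumed to exist:)
+ *
+ * <pre>
+ * BBoxStrategy strategy = BBoxStrategy.newInstance(ctx, "bbox");
+ * Document doc = new Document();
+ * for (Field f : strategy.createIndexableFields(shape)) {
+ *   doc.add(f);
+ * }
+ * Query query = strategy.makeQuery(new SpatialArgs(SpatialOperation.Intersects, queryBox));
+ * </pre>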
*/ public BBoxStrategy(SpatialContext ctx, String fieldNamePrefix, FieldType fieldType) { super(ctx, fieldNamePrefix); @@ -163,15 +161,17 @@ public class BBoxStrategy extends SpatialStrategy { this.fieldsLen = numQuads * 4 + (xdlFieldType != null ? 1 : 0); } - /** Returns a field type representing the set of field options. This is identical to what was passed into the - * constructor. It's frozen. */ + /** + * Returns a field type representing the set of field options. This is identical to what was + * passed into the constructor. It's frozen. + */ public FieldType getFieldType() { return optionsFieldType; } - //--------------------------------- + // --------------------------------- // Indexing - //--------------------------------- + // --------------------------------- @Override public Field[] createIndexableFields(Shape shape) { @@ -200,50 +200,52 @@ public class BBoxStrategy extends SpatialStrategy { fields[++idx] = new DoublePoint(field_maxY, bbox.getMaxY()); } if (xdlFieldType != null) { - fields[++idx] = new Field(field_xdl, bbox.getCrossesDateLine()?"T":"F", xdlFieldType); + fields[++idx] = new Field(field_xdl, bbox.getCrossesDateLine() ? "T" : "F", xdlFieldType); } assert idx == fields.length - 1; return fields; } - - //--------------------------------- + // --------------------------------- // Value Source / Relevancy - //--------------------------------- + // --------------------------------- - /** - * Provides access to each rectangle per document as a {@link ShapeValuesSource} - */ //TODO raise to SpatialStrategy + /** Provides access to each rectangle per document as a {@link ShapeValuesSource} */ + // TODO raise to SpatialStrategy public ShapeValuesSource makeShapeValueSource() { return new BBoxValueSource(this); } @Override public DoubleValuesSource makeDistanceValueSource(Point queryPoint, double multiplier) { - //TODO if makeShapeValueSource gets lifted to the top; this could become a generic impl. + // TODO if makeShapeValueSource gets lifted to the top; this could become a generic impl. return new DistanceToShapeValueSource(makeShapeValueSource(), queryPoint, multiplier, ctx); } - /** Returns a similarity based on {@link BBoxOverlapRatioValueSource}. This is just a - * convenience method. */ - public DoubleValuesSource makeOverlapRatioValueSource(Rectangle queryBox, double queryTargetProportion) { + /** + * Returns a similarity based on {@link BBoxOverlapRatioValueSource}. This is just a convenience + * method. + */ + public DoubleValuesSource makeOverlapRatioValueSource( + Rectangle queryBox, double queryTargetProportion) { return new BBoxOverlapRatioValueSource( makeShapeValueSource(), ctx.isGeo(), queryBox, queryTargetProportion, 0.0); } - //--------------------------------- + // --------------------------------- // Query Building - //--------------------------------- + // --------------------------------- // Utility on SpatialStrategy? -// public Query makeQueryWithValueSource(SpatialArgs args, ValueSource valueSource) { -// return new CustomScoreQuery(makeQuery(args), new FunctionQuery(valueSource)); - //or... -// return new BooleanQuery.Builder() -// .add(new FunctionQuery(valueSource), BooleanClause.Occur.MUST)//matches everything and provides score -// .add(filterQuery, BooleanClause.Occur.FILTER)//filters (score isn't used) -// .build(); -// } + // public Query makeQueryWithValueSource(SpatialArgs args, ValueSource valueSource) { + // return new CustomScoreQuery(makeQuery(args), new FunctionQuery(valueSource)); + // or... 
+ // return new BooleanQuery.Builder() + // .add(new FunctionQuery(valueSource), BooleanClause.Occur.MUST)//matches everything and + // provides score + // .add(filterQuery, BooleanClause.Occur.FILTER)//filters (score isn't used) + // .build(); + // } @Override public Query makeQuery(SpatialArgs args) { @@ -257,15 +259,15 @@ public class BBoxStrategy extends SpatialStrategy { // Useful for understanding Relations: // http://edndoc.esri.com/arcsde/9.1/general_topics/understand_spatial_relations.htm SpatialOperation op = args.getOperation(); - if( op == SpatialOperation.BBoxIntersects ) spatial = makeIntersects(bbox); - else if( op == SpatialOperation.BBoxWithin ) spatial = makeWithin(bbox); - else if( op == SpatialOperation.Contains ) spatial = makeContains(bbox); - else if( op == SpatialOperation.Intersects ) spatial = makeIntersects(bbox); - else if( op == SpatialOperation.IsEqualTo ) spatial = makeEquals(bbox); - else if( op == SpatialOperation.IsDisjointTo ) spatial = makeDisjoint(bbox); - else if( op == SpatialOperation.IsWithin ) spatial = makeWithin(bbox); - else { //no Overlaps support yet - throw new UnsupportedSpatialOperation(op); + if (op == SpatialOperation.BBoxIntersects) spatial = makeIntersects(bbox); + else if (op == SpatialOperation.BBoxWithin) spatial = makeWithin(bbox); + else if (op == SpatialOperation.Contains) spatial = makeContains(bbox); + else if (op == SpatialOperation.Intersects) spatial = makeIntersects(bbox); + else if (op == SpatialOperation.IsEqualTo) spatial = makeEquals(bbox); + else if (op == SpatialOperation.IsDisjointTo) spatial = makeDisjoint(bbox); + else if (op == SpatialOperation.IsWithin) spatial = makeWithin(bbox); + else { // no Overlaps support yet + throw new UnsupportedSpatialOperation(op); } return new ConstantScoreQuery(spatial); } @@ -278,7 +280,8 @@ public class BBoxStrategy extends SpatialStrategy { Query makeContains(Rectangle bbox) { // general case - // docMinX <= queryExtent.getMinX() AND docMinY <= queryExtent.getMinY() AND docMaxX >= queryExtent.getMaxX() AND docMaxY >= queryExtent.getMaxY() + // docMinX <= queryExtent.getMinX() AND docMinY <= queryExtent.getMinY() AND docMaxX >= + // queryExtent.getMaxX() AND docMaxY >= queryExtent.getMaxY() // Y conditions // docMinY <= queryExtent.getMinY() AND docMaxY >= queryExtent.getMaxY() @@ -314,9 +317,12 @@ public class BBoxStrategy extends SpatialStrategy { Query qEdgeDL = null; if (bbox.getMinX() == bbox.getMaxX() && Math.abs(bbox.getMinX()) == 180) { - double edge = bbox.getMinX() * -1;//opposite dateline edge - qEdgeDL = makeQuery(BooleanClause.Occur.SHOULD, - makeNumberTermQuery(field_minX, edge), makeNumberTermQuery(field_maxX, edge)); + double edge = bbox.getMinX() * -1; // opposite dateline edge + qEdgeDL = + makeQuery( + BooleanClause.Occur.SHOULD, + makeNumberTermQuery(field_minX, edge), + makeNumberTermQuery(field_maxX, edge)); } // apply the non-XDL and XDL conditions @@ -333,10 +339,14 @@ public class BBoxStrategy extends SpatialStrategy { // docMinXLeft <= queryExtent.getMinX() AND docMaxXRight >= queryExtent.getMaxX() Query qXDLLeft = this.makeNumericRangeQuery(field_minX, null, bbox.getMinX(), false, true); Query qXDLRight = this.makeNumericRangeQuery(field_maxX, bbox.getMaxX(), null, true, false); - Query qXDLLeftRight = this.makeXDL(true, this.makeQuery(BooleanClause.Occur.MUST, qXDLLeft, qXDLRight)); + Query qXDLLeftRight = + this.makeXDL(true, this.makeQuery(BooleanClause.Occur.MUST, qXDLLeft, qXDLRight)); - Query qWorld = makeQuery(BooleanClause.Occur.MUST, - 
makeNumberTermQuery(field_minX, -180), makeNumberTermQuery(field_maxX, 180)); + Query qWorld = + makeQuery( + BooleanClause.Occur.MUST, + makeNumberTermQuery(field_minX, -180), + makeNumberTermQuery(field_maxX, 180)); xConditions = makeQuery(BooleanClause.Occur.SHOULD, qXDLLeftRight, qWorld); } @@ -353,7 +363,8 @@ public class BBoxStrategy extends SpatialStrategy { Query makeDisjoint(Rectangle bbox) { // general case - // docMinX > queryExtent.getMaxX() OR docMaxX < queryExtent.getMinX() OR docMinY > queryExtent.getMaxY() OR docMaxY < queryExtent.getMinY() + // docMinX > queryExtent.getMaxX() OR docMaxX < queryExtent.getMinX() OR docMinY > + // queryExtent.getMaxY() OR docMaxY < queryExtent.getMinY() // Y conditions // docMinY > queryExtent.getMaxY() OR docMaxY < queryExtent.getMinY() @@ -370,7 +381,7 @@ public class BBoxStrategy extends SpatialStrategy { // X Conditions for documents that do not cross the date line, // docMinX > queryExtent.getMaxX() OR docMaxX < queryExtent.getMinX() Query qMinX = this.makeNumericRangeQuery(field_minX, bbox.getMaxX(), null, false, false); - if (bbox.getMinX() == -180.0 && ctx.isGeo()) {//touches dateline; -180 == 180 + if (bbox.getMinX() == -180.0 && ctx.isGeo()) { // touches dateline; -180 == 180 BooleanQuery.Builder bq = new BooleanQuery.Builder(); bq.add(qMinX, BooleanClause.Occur.MUST); bq.add(makeNumberTermQuery(field_maxX, 180.0), BooleanClause.Occur.MUST_NOT); @@ -378,7 +389,7 @@ public class BBoxStrategy extends SpatialStrategy { } Query qMaxX = this.makeNumericRangeQuery(field_maxX, null, bbox.getMinX(), false, false); - if (bbox.getMaxX() == 180.0 && ctx.isGeo()) {//touches dateline; -180 == 180 + if (bbox.getMaxX() == 180.0 && ctx.isGeo()) { // touches dateline; -180 == 180 BooleanQuery.Builder bq = new BooleanQuery.Builder(); bq.add(qMaxX, BooleanClause.Occur.MUST); bq.add(makeNumberTermQuery(field_minX, -180.0), BooleanClause.Occur.MUST_NOT); @@ -396,10 +407,14 @@ public class BBoxStrategy extends SpatialStrategy { // (docMinXLeft > queryExtent.getMaxX() OR docMaxXLeft < queryExtent.getMinX()) AND // (docMinXRight > queryExtent.getMaxX() OR docMaxXRight < queryExtent.getMinX()) // where: docMaxXLeft = 180.0, docMinXRight = -180.0 - // (docMaxXLeft < queryExtent.getMinX()) equates to (180.0 < queryExtent.getMinX()) and is ignored - // (docMinXRight > queryExtent.getMaxX()) equates to (-180.0 > queryExtent.getMaxX()) and is ignored - Query qMinXLeft = this.makeNumericRangeQuery(field_minX, bbox.getMaxX(), null, false, false); - Query qMaxXRight = this.makeNumericRangeQuery(field_maxX, null, bbox.getMinX(), false, false); + // (docMaxXLeft < queryExtent.getMinX()) equates to (180.0 < queryExtent.getMinX()) and is + // ignored + // (docMinXRight > queryExtent.getMaxX()) equates to (-180.0 > queryExtent.getMaxX()) and is + // ignored + Query qMinXLeft = + this.makeNumericRangeQuery(field_minX, bbox.getMaxX(), null, false, false); + Query qMaxXRight = + this.makeNumericRangeQuery(field_maxX, null, bbox.getMinX(), false, false); Query qLeftRight = this.makeQuery(BooleanClause.Occur.MUST, qMinXLeft, qMaxXRight); Query qXDL = this.makeXDL(true, qLeftRight); @@ -411,7 +426,8 @@ public class BBoxStrategy extends SpatialStrategy { // X Conditions for documents that do not cross the date line, // the document must be disjoint to both the left and right query portions - // (docMinX > queryExtent.getMaxX()Left OR docMaxX < queryExtent.getMinX()) AND (docMinX > queryExtent.getMaxX() OR docMaxX < queryExtent.getMinX()Left) + // (docMinX > 
queryExtent.getMaxX()Left OR docMaxX < queryExtent.getMinX()) AND (docMinX > + // queryExtent.getMaxX() OR docMaxX < queryExtent.getMinX()Left) // where: queryExtent.getMaxX()Left = 180.0, queryExtent.getMinX()Left = -180.0 Query qMinXLeft = this.makeNumericRangeQuery(field_minX, 180.0, null, false, false); Query qMaxXLeft = this.makeNumericRangeQuery(field_maxX, null, bbox.getMinX(), false, false); @@ -437,7 +453,8 @@ public class BBoxStrategy extends SpatialStrategy { */ Query makeEquals(Rectangle bbox) { - // docMinX = queryExtent.getMinX() AND docMinY = queryExtent.getMinY() AND docMaxX = queryExtent.getMaxX() AND docMaxY = queryExtent.getMaxY() + // docMinX = queryExtent.getMinX() AND docMinY = queryExtent.getMinY() AND docMaxX = + // queryExtent.getMaxX() AND docMaxY = queryExtent.getMaxY() Query qMinX = makeNumberTermQuery(field_minX, bbox.getMinX()); Query qMinY = makeNumberTermQuery(field_minY, bbox.getMinY()); Query qMaxX = makeNumberTermQuery(field_maxX, bbox.getMaxX()); @@ -473,10 +490,10 @@ public class BBoxStrategy extends SpatialStrategy { Query qDisjoint = makeDisjoint(bbox); qNotDisjoint.add(qDisjoint, BooleanClause.Occur.MUST_NOT); - //Query qDisjoint = makeDisjoint(); - //BooleanQuery qNotDisjoint = new BooleanQuery(); - //qNotDisjoint.add(new MatchAllDocsQuery(),BooleanClause.Occur.SHOULD); - //qNotDisjoint.add(qDisjoint,BooleanClause.Occur.MUST_NOT); + // Query qDisjoint = makeDisjoint(); + // BooleanQuery qNotDisjoint = new BooleanQuery(); + // qNotDisjoint.add(new MatchAllDocsQuery(),BooleanClause.Occur.SHOULD); + // qNotDisjoint.add(qDisjoint,BooleanClause.Occur.MUST_NOT); return qNotDisjoint.build(); } @@ -490,8 +507,7 @@ public class BBoxStrategy extends SpatialStrategy { BooleanQuery makeQuery(BooleanClause.Occur occur, Query... 
queries) { BooleanQuery.Builder bq = new BooleanQuery.Builder(); for (Query query : queries) { - if (query != null) - bq.add(query, occur); + if (query != null) bq.add(query, occur); } return bq.build(); } @@ -504,7 +520,8 @@ public class BBoxStrategy extends SpatialStrategy { Query makeWithin(Rectangle bbox) { // general case - // docMinX >= queryExtent.getMinX() AND docMinY >= queryExtent.getMinY() AND docMaxX <= queryExtent.getMaxX() AND docMaxY <= queryExtent.getMaxY() + // docMinX >= queryExtent.getMinX() AND docMinY >= queryExtent.getMinY() AND docMaxX <= + // queryExtent.getMaxX() AND docMaxY <= queryExtent.getMaxY() // Y conditions // docMinY >= queryExtent.getMinY() AND docMaxY <= queryExtent.getMaxY() @@ -516,7 +533,7 @@ public class BBoxStrategy extends SpatialStrategy { Query xConditions; if (ctx.isGeo() && bbox.getMinX() == -180.0 && bbox.getMaxX() == 180.0) { - //if query world-wraps, only the y condition matters + // if query world-wraps, only the y condition matters return yConditions; } else if (!bbox.getCrossesDateLine()) { @@ -527,14 +544,15 @@ public class BBoxStrategy extends SpatialStrategy { Query qMaxX = this.makeNumericRangeQuery(field_maxX, null, bbox.getMaxX(), false, true); Query qMinMax = this.makeQuery(BooleanClause.Occur.MUST, qMinX, qMaxX); - double edge = 0;//none, otherwise opposite dateline of query - if (bbox.getMinX() == -180.0) - edge = 180; - else if (bbox.getMaxX() == 180.0) - edge = -180; + double edge = 0; // none, otherwise opposite dateline of query + if (bbox.getMinX() == -180.0) edge = 180; + else if (bbox.getMaxX() == 180.0) edge = -180; if (edge != 0 && ctx.isGeo()) { - Query edgeQ = makeQuery(BooleanClause.Occur.MUST, - makeNumberTermQuery(field_minX, edge), makeNumberTermQuery(field_maxX, edge)); + Query edgeQ = + makeQuery( + BooleanClause.Occur.MUST, + makeNumberTermQuery(field_minX, edge), + makeNumberTermQuery(field_maxX, edge)); qMinMax = makeQuery(BooleanClause.Occur.SHOULD, qMinMax, edgeQ); } @@ -618,8 +636,8 @@ public class BBoxStrategy extends SpatialStrategy { } /** - * Returns a numeric range query based on FieldType - * {@link DoublePoint#newRangeQuery} is used for indexes created using {@link DoublePoint} fields + * Returns a numeric range query based on FieldType {@link DoublePoint#newRangeQuery} is used for + * indexes created using {@link DoublePoint} fields * * @param fieldname field name. must not be null. * @param min minimum value of the range. @@ -627,7 +645,8 @@ public class BBoxStrategy extends SpatialStrategy { * @param minInclusive include the minimum value if true. 
* @param maxInclusive include the maximum value if true */ - private Query makeNumericRangeQuery(String fieldname, Double min, Double max, boolean minInclusive, boolean maxInclusive) { + private Query makeNumericRangeQuery( + String fieldname, Double min, Double max, boolean minInclusive, boolean maxInclusive) { if (hasPointVals) { if (min == null) { min = Double.NEGATIVE_INFINITY; diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/bbox/BBoxValueSource.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/bbox/BBoxValueSource.java index 0a84879c25e..f396818ca12 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/bbox/BBoxValueSource.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/bbox/BBoxValueSource.java @@ -17,7 +17,6 @@ package org.apache.lucene.spatial.bbox; import java.io.IOException; - import org.apache.lucene.index.DocValues; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; @@ -53,14 +52,17 @@ class BBoxValueSource extends ShapeValuesSource { final NumericDocValues maxX = DocValues.getNumeric(reader, strategy.field_maxX); final NumericDocValues maxY = DocValues.getNumeric(reader, strategy.field_maxY); - //reused - final Rectangle rect = strategy.getSpatialContext().getShapeFactory().rect(0,0,0,0); + // reused + final Rectangle rect = strategy.getSpatialContext().getShapeFactory().rect(0, 0, 0, 0); return new ShapeValues() { @Override public boolean advanceExact(int doc) throws IOException { - return minX.advanceExact(doc) && minY.advanceExact(doc) && maxX.advanceExact(doc) && maxY.advanceExact(doc); + return minX.advanceExact(doc) + && minY.advanceExact(doc) + && maxX.advanceExact(doc) + && maxY.advanceExact(doc); } @Override @@ -72,14 +74,13 @@ class BBoxValueSource extends ShapeValuesSource { rect.reset(minXValue, maxXValue, minYValue, maxYValue); return rect; } - }; } @Override public boolean isCacheable(LeafReaderContext ctx) { - return DocValues.isCacheable(ctx, - strategy.field_minX, strategy.field_minY, strategy.field_maxX, strategy.field_maxY); + return DocValues.isCacheable( + ctx, strategy.field_minX, strategy.field_minY, strategy.field_maxX, strategy.field_maxY); } @Override diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/bbox/package-info.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/bbox/package-info.java index 518f4475252..f0461794ce9 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/bbox/package-info.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/bbox/package-info.java @@ -17,7 +17,7 @@ /** * Bounding Box Spatial Strategy - *

- * Index a shape extent using 4 numeric fields and a flag to say if it crosses the dateline
+ *
+ * <p>
    Index a shape extent using 4 numeric fields and a flag to say if it crosses the dateline */ -package org.apache.lucene.spatial.bbox; \ No newline at end of file +package org.apache.lucene.spatial.bbox; diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/composite/CompositeSpatialStrategy.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/composite/CompositeSpatialStrategy.java index 348b7d6edb7..043214af888 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/composite/CompositeSpatialStrategy.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/composite/CompositeSpatialStrategy.java @@ -19,7 +19,6 @@ package org.apache.lucene.spatial.composite; import java.util.ArrayList; import java.util.Collections; import java.util.List; - import org.apache.lucene.document.Field; import org.apache.lucene.search.DoubleValuesSource; import org.apache.lucene.search.Query; @@ -35,26 +34,30 @@ import org.locationtech.spatial4j.shape.Point; import org.locationtech.spatial4j.shape.Shape; /** - * A composite {@link SpatialStrategy} based on {@link RecursivePrefixTreeStrategy} (RPT) and - * {@link SerializedDVStrategy} (SDV). - * RPT acts as an index to the precision available in SDV, and in some circumstances can avoid geometry lookups based - * on where a cell is in relation to the query shape. Currently the only predicate optimized like this is Intersects. - * All predicates are supported except for the BBox* ones, and Disjoint. + * A composite {@link SpatialStrategy} based on {@link RecursivePrefixTreeStrategy} (RPT) and {@link + * SerializedDVStrategy} (SDV). RPT acts as an index to the precision available in SDV, and in some + * circumstances can avoid geometry lookups based on where a cell is in relation to the query shape. + * Currently the only predicate optimized like this is Intersects. All predicates are supported + * except for the BBox* ones, and Disjoint. * * @lucene.experimental */ public class CompositeSpatialStrategy extends SpatialStrategy { - //TODO support others? (BBox) + // TODO support others? (BBox) private final RecursivePrefixTreeStrategy indexStrategy; - /** Has the geometry. */ // TODO support others? + /** Has the geometry. */ + // TODO support others? private final SerializedDVStrategy geometryStrategy; + private boolean optimizePredicates = true; - public CompositeSpatialStrategy(String fieldName, - RecursivePrefixTreeStrategy indexStrategy, SerializedDVStrategy geometryStrategy) { - super(indexStrategy.getSpatialContext(), fieldName);//field name; unused + public CompositeSpatialStrategy( + String fieldName, + RecursivePrefixTreeStrategy indexStrategy, + SerializedDVStrategy geometryStrategy) { + super(indexStrategy.getSpatialContext(), fieldName); // field name; unused this.indexStrategy = indexStrategy; this.geometryStrategy = geometryStrategy; } @@ -71,8 +74,10 @@ public class CompositeSpatialStrategy extends SpatialStrategy { return optimizePredicates; } - /** Set to false to NOT use optimized search predicates that avoid checking the geometry sometimes. Only useful for - * benchmarking. */ + /** + * Set to false to NOT use optimized search predicates that avoid checking the geometry sometimes. + * Only useful for benchmarking. 
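+ *
+ * <p>(Editorial sketch, not part of the patch — wiring the composite from its two parts and
+ * toggling this flag; {@code grid} and {@code ctx} are assumed to exist:)
+ *
+ * <pre>
+ * RecursivePrefixTreeStrategy rpt = new RecursivePrefixTreeStrategy(grid, "geo_rpt");
+ * SerializedDVStrategy sdv = new SerializedDVStrategy(ctx, "geo_sdv");
+ * CompositeSpatialStrategy strategy = new CompositeSpatialStrategy("geo", rpt, sdv);
+ * strategy.setOptimizePredicates(false); // benchmark the unoptimized path
+ * </pre>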
+ */ public void setOptimizePredicates(boolean optimizePredicates) { this.optimizePredicates = optimizePredicates; } @@ -87,7 +92,8 @@ public class CompositeSpatialStrategy extends SpatialStrategy { @Override public DoubleValuesSource makeDistanceValueSource(Point queryPoint, double multiplier) { - //TODO consider indexing center-point in DV? Guarantee contained by the shape, which could then be used for + // TODO consider indexing center-point in DV? Guarantee contained by the shape, which could + // then be used for // other purposes like faster WITHIN predicate? throw new UnsupportedOperationException(); } @@ -101,32 +107,41 @@ public class CompositeSpatialStrategy extends SpatialStrategy { } if (pred == SpatialOperation.IsDisjointTo) { -// final Query intersectQuery = makeQuery(new SpatialArgs(SpatialOperation.Intersects, args.getShape())); -// DocValues.getDocsWithField(reader, geometryStrategy.getFieldName()); - //TODO resurrect Disjoint spatial query utility accepting a field name known to have DocValues. + // final Query intersectQuery = makeQuery(new SpatialArgs(SpatialOperation.Intersects, + // args.getShape())); + // DocValues.getDocsWithField(reader, geometryStrategy.getFieldName()); + // TODO resurrect Disjoint spatial query utility accepting a field name known to have + // DocValues. // update class docs when it's added. throw new UnsupportedSpatialOperation(pred); } final ShapeValuesPredicate predicateValueSource = new ShapeValuesPredicate(geometryStrategy.makeShapeValueSource(), pred, args.getShape()); - //System.out.println("PredOpt: " + optimizePredicates); + // System.out.println("PredOpt: " + optimizePredicates); if (pred == SpatialOperation.Intersects && optimizePredicates) { // We have a smart Intersects impl final SpatialPrefixTree grid = indexStrategy.getGrid(); - final int detailLevel = grid.getLevelForDistance(args.resolveDistErr(ctx, 0.0));//default to max precision - return new IntersectsRPTVerifyQuery(args.getShape(), indexStrategy.getFieldName(), grid, - detailLevel, indexStrategy.getPrefixGridScanLevel(), predicateValueSource); + final int detailLevel = + grid.getLevelForDistance(args.resolveDistErr(ctx, 0.0)); // default to max precision + return new IntersectsRPTVerifyQuery( + args.getShape(), + indexStrategy.getFieldName(), + grid, + detailLevel, + indexStrategy.getPrefixGridScanLevel(), + predicateValueSource); } else { - //The general path; all index matches get verified + // The general path; all index matches get verified SpatialArgs indexArgs; if (pred == SpatialOperation.Contains) { - // note: we could map IsWithin as well but it's pretty darned slow since it touches all world grids + // note: we could map IsWithin as well but it's pretty darned slow since it touches all + // world grids indexArgs = args; } else { - //TODO add args.clone method with new predicate? Or simply make non-final? + // TODO add args.clone method with new predicate? Or simply make non-final? 
indexArgs = new SpatialArgs(SpatialOperation.Intersects, args.getShape()); indexArgs.setDistErr(args.getDistErr()); indexArgs.setDistErrPct(args.getDistErrPct()); @@ -140,5 +155,4 @@ public class CompositeSpatialStrategy extends SpatialStrategy { return new CompositeVerifyQuery(indexQuery, predicateValueSource); } } - } diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/composite/CompositeVerifyQuery.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/composite/CompositeVerifyQuery.java index e8149cbb53c..354a0196229 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/composite/CompositeVerifyQuery.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/composite/CompositeVerifyQuery.java @@ -17,7 +17,6 @@ package org.apache.lucene.spatial.composite; import java.io.IOException; - import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.ConstantScoreScorer; @@ -39,7 +38,7 @@ import org.apache.lucene.spatial.util.ShapeValuesPredicate; */ public class CompositeVerifyQuery extends Query { - private final Query indexQuery;//approximation (matches more than needed) + private final Query indexQuery; // approximation (matches more than needed) private final ShapeValuesPredicate predicateValueSource; public CompositeVerifyQuery(Query indexQuery, ShapeValuesPredicate predicateValueSource) { @@ -58,13 +57,12 @@ public class CompositeVerifyQuery extends Query { @Override public boolean equals(Object other) { - return sameClassAs(other) && - equalsTo(getClass().cast(other)); + return sameClassAs(other) && equalsTo(getClass().cast(other)); } - + private boolean equalsTo(CompositeVerifyQuery other) { - return indexQuery.equals(other.indexQuery) && - predicateValueSource.equals(other.predicateValueSource); + return indexQuery.equals(other.indexQuery) + && predicateValueSource.equals(other.predicateValueSource); } @Override @@ -77,8 +75,13 @@ public class CompositeVerifyQuery extends Query { @Override public String toString(String field) { - //TODO verify this looks good - return getClass().getSimpleName() + "(" + indexQuery.toString(field) + ", " + predicateValueSource + ")"; + // TODO verify this looks good + return getClass().getSimpleName() + + "(" + + indexQuery.toString(field) + + ", " + + predicateValueSource + + ")"; } @Override @@ -87,8 +90,11 @@ public class CompositeVerifyQuery extends Query { } @Override - public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { - final Weight indexQueryWeight = indexQuery.createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, boost);//scores aren't unsupported + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) + throws IOException { + final Weight indexQueryWeight = + indexQuery.createWeight( + searcher, ScoreMode.COMPLETE_NO_SCORES, boost); // scores aren't unsupported return new ConstantScoreWeight(this, boost) { @@ -100,7 +106,8 @@ public class CompositeVerifyQuery extends Query { return null; } - final TwoPhaseIterator predFuncValues = predicateValueSource.iterator(context, indexQueryScorer.iterator()); + final TwoPhaseIterator predFuncValues = + predicateValueSource.iterator(context, indexQueryScorer.iterator()); return new ConstantScoreScorer(this, score(), scoreMode, predFuncValues); } @@ -108,7 +115,6 @@ public class CompositeVerifyQuery extends Query { public boolean isCacheable(LeafReaderContext ctx) { return 
predicateValueSource.isCacheable(ctx); } - }; } } diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/composite/IntersectsRPTVerifyQuery.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/composite/IntersectsRPTVerifyQuery.java index 7fa98643507..8fa4a36bff6 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/composite/IntersectsRPTVerifyQuery.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/composite/IntersectsRPTVerifyQuery.java @@ -17,7 +17,6 @@ package org.apache.lucene.spatial.composite; import java.io.IOException; - import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.ConstantScoreScorer; import org.apache.lucene.search.ConstantScoreWeight; @@ -39,9 +38,9 @@ import org.locationtech.spatial4j.shape.Shape; import org.locationtech.spatial4j.shape.SpatialRelation; /** - * A spatial Intersects predicate that distinguishes an approximated match from an exact match based on which cells - * are within the query shape. It exposes a {@link TwoPhaseIterator} that will verify a match with a provided - * predicate in the form of an ShapeValuesPredicate. + * A spatial Intersects predicate that distinguishes an approximated match from an exact match based + * on which cells are within the query shape. It exposes a {@link TwoPhaseIterator} that will verify + * a match with a provided predicate in the form of an ShapeValuesPredicate. * * @lucene.internal */ @@ -50,11 +49,17 @@ public class IntersectsRPTVerifyQuery extends Query { private final IntersectsDifferentiatingQuery intersectsDiffQuery; private final ShapeValuesPredicate predicateValueSource; - public IntersectsRPTVerifyQuery(Shape queryShape, String fieldName, SpatialPrefixTree grid, int detailLevel, - int prefixGridScanLevel, ShapeValuesPredicate predicateValueSource) { + public IntersectsRPTVerifyQuery( + Shape queryShape, + String fieldName, + SpatialPrefixTree grid, + int detailLevel, + int prefixGridScanLevel, + ShapeValuesPredicate predicateValueSource) { this.predicateValueSource = predicateValueSource; - this.intersectsDiffQuery = new IntersectsDifferentiatingQuery(queryShape, fieldName, grid, detailLevel, - prefixGridScanLevel); + this.intersectsDiffQuery = + new IntersectsDifferentiatingQuery( + queryShape, fieldName, grid, detailLevel, prefixGridScanLevel); } @Override @@ -64,13 +69,12 @@ public class IntersectsRPTVerifyQuery extends Query { @Override public boolean equals(Object other) { - return sameClassAs(other) && - equalsTo(getClass().cast(other)); + return sameClassAs(other) && equalsTo(getClass().cast(other)); } private boolean equalsTo(IntersectsRPTVerifyQuery other) { - return intersectsDiffQuery.equals(other.intersectsDiffQuery) && - predicateValueSource.equals(other.predicateValueSource); + return intersectsDiffQuery.equals(other.intersectsDiffQuery) + && predicateValueSource.equals(other.predicateValueSource); } @Override @@ -87,7 +91,8 @@ public class IntersectsRPTVerifyQuery extends Query { } @Override - public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) + throws IOException { return new ConstantScoreWeight(this, boost) { @Override @@ -104,7 +109,8 @@ public class IntersectsRPTVerifyQuery extends Query { } final DocIdSetIterator exactIterator; if (result.exactDocIdSet != null) { - // If both sets are the same, there's nothing to verify; we needn't return a TwoPhaseIterator + // If 
both sets are the same, there's nothing to verify; we needn't return a + // TwoPhaseIterator if (result.approxDocIdSet == result.exactDocIdSet) { return new ConstantScoreScorer(this, score(), scoreMode, approxDISI); } @@ -114,29 +120,31 @@ public class IntersectsRPTVerifyQuery extends Query { exactIterator = null; } - final TwoPhaseIterator twoPhaseIterator = new TwoPhaseIterator(approxDISI) { + final TwoPhaseIterator twoPhaseIterator = + new TwoPhaseIterator(approxDISI) { - final TwoPhaseIterator predFuncValues = predicateValueSource.iterator(context, approxDISI); + final TwoPhaseIterator predFuncValues = + predicateValueSource.iterator(context, approxDISI); - @Override - public boolean matches() throws IOException { - final int doc = approxDISI.docID(); - if (exactIterator != null) { - if (exactIterator.docID() < doc) { - exactIterator.advance(doc); + @Override + public boolean matches() throws IOException { + final int doc = approxDISI.docID(); + if (exactIterator != null) { + if (exactIterator.docID() < doc) { + exactIterator.advance(doc); + } + if (exactIterator.docID() == doc) { + return true; + } + } + return predFuncValues.matches(); } - if (exactIterator.docID() == doc) { - return true; - } - } - return predFuncValues.matches(); - } - @Override - public float matchCost() { - return 100; // TODO: use cost of exactIterator.advance() and predFuncValues.cost() - } - }; + @Override + public float matchCost() { + return 100; // TODO: use cost of exactIterator.advance() and predFuncValues.cost() + } + }; return new ConstantScoreScorer(this, score(), scoreMode, twoPhaseIterator); } @@ -145,26 +153,31 @@ public class IntersectsRPTVerifyQuery extends Query { public boolean isCacheable(LeafReaderContext ctx) { return predicateValueSource.isCacheable(ctx); } - }; } - //This may be a "Query" but we don't use it as-such; the caller calls the constructor and then compute() and examines - // the results which consists of two parts -- the approximated results, and a subset of exact matches. The + // This may be a "Query" but we don't use it as such; the caller calls the constructor and then + // compute() and examines + // the results which consist of two parts -- the approximated results, and a subset of exact + // matches. The // difference needs to be verified. // TODO refactor AVPTQ to not be a Query?
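The hunks above re-indent the heart of this query: a cheap approximate iterator plus a TwoPhaseIterator whose matches() performs the expensive verification. A minimal standalone sketch of the same idiom, not taken from this patch (the IntPredicate is a hypothetical stand-in for ShapeValuesPredicate):

import java.io.IOException;
import java.util.function.IntPredicate;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.TwoPhaseIterator;

/** Wraps an approximation; matches() runs the costly per-document verification. */
final class VerifyingTwoPhaseIterator extends TwoPhaseIterator {
  private final IntPredicate verifyDoc; // hypothetical exact geometry check

  VerifyingTwoPhaseIterator(DocIdSetIterator approximation, IntPredicate verifyDoc) {
    super(approximation);
    this.verifyDoc = verifyDoc;
  }

  @Override
  public boolean matches() throws IOException {
    // Only called for docs the approximation accepted; confirm or reject them here.
    return verifyDoc.test(approximation.docID());
  }

  @Override
  public float matchCost() {
    return 100; // rough relative cost of matches(), mirroring the constant used above
  }
}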
private static class IntersectsDifferentiatingQuery extends AbstractVisitingPrefixTreeQuery { - public IntersectsDifferentiatingQuery(Shape queryShape, String fieldName, SpatialPrefixTree grid, - int detailLevel, int prefixGridScanLevel) { + public IntersectsDifferentiatingQuery( + Shape queryShape, + String fieldName, + SpatialPrefixTree grid, + int detailLevel, + int prefixGridScanLevel) { super(queryShape, fieldName, grid, detailLevel, prefixGridScanLevel); } - IntersectsDifferentiatingQuery.IntersectsDifferentiatingVisitor compute(LeafReaderContext context) - throws IOException { + IntersectsDifferentiatingQuery.IntersectsDifferentiatingVisitor compute( + LeafReaderContext context) throws IOException { final IntersectsDifferentiatingQuery.IntersectsDifferentiatingVisitor result = new IntersectsDifferentiatingQuery.IntersectsDifferentiatingVisitor(context); - result.getDocIdSet();//computes + result.getDocIdSet(); // computes return result; } @@ -196,21 +209,21 @@ public class IntersectsRPTVerifyQuery extends Query { exactDocIdSet = exactBuilder.build(); } if (approxIsEmpty) { - approxDocIdSet = exactDocIdSet;//optimization + approxDocIdSet = exactDocIdSet; // optimization } else { if (exactDocIdSet != null) { approxBuilder.add(exactDocIdSet.iterator()); } approxDocIdSet = approxBuilder.build(); } - return null;//unused in this weird re-use of AVPTQ + return null; // unused in this weird re-use of AVPTQ } @Override protected boolean visitPrefix(Cell cell) throws IOException { if (cell.getShapeRel() == SpatialRelation.WITHIN) { exactIsEmpty = false; - collectDocs(exactBuilder);//note: we'll add exact to approx on finish() + collectDocs(exactBuilder); // note: we'll add exact to approx on finish() return false; } else if (cell.getLevel() == detailLevel) { approxIsEmpty = false; @@ -224,7 +237,7 @@ public class IntersectsRPTVerifyQuery extends Query { protected void visitLeaf(Cell cell) throws IOException { if (cell.getShapeRel() == SpatialRelation.WITHIN) { exactIsEmpty = false; - collectDocs(exactBuilder);//note: we'll add exact to approx on finish() + collectDocs(exactBuilder); // note: we'll add exact to approx on finish() } else { approxIsEmpty = false; collectDocs(approxBuilder); diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/composite/package-info.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/composite/package-info.java index c207ea6b293..f71b8fd2de8 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/composite/package-info.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/composite/package-info.java @@ -16,4 +16,4 @@ */ /** Composite strategies. 
*/ -package org.apache.lucene.spatial.composite; \ No newline at end of file +package org.apache.lucene.spatial.composite; diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/package-info.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/package-info.java index ce3163c6d73..2fbea2879b4 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/package-info.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/package-info.java @@ -16,4 +16,4 @@ */ /** Lucene advanced spatial search */ -package org.apache.lucene.spatial; \ No newline at end of file +package org.apache.lucene.spatial; diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/AbstractPrefixTreeQuery.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/AbstractPrefixTreeQuery.java index 943aa31a4ab..1307136cd09 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/AbstractPrefixTreeQuery.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/AbstractPrefixTreeQuery.java @@ -17,7 +17,6 @@ package org.apache.lucene.spatial.prefix; import java.io.IOException; - import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.PostingsEnum; @@ -40,16 +39,19 @@ import org.locationtech.spatial4j.shape.Shape; /** * Base class for Lucene Queries on SpatialPrefixTree fields. + * * @lucene.internal */ public abstract class AbstractPrefixTreeQuery extends Query { protected final Shape queryShape; protected final String fieldName; - protected final SpatialPrefixTree grid;//not in equals/hashCode since it's implied for a specific field + protected final SpatialPrefixTree + grid; // not in equals/hashCode since it's implied for a specific field protected final int detailLevel; - public AbstractPrefixTreeQuery(Shape queryShape, String fieldName, SpatialPrefixTree grid, int detailLevel) { + public AbstractPrefixTreeQuery( + Shape queryShape, String fieldName, SpatialPrefixTree grid, int detailLevel) { this.queryShape = queryShape; this.fieldName = fieldName; this.grid = grid; @@ -58,14 +60,13 @@ public abstract class AbstractPrefixTreeQuery extends Query { @Override public boolean equals(Object o) { - return sameClassAs(o) && - equalsTo(getClass().cast(o)); + return sameClassAs(o) && equalsTo(getClass().cast(o)); } private boolean equalsTo(AbstractPrefixTreeQuery other) { - return detailLevel == other.detailLevel && - fieldName.equals(other.fieldName) && - queryShape.equals(other.queryShape); + return detailLevel == other.detailLevel + && fieldName.equals(other.fieldName) + && queryShape.equals(other.queryShape); } @Override @@ -85,7 +86,8 @@ public abstract class AbstractPrefixTreeQuery extends Query { } @Override - public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) + throws IOException { return new ConstantScoreWeight(this, boost) { @Override public Scorer scorer(LeafReaderContext context) throws IOException { @@ -109,16 +111,18 @@ public abstract class AbstractPrefixTreeQuery extends Query { protected abstract DocIdSet getDocIdSet(LeafReaderContext context) throws IOException; - /** Holds transient state and docid collecting utility methods as part of - * traversing a {@link TermsEnum} for a {@link org.apache.lucene.index.LeafReaderContext}. 
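Since the createWeight hunk above only rewraps the signature, a sketch of the full ConstantScoreWeight pattern these prefix-tree queries share may help. This is an assumed rendering (placed, say, in an AbstractPrefixTreeQuery subclass) rather than code taken from this patch:

@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost)
    throws IOException {
  return new ConstantScoreWeight(this, boost) {
    @Override
    public Scorer scorer(LeafReaderContext context) throws IOException {
      DocIdSet docSet = getDocIdSet(context); // supplied by the concrete query
      if (docSet == null) {
        return null; // no matching docs in this segment
      }
      DocIdSetIterator disi = docSet.iterator();
      if (disi == null) {
        return null;
      }
      return new ConstantScoreScorer(this, score(), scoreMode, disi);
    }

    @Override
    public boolean isCacheable(LeafReaderContext ctx) {
      return true; // assumed: these queries depend only on the query shape and field
    }
  };
}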
*/ - public abstract class BaseTermsEnumTraverser {//TODO rename to LeafTermsEnumTraverser ? - //note: only 'fieldName' (accessed in constructor) keeps this from being a static inner class + /** + * Holds transient state and docid collecting utility methods as part of traversing a {@link + * TermsEnum} for a {@link org.apache.lucene.index.LeafReaderContext}. + */ + public abstract class BaseTermsEnumTraverser { // TODO rename to LeafTermsEnumTraverser ? + // note: only 'fieldName' (accessed in constructor) keeps this from being a static inner class protected final LeafReaderContext context; protected final int maxDoc; protected final Terms terms; // maybe null - protected final TermsEnum termsEnum;//remember to check for null! + protected final TermsEnum termsEnum; // remember to check for null! protected PostingsEnum postingsEnum; public BaseTermsEnumTraverser(LeafReaderContext context) throws IOException { @@ -145,5 +149,4 @@ public abstract class AbstractPrefixTreeQuery extends Query { docSetBuilder.add(postingsEnum); } } - } diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/AbstractVisitingPrefixTreeQuery.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/AbstractVisitingPrefixTreeQuery.java index 8ccee99c40c..e0be8265623 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/AbstractVisitingPrefixTreeQuery.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/AbstractVisitingPrefixTreeQuery.java @@ -18,9 +18,6 @@ package org.apache.lucene.spatial.prefix; import java.io.IOException; import java.util.Iterator; - -import org.locationtech.spatial4j.shape.Shape; -import org.locationtech.spatial4j.shape.SpatialRelation; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.DocIdSet; @@ -28,85 +25,88 @@ import org.apache.lucene.spatial.prefix.tree.Cell; import org.apache.lucene.spatial.prefix.tree.CellIterator; import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree; import org.apache.lucene.util.BytesRef; +import org.locationtech.spatial4j.shape.Shape; +import org.locationtech.spatial4j.shape.SpatialRelation; /** - * Traverses a {@link SpatialPrefixTree} indexed field, using the template and - * visitor design patterns for subclasses to guide the traversal and collect - * matching documents. - *
<p/>
    - * Subclasses implement {@link #getDocIdSet(org.apache.lucene.index.LeafReaderContext)} - * by instantiating a custom {@link VisitorTemplate} subclass (i.e. an anonymous inner class) - * and implement the required methods. + * Traverses a {@link SpatialPrefixTree} indexed field, using the template and visitor design + * patterns for subclasses to guide the traversal and collect matching documents. + * + *
<p>
    Subclasses implement {@link #getDocIdSet(org.apache.lucene.index.LeafReaderContext)} by + * instantiating a custom {@link VisitorTemplate} subclass (i.e. an anonymous inner class) and + * implement the required methods. * * @lucene.internal */ public abstract class AbstractVisitingPrefixTreeQuery extends AbstractPrefixTreeQuery { - //Historical note: this code resulted from a refactoring of RecursivePrefixTreeQuery, + // Historical note: this code resulted from a refactoring of RecursivePrefixTreeQuery, // which in turn came out of SOLR-2155 - //This class perhaps could have been implemented in terms of FilteredTermsEnum & MultiTermQuery. + // This class perhaps could have been implemented in terms of FilteredTermsEnum & MultiTermQuery. // Maybe so for simple Intersects predicate but not for when we want to collect terms - // differently depending on cell state like IsWithin and for fuzzy/accurate collection planned improvements. At + // differently depending on cell state like IsWithin and for fuzzy/accurate collection planned + // improvements. At // least it would just make things more complicated. - protected final int prefixGridScanLevel;//at least one less than grid.getMaxLevels() + protected final int prefixGridScanLevel; // at least one less than grid.getMaxLevels() - public AbstractVisitingPrefixTreeQuery(Shape queryShape, String fieldName, SpatialPrefixTree grid, - int detailLevel, int prefixGridScanLevel) { + public AbstractVisitingPrefixTreeQuery( + Shape queryShape, + String fieldName, + SpatialPrefixTree grid, + int detailLevel, + int prefixGridScanLevel) { super(queryShape, fieldName, grid, detailLevel); this.prefixGridScanLevel = Math.max(0, Math.min(prefixGridScanLevel, grid.getMaxLevels() - 1)); assert detailLevel <= grid.getMaxLevels(); } /** - * An abstract class designed to make it easy to implement predicates or - * other operations on a {@link SpatialPrefixTree} indexed field. An instance - * of this class is not designed to be re-used across LeafReaderContext - * instances so simply create a new one per-leaf. - * The {@link #getDocIdSet()} method here starts the work. It first checks - * that there are indexed terms; if not it quickly returns null. Then it calls - * {@link #start()} so a subclass can set up a return value, like an - * {@link org.apache.lucene.util.FixedBitSet}. Then it starts the traversal - * process, calling {@link #findSubCellsToVisit(org.apache.lucene.spatial.prefix.tree.Cell)} - * which by default finds the top cells that intersect {@code queryShape}. If - * there isn't an indexed cell for a corresponding cell returned for this - * method then it's short-circuited until it finds one, at which point - * {@link #visitPrefix(org.apache.lucene.spatial.prefix.tree.Cell)} is called. At - * some depths, of the tree, the algorithm switches to a scanning mode that - * calls {@link #visitScanned(org.apache.lucene.spatial.prefix.tree.Cell)} - * for each leaf cell found. + * An abstract class designed to make it easy to implement predicates or other operations on a + * {@link SpatialPrefixTree} indexed field. An instance of this class is not designed to be + * re-used across LeafReaderContext instances so simply create a new one per-leaf. The {@link + * #getDocIdSet()} method here starts the work. It first checks that there are indexed terms; if + * not it quickly returns null. Then it calls {@link #start()} so a subclass can set up a return + * value, like an {@link org.apache.lucene.util.FixedBitSet}. 
Then it starts the traversal + * process, calling {@link #findSubCellsToVisit(org.apache.lucene.spatial.prefix.tree.Cell)} which + * by default finds the top cells that intersect {@code queryShape}. If there isn't an indexed + * cell for a corresponding cell returned for this method then it's short-circuited until it finds + * one, at which point {@link #visitPrefix(org.apache.lucene.spatial.prefix.tree.Cell)} is called. + * At some depths of the tree, the algorithm switches to a scanning mode that calls {@link + * #visitScanned(org.apache.lucene.spatial.prefix.tree.Cell)} for each leaf cell found. * * @lucene.internal */ public abstract class VisitorTemplate extends BaseTermsEnumTraverser { - /* Future potential optimizations: + /* Future potential optimizations: - * Can a polygon query shape be optimized / made-simpler at recursive depths - (e.g. intersection of shape + cell box) + * Can a polygon query shape be optimized / made-simpler at recursive depths + (e.g. intersection of shape + cell box) - * RE "scan" vs divide & conquer performance decision: - We should use termsEnum.docFreq() as an estimate on the number of places at - this depth. It would be nice if termsEnum knew how many terms - start with the current term without having to repeatedly next() & test to find out. + * RE "scan" vs divide & conquer performance decision: + We should use termsEnum.docFreq() as an estimate on the number of places at + this depth. It would be nice if termsEnum knew how many terms + start with the current term without having to repeatedly next() & test to find out. - * Perhaps don't do intermediate seek()'s to cells above detailLevel that have Intersects - relation because we won't be collecting those docs any way. However seeking - does act as a short-circuit. So maybe do some percent of the time or when the level - is above some threshold. + * Perhaps don't do intermediate seek()'s to cells above detailLevel that have Intersects + relation because we won't be collecting those docs anyway. However seeking + does act as a short-circuit. So maybe do some percent of the time or when the level + is above some threshold. - */ + */ // // TODO MAJOR REFACTOR SIMPLIFICATION BASED ON TreeCellIterator TODO // - private VNode curVNode;//current pointer, derived from query shape - private BytesRef curVNodeTerm = new BytesRef();//curVNode.cell's term, without leaf. in main loop only + private VNode curVNode; // current pointer, derived from query shape + // curVNode.cell's term, without leaf. in main loop only + private BytesRef curVNodeTerm = new BytesRef(); - private BytesRef thisTerm;//the result of termsEnum.term() - private Cell indexedCell;//Cell wrapper of thisTerm. Always updated when thisTerm is. + private BytesRef thisTerm; // the result of termsEnum.term() + private Cell indexedCell; // Cell wrapper of thisTerm. Always updated when thisTerm is. public VisitorTemplate(LeafReaderContext context) throws IOException { super(context); @@ -114,9 +114,8 @@ public abstract class AbstractVisitingPrefixTreeQuery extends AbstractPrefixTree public DocIdSet getDocIdSet() throws IOException { assert curVNode == null : "Called more than once?"; - if (termsEnum == null) - return null; - if (!nextTerm()) {//advances + if (termsEnum == null) return null; + if (!nextTerm()) { // advances return null; } @@ -127,34 +126,36 @@ public abstract class AbstractVisitingPrefixTreeQuery extends AbstractPrefixTree addIntersectingChildren(); - main: while (thisTerm != null) {//terminates for other reasons too!
+ main: + while (thisTerm != null) { // terminates for other reasons too! - //Advance curVNode pointer + // Advance curVNode pointer if (curVNode.children != null) { - //-- HAVE CHILDREN: DESCEND - assert curVNode.children.hasNext();//if we put it there then it has something + // -- HAVE CHILDREN: DESCEND + assert curVNode.children.hasNext(); // if we put it there then it has something preSiblings(curVNode); curVNode = curVNode.children.next(); } else { - //-- NO CHILDREN: ADVANCE TO NEXT SIBLING + // -- NO CHILDREN: ADVANCE TO NEXT SIBLING VNode parentVNode = curVNode.parent; while (true) { - if (parentVNode == null) + if (parentVNode == null) { break main; // all done + } if (parentVNode.children.hasNext()) { - //advance next sibling + // advance next sibling curVNode = parentVNode.children.next(); break; } else { - //reached end of siblings; pop up + // reached end of siblings; pop up postSiblings(parentVNode); - parentVNode.children = null;//GC + parentVNode.children = null; // GC parentVNode = parentVNode.parent; } } } - //Seek to curVNode's cell (or skip if termsEnum has moved beyond) + // Seek to curVNode's cell (or skip if termsEnum has moved beyond) final int compare = indexedCell.compareToNoLeaf(curVNode.cell); if (compare > 0) { // The indexed cell is after; continue loop to next query cell @@ -165,104 +166,105 @@ public abstract class AbstractVisitingPrefixTreeQuery extends AbstractPrefixTree // Seek ! curVNode.cell.getTokenBytesNoLeaf(curVNodeTerm); TermsEnum.SeekStatus seekStatus = termsEnum.seekCeil(curVNodeTerm); - if (seekStatus == TermsEnum.SeekStatus.END) + if (seekStatus == TermsEnum.SeekStatus.END) { break; // all done + } thisTerm = termsEnum.term(); indexedCell = grid.readCell(thisTerm, indexedCell); if (seekStatus == TermsEnum.SeekStatus.NOT_FOUND) { // Did we find a leaf of the cell we were looking for or something after? - if (!indexedCell.isLeaf() || indexedCell.compareToNoLeaf(curVNode.cell) != 0) + if (!indexedCell.isLeaf() || indexedCell.compareToNoLeaf(curVNode.cell) != 0) { continue; // The indexed cell is after; continue loop to next query cell + } } } // indexedCell == queryCell (disregarding leaf). - // If indexedCell is a leaf then there's no prefix (prefix sorts before) -- just visit and continue + // If indexedCell is a leaf then there's no prefix (prefix sorts before) -- just visit and + // continue if (indexedCell.isLeaf()) { - visitLeaf(indexedCell);//TODO or query cell? Though shouldn't matter. + visitLeaf(indexedCell); // TODO or query cell? Though shouldn't matter. if (!nextTerm()) break; continue; } // If a prefix (non-leaf) then visit; see if we descend. - final boolean descend = visitPrefix(curVNode.cell);//need to use curVNode.cell not indexedCell + // need to use curVNode.cell not indexedCell + final boolean descend = visitPrefix(curVNode.cell); if (!nextTerm()) break; // Check for adjacent leaf with the same prefix if (indexedCell.isLeaf() && indexedCell.getLevel() == curVNode.cell.getLevel()) { - visitLeaf(indexedCell);//TODO or query cell? Though shouldn't matter. + visitLeaf(indexedCell); // TODO or query cell? Though shouldn't matter. if (!nextTerm()) break; } - if (descend) { addIntersectingChildren(); } - - }//main loop + } return finish(); } - /** Called initially, and whenever {@link #visitPrefix(org.apache.lucene.spatial.prefix.tree.Cell)} - * returns true. */ + /** + * Called initially, and whenever {@link + * #visitPrefix(org.apache.lucene.spatial.prefix.tree.Cell)} returns true. 
+ */ private void addIntersectingChildren() throws IOException { assert thisTerm != null; Cell cell = curVNode.cell; - if (cell.getLevel() >= detailLevel) - throw new IllegalStateException("Spatial logic error"); + if (cell.getLevel() >= detailLevel) throw new IllegalStateException("Spatial logic error"); - //Decide whether to continue to divide & conquer, or whether it's time to + // Decide whether to continue to divide & conquer, or whether it's time to // scan through terms beneath this cell. // Scanning is a performance optimization trade-off. - //TODO use termsEnum.docFreq() as heuristic - boolean scan = cell.getLevel() >= prefixGridScanLevel;//simple heuristic + // TODO use termsEnum.docFreq() as heuristic + boolean scan = cell.getLevel() >= prefixGridScanLevel; // simple heuristic if (!scan) { - //Divide & conquer (ultimately termsEnum.seek()) + // Divide & conquer (ultimately termsEnum.seek()) Iterator subCellsIter = findSubCellsToVisit(cell); - if (!subCellsIter.hasNext())//not expected - return; + if (!subCellsIter.hasNext()) // not expected + return; curVNode.children = new VNodeCellIterator(subCellsIter, new VNode(curVNode)); } else { - //Scan (loop of termsEnum.next()) + // Scan (loop of termsEnum.next()) scan(detailLevel); } } /** - * Called when doing a divide and conquer to find the next intersecting cells - * of the query shape that are beneath {@code cell}. {@code cell} is - * guaranteed to have an intersection and thus this must return some number - * of nodes. + * Called when doing a divide and conquer to find the next intersecting cells of the query shape + * that are beneath {@code cell}. {@code cell} is guaranteed to have an intersection and thus + * this must return some number of nodes. */ protected CellIterator findSubCellsToVisit(Cell cell) { return cell.getNextLevelCells(queryShape); } /** - * Scans ({@code termsEnum.next()}) terms until a term is found that does - * not start with curVNode's cell. If it finds a leaf cell or a cell at - * level {@code scanDetailLevel} then it calls {@link - * #visitScanned(org.apache.lucene.spatial.prefix.tree.Cell)}. + * Scans ({@code termsEnum.next()}) terms until a term is found that does not start with + * curVNode's cell. If it finds a leaf cell or a cell at level {@code scanDetailLevel} then it + * calls {@link #visitScanned(org.apache.lucene.spatial.prefix.tree.Cell)}. */ protected void scan(int scanDetailLevel) throws IOException { - //note: this can be a do-while instead in 6x; 5x has a back-compat with redundant leaves -- LUCENE-4942 + // note: this can be a do-while instead in 6x; 5x has a back-compat with redundant leaves -- + // LUCENE-4942 while (curVNode.cell.isPrefixOf(indexedCell)) { if (indexedCell.getLevel() == scanDetailLevel || (indexedCell.getLevel() < scanDetailLevel && indexedCell.isLeaf())) { visitScanned(indexedCell); } - //advance + // advance if (!nextTerm()) break; } } private boolean nextTerm() throws IOException { - if ((thisTerm = termsEnum.next()) == null) - return false; + if ((thisTerm = termsEnum.next()) == null) return false; indexedCell = grid.readCell(thisTerm, indexedCell); return true; } @@ -291,7 +293,7 @@ public abstract class AbstractVisitingPrefixTreeQuery extends AbstractPrefixTree } @Override - public void remove() {//it always removes + public void remove() { // it always removes } } @@ -302,10 +304,9 @@ public abstract class AbstractVisitingPrefixTreeQuery extends AbstractPrefixTree protected abstract DocIdSet finish() throws IOException; /** - * Visit an indexed non-leaf cell. 
The presence of a prefix cell implies - * there are leaf cells at further levels. The cell passed should have it's - * {@link org.apache.lucene.spatial.prefix.tree.Cell#getShapeRel()} set - * relative to the filtered shape. + * Visit an indexed non-leaf cell. The presence of a prefix cell implies there are leaf cells at + * further levels. The cell passed should have its {@link + * org.apache.lucene.spatial.prefix.tree.Cell#getShapeRel()} set relative to the filtered shape. * * @param cell An intersecting cell; not a leaf. * @return true to descend to more levels. @@ -313,24 +314,23 @@ protected abstract boolean visitPrefix(Cell cell) throws IOException; /** - * Called when an indexed leaf cell is found. An - * indexed leaf cell usually means associated documents won't be found at - * further detail levels. However, if a document has - * multiple overlapping shapes at different resolutions, then this isn't true. + * Called when an indexed leaf cell is found. An indexed leaf cell usually means associated + * documents won't be found at further detail levels. However, if a document has multiple + * overlapping shapes at different resolutions, then this isn't true. */ protected abstract void visitLeaf(Cell cell) throws IOException; /** - * The cell is either indexed as a leaf or is the last level of detail. It - * might not even intersect the query shape, so be sure to check for that. - * The default implementation will check that and if passes then call - * {@link #visitLeaf(org.apache.lucene.spatial.prefix.tree.Cell)} or - * {@link #visitPrefix(org.apache.lucene.spatial.prefix.tree.Cell)}. + * The cell is either indexed as a leaf or is the last level of detail. It might not even + * intersect the query shape, so be sure to check for that. The default implementation will + * check that and if it passes then call {@link + * #visitLeaf(org.apache.lucene.spatial.prefix.tree.Cell)} or {@link + * #visitPrefix(org.apache.lucene.spatial.prefix.tree.Cell)}. */ protected void visitScanned(Cell cell) throws IOException { final SpatialRelation relate = cell.getShape().relate(queryShape); if (relate.intersects()) { - cell.setShapeRel(relate);//just being pedantic + cell.setShapeRel(relate); // just being pedantic if (cell.isLeaf()) { visitLeaf(cell); } else { @@ -339,33 +339,28 @@ } } - protected void preSiblings(VNode vNode) throws IOException { - } + protected void preSiblings(VNode vNode) throws IOException {} - protected void postSiblings(VNode vNode) throws IOException { - } - }//class VisitorTemplate + protected void postSiblings(VNode vNode) throws IOException {} + } // class VisitorTemplate /** - * A visitor node/cell found via the query shape for {@link VisitorTemplate}. - * Sometimes these are reset(cell). It's like a LinkedList node but forms a - * tree. + * A visitor node/cell found via the query shape for {@link VisitorTemplate}. Sometimes these are + * reset(cell). It's like a LinkedList node but forms a tree. * * @lucene.internal */ protected static class VNode { - //Note: The VNode tree adds more code to debug/maintain v.s. a flattened + // Note: The VNode tree adds more code to debug/maintain vs. a flattened // LinkedList that we used to have. There is more opportunity here for // custom behavior (see preSiblings & postSiblings) but that's not // leveraged yet. Maybe this is slightly more GC friendly.
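To complement the VisitorTemplate javadoc reformatted above, here is a compact sketch, assumed rather than taken from this patch, of how an Intersects-style subclass of AbstractVisitingPrefixTreeQuery typically fills in the template methods. DocIdSetBuilder is org.apache.lucene.util.DocIdSetBuilder; maxDoc, terms, detailLevel, and collectDocs() are inherited from the enclosing classes:

@Override
protected DocIdSet getDocIdSet(LeafReaderContext context) throws IOException {
  return new VisitorTemplate(context) {
    DocIdSetBuilder results; // created in start(), turned into a DocIdSet by finish()

    @Override
    protected void start() throws IOException {
      results = new DocIdSetBuilder(maxDoc, terms);
    }

    @Override
    protected DocIdSet finish() throws IOException {
      return results.build();
    }

    @Override
    protected boolean visitPrefix(Cell cell) throws IOException {
      if (cell.getShapeRel() == SpatialRelation.WITHIN || cell.getLevel() == detailLevel) {
        collectDocs(results); // everything under this cell matches; don't descend
        return false;
      }
      return true; // partial overlap: descend to finer cells
    }

    @Override
    protected void visitLeaf(Cell cell) throws IOException {
      collectDocs(results);
    }
  }.getDocIdSet();
}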
- final VNode parent;//only null at the root - Iterator children;//null, then sometimes set, then null - Cell cell;//not null (except initially before reset()) + final VNode parent; // only null at the root + Iterator children; // null, then sometimes set, then null + Cell cell; // not null (except initially before reset()) - /** - * call reset(cell) after to set the cell. - */ + /** call reset(cell) after to set the cell. */ VNode(VNode parent) { // remember to call reset(cell) after this.parent = parent; } @@ -375,6 +370,5 @@ public abstract class AbstractVisitingPrefixTreeQuery extends AbstractPrefixTree this.cell = cell; assert children == null; } - } } diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/BytesRefIteratorTokenStream.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/BytesRefIteratorTokenStream.java index 9be0e716ba2..8785c1dca08 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/BytesRefIteratorTokenStream.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/BytesRefIteratorTokenStream.java @@ -17,7 +17,6 @@ package org.apache.lucene.spatial.prefix; import java.io.IOException; - import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.BytesTermAttribute; import org.apache.lucene.util.BytesRef; @@ -57,14 +56,13 @@ public class BytesRefIteratorTokenStream extends TokenStream { } else { clearAttributes(); bytesAtt.setBytesRef(bytes); - //note: we don't bother setting posInc or type attributes. There's no point to it. + // note: we don't bother setting posInc or type attributes. There's no point to it. return true; } } - //members + // members private final BytesTermAttribute bytesAtt = addAttribute(BytesTermAttribute.class); private BytesRefIterator bytesIter = null; // null means not initialized - } diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/CellToBytesRefIterator.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/CellToBytesRefIterator.java index 0b81b262ddf..544356c05a3 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/CellToBytesRefIterator.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/CellToBytesRefIterator.java @@ -17,17 +17,15 @@ package org.apache.lucene.spatial.prefix; import java.util.Iterator; - import org.apache.lucene.spatial.prefix.tree.Cell; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; /** - * A reset'able {@link org.apache.lucene.util.BytesRefIterator} wrapper around - * an {@link java.util.Iterator} of {@link org.apache.lucene.spatial.prefix.tree.Cell}s. + * A reset'able {@link org.apache.lucene.util.BytesRefIterator} wrapper around an {@link + * java.util.Iterator} of {@link org.apache.lucene.spatial.prefix.tree.Cell}s. 
* * @see PrefixTreeStrategy#newCellToBytesRefIterator() - * * @lucene.internal */ public class CellToBytesRefIterator implements BytesRefIterator { diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/ContainsPrefixTreeQuery.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/ContainsPrefixTreeQuery.java index b0864f67cd3..2e2551d10b0 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/ContainsPrefixTreeQuery.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/ContainsPrefixTreeQuery.java @@ -18,9 +18,6 @@ package org.apache.lucene.spatial.prefix; import java.io.IOException; import java.util.Arrays; - -import org.locationtech.spatial4j.shape.Shape; -import org.locationtech.spatial4j.shape.SpatialRelation; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.PostingsEnum; import org.apache.lucene.index.TermsEnum; @@ -33,34 +30,42 @@ import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.RamUsageEstimator; import org.apache.lucene.util.SentinelIntSet; +import org.locationtech.spatial4j.shape.Shape; +import org.locationtech.spatial4j.shape.SpatialRelation; /** - * Finds docs where its indexed shape {@link org.apache.lucene.spatial.query.SpatialOperation#Contains - * CONTAINS} the query shape. For use on {@link RecursivePrefixTreeStrategy}. + * Finds docs where its indexed shape {@link + * org.apache.lucene.spatial.query.SpatialOperation#Contains CONTAINS} the query shape. For use on + * {@link RecursivePrefixTreeStrategy}. * * @lucene.experimental */ public class ContainsPrefixTreeQuery extends AbstractPrefixTreeQuery { /** - * If the spatial data for a document is comprised of multiple overlapping or adjacent parts, - * it might fail to match a query shape when doing the CONTAINS predicate when the sum of - * those shapes contain the query shape but none do individually. Set this to false to - * increase performance if you don't care about that circumstance (such as if your indexed - * data doesn't even have such conditions). See LUCENE-5062. + * If the spatial data for a document is comprised of multiple overlapping or adjacent parts, it + * might fail to match a query shape when doing the CONTAINS predicate when the sum of those + * shapes contain the query shape but none do individually. Set this to false to increase + * performance if you don't care about that circumstance (such as if your indexed data doesn't + * even have such conditions). See LUCENE-5062. 
*/ protected final boolean multiOverlappingIndexedShapes; - public ContainsPrefixTreeQuery(Shape queryShape, String fieldName, SpatialPrefixTree grid, int detailLevel, boolean multiOverlappingIndexedShapes) { + public ContainsPrefixTreeQuery( + Shape queryShape, + String fieldName, + SpatialPrefixTree grid, + int detailLevel, + boolean multiOverlappingIndexedShapes) { super(queryShape, fieldName, grid, detailLevel); this.multiOverlappingIndexedShapes = multiOverlappingIndexedShapes; } @Override public boolean equals(Object o) { - if (!super.equals(o)) - return false; - return multiOverlappingIndexedShapes == ((ContainsPrefixTreeQuery)o).multiOverlappingIndexedShapes; + if (!super.equals(o)) return false; + return multiOverlappingIndexedShapes + == ((ContainsPrefixTreeQuery) o).multiOverlappingIndexedShapes; } @Override @@ -70,12 +75,13 @@ public class ContainsPrefixTreeQuery extends AbstractPrefixTreeQuery { @Override public String toString(String field) { - return getClass().getSimpleName() + "(" + - "fieldName=" + fieldName + "," + - "queryShape=" + queryShape + "," + - "detailLevel=" + detailLevel + "," + - "multiOverlappingIndexedShapes=" + multiOverlappingIndexedShapes + - ")"; + return getClass().getSimpleName() + + "(" + + ("fieldName=" + fieldName + ",") + + ("queryShape=" + queryShape + ",") + + ("detailLevel=" + detailLevel + ",") + + ("multiOverlappingIndexedShapes=" + multiOverlappingIndexedShapes) + + ")"; } @Override @@ -88,25 +94,26 @@ public class ContainsPrefixTreeQuery extends AbstractPrefixTreeQuery { public ContainsVisitor(LeafReaderContext context) throws IOException { super(context); if (termsEnum != null) { - nextTerm();//advance to first + nextTerm(); // advance to first } } - BytesRef seekTerm = new BytesRef();//temp; see seek() - BytesRef thisTerm;//current term in termsEnum - Cell indexedCell;//the cell wrapper around thisTerm + BytesRef seekTerm = new BytesRef(); // temp; see seek() + BytesRef thisTerm; // current term in termsEnum + Cell indexedCell; // the cell wrapper around thisTerm - /** This is the primary algorithm; recursive. Returns null if finds none. */ + /** This is the primary algorithm; recursive. Returns null if finds none. */ private SmallDocSet visit(Cell cell, Bits acceptContains) throws IOException { - if (thisTerm == null)//signals all done - return null; + if (thisTerm == null) // signals all done + return null; // Get the AND of all child results (into combinedSubResults) SmallDocSet combinedSubResults = null; // Optimization: use null subCellsFilter when we know cell is within the query shape. 
Shape subCellsFilter = queryShape; - if (cell.getLevel() != 0 && ((cell.getShapeRel() == null || cell.getShapeRel() == SpatialRelation.WITHIN))) { + if (cell.getLevel() != 0 + && ((cell.getShapeRel() == null || cell.getShapeRel() == SpatialRelation.WITHIN))) { subCellsFilter = null; assert cell.getShape().relate(queryShape) == SpatialRelation.WITHIN; } @@ -117,38 +124,36 @@ public class ContainsPrefixTreeQuery extends AbstractPrefixTreeQuery { combinedSubResults = null; } else if (subCell.getLevel() == detailLevel) { combinedSubResults = getDocs(subCell, acceptContains); - } else if (!multiOverlappingIndexedShapes && - subCell.getShapeRel() == SpatialRelation.WITHIN) { + } else if (!multiOverlappingIndexedShapes + && subCell.getShapeRel() == SpatialRelation.WITHIN) { combinedSubResults = getLeafDocs(subCell, acceptContains); } else { - //OR the leaf docs with all child results + // OR the leaf docs with all child results SmallDocSet leafDocs = getLeafDocs(subCell, acceptContains); - SmallDocSet subDocs = visit(subCell, acceptContains); //recursion + SmallDocSet subDocs = visit(subCell, acceptContains); // recursion combinedSubResults = union(leafDocs, subDocs); } - if (combinedSubResults == null) - break; - acceptContains = combinedSubResults;//has the 'AND' effect on next iteration + if (combinedSubResults == null) break; + acceptContains = combinedSubResults; // has the 'AND' effect on next iteration } return combinedSubResults; } private boolean seek(Cell cell) throws IOException { - if (thisTerm == null) - return false; + if (thisTerm == null) return false; final int compare = indexedCell.compareToNoLeaf(cell); if (compare > 0) { - return false;//leap-frog effect + return false; // leap-frog effect } else if (compare == 0) { return true; // already there! - } else {//compare > 0 - //seek! + } else { // compare < 0 + // seek! seekTerm = cell.getTokenBytesNoLeaf(seekTerm); final TermsEnum.SeekStatus seekStatus = termsEnum.seekCeil(seekTerm); if (seekStatus == TermsEnum.SeekStatus.END) { - thisTerm = null;//all done + thisTerm = null; // all done return false; } thisTerm = termsEnum.term();
*/ private SmallDocSet getLeafDocs(Cell cell, Bits acceptContains) throws IOException { assert indexedCell.compareToNoLeaf(cell) == 0; - //Advance past prefix if we're at a prefix; return null if no leaf + // Advance past prefix if we're at a prefix; return null if no leaf if (!indexedCell.isLeaf()) { if (!nextTerm() || !indexedCell.isLeaf() || indexedCell.getLevel() != cell.getLevel()) { return null; @@ -199,17 +204,15 @@ public class ContainsPrefixTreeQuery extends AbstractPrefixTreeQuery { } private boolean nextTerm() throws IOException { - if ((thisTerm = termsEnum.next()) == null) - return false; + if ((thisTerm = termsEnum.next()) == null) return false; indexedCell = grid.readCell(thisTerm, indexedCell); return true; } private SmallDocSet union(SmallDocSet aSet, SmallDocSet bSet) { if (bSet != null) { - if (aSet == null) - return bSet; - return aSet.union(bSet);//union is 'or' + if (aSet == null) return bSet; + return aSet.union(bSet); // union is 'or' } return aSet; } @@ -225,19 +228,19 @@ public class ContainsPrefixTreeQuery extends AbstractPrefixTreeQuery { } if (set == null) { int size = termsEnum.docFreq(); - if (size <= 0) - size = 16; + if (size <= 0) size = 16; set = new SmallDocSet(size); } set.set(docid); } return set; } + } // class ContainsVisitor - }//class ContainsVisitor - - /** A hash based mutable set of docIds. If this were Solr code then we might - * use a combination of HashDocSet and SortedIntDocSet instead. */ + /** + * A hash based mutable set of docIds. If this were Solr code then we might use a combination of + * HashDocSet and SortedIntDocSet instead. + */ // TODO use DocIdSetBuilder? private static class SmallDocSet extends DocIdSet implements Bits { @@ -255,8 +258,7 @@ public class ContainsPrefixTreeQuery extends AbstractPrefixTreeQuery { public void set(int index) { intSet.put(index); - if (index > maxInt) - maxInt = index; + if (index > maxInt) maxInt = index; } /** Largest docid. */ @@ -281,10 +283,9 @@ public class ContainsPrefixTreeQuery extends AbstractPrefixTreeQuery { bigger = this; smaller = other; } - //modify bigger + // modify bigger for (int v : smaller.intSet.keys) { - if (v == smaller.intSet.emptyVal) - continue; + if (v == smaller.intSet.emptyVal) continue; bigger.set(v); } return bigger; @@ -292,31 +293,30 @@ public class ContainsPrefixTreeQuery extends AbstractPrefixTreeQuery { @Override public Bits bits() throws IOException { - //if the # of docids is super small, return null since iteration is going + // if the # of docids is super small, return null since iteration is going // to be faster return size() > 4 ? 
this : null; } @Override public DocIdSetIterator iterator() throws IOException { - if (size() == 0) - return null; - //copy the unsorted values to a new array then sort them + if (size() == 0) return null; + // copy the unsorted values to a new array then sort them int d = 0; final int[] docs = new int[intSet.size()]; for (int v : intSet.keys) { - if (v == intSet.emptyVal) - continue; + if (v == intSet.emptyVal) continue; docs[d++] = v; } assert d == intSet.size(); final int size = d; - //sort them + // sort them Arrays.sort(docs, 0, size); return new DocIdSetIterator() { int idx = -1; + @Override public int docID() { if (idx < 0) { @@ -330,14 +330,13 @@ public class ContainsPrefixTreeQuery extends AbstractPrefixTreeQuery { @Override public int nextDoc() throws IOException { - if (++idx < size) - return docs[idx]; + if (++idx < size) return docs[idx]; return NO_MORE_DOCS; } @Override public int advance(int target) throws IOException { - //for this small set this is likely faster vs. a binary search + // for this small set this is likely faster vs. a binary search // into the sorted array return slowAdvance(target); } @@ -352,11 +351,8 @@ public class ContainsPrefixTreeQuery extends AbstractPrefixTreeQuery { @Override public long ramBytesUsed() { return RamUsageEstimator.alignObjectSize( - RamUsageEstimator.NUM_BYTES_OBJECT_REF - + Integer.BYTES) + RamUsageEstimator.NUM_BYTES_OBJECT_REF + Integer.BYTES) + intSet.ramBytesUsed(); } - - }//class SmallDocSet - + } // class SmallDocSet } diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/HeatmapFacetCounter.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/HeatmapFacetCounter.java index 5402dd59140..0dc32b48bfc 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/HeatmapFacetCounter.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/HeatmapFacetCounter.java @@ -19,7 +19,6 @@ package org.apache.lucene.spatial.prefix; import java.io.IOException; import java.util.HashMap; import java.util.Map; - import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.spatial.prefix.tree.Cell; import org.apache.lucene.spatial.prefix.tree.CellIterator; @@ -33,25 +32,28 @@ import org.locationtech.spatial4j.shape.Shape; import org.locationtech.spatial4j.shape.SpatialRelation; /** - * Computes spatial facets in two dimensions as a grid of numbers. The data is often visualized as a so-called - * "heatmap", hence the name. + * Computes spatial facets in two dimensions as a grid of numbers. The data is often visualized as a + * so-called "heatmap", hence the name. * * @lucene.experimental */ public class HeatmapFacetCounter { - //TODO where should this code live? It could go to PrefixTreeFacetCounter, or maybe here in its own class is fine. + // TODO where should this code live? It could go to PrefixTreeFacetCounter, or maybe here in its + // own class is fine. /** Maximum number of supported rows (or columns). */ public static final int MAX_ROWS_OR_COLUMNS = (int) Math.sqrt(ArrayUtil.MAX_ARRAY_LENGTH); + static { - Math.multiplyExact(MAX_ROWS_OR_COLUMNS, MAX_ROWS_OR_COLUMNS);//will throw if doesn't stay within integer + Math.multiplyExact( + MAX_ROWS_OR_COLUMNS, MAX_ROWS_OR_COLUMNS); // will throw if doesn't stay within integer } /** Response structure */ public static class Heatmap { public final int columns; public final int rows; - public final int[] counts;//in order of 1st column (all rows) then 2nd column (all rows) etc. 
+ public final int[] counts; // in order of 1st column (all rows) then 2nd column (all rows) etc. public final Rectangle region; public Heatmap(int columns, int rows, Rectangle region) { @@ -72,32 +74,40 @@ public class HeatmapFacetCounter { } /** - * Calculates spatial 2D facets (aggregated counts) in a grid, sometimes called a heatmap. - * Facet computation is implemented by navigating the underlying indexed terms efficiently. If you don't know exactly - * what facetLevel to go to for a given input box but you have some sense of how many cells there should be relative - * to the size of the shape, then consider using the logic that {@link org.apache.lucene.spatial.prefix.PrefixTreeStrategy} - * uses when approximating what level to go to when indexing a shape given a distErrPct. + * Calculates spatial 2D facets (aggregated counts) in a grid, sometimes called a heatmap. Facet + * computation is implemented by navigating the underlying indexed terms efficiently. If you don't + * know exactly what facetLevel to go to for a given input box but you have some sense of how many + * cells there should be relative to the size of the shape, then consider using the logic that + * {@link org.apache.lucene.spatial.prefix.PrefixTreeStrategy} uses when approximating what level + * to go to when indexing a shape given a distErrPct. * * @param context the IndexReader's context - * @param topAcceptDocs a Bits to limit counted docs. If null, live docs are counted. - * @param inputShape the shape to gather grid squares for; typically a {@link Rectangle}. - * The actual heatmap area will usually be larger since the cells on the edge that overlap - * are returned. We always return a rectangle of integers even if the inputShape isn't a rectangle - * -- the non-intersecting cells will all be 0. - * If null is given, the entire world is assumed. + * @param topAcceptDocs a Bits to limit counted docs. If null, live docs are counted. + * @param inputShape the shape to gather grid squares for; typically a {@link Rectangle}. The + * actual heatmap area will usually be larger since the cells on the edge that + * overlap are returned. We always return a rectangle of integers even if the inputShape isn't + * a rectangle -- the non-intersecting cells will all be 0. If null is given, the entire world + * is assumed. * @param facetLevel the target depth (detail) of cells. * @param maxCells the maximum number of cells to return. 
If the cells exceed this count, an */ - public static Heatmap calcFacets(PrefixTreeStrategy strategy, IndexReaderContext context, Bits topAcceptDocs, - Shape inputShape, final int facetLevel, int maxCells) throws IOException { + public static Heatmap calcFacets( + PrefixTreeStrategy strategy, + IndexReaderContext context, + Bits topAcceptDocs, + Shape inputShape, + final int facetLevel, + int maxCells) + throws IOException { if (maxCells > (MAX_ROWS_OR_COLUMNS * MAX_ROWS_OR_COLUMNS)) { - throw new IllegalArgumentException("maxCells (" + maxCells + ") should be <= " + MAX_ROWS_OR_COLUMNS); + throw new IllegalArgumentException( + "maxCells (" + maxCells + ") should be <= " + MAX_ROWS_OR_COLUMNS); } if (inputShape == null) { inputShape = strategy.getSpatialContext().getWorldBounds(); } final Rectangle inputRect = inputShape.getBoundingBox(); - //First get the rect of the cell at the bottom-left at depth facetLevel + // First get the rect of the cell at the bottom-left at depth facetLevel final SpatialPrefixTree grid = strategy.getGrid(); final SpatialContext ctx = grid.getSpatialContext(); final Point cornerPt = ctx.getShapeFactory().pointXY(inputRect.getMinX(), inputRect.getMinY()); @@ -106,91 +116,116 @@ public class HeatmapFacetCounter { while (cellIterator.hasNext()) { cornerCell = cellIterator.next(); } - assert cornerCell != null && cornerCell.getLevel() == facetLevel : "Cell not at target level: " + cornerCell; + assert cornerCell != null && cornerCell.getLevel() == facetLevel + : "Cell not at target level: " + cornerCell; final Rectangle cornerRect = (Rectangle) cornerCell.getShape(); assert cornerRect.hasArea(); - //Now calculate the number of columns and rows necessary to cover the inputRect - double heatMinX = cornerRect.getMinX();//note: we might change this below... + // Now calculate the number of columns and rows necessary to cover the inputRect + double heatMinX = cornerRect.getMinX(); // note: we might change this below... final double cellWidth = cornerRect.getWidth(); final Rectangle worldRect = ctx.getWorldBounds(); - final int columns = calcRowsOrCols(cellWidth, heatMinX, inputRect.getWidth(), inputRect.getMinX(), worldRect.getWidth()); + final int columns = + calcRowsOrCols( + cellWidth, heatMinX, inputRect.getWidth(), inputRect.getMinX(), worldRect.getWidth()); final double heatMinY = cornerRect.getMinY(); final double cellHeight = cornerRect.getHeight(); - final int rows = calcRowsOrCols(cellHeight, heatMinY, inputRect.getHeight(), inputRect.getMinY(), worldRect.getHeight()); + final int rows = + calcRowsOrCols( + cellHeight, + heatMinY, + inputRect.getHeight(), + inputRect.getMinY(), + worldRect.getHeight()); assert rows > 0 && columns > 0; if (columns > MAX_ROWS_OR_COLUMNS || rows > MAX_ROWS_OR_COLUMNS || columns * rows > maxCells) { throw new IllegalArgumentException( - "Too many cells (" + columns + " x " + rows + ") for level " + facetLevel + " shape " + inputRect); + "Too many cells (" + + columns + + " x " + + rows + + ") for level " + + facetLevel + + " shape " + + inputRect); } - //Create resulting heatmap bounding rectangle & Heatmap object. + // Create resulting heatmap bounding rectangle & Heatmap object. 
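Before the assembly code that follows, an aside on the Heatmap being built here: its counts array is column-major, so all rows of column 0 come first, then column 1, and so on. A small assumed helper, not part of this patch, spelling out the column * rows + row addressing that the facet visitor below relies on:

// Assumed helper: returns the facet count for cell (column, row) of a Heatmap.
static int cellCount(HeatmapFacetCounter.Heatmap heatmap, int column, int row) {
  if (column < 0 || column >= heatmap.columns || row < 0 || row >= heatmap.rows) {
    throw new IllegalArgumentException("cell (" + column + ", " + row + ") out of range");
  }
  return heatmap.counts[column * heatmap.rows + row]; // column-major layout
}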
final double halfCellWidth = cellWidth / 2.0; // if X world-wraps, use world bounds' range if (columns * cellWidth + halfCellWidth > worldRect.getWidth()) { heatMinX = worldRect.getMinX(); } double heatMaxX = heatMinX + columns * cellWidth; - if (Math.abs(heatMaxX - worldRect.getMaxX()) < halfCellWidth) {//numeric conditioning issue + if (Math.abs(heatMaxX - worldRect.getMaxX()) < halfCellWidth) { // numeric conditioning issue heatMaxX = worldRect.getMaxX(); - } else if (heatMaxX > worldRect.getMaxX()) {//wraps dateline (won't happen if !geo) - heatMaxX = heatMaxX - worldRect.getMaxX() + worldRect.getMinX(); + } else if (heatMaxX > worldRect.getMaxX()) { // wraps dateline (won't happen if !geo) + heatMaxX = heatMaxX - worldRect.getMaxX() + worldRect.getMinX(); } final double halfCellHeight = cellHeight / 2.0; double heatMaxY = heatMinY + rows * cellHeight; - if (Math.abs(heatMaxY - worldRect.getMaxY()) < halfCellHeight) {//numeric conditioning issue + if (Math.abs(heatMaxY - worldRect.getMaxY()) < halfCellHeight) { // numeric conditioning issue heatMaxY = worldRect.getMaxY(); } - final Heatmap heatmap = new Heatmap(columns, rows, ctx.getShapeFactory().rect(heatMinX, heatMaxX, heatMinY, heatMaxY)); + final Heatmap heatmap = + new Heatmap( + columns, rows, ctx.getShapeFactory().rect(heatMinX, heatMaxX, heatMinY, heatMaxY)); if (topAcceptDocs instanceof Bits.MatchNoBits) { return heatmap; // short-circuit } - //All ancestor cell counts (of facetLevel) will be captured during facet visiting and applied later. If the data is + // All ancestor cell counts (of facetLevel) will be captured during facet visiting and applied + // later. If the data is // just points then there won't be any ancestors. - //Facet count of ancestors covering all of the heatmap: - int[] allCellsAncestorCount = new int[1]; // single-element array so it can be accumulated in the inner class - //All other ancestors: - Map ancestors = new HashMap<>(); + // Facet count of ancestors covering all of the heatmap: + // single-element array so it can be accumulated in the inner class + int[] allCellsAncestorCount = new int[1]; + // All other ancestors: + Map ancestors = new HashMap<>(); - //Now lets count some facets! - PrefixTreeFacetCounter.compute(strategy, context, topAcceptDocs, inputShape, facetLevel, + // Now lets count some facets! 
+ PrefixTreeFacetCounter.compute( + strategy, + context, + topAcceptDocs, + inputShape, + facetLevel, new PrefixTreeFacetCounter.FacetVisitor() { - @Override - public void visit(Cell cell, int count) { - final double heatMinX = heatmap.region.getMinX(); - final Rectangle rect = (Rectangle) cell.getShape(); - if (cell.getLevel() == facetLevel) {//heatmap level; count it directly - //convert to col & row - int column; - if (rect.getMinX() >= heatMinX) { - column = (int) Math.round((rect.getMinX() - heatMinX) / cellWidth); - } else { // due to dateline wrap - column = (int) Math.round((rect.getMinX() + 360 - heatMinX) / cellWidth); - } - int row = (int) Math.round((rect.getMinY() - heatMinY) / cellHeight); - //note: unfortunately, it's possible for us to visit adjacent cells to the heatmap (if the SpatialPrefixTree - // allows adjacent cells to overlap on the seam), so we need to skip them - if (column < 0 || column >= heatmap.columns || row < 0 || row >= heatmap.rows) { - return; - } - // increment - heatmap.counts[column * heatmap.rows + row] += count; + @Override + public void visit(Cell cell, int count) { + final double heatMinX = heatmap.region.getMinX(); + final Rectangle rect = (Rectangle) cell.getShape(); + if (cell.getLevel() == facetLevel) { // heatmap level; count it directly + // convert to col & row + int column; + if (rect.getMinX() >= heatMinX) { + column = (int) Math.round((rect.getMinX() - heatMinX) / cellWidth); + } else { // due to dateline wrap + column = (int) Math.round((rect.getMinX() + 360 - heatMinX) / cellWidth); + } + int row = (int) Math.round((rect.getMinY() - heatMinY) / cellHeight); + // note: unfortunately, it's possible for us to visit adjacent cells to the heatmap + // (if the SpatialPrefixTree + // allows adjacent cells to overlap on the seam), so we need to skip them + if (column < 0 || column >= heatmap.columns || row < 0 || row >= heatmap.rows) { + return; + } + // increment + heatmap.counts[column * heatmap.rows + row] += count; - } else if (rect.relate(heatmap.region) == SpatialRelation.CONTAINS) {//containing ancestor - allCellsAncestorCount[0] += count; - - } else { // ancestor - // note: not particularly efficient (possible put twice, and Integer wrapper); oh well - Integer existingCount = ancestors.put(rect, count); - if (existingCount != null) { - ancestors.put(rect, count + existingCount); + } else if (rect.relate(heatmap.region) == SpatialRelation.CONTAINS) { + allCellsAncestorCount[0] += count; + } else { // ancestor + // note: not particularly efficient (possible put twice, and Integer wrapper); oh well + Integer existingCount = ancestors.put(rect, count); + if (existingCount != null) { + ancestors.put(rect, count + existingCount); + } + } } - } - } - }); + }); - //Update the heatmap counts with ancestor counts + // Update the heatmap counts with ancestor counts // Apply allCellsAncestorCount if (allCellsAncestorCount[0] > 0) { @@ -200,41 +235,49 @@ public class HeatmapFacetCounter { } // Apply ancestors - // note: This approach isn't optimized for a ton of ancestor cells. We'll potentially increment the same cells - // multiple times in separate passes if any ancestors overlap. IF this poses a problem, we could optimize it - // with additional complication by keeping track of intervals in a sorted tree structure (possible TreeMap/Set) + // note: This approach isn't optimized for a ton of ancestor cells. We'll potentially increment + // the same cells + // multiple times in separate passes if any ancestors overlap. 
IF this poses a problem, we + // could optimize it + // with additional complication by keeping track of intervals in a sorted tree structure + // (possible TreeMap/Set) // and iterate them cleverly such that we just make one pass at this stage. - int[] pair = new int[2];//output of intersectInterval + int[] pair = new int[2]; // output of intersectInterval for (Map.Entry entry : ancestors.entrySet()) { Rectangle rect = entry.getKey(); // from a cell (thus doesn't cross DL) final int count = entry.getValue(); - //note: we approach this in a way that eliminates int overflow/underflow (think huge cell, tiny heatmap) + // note: we approach this in a way that eliminates int overflow/underflow (think huge cell, + // tiny heatmap) intersectInterval(heatMinY, heatMaxY, cellHeight, rows, rect.getMinY(), rect.getMaxY(), pair); final int startRow = pair[0]; final int endRow = pair[1]; if (!heatmap.region.getCrossesDateLine()) { - intersectInterval(heatMinX, heatMaxX, cellWidth, columns, rect.getMinX(), rect.getMaxX(), pair); + intersectInterval( + heatMinX, heatMaxX, cellWidth, columns, rect.getMinX(), rect.getMaxX(), pair); final int startCol = pair[0]; final int endCol = pair[1]; incrementRange(heatmap, startCol, endCol, startRow, endRow, count); } else { - // note: the cell rect might intersect 2 disjoint parts of the heatmap, so we do the left & right separately + // note: the cell rect might intersect 2 disjoint parts of the heatmap, so we do the left & + // right separately final int leftColumns = (int) Math.round((180 - heatMinX) / cellWidth); final int rightColumns = heatmap.columns - leftColumns; - //left half of dateline: + // left half of dateline: if (rect.getMaxX() > heatMinX) { - intersectInterval(heatMinX, 180, cellWidth, leftColumns, rect.getMinX(), rect.getMaxX(), pair); + intersectInterval( + heatMinX, 180, cellWidth, leftColumns, rect.getMinX(), rect.getMaxX(), pair); final int startCol = pair[0]; final int endCol = pair[1]; incrementRange(heatmap, startCol, endCol, startRow, endRow, count); } - //right half of dateline + // right half of dateline if (rect.getMinX() < heatMaxX) { - intersectInterval(-180, heatMaxX, cellWidth, rightColumns, rect.getMinX(), rect.getMaxX(), pair); + intersectInterval( + -180, heatMaxX, cellWidth, rightColumns, rect.getMinX(), rect.getMaxX(), pair); final int startCol = pair[0] + leftColumns; final int endCol = pair[1] + leftColumns; incrementRange(heatmap, startCol, endCol, startRow, endRow, count); @@ -245,11 +288,16 @@ public class HeatmapFacetCounter { return heatmap; } - private static void intersectInterval(double heatMin, double heatMax, double heatCellLen, int numCells, - double cellMin, double cellMax, - int[] out) { + private static void intersectInterval( + double heatMin, + double heatMax, + double heatCellLen, + int numCells, + double cellMin, + double cellMax, + int[] out) { assert heatMin < heatMax && cellMin < cellMax; - //precondition: we know there's an intersection + // precondition: we know there's an intersection if (heatMin >= cellMin) { out[0] = 0; } else { @@ -262,23 +310,24 @@ public class HeatmapFacetCounter { } } - private static void incrementRange(Heatmap heatmap, int startColumn, int endColumn, int startRow, int endRow, - int count) { - //startColumn & startRow are not necessarily within the heatmap range; likewise numRows/columns may overlap. 
+ private static void incrementRange(
+     Heatmap heatmap, int startColumn, int endColumn, int startRow, int endRow, int count) {
+   // startColumn & startRow are not necessarily within the heatmap range; likewise numRows/columns
+   // may overlap.
    if (startColumn < 0) {
      endColumn += startColumn;
      startColumn = 0;
    }
-    endColumn = Math.min(heatmap.columns-1, endColumn);
+    endColumn = Math.min(heatmap.columns - 1, endColumn);
    if (startRow < 0) {
      endRow += startRow;
      startRow = 0;
    }
-    endRow = Math.min(heatmap.rows-1, endRow);
+    endRow = Math.min(heatmap.rows - 1, endRow);
    if (startRow > endRow) {
-      return;//short-circuit
+      return; // short-circuit
    }
    for (int c = startColumn; c <= endColumn; c++) {
      int cBase = c * heatmap.rows;
@@ -289,27 +338,27 @@
  }

  /** Computes the number of intervals (rows or columns) to cover a range given the sizes. */
-  private static int calcRowsOrCols(double cellRange, double cellMin, double requestRange, double requestMin,
-                                    double worldRange) {
+  private static int calcRowsOrCols(
+      double cellRange, double cellMin, double requestRange, double requestMin, double worldRange) {
    assert requestMin >= cellMin;
-    //Ideally this wouldn't be so complicated but we concern ourselves with overflow and edge cases
+    // Ideally this wouldn't be so complicated but we concern ourselves with overflow and
+    // edge cases
    double range = (requestRange + (requestMin - cellMin));
    if (range == 0) {
      return 1;
    }
    final double intervals = Math.ceil(range / cellRange);
    if (intervals > Integer.MAX_VALUE) {
-      return Integer.MAX_VALUE;//should result in an error soon (exceed thresholds)
+      return Integer.MAX_VALUE; // should result in an error soon (exceed thresholds)
    }
    // ensures we don't have more intervals than world bounds (possibly due to rounding/edge issue)
    final long intervalsMax = Math.round(worldRange / cellRange);
    if (intervalsMax > Integer.MAX_VALUE) {
-      //just return intervals
+      // just return intervals
      return (int) intervals;
    }
-    return Math.min((int)intervalsMax, (int)intervals);
+    return Math.min((int) intervalsMax, (int) intervals);
  }

-  private HeatmapFacetCounter() {
-  }
+  private HeatmapFacetCounter() {}
}
diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/IntersectsPrefixTreeQuery.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/IntersectsPrefixTreeQuery.java
index c1d76dcb163..966080037a9 100644
--- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/IntersectsPrefixTreeQuery.java
+++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/IntersectsPrefixTreeQuery.java
@@ -17,7 +17,6 @@
 package org.apache.lucene.spatial.prefix;

 import java.io.IOException;
-
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.search.DocIdSet;
 import org.apache.lucene.spatial.prefix.tree.Cell;
@@ -27,16 +26,19 @@
 import org.locationtech.spatial4j.shape.Shape;
 import org.locationtech.spatial4j.shape.SpatialRelation;

 /**
- * A Query matching documents that have an {@link SpatialRelation#INTERSECTS}
- * (i.e. not DISJOINT) relationship with a provided query shape.
+ * A Query matching documents that have an {@link SpatialRelation#INTERSECTS} (i.e. not DISJOINT)
+ * relationship with a provided query shape.
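For orientation, the heatmap API whose internals were reformatted above can be driven roughly as follows. This is a minimal sketch, not taken from the patch: the geohash grid configuration, the "geo" field name, and the `reader` variable are assumptions, and the checked IOException is elided.

    // Count indexed points into grid cells at facetLevel 4, over the whole world.
    SpatialContext ctx = SpatialContext.GEO;
    SpatialPrefixTree grid = new GeohashPrefixTree(ctx, 11); // 11 levels is an arbitrary choice
    PrefixTreeStrategy strategy = new RecursivePrefixTreeStrategy(grid, "geo");
    HeatmapFacetCounter.Heatmap heatmap =
        HeatmapFacetCounter.calcFacets(
            strategy,
            reader.getContext(),  // IndexReaderContext of an open IndexReader
            null,                 // topAcceptDocs: null means count live docs
            ctx.getWorldBounds(), // inputShape the heatmap should cover
            4,                    // facetLevel: grid level of the heatmap cells
            10_000);              // maxCells: guard against requesting a huge grid
    // As the counting code above shows, counts is laid out column-major:
    int firstCell = heatmap.counts[0 * heatmap.rows + 0]; // column 0, row 0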
* * @lucene.internal */ public class IntersectsPrefixTreeQuery extends AbstractVisitingPrefixTreeQuery { - public IntersectsPrefixTreeQuery(Shape queryShape, String fieldName, - SpatialPrefixTree grid, int detailLevel, - int prefixGridScanLevel) { + public IntersectsPrefixTreeQuery( + Shape queryShape, + String fieldName, + SpatialPrefixTree grid, + int detailLevel, + int prefixGridScanLevel) { super(queryShape, fieldName, grid, detailLevel, prefixGridScanLevel); } @@ -77,18 +79,17 @@ public class IntersectsPrefixTreeQuery extends AbstractVisitingPrefixTreeQuery { protected void visitLeaf(Cell cell) throws IOException { collectDocs(results); } - }.getDocIdSet(); } @Override public String toString(String field) { - return getClass().getSimpleName() + "(" + - "fieldName=" + fieldName + "," + - "queryShape=" + queryShape + "," + - "detailLevel=" + detailLevel + "," + - "prefixGridScanLevel=" + prefixGridScanLevel + - ")"; + return getClass().getSimpleName() + + "(" + + ("fieldName=" + fieldName + ",") + + ("queryShape=" + queryShape + ",") + + ("detailLevel=" + detailLevel + ",") + + ("prefixGridScanLevel=" + prefixGridScanLevel) + + ")"; } - } diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/NumberRangePrefixTreeStrategy.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/NumberRangePrefixTreeStrategy.java index 3d4c7a9b1f4..79313a23e9f 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/NumberRangePrefixTreeStrategy.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/NumberRangePrefixTreeStrategy.java @@ -16,12 +16,13 @@ */ package org.apache.lucene.spatial.prefix; +import static org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree.UnitNRShape; + import java.io.IOException; import java.util.Arrays; import java.util.Map; import java.util.SortedMap; import java.util.TreeMap; - import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.search.DoubleValuesSource; import org.apache.lucene.spatial.prefix.tree.Cell; @@ -30,13 +31,13 @@ import org.apache.lucene.util.Bits; import org.locationtech.spatial4j.shape.Point; import org.locationtech.spatial4j.shape.Shape; -import static org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree.UnitNRShape; - -/** A PrefixTree based on Number/Date ranges. This isn't very "spatial" on the surface (to the user) but - * it's implemented using spatial so that's why it's here extending a SpatialStrategy. When using this class, you will - * use various utility methods on the prefix tree implementation to convert objects/strings to/from shapes. +/** + * A PrefixTree based on Number/Date ranges. This isn't very "spatial" on the surface (to the user) + * but it's implemented using spatial so that's why it's here extending a SpatialStrategy. When + * using this class, you will use various utility methods on the prefix tree implementation to + * convert objects/strings to/from shapes. * - * To use with dates, pass in {@link org.apache.lucene.spatial.prefix.tree.DateRangePrefixTree}. + *
<p>
    To use with dates, pass in {@link org.apache.lucene.spatial.prefix.tree.DateRangePrefixTree}. * * @lucene.experimental */ @@ -45,7 +46,7 @@ public class NumberRangePrefixTreeStrategy extends RecursivePrefixTreeStrategy { public NumberRangePrefixTreeStrategy(NumberRangePrefixTree prefixTree, String fieldName) { super(prefixTree, fieldName); setPruneLeafyBranches(false); - setPrefixGridScanLevel(prefixTree.getMaxLevels()-2);//user might want to change, however + setPrefixGridScanLevel(prefixTree.getMaxLevels() - 2); // user might want to change, however setPointsOnly(false); setDistErrPct(0); } @@ -58,7 +59,7 @@ public class NumberRangePrefixTreeStrategy extends RecursivePrefixTreeStrategy { @Override protected boolean isPointShape(Shape shape) { if (shape instanceof NumberRangePrefixTree.UnitNRShape) { - return ((NumberRangePrefixTree.UnitNRShape)shape).getLevel() == grid.getMaxLevels(); + return ((NumberRangePrefixTree.UnitNRShape) shape).getLevel() == grid.getMaxLevels(); } else { return false; } @@ -68,7 +69,7 @@ public class NumberRangePrefixTreeStrategy extends RecursivePrefixTreeStrategy { protected boolean isGridAlignedShape(Shape shape) { // any UnitNRShape other than the world is a single cell/term if (shape instanceof NumberRangePrefixTree.UnitNRShape) { - return ((NumberRangePrefixTree.UnitNRShape)shape).getLevel() > 0; + return ((NumberRangePrefixTree.UnitNRShape) shape).getLevel() > 0; } else { return false; } @@ -80,12 +81,15 @@ public class NumberRangePrefixTreeStrategy extends RecursivePrefixTreeStrategy { throw new UnsupportedOperationException(); } - /** Calculates facets between {@code start} and {@code end} to a detail level one greater than that provided by the - * arguments. For example providing March to October of 2014 would return facets to the day level of those months. - * This is just a convenience method. + /** + * Calculates facets between {@code start} and {@code end} to a detail level one greater than that + * provided by the arguments. For example providing March to October of 2014 would return facets + * to the day level of those months. This is just a convenience method. + * * @see #calcFacets(IndexReaderContext, Bits, Shape, int) */ - public Facets calcFacets(IndexReaderContext context, Bits topAcceptDocs, UnitNRShape start, UnitNRShape end) + public Facets calcFacets( + IndexReaderContext context, Bits topAcceptDocs, UnitNRShape start, UnitNRShape end) throws IOException { Shape facetRange = getGrid().toRangeShape(start, end); int detailLevel = Math.max(start.getLevel(), end.getLevel()) + 1; @@ -93,37 +97,44 @@ public class NumberRangePrefixTreeStrategy extends RecursivePrefixTreeStrategy { } /** - * Calculates facets (aggregated counts) given a range shape (start-end span) and a level, which specifies the detail. - * To get the level of an existing shape, say a Calendar, call - * {@link org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree#toUnitShape(Object)} then call + * Calculates facets (aggregated counts) given a range shape (start-end span) and a level, which + * specifies the detail. To get the level of an existing shape, say a Calendar, call {@link + * org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree#toUnitShape(Object)} then call * {@link org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree.UnitNRShape#getLevel()}. * Facet computation is implemented by navigating the underlying indexed terms efficiently. 
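As a usage sketch of the faceting described above (again assumption-laden: the "dateRange" field, an open `reader`, and parse/IO exceptions are elided), this computes facets for March through October of 2014 at day detail:

    DateRangePrefixTree tree = DateRangePrefixTree.INSTANCE;
    NumberRangePrefixTreeStrategy strategy = new NumberRangePrefixTreeStrategy(tree, "dateRange");
    // toUnitShape() turns a Calendar into a UnitNRShape at the Calendar's precision level.
    UnitNRShape start = tree.toUnitShape(tree.parseCalendar("2014-03"));
    UnitNRShape end = tree.toUnitShape(tree.parseCalendar("2014-10"));
    // Facets come back one level deeper than start/end, i.e. per day here.
    NumberRangePrefixTreeStrategy.Facets facets =
        strategy.calcFacets(reader.getContext(), null /* live docs */, start, end);
    System.out.println(facets); // toString() summarizes detailLevel, topLeaves and parents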
*/ - public Facets calcFacets(IndexReaderContext context, Bits topAcceptDocs, Shape facetRange, final int level) + public Facets calcFacets( + IndexReaderContext context, Bits topAcceptDocs, Shape facetRange, final int level) throws IOException { final Facets facets = new Facets(level); - PrefixTreeFacetCounter.compute(this, context, topAcceptDocs, facetRange, level, + PrefixTreeFacetCounter.compute( + this, + context, + topAcceptDocs, + facetRange, + level, new PrefixTreeFacetCounter.FacetVisitor() { Facets.FacetParentVal parentFacet; UnitNRShape parentShape; @Override public void visit(Cell cell, int count) { - if (cell.getLevel() < level - 1) {//some ancestor of parent facet level, direct or distant - parentFacet = null;//reset - parentShape = null;//reset + if (cell.getLevel() + < level - 1) { // some ancestor of parent facet level, direct or distant + parentFacet = null; // reset + parentShape = null; // reset facets.topLeaves += count; - } else if (cell.getLevel() == level - 1) {//parent - //set up FacetParentVal + } else if (cell.getLevel() == level - 1) { // parent + // set up FacetParentVal setupParent((UnitNRShape) cell.getShape()); parentFacet.parentLeaves += count; - } else {//at facet level + } else { // at facet level UnitNRShape unitShape = (UnitNRShape) cell.getShape(); UnitNRShape unitShapeParent = unitShape.getShapeAtLevel(unitShape.getLevel() - 1); if (parentFacet == null || !parentShape.equals(unitShapeParent)) { setupParent(unitShapeParent); } - //lazy init childCounts + // lazy init childCounts if (parentFacet.childCounts == null) { parentFacet.childCounts = new int[parentFacet.childCountsLen]; } @@ -133,9 +144,9 @@ public class NumberRangePrefixTreeStrategy extends RecursivePrefixTreeStrategy { private void setupParent(UnitNRShape unitShape) { parentShape = unitShape.clone(); - //Look for existing parentFacet (from previous segment), or create anew if needed + // Look for existing parentFacet (from previous segment), or create anew if needed parentFacet = facets.parents.get(parentShape); - if (parentFacet == null) {//didn't find one; make a new one + if (parentFacet == null) { // didn't find one; make a new one parentFacet = new Facets.FacetParentVal(); parentFacet.childCountsLen = getGrid().getNumSubCells(parentShape); facets.parents.put(parentShape, parentFacet); @@ -147,7 +158,7 @@ public class NumberRangePrefixTreeStrategy extends RecursivePrefixTreeStrategy { /** Facet response information */ public static class Facets { - //TODO consider a variable-level structure -- more general purpose. + // TODO consider a variable-level structure -- more general purpose. public Facets(int detailLevel) { this.detailLevel = detailLevel; @@ -157,41 +168,56 @@ public class NumberRangePrefixTreeStrategy extends RecursivePrefixTreeStrategy { public final int detailLevel; /** - * The count of documents with ranges that completely spanned the parents of the detail level. In more technical - * terms, this is the count of leaf cells 2 up and higher from the bottom. Usually you only care about counts at - * detailLevel, and so you will add this number to all other counts below, including to omitted/implied children - * counts of 0. If there are no indexed ranges (just instances, i.e. fully specified dates) then this value will - * always be 0. + * The count of documents with ranges that completely spanned the parents of the detail level. + * In more technical terms, this is the count of leaf cells 2 up and higher from the bottom. 
+ * Usually you only care about counts at detailLevel, and so you will add this number to all
+ * other counts below, including to omitted/implied children counts of 0. If there are no
+ * indexed ranges (just instances, i.e. fully specified dates) then this value will always be 0.
   */
  public int topLeaves;

-  /** Holds all the {@link FacetParentVal} instances in order of the key. This is sparse; there won't be an
-   * instance if its count and children are all 0. The keys are {@link org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree.UnitNRShape} shapes, which can be
-   * converted back to the original Object (i.e. a Calendar) via
-   * {@link NumberRangePrefixTree#toObject(org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree.UnitNRShape)}. */
-  public final SortedMap<UnitNRShape, FacetParentVal> parents = new TreeMap<>();
+  /**
+   * Holds all the {@link FacetParentVal} instances in order of the key. This is sparse; there
+   * won't be an instance if its count and children are all 0. The keys are {@link
+   * org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree.UnitNRShape} shapes, which can be
+   * converted back to the original Object (i.e. a Calendar) via {@link
+   * NumberRangePrefixTree#toObject(org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree.UnitNRShape)}.
+   */
+  public final SortedMap<UnitNRShape, FacetParentVal> parents = new TreeMap<>();

  /** Holds a block of detailLevel counts aggregated to their parent level. */
  public static class FacetParentVal {

-    /** The count of ranges that span all of the childCounts. In more technical terms, this is the number of leaf
-     * cells found at this parent. Treat this like {@link Facets#topLeaves}. */
+    /**
+     * The count of ranges that span all of the childCounts. In more technical terms, this is the
+     * number of leaf cells found at this parent. Treat this like {@link Facets#topLeaves}.
+     */
    public int parentLeaves;

-    /** The length of {@link #childCounts}. If childCounts is not null then this is childCounts.length, otherwise it
-     * says how long it would have been if it weren't null. */
+    /**
+     * The length of {@link #childCounts}. If childCounts is not null then this is
+     * childCounts.length, otherwise it says how long it would have been if it weren't null.
+     */
    public int childCountsLen;

-    /** The detail level counts. It will be null if there are none, and thus they are assumed 0. Most apps, when
-     * presenting the information, will add {@link #topLeaves} and {@link #parentLeaves} to each count. */
+    /**
+     * The detail level counts. It will be null if there are none, and thus they are assumed 0.
+     * Most apps, when presenting the information, will add {@link #topLeaves} and {@link
+     * #parentLeaves} to each count.
+ */ public int[] childCounts; - //assert childCountsLen == childCounts.length + // assert childCountsLen == childCounts.length } @Override public String toString() { StringBuilder buf = new StringBuilder(2048); - buf.append("Facets: level=").append(detailLevel).append(" topLeaves=").append(topLeaves).append(" parentCount=").append(parents.size()); + buf.append("Facets: level=") + .append(detailLevel) + .append(" topLeaves=") + .append(topLeaves) + .append(" parentCount=") + .append(parents.size()); for (Map.Entry entry : parents.entrySet()) { buf.append('\n'); if (buf.length() > 1000) { @@ -207,5 +233,4 @@ public class NumberRangePrefixTreeStrategy extends RecursivePrefixTreeStrategy { return buf.toString(); } } - } diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/PointPrefixTreeFieldCacheProvider.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/PointPrefixTreeFieldCacheProvider.java index f44ca447409..ddafcb8731b 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/PointPrefixTreeFieldCacheProvider.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/PointPrefixTreeFieldCacheProvider.java @@ -16,33 +16,33 @@ */ package org.apache.lucene.spatial.prefix; -import org.locationtech.spatial4j.shape.Point; import org.apache.lucene.spatial.prefix.tree.Cell; import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree; import org.apache.lucene.spatial.util.ShapeFieldCacheProvider; import org.apache.lucene.util.BytesRef; +import org.locationtech.spatial4j.shape.Point; /** - * Implementation of {@link ShapeFieldCacheProvider} designed for {@link PrefixTreeStrategy}s that index points - * (AND ONLY POINTS!). + * Implementation of {@link ShapeFieldCacheProvider} designed for {@link PrefixTreeStrategy}s that + * index points (AND ONLY POINTS!). 
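Per the PrefixTreeStrategy javadoc, the only circumstance in which this cache is used is makeDistanceValueSource, so a hedged sketch of that path (field name, grid depth and coordinates are purely illustrative):

    PrefixTreeStrategy strategy =
        new RecursivePrefixTreeStrategy(new GeohashPrefixTree(SpatialContext.GEO, 11), "geo");
    strategy.setPointsOnly(true); // matches the cache's points-only constraint
    Point center = SpatialContext.GEO.getShapeFactory().pointXY(-73.98, 40.75); // lon, lat
    DoubleValuesSource distances = strategy.makeDistanceValueSource(center, 1.0);
    Sort byDistance = new Sort(distances.getSortField(false)); // ascending distance

As the javadoc warns, the cache is simple: it neither scales to large numbers of points nor is it real-time-search friendly.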
* * @lucene.internal */ public class PointPrefixTreeFieldCacheProvider extends ShapeFieldCacheProvider { private final SpatialPrefixTree grid; - private Cell scanCell;//re-used in readShape to save GC + private Cell scanCell; // re-used in readShape to save GC - public PointPrefixTreeFieldCacheProvider(SpatialPrefixTree grid, String shapeField, int defaultSize) { - super( shapeField, defaultSize ); + public PointPrefixTreeFieldCacheProvider( + SpatialPrefixTree grid, String shapeField, int defaultSize) { + super(shapeField, defaultSize); this.grid = grid; } @Override protected Point readShape(BytesRef term) { scanCell = grid.readCell(term, scanCell); - if (scanCell.getLevel() == grid.getMaxLevels()) - return scanCell.getShape().getCenter(); + if (scanCell.getLevel() == grid.getMaxLevels()) return scanCell.getShape().getCenter(); return null; } } diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/PrefixTreeFacetCounter.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/PrefixTreeFacetCounter.java index b3b82db35c6..23753d17faf 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/PrefixTreeFacetCounter.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/PrefixTreeFacetCounter.java @@ -17,8 +17,6 @@ package org.apache.lucene.spatial.prefix; import java.io.IOException; - -import org.locationtech.spatial4j.shape.Shape; import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.PostingsEnum; @@ -27,43 +25,47 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.spatial.prefix.tree.Cell; import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree; import org.apache.lucene.util.Bits; +import org.locationtech.spatial4j.shape.Shape; /** * Computes facets on cells for {@link org.apache.lucene.spatial.prefix.PrefixTreeStrategy}. - *
<p>
    - * NOTE: If for a given document and a given field using - * {@link org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy} - * multiple values are indexed (i.e. multi-valued) and at least one of them is a non-point, then there is a possibility - * of double-counting the document in the facet results. Since each shape is independently turned into grid cells at - * a resolution chosen by the shape's size, it's possible they will be indexed at different resolutions. This means - * the document could be present in BOTH the postings for a cell in both its prefix and leaf variants. To avoid this, - * use a single valued field with a {@link org.locationtech.spatial4j.shape.ShapeCollection} (or WKT equivalent). Or - * calculate a suitable level/distErr to index both and call - * {@link org.apache.lucene.spatial.prefix.PrefixTreeStrategy#createIndexableFields(org.locationtech.spatial4j.shape.Shape, int)} - * with the same value for all shapes for a given document/field. + * + *
<p>
NOTE: If for a given document and a given field using {@link
+ * org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy} multiple values are indexed (i.e.
+ * multi-valued) and at least one of them is a non-point, then there is a possibility of
+ * double-counting the document in the facet results. Since each shape is independently turned into
+ * grid cells at a resolution chosen by the shape's size, it's possible they will be indexed at
+ * different resolutions. This means the document could be present in BOTH the postings for a cell
+ * in both its prefix and leaf variants. To avoid this, use a single valued field with a {@link
+ * org.locationtech.spatial4j.shape.ShapeCollection} (or WKT equivalent). Or calculate a suitable
+ * level/distErr to index both and call {@link
+ * org.apache.lucene.spatial.prefix.PrefixTreeStrategy#createIndexableFields(org.locationtech.spatial4j.shape.Shape,
+ * int)} with the same value for all shapes for a given document/field.
 *
 * @lucene.experimental
 */
public class PrefixTreeFacetCounter {

  /** A callback/visitor of facet counts. */
-  public static abstract class FacetVisitor {
+  public abstract static class FacetVisitor {
    /** Called at the start of the segment, if there is indexed data. */
    public void startOfSegment() {}

-    /** Called for cells with a leaf, or cells at the target facet level. {@code count} is greater than zero.
-     * When an ancestor cell is given with non-zero count, the count can be considered to be added to all cells
-     * below. You won't necessarily get a cell at level {@code facetLevel} if the indexed data is coarser (bigger).
+    /**
+     * Called for cells with a leaf, or cells at the target facet level. {@code count} is greater
+     * than zero. When an ancestor cell is given with non-zero count, the count can be considered to
+     * be added to all cells below. You won't necessarily get a cell at level {@code facetLevel} if
+     * the indexed data is coarser (bigger).
     */
    public abstract void visit(Cell cell, int count);
  }

-  private PrefixTreeFacetCounter() {
-  }
+  private PrefixTreeFacetCounter() {}

  /**
-   * Computes facets using a callback/visitor style design, allowing flexibility for the caller to determine what to do
-   * with each underlying count.
+   * Computes facets using a callback/visitor style design, allowing flexibility for the caller to
+   * determine what to do with each underlying count.
+   *
   * @param strategy the prefix tree strategy (contains the field reference, grid, max levels)
   * @param context the IndexReader's context
   * @param topAcceptDocs a Bits to limit counted docs. If null, live docs are counted.
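A sketch of implementing the visitor above; `strategy`, `reader` and `queryShape` are assumed to exist, and tallying counts by cell token is just one plausible use:

    final Map<String, Integer> counts = new HashMap<>();
    PrefixTreeFacetCounter.compute(
        strategy,
        reader.getContext(),
        null, // topAcceptDocs: null counts live docs, per the javadoc above
        queryShape,
        3, // facetLevel: the maximum cell detail to visit
        new PrefixTreeFacetCounter.FacetVisitor() {
          @Override
          public void visit(Cell cell, int count) {
            // An ancestor cell's count conceptually adds to every cell below it.
            counts.merge(cell.getTokenBytesNoLeaf(null).utf8ToString(), count, Integer::sum);
          }
        });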
@@ -71,27 +73,33 @@ public class PrefixTreeFacetCounter { * @param facetLevel the maximum depth (detail) of faceted cells * @param facetVisitor the visitor/callback to receive the counts */ - public static void compute(PrefixTreeStrategy strategy, IndexReaderContext context, Bits topAcceptDocs, - Shape queryShape, int facetLevel, FacetVisitor facetVisitor) + public static void compute( + PrefixTreeStrategy strategy, + IndexReaderContext context, + Bits topAcceptDocs, + Shape queryShape, + int facetLevel, + FacetVisitor facetVisitor) throws IOException { - //We collect per-leaf + // We collect per-leaf for (final LeafReaderContext leafCtx : context.leaves()) { - //determine leaf acceptDocs Bits + // determine leaf acceptDocs Bits Bits leafAcceptDocs; if (topAcceptDocs == null) { - leafAcceptDocs = leafCtx.reader().getLiveDocs();//filter deleted + leafAcceptDocs = leafCtx.reader().getLiveDocs(); // filter deleted } else { - leafAcceptDocs = new Bits() { - @Override - public boolean get(int index) { - return topAcceptDocs.get(leafCtx.docBase + index); - } + leafAcceptDocs = + new Bits() { + @Override + public boolean get(int index) { + return topAcceptDocs.get(leafCtx.docBase + index); + } - @Override - public int length() { - return leafCtx.reader().maxDoc(); - } - }; + @Override + public int length() { + return leafCtx.reader().maxDoc(); + } + }; } compute(strategy, leafCtx, leafAcceptDocs, queryShape, facetLevel, facetVisitor); @@ -99,31 +107,42 @@ public class PrefixTreeFacetCounter { } /** Lower-level per-leaf segment method. */ - public static void compute(final PrefixTreeStrategy strategy, final LeafReaderContext context, final Bits acceptDocs, - final Shape queryShape, final int facetLevel, final FacetVisitor facetVisitor) + public static void compute( + final PrefixTreeStrategy strategy, + final LeafReaderContext context, + final Bits acceptDocs, + final Shape queryShape, + final int facetLevel, + final FacetVisitor facetVisitor) throws IOException { if (acceptDocs != null && acceptDocs.length() != context.reader().maxDoc()) { throw new IllegalArgumentException( - "acceptDocs bits length " + acceptDocs.length() +" != leaf maxdoc " + context.reader().maxDoc()); + "acceptDocs bits length " + + acceptDocs.length() + + " != leaf maxdoc " + + context.reader().maxDoc()); } final SpatialPrefixTree tree = strategy.getGrid(); - //scanLevel is an optimization knob of AbstractVisitingPrefixTreeFilter. It's unlikely - // another scanLevel would be much faster and it tends to be a risky knob (can help a little, can hurt a ton). + // scanLevel is an optimization knob of AbstractVisitingPrefixTreeFilter. It's unlikely + // another scanLevel would be much faster and it tends to be a risky knob (can help a little, + // can hurt a ton). // TODO use RPT's configured scan level? Do we know better here? Hard to say. final int scanLevel = tree.getMaxLevels(); - //AbstractVisitingPrefixTreeFilter is a Lucene Filter. We don't need a filter; we use it for its great prefix-tree + // AbstractVisitingPrefixTreeFilter is a Lucene Filter. We don't need a filter; we use it for + // its great prefix-tree // traversal code. 
TODO consider refactoring if/when it makes sense (more use cases than this) - new AbstractVisitingPrefixTreeQuery(queryShape, strategy.getFieldName(), tree, facetLevel, scanLevel) { + new AbstractVisitingPrefixTreeQuery( + queryShape, strategy.getFieldName(), tree, facetLevel, scanLevel) { @Override public String toString(String field) { - return "anonPrefixTreeQuery";//un-used + return "anonPrefixTreeQuery"; // un-used } @Override public DocIdSet getDocIdSet(LeafReaderContext contexts) throws IOException { - assert facetLevel == super.detailLevel;//same thing, FYI. (constant) + assert facetLevel == super.detailLevel; // same thing, FYI. (constant) return new VisitorTemplate(context) { @@ -134,7 +153,7 @@ public class PrefixTreeFacetCounter { @Override protected DocIdSet finish() throws IOException { - return null;//unused; + return null; // unused; } @Override @@ -142,15 +161,19 @@ public class PrefixTreeFacetCounter { // At facetLevel... if (cell.getLevel() == facetLevel) { // Count docs - visitLeaf(cell);//we're not a leaf but we treat it as such at facet level - return false;//don't descend further; this is enough detail + visitLeaf(cell); // we're not a leaf but we treat it as such at facet level + return false; // don't descend further; this is enough detail } - // We optimize for discriminating filters (reflected in acceptDocs) and short-circuit if no - // matching docs. We could do this at all levels or never but the closer we get to the facet level, the - // higher the probability this is worthwhile. We do when docFreq == 1 because it's a cheap check, especially + // We optimize for discriminating filters (reflected in acceptDocs) and short-circuit if + // no + // matching docs. We could do this at all levels or never but the closer we get to the + // facet level, the + // higher the probability this is worthwhile. We do when docFreq == 1 because it's a + // cheap check, especially // due to "pulsing" in the codec. - //TODO this opt should move to VisitorTemplate (which contains an optimization TODO to this effect) + // TODO this opt should move to VisitorTemplate (which contains an optimization TODO to + // this effect) if (cell.getLevel() == facetLevel - 1 || termsEnum.docFreq() == 1) { if (!hasDocsAtThisTerm()) { return false; @@ -193,7 +216,6 @@ public class PrefixTreeFacetCounter { } return nextDoc != DocIdSetIterator.NO_MORE_DOCS; } - }.getDocIdSet(); } }.getDocIdSet(context); diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/PrefixTreeStrategy.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/PrefixTreeStrategy.java index 10b24a28870..bbf565c9f2e 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/PrefixTreeStrategy.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/PrefixTreeStrategy.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.Iterator; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; - import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; import org.apache.lucene.index.IndexOptions; @@ -36,43 +35,37 @@ import org.locationtech.spatial4j.shape.Point; import org.locationtech.spatial4j.shape.Shape; /** - * An abstract SpatialStrategy based on {@link SpatialPrefixTree}. The two - * subclasses are {@link RecursivePrefixTreeStrategy} and {@link - * TermQueryPrefixTreeStrategy}. This strategy is most effective as a fast - * approximate spatial search filter. - *
<p>
- * Characteristics:
- * <ul>
+ * An abstract SpatialStrategy based on {@link SpatialPrefixTree}. The two subclasses are {@link
+ * RecursivePrefixTreeStrategy} and {@link TermQueryPrefixTreeStrategy}. This strategy is most
+ * effective as a fast approximate spatial search filter.
+ *
+ * <p>Characteristics:
+ *
+ * <ul>
- * <li>Can index any shape; however only {@link RecursivePrefixTreeStrategy}
- * can effectively search non-point shapes.</li>
- * <li>Can index a variable number of shapes per field value. This strategy
- * can do it via multiple calls to {@link #createIndexableFields(org.locationtech.spatial4j.shape.Shape)}
- * for a document or by giving it some sort of Shape aggregate (e.g. JTS
- * WKT MultiPoint). The shape's boundary is approximated to a grid precision.</li>
- * <li>Can query with any shape. The shape's boundary is approximated to a grid
- * precision.</li>
- * <li>Only {@link org.apache.lucene.spatial.query.SpatialOperation#Intersects}
- * is supported. If only points are indexed then this is effectively equivalent
- * to IsWithin.</li>
- * <li>The strategy supports {@link #makeDistanceValueSource(org.locationtech.spatial4j.shape.Point,double)}
- * even for multi-valued data, so long as the indexed data is all points; the
- * behavior is undefined otherwise. However, it will likely be removed in
- * the future in lieu of using another strategy with a more scalable
- * implementation. Use of this call is the only
- * circumstance in which a cache is used. The cache is simple but as such
- * it doesn't scale to large numbers of points nor is it real-time-search
- * friendly.</li>
+ *   <li>Can index any shape; however only {@link RecursivePrefixTreeStrategy} can effectively
+ *       search non-point shapes.
+ *   <li>Can index a variable number of shapes per field value. This strategy can do it via multiple
+ *       calls to {@link #createIndexableFields(org.locationtech.spatial4j.shape.Shape)} for a
+ *       document or by giving it some sort of Shape aggregate (e.g. JTS WKT MultiPoint). The
+ *       shape's boundary is approximated to a grid precision.
+ *   <li>Can query with any shape. The shape's boundary is approximated to a grid precision.
+ *   <li>Only {@link org.apache.lucene.spatial.query.SpatialOperation#Intersects} is supported. If
+ *       only points are indexed then this is effectively equivalent to IsWithin.
+ *   <li>The strategy supports {@link
+ *       #makeDistanceValueSource(org.locationtech.spatial4j.shape.Point,double)} even for
+ *       multi-valued data, so long as the indexed data is all points; the behavior is undefined
+ *       otherwise. However, it will likely be removed in the future in lieu of using
+ *       another strategy with a more scalable implementation. Use of this call is the only
+ *       circumstance in which a cache is used. The cache is simple but as such it doesn't scale to
+ *       large numbers of points nor is it real-time-search friendly.
 * </ul>
- * <p>
- * Implementation:
- * <p>
- * The {@link SpatialPrefixTree} does most of the work, for example returning
- * a list of terms representing grids of various sizes for a supplied shape.
- * An important
- * configuration item is {@link #setDistErrPct(double)} which balances
- * shape precision against scalability. See those javadocs.
+ *
+ * <p>Implementation:
+ *
+ * <p>
    The {@link SpatialPrefixTree} does most of the work, for example returning a list of terms + * representing grids of various sizes for a supplied shape. An important configuration item is + * {@link #setDistErrPct(double)} which balances shape precision against scalability. See those + * javadocs. * * @lucene.experimental */ @@ -80,8 +73,8 @@ public abstract class PrefixTreeStrategy extends SpatialStrategy { protected final SpatialPrefixTree grid; private final Map provider = new ConcurrentHashMap<>(); protected int defaultFieldValuesArrayLen = 2; - protected double distErrPct = SpatialArgs.DEFAULT_DISTERRPCT;// [ 0 TO 0.5 ] - protected boolean pointsOnly = false;//if true, there are no leaves + protected double distErrPct = SpatialArgs.DEFAULT_DISTERRPCT; // [ 0 TO 0.5 ] + protected boolean pointsOnly = false; // if true, there are no leaves public PrefixTreeStrategy(SpatialPrefixTree grid, String fieldName) { super(grid.getSpatialContext(), fieldName); @@ -94,9 +87,8 @@ public abstract class PrefixTreeStrategy extends SpatialStrategy { /** * A memory hint used by {@link #makeDistanceValueSource(org.locationtech.spatial4j.shape.Point)} - * for how big the initial size of each Document's array should be. The - * default is 2. Set this to slightly more than the default expected number - * of points per document. + * for how big the initial size of each Document's array should be. The default is 2. Set this to + * slightly more than the default expected number of points per document. */ public void setDefaultFieldValuesArrayLen(int defaultFieldValuesArrayLen) { this.defaultFieldValuesArrayLen = defaultFieldValuesArrayLen; @@ -107,13 +99,12 @@ public abstract class PrefixTreeStrategy extends SpatialStrategy { } /** - * The default measure of shape precision affecting shapes at index and query - * times. Points don't use this as they are always indexed at the configured - * maximum precision ({@link org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree#getMaxLevels()}); - * this applies to all other shapes. Specific shapes at index and query time - * can use something different than this default value. If you don't set a - * default then the default is {@link SpatialArgs#DEFAULT_DISTERRPCT} -- - * 2.5%. + * The default measure of shape precision affecting shapes at index and query times. Points don't + * use this as they are always indexed at the configured maximum precision ({@link + * org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree#getMaxLevels()}); this applies to all + * other shapes. Specific shapes at index and query time can use something different than this + * default value. If you don't set a default then the default is {@link + * SpatialArgs#DEFAULT_DISTERRPCT} -- 2.5%. * * @see org.apache.lucene.spatial.query.SpatialArgs#getDistErrPct() */ @@ -125,8 +116,10 @@ public abstract class PrefixTreeStrategy extends SpatialStrategy { return pointsOnly; } - /** True if only indexed points shall be supported. There are no "leafs" in such a case, except those - * at maximum precision. */ + /** + * True if only indexed points shall be supported. There are no "leafs" in such a case, except + * those at maximum precision. + */ public void setPointsOnly(boolean pointsOnly) { this.pointsOnly = pointsOnly; } @@ -138,8 +131,8 @@ public abstract class PrefixTreeStrategy extends SpatialStrategy { } /** - * Turns {@link SpatialPrefixTree#getTreeCellIterator(Shape, int)} into a - * {@link org.apache.lucene.analysis.TokenStream}. 
+ * Turns {@link SpatialPrefixTree#getTreeCellIterator(Shape, int)} into a {@link + * org.apache.lucene.analysis.TokenStream}. */ public Field[] createIndexableFields(Shape shape, double distErr) { int detailLevel = grid.getLevelForDistance(distErr); @@ -147,14 +140,15 @@ public abstract class PrefixTreeStrategy extends SpatialStrategy { } public Field[] createIndexableFields(Shape shape, int detailLevel) { - //TODO re-use TokenStream LUCENE-5776: Subclass Field, put cell iterator there, override tokenStream() + // TODO re-use TokenStream LUCENE-5776: Subclass Field, put cell iterator there, override + // tokenStream() Iterator cells = createCellIteratorToIndex(shape, detailLevel, null); CellToBytesRefIterator cellToBytesRefIterator = newCellToBytesRefIterator(); cellToBytesRefIterator.reset(cells); BytesRefIteratorTokenStream tokenStream = new BytesRefIteratorTokenStream(); tokenStream.setBytesRefIterator(cellToBytesRefIterator); Field field = new Field(getFieldName(), tokenStream, FIELD_TYPE); - return new Field[]{field}; + return new Field[] {field}; } /** Tokenstream for indexing cells of a shape */ @@ -168,7 +162,6 @@ public abstract class PrefixTreeStrategy extends SpatialStrategy { cellToBytesRefIterator.reset(cells); setBytesRefIterator(cellToBytesRefIterator); } - } public ShapeTokenStream tokenStream() { @@ -176,15 +169,17 @@ public abstract class PrefixTreeStrategy extends SpatialStrategy { } protected CellToBytesRefIterator newCellToBytesRefIterator() { - //subclasses could return one that never emits leaves, or does both, or who knows. + // subclasses could return one that never emits leaves, or does both, or who knows. return new CellToBytesRefIterator(); } - protected Iterator createCellIteratorToIndex(Shape shape, int detailLevel, Iterator reuse) { + protected Iterator createCellIteratorToIndex( + Shape shape, int detailLevel, Iterator reuse) { if (pointsOnly && !isPointShape(shape)) { - throw new IllegalArgumentException("pointsOnly is true yet a " + shape.getClass() + " is given for indexing"); + throw new IllegalArgumentException( + "pointsOnly is true yet a " + shape.getClass() + " is given for indexing"); } - return grid.getTreeCellIterator(shape, detailLevel);//TODO should take a re-use iterator + return grid.getTreeCellIterator(shape, detailLevel); // TODO should take a re-use iterator } /* Indexed, tokenized, not stored. */ @@ -199,13 +194,15 @@ public abstract class PrefixTreeStrategy extends SpatialStrategy { @Override public DoubleValuesSource makeDistanceValueSource(Point queryPoint, double multiplier) { - PointPrefixTreeFieldCacheProvider p = provider.get( getFieldName() ); - if( p == null ) { - synchronized (this) {//double checked locking idiom is okay since provider is threadsafe - p = provider.get( getFieldName() ); + PointPrefixTreeFieldCacheProvider p = provider.get(getFieldName()); + if (p == null) { + synchronized (this) { // double checked locking idiom is okay since provider is threadsafe + p = provider.get(getFieldName()); if (p == null) { - p = new PointPrefixTreeFieldCacheProvider(grid, getFieldName(), defaultFieldValuesArrayLen); - provider.put(getFieldName(),p); + p = + new PointPrefixTreeFieldCacheProvider( + grid, getFieldName(), defaultFieldValuesArrayLen); + provider.put(getFieldName(), p); } } } @@ -214,19 +211,27 @@ public abstract class PrefixTreeStrategy extends SpatialStrategy { } /** - * Computes spatial facets in two dimensions as a grid of numbers. The data is often visualized as a so-called - * "heatmap". 
+ * Computes spatial facets in two dimensions as a grid of numbers. The data is often visualized as + * a so-called "heatmap". * - * @see HeatmapFacetCounter#calcFacets(PrefixTreeStrategy, IndexReaderContext, Bits, Shape, int, int) + * @see HeatmapFacetCounter#calcFacets(PrefixTreeStrategy, IndexReaderContext, Bits, Shape, int, + * int) */ - public HeatmapFacetCounter.Heatmap calcFacets(IndexReaderContext context, Bits topAcceptDocs, - Shape inputShape, final int facetLevel, int maxCells) throws IOException { - return HeatmapFacetCounter.calcFacets(this, context, topAcceptDocs, inputShape, facetLevel, maxCells); + public HeatmapFacetCounter.Heatmap calcFacets( + IndexReaderContext context, + Bits topAcceptDocs, + Shape inputShape, + final int facetLevel, + int maxCells) + throws IOException { + return HeatmapFacetCounter.calcFacets( + this, context, topAcceptDocs, inputShape, facetLevel, maxCells); } /** - * Returns true if the {@code shape} is a {@link Point}. For custom spatial contexts, it may make sense to - * have certain other shapes return true. + * Returns true if the {@code shape} is a {@link Point}. For custom spatial contexts, it may make + * sense to have certain other shapes return true. + * * @lucene.experimental */ protected boolean isPointShape(Shape shape) { diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/RecursivePrefixTreeStrategy.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/RecursivePrefixTreeStrategy.java index 6591de96c1a..8fe372feec3 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/RecursivePrefixTreeStrategy.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/RecursivePrefixTreeStrategy.java @@ -19,7 +19,6 @@ package org.apache.lucene.spatial.prefix; import java.util.ArrayList; import java.util.Iterator; import java.util.List; - import org.apache.lucene.index.Term; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; @@ -33,26 +32,25 @@ import org.apache.lucene.spatial.query.UnsupportedSpatialOperation; import org.locationtech.spatial4j.shape.Shape; /** - * A {@link PrefixTreeStrategy} which uses {@link AbstractVisitingPrefixTreeQuery}. - * This strategy has support for searching non-point shapes (note: not tested). - * Even a query shape with distErrPct=0 (fully precise to the grid) should have - * good performance for typical data, unless there is a lot of indexed data - * coincident with the shape's edge. + * A {@link PrefixTreeStrategy} which uses {@link AbstractVisitingPrefixTreeQuery}. This strategy + * has support for searching non-point shapes (note: not tested). Even a query shape with + * distErrPct=0 (fully precise to the grid) should have good performance for typical data, unless + * there is a lot of indexed data coincident with the shape's edge. * * @lucene.experimental */ public class RecursivePrefixTreeStrategy extends PrefixTreeStrategy { /* Future potential optimizations: - Each shape.relate(otherShape) result could be cached since much of the same relations will be invoked when - multiple segments are involved. Do this for "complex" shapes, not cheap ones, and don't cache when disjoint to - bbox because it's a cheap calc. This is one advantage TermQueryPrefixTreeStrategy has over RPT. + Each shape.relate(otherShape) result could be cached since much of the same relations will be invoked when + multiple segments are involved. 
Do this for "complex" shapes, not cheap ones, and don't cache when disjoint to + bbox because it's a cheap calc. This is one advantage TermQueryPrefixTreeStrategy has over RPT. - */ + */ protected int prefixGridScanLevel; - //Formerly known as simplifyIndexedCells. Eventually will be removed. Only compatible with RPT + // Formerly known as simplifyIndexedCells. Eventually will be removed. Only compatible with RPT // and cells implementing CellCanPrune, otherwise ignored. protected boolean pruneLeafyBranches = true; @@ -60,7 +58,8 @@ public class RecursivePrefixTreeStrategy extends PrefixTreeStrategy { public RecursivePrefixTreeStrategy(SpatialPrefixTree grid, String fieldName) { super(grid, fieldName); - prefixGridScanLevel = grid.getMaxLevels() - 4;//TODO this default constant is dependent on the prefix grid size + prefixGridScanLevel = + grid.getMaxLevels() - 4; // TODO this default constant is dependent on the prefix grid size } public int getPrefixGridScanLevel() { @@ -68,14 +67,14 @@ public class RecursivePrefixTreeStrategy extends PrefixTreeStrategy { } /** - * Sets the grid level [1-maxLevels] at which indexed terms are scanned brute-force - * instead of by grid decomposition. By default this is maxLevels - 4. The - * final level, maxLevels, is always scanned. + * Sets the grid level [1-maxLevels] at which indexed terms are scanned brute-force instead of by + * grid decomposition. By default this is maxLevels - 4. The final level, maxLevels, is always + * scanned. * * @param prefixGridScanLevel 1 to maxLevels */ public void setPrefixGridScanLevel(int prefixGridScanLevel) { - //TODO if negative then subtract from maxlevels + // TODO if negative then subtract from maxlevels this.prefixGridScanLevel = prefixGridScanLevel; } @@ -93,15 +92,15 @@ public class RecursivePrefixTreeStrategy extends PrefixTreeStrategy { } /** - * An optional hint affecting non-point shapes and tree cells implementing {@link CellCanPrune}, otherwise - * ignored. - *
<p>
- * It will prune away a complete set of sibling leaves to their parent (recursively), resulting in ~20-50%
- * fewer indexed cells, and consequently that much less disk and that much faster indexing.
- * So if it's a quad tree and all 4 sub-cells are there marked as a leaf, then they will be
- * removed (pruned) and the parent is marked as a leaf instead. This occurs recursively on up. Unfortunately, the
- * current implementation will buffer all cells to do this, so consider disabling for high precision (low distErrPct)
- * shapes. (default=true)
+ * An optional hint affecting non-point shapes and tree cells implementing {@link CellCanPrune},
+ * otherwise ignored.
+ *
+ * <p>It will prune away a complete set of sibling leaves to their parent (recursively), resulting in
+ * ~20-50% fewer indexed cells, and consequently that much less disk and that much faster
+ * indexing. So if it's a quad tree and all 4 sub-cells are there marked as a leaf, then they will
+ * be removed (pruned) and the parent is marked as a leaf instead. This occurs recursively on up.
+ * Unfortunately, the current implementation will buffer all cells to do this, so consider
+ * disabling for high precision (low distErrPct) shapes. (default=true)
  */
  public void setPruneLeafyBranches(boolean pruneLeafyBranches) {
    this.pruneLeafyBranches = pruneLeafyBranches;
  }
@@ -111,19 +110,17 @@
  public String toString() {
    StringBuilder str = new StringBuilder(getClass().getSimpleName()).append('(');
    str.append("SPG:(").append(grid.toString()).append(')');
-    if (pointsOnly)
-      str.append(",pointsOnly");
-    if (pruneLeafyBranches)
-      str.append(",pruneLeafyBranches");
+    if (pointsOnly) str.append(",pointsOnly");
+    if (pruneLeafyBranches) str.append(",pruneLeafyBranches");
    if (prefixGridScanLevel != grid.getMaxLevels() - 4)
      str.append(",prefixGridScanLevel:").append("").append(prefixGridScanLevel);
-    if (!multiOverlappingIndexedShapes)
-      str.append(",!multiOverlappingIndexedShapes");
+    if (!multiOverlappingIndexedShapes) str.append(",!multiOverlappingIndexedShapes");
    return str.append(')').toString();
  }

  @Override
-  protected Iterator<Cell> createCellIteratorToIndex(Shape shape, int detailLevel, Iterator<Cell> reuse) {
+  protected Iterator<Cell> createCellIteratorToIndex(
+      Shape shape, int detailLevel, Iterator<Cell> reuse) {
    if (!pruneLeafyBranches || isGridAlignedShape(shape))
      return super.createCellIteratorToIndex(shape, detailLevel, reuse);
@@ -133,41 +130,44 @@
  }

  /** Returns true if cell was added as a leaf. If it wasn't, it recursively descends. */
-  private boolean recursiveTraverseAndPrune(Cell cell, Shape shape, int detailLevel, List<Cell> result) {
+  private boolean recursiveTraverseAndPrune(
+      Cell cell, Shape shape, int detailLevel, List<Cell> result) {
    if (cell.getLevel() == detailLevel) {
-      cell.setLeaf();//FYI might already be a leaf
+      cell.setLeaf(); // FYI might already be a leaf
    }
    if (cell.isLeaf()) {
      result.add(cell);
      return true;
    }
-    if (cell.getLevel() != 0)
+    if (cell.getLevel() != 0) {
      result.add(cell);
+    }
    int leaves = 0;
    CellIterator subCells = cell.getNextLevelCells(shape);
    while (subCells.hasNext()) {
      Cell subCell = subCells.next();
-      if (recursiveTraverseAndPrune(subCell, shape, detailLevel, result))
+      if (recursiveTraverseAndPrune(subCell, shape, detailLevel, result)) {
        leaves++;
+      }
    }
    if (!(cell instanceof CellCanPrune)) {
-      //Cannot prune so return false
+      // Cannot prune so return false
      return false;
    }
-    //can we prune?
-    if (leaves == ((CellCanPrune)cell).getSubCellsSize() && cell.getLevel() != 0) {
-      //Optimization: substitute the parent as a leaf instead of adding all
+    // can we prune?
+ if (leaves == ((CellCanPrune) cell).getSubCellsSize() && cell.getLevel() != 0) { + // Optimization: substitute the parent as a leaf instead of adding all // children as leaves - //remove the leaves + // remove the leaves do { - result.remove(result.size() - 1);//remove last + result.remove(result.size() - 1); // remove last } while (--leaves > 0); - //add cell as the leaf + // add cell as the leaf cell.setLeaf(); return true; } @@ -189,19 +189,23 @@ public class RecursivePrefixTreeStrategy extends PrefixTreeStrategy { shape, getFieldName(), grid, detailLevel, prefixGridScanLevel); } else if (op == SpatialOperation.IsWithin) { return new WithinPrefixTreeQuery( - shape, getFieldName(), grid, detailLevel, prefixGridScanLevel, - -1);//-1 flag is slower but ensures correct results + shape, + getFieldName(), + grid, + detailLevel, + prefixGridScanLevel, + -1); // -1 flag is slower but ensures correct results } else if (op == SpatialOperation.Contains) { - return new ContainsPrefixTreeQuery(shape, getFieldName(), grid, detailLevel, - multiOverlappingIndexedShapes); + return new ContainsPrefixTreeQuery( + shape, getFieldName(), grid, detailLevel, multiOverlappingIndexedShapes); } throw new UnsupportedSpatialOperation(op); } /** - * A quick check of the shape to see if it is perfectly aligned to a grid. - * Points always are as they are indivisible. It's okay to return false - * if the shape actually is aligned; this is an optimization hint. + * A quick check of the shape to see if it is perfectly aligned to a grid. Points always are as + * they are indivisible. It's okay to return false if the shape actually is aligned; this is an + * optimization hint. */ protected boolean isGridAlignedShape(Shape shape) { return isPointShape(shape); @@ -223,7 +227,8 @@ public class RecursivePrefixTreeStrategy extends PrefixTreeStrategy { assert cell.isLeaf(); return new TermQuery(new Term(getFieldName(), cell.getTokenBytesWithLeaf(null))); } else { - // Well there could be parent cells. But we can reduce the "scan level" which will be slower for a point query. + // Well there could be parent cells. But we can reduce the "scan level" which will be slower + // for a point query. // TODO: AVPTQ will still scan the bottom nonetheless; file an issue to eliminate that return new IntersectsPrefixTreeQuery( gridShape, getFieldName(), grid, getGrid().getMaxLevels(), getGrid().getMaxLevels() + 1); diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/TermQueryPrefixTreeStrategy.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/TermQueryPrefixTreeStrategy.java index 4e37f5d9a78..a960eaf5943 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/TermQueryPrefixTreeStrategy.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/TermQueryPrefixTreeStrategy.java @@ -18,7 +18,6 @@ package org.apache.lucene.spatial.prefix; import java.util.ArrayList; import java.util.List; - import org.apache.lucene.search.Query; import org.apache.lucene.search.TermInSetQuery; import org.apache.lucene.spatial.prefix.tree.Cell; @@ -33,14 +32,14 @@ import org.locationtech.spatial4j.shape.Point; import org.locationtech.spatial4j.shape.Shape; /** - * A basic implementation of {@link PrefixTreeStrategy} using a large - * {@link TermInSetQuery} of all the cells from - * {@link SpatialPrefixTree#getTreeCellIterator(org.locationtech.spatial4j.shape.Shape, int)}. - * It only supports the search of indexed Point shapes. - *
<p>
    - * The precision of query shapes (distErrPct) is an important factor in using - * this Strategy. If the precision is too precise then it will result in many - * terms which will amount to a slower query. + * A basic implementation of {@link PrefixTreeStrategy} using a large {@link TermInSetQuery} of all + * the cells from {@link + * SpatialPrefixTree#getTreeCellIterator(org.locationtech.spatial4j.shape.Shape, int)}. It only + * supports the search of indexed Point shapes. + * + *
    The precision of query shapes (distErrPct) is an important factor in using this Strategy. If + * the precision is too precise then it will result in many terms which will amount to a slower + * query. * * @lucene.experimental */ @@ -54,7 +53,7 @@ public class TermQueryPrefixTreeStrategy extends PrefixTreeStrategy { @Override protected CellToBytesRefIterator newCellToBytesRefIterator() { - //Ensure we don't have leaves, as this strategy doesn't handle them. + // Ensure we don't have leaves, as this strategy doesn't handle them. return new CellToBytesRefIterator() { @Override public BytesRef next() { @@ -69,43 +68,42 @@ public class TermQueryPrefixTreeStrategy extends PrefixTreeStrategy { @Override public Query makeQuery(SpatialArgs args) { final SpatialOperation op = args.getOperation(); - if (op != SpatialOperation.Intersects) - throw new UnsupportedSpatialOperation(op); + if (op != SpatialOperation.Intersects) throw new UnsupportedSpatialOperation(op); Shape shape = args.getShape(); int detailLevel = grid.getLevelForDistance(args.resolveDistErr(ctx, distErrPct)); - //--get a List of BytesRef for each term we want (no parents, no leaf bytes)) + // --get a List of BytesRef for each term we want (no parents, no leaf bytes)) final int GUESS_NUM_TERMS; - if (shape instanceof Point) - GUESS_NUM_TERMS = detailLevel;//perfect guess - else - GUESS_NUM_TERMS = 4096;//should this be a method on SpatialPrefixTree? + if (shape instanceof Point) { + GUESS_NUM_TERMS = detailLevel; // perfect guess + } else { + GUESS_NUM_TERMS = 4096; // should this be a method on SpatialPrefixTree? + } - BytesRefBuilder masterBytes = new BytesRefBuilder();//shared byte array for all terms + BytesRefBuilder masterBytes = new BytesRefBuilder(); // shared byte array for all terms List terms = new ArrayList<>(GUESS_NUM_TERMS); CellIterator cells = grid.getTreeCellIterator(shape, detailLevel); while (cells.hasNext()) { Cell cell = cells.next(); - if (!cell.isLeaf()) - continue; - BytesRef term = cell.getTokenBytesNoLeaf(null);//null because we want a new BytesRef - //We copy out the bytes because it may be re-used across the iteration. This also gives us the opportunity + if (!cell.isLeaf()) continue; + BytesRef term = cell.getTokenBytesNoLeaf(null); // null because we want a new BytesRef + // We copy out the bytes because it may be re-used across the iteration. This also gives us + // the opportunity // to use one contiguous block of memory for the bytes of all terms we need. masterBytes.grow(masterBytes.length() + term.length); masterBytes.append(term); - term.bytes = null;//don't need; will reset later + term.bytes = null; // don't need; will reset later term.offset = masterBytes.length() - term.length; terms.add(term); } - //doing this now because if we did earlier, it's possible the bytes needed to grow() + // doing this now because if we did earlier, it's possible the bytes needed to grow() for (BytesRef byteRef : terms) { byteRef.bytes = masterBytes.bytes(); } - //unfortunately TermsQuery will needlessly sort & dedupe - //TODO an automatonQuery might be faster? + // unfortunately TermsQuery will needlessly sort & dedupe + // TODO an automatonQuery might be faster? 
return new TermInSetQuery(getFieldName(), terms); } - } diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/WithinPrefixTreeQuery.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/WithinPrefixTreeQuery.java index 538fc7d0cf9..daad6e2b591 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/WithinPrefixTreeQuery.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/WithinPrefixTreeQuery.java @@ -17,14 +17,6 @@ package org.apache.lucene.spatial.prefix; import java.io.IOException; - -import org.locationtech.spatial4j.context.SpatialContext; -import org.locationtech.spatial4j.distance.DistanceUtils; -import org.locationtech.spatial4j.shape.Circle; -import org.locationtech.spatial4j.shape.Point; -import org.locationtech.spatial4j.shape.Rectangle; -import org.locationtech.spatial4j.shape.Shape; -import org.locationtech.spatial4j.shape.SpatialRelation; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.spatial.prefix.tree.Cell; @@ -32,52 +24,69 @@ import org.apache.lucene.spatial.prefix.tree.CellIterator; import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree; import org.apache.lucene.util.BitDocIdSet; import org.apache.lucene.util.FixedBitSet; +import org.locationtech.spatial4j.context.SpatialContext; +import org.locationtech.spatial4j.distance.DistanceUtils; +import org.locationtech.spatial4j.shape.Circle; +import org.locationtech.spatial4j.shape.Point; +import org.locationtech.spatial4j.shape.Rectangle; +import org.locationtech.spatial4j.shape.Shape; +import org.locationtech.spatial4j.shape.SpatialRelation; /** - * Finds docs where its indexed shape is {@link org.apache.lucene.spatial.query.SpatialOperation#IsWithin - * WITHIN} the query shape. It works by looking at cells outside of the query - * shape to ensure documents there are excluded. By default, it will - * examine all cells, and it's fairly slow. If you know that the indexed shapes - * are never comprised of multiple disjoint parts (which also means it is not multi-valued), - * then you can pass {@code SpatialPrefixTree.getDistanceForLevel(maxLevels)} as - * the {@code queryBuffer} constructor parameter to minimally look this distance - * beyond the query shape's edge. Even if the indexed shapes are sometimes - * comprised of multiple disjoint parts, you might want to use this option with + * Finds docs where its indexed shape is {@link + * org.apache.lucene.spatial.query.SpatialOperation#IsWithin WITHIN} the query shape. It works by + * looking at cells outside of the query shape to ensure documents there are excluded. By default, + * it will examine all cells, and it's fairly slow. If you know that the indexed shapes are never + * comprised of multiple disjoint parts (which also means it is not multi-valued), then you can pass + * {@code SpatialPrefixTree.getDistanceForLevel(maxLevels)} as the {@code queryBuffer} constructor + * parameter to minimally look this distance beyond the query shape's edge. Even if the indexed + * shapes are sometimes comprised of multiple disjoint parts, you might want to use this option with * a large buffer as a faster approximation with minimal false-positives. 
* * @lucene.experimental */ public class WithinPrefixTreeQuery extends AbstractVisitingPrefixTreeQuery { - //TODO LUCENE-4869: implement faster algorithm based on filtering out false-positives of a + // TODO LUCENE-4869: implement faster algorithm based on filtering out false-positives of a // minimal query buffer by looking in a DocValues cache holding a representative // point of each disjoint component of a document's shape(s). - //TODO Could the recursion in allCellsIntersectQuery() be eliminated when non-fuzzy or other + // TODO Could the recursion in allCellsIntersectQuery() be eliminated when non-fuzzy or other // circumstances? - private final Shape bufferedQueryShape;//if null then the whole world + private final Shape bufferedQueryShape; // if null then the whole world /** - * See {@link AbstractVisitingPrefixTreeQuery#AbstractVisitingPrefixTreeQuery(org.locationtech.spatial4j.shape.Shape, String, org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree, int, int)}. - * {@code queryBuffer} is the (minimum) distance beyond the query shape edge - * where non-matching documents are looked for so they can be excluded. If - * -1 is used then the whole world is examined (a good default for correctness). + * See {@link + * AbstractVisitingPrefixTreeQuery#AbstractVisitingPrefixTreeQuery(org.locationtech.spatial4j.shape.Shape, + * String, org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree, int, int)}. {@code + * queryBuffer} is the (minimum) distance beyond the query shape edge where non-matching documents + * are looked for so they can be excluded. If -1 is used then the whole world is examined (a good + * default for correctness). */ - public WithinPrefixTreeQuery(Shape queryShape, String fieldName, SpatialPrefixTree grid, - int detailLevel, int prefixGridScanLevel, - double queryBuffer) { + public WithinPrefixTreeQuery( + Shape queryShape, + String fieldName, + SpatialPrefixTree grid, + int detailLevel, + int prefixGridScanLevel, + double queryBuffer) { super(queryShape, fieldName, grid, detailLevel, prefixGridScanLevel); this.bufferedQueryShape = queryBuffer == -1 ? null : bufferShape(queryShape, queryBuffer); } @Override public boolean equals(Object o) { - if (!super.equals(o)) return false;//checks getClass == o.getClass & instanceof + if (!super.equals(o)) { + return false; // checks getClass == o.getClass & instanceof + } WithinPrefixTreeQuery that = (WithinPrefixTreeQuery) o; - if (bufferedQueryShape != null ? !bufferedQueryShape.equals(that.bufferedQueryShape) : that.bufferedQueryShape != null) + if (bufferedQueryShape != null + ? !bufferedQueryShape.equals(that.bufferedQueryShape) + : that.bufferedQueryShape != null) { return false; + } return true; } @@ -91,28 +100,28 @@ public class WithinPrefixTreeQuery extends AbstractVisitingPrefixTreeQuery { @Override public String toString(String field) { - return getClass().getSimpleName() + "(" + - "fieldName=" + fieldName + "," + - "queryShape=" + queryShape + "," + - "detailLevel=" + detailLevel + "," + - "prefixGridScanLevel=" + prefixGridScanLevel + - ")"; + return getClass().getSimpleName() + + "(" + + ("fieldName=" + fieldName + ",") + + ("queryShape=" + queryShape + ",") + + ("detailLevel=" + detailLevel + ",") + + ("prefixGridScanLevel=" + prefixGridScanLevel) + + ")"; } - /** Returns a new shape that is larger than shape by at distErr. - */ - //TODO move this generic code elsewhere? Spatial4j? + /** Returns a new shape that is larger than shape by at distErr. */ + // TODO move this generic code elsewhere? Spatial4j? 
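// Illustrative sketch: the constructor above in use. Passing a finite queryBuffer (here
// the smallest-cell distance named in the class javadoc) trades exactness for speed,
// whereas -1 examines the whole world. "geo", queryShape, detailLevel, and
// prefixGridScanLevel are assumed to be in scope.
double queryBuffer = grid.getDistanceForLevel(grid.getMaxLevels());
Query q =
    new WithinPrefixTreeQuery(
        queryShape, "geo", grid, detailLevel, prefixGridScanLevel, queryBuffer);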
protected Shape bufferShape(Shape shape, double distErr) { - if (distErr <= 0) - throw new IllegalArgumentException("distErr must be > 0"); + if (distErr <= 0) throw new IllegalArgumentException("distErr must be > 0"); SpatialContext ctx = grid.getSpatialContext(); if (shape instanceof Point) { - return ctx.getShapeFactory().circle((Point)shape, distErr); + return ctx.getShapeFactory().circle((Point) shape, distErr); } else if (shape instanceof Circle) { Circle circle = (Circle) shape; double newDist = circle.getRadius() + distErr; - if (ctx.isGeo() && newDist > 180) + if (ctx.isGeo() && newDist > 180) { newDist = 180; + } return ctx.getShapeFactory().circle(circle.getCenter(), newDist); } else { Rectangle bbox = shape.getBoundingBox(); @@ -121,11 +130,9 @@ public class WithinPrefixTreeQuery extends AbstractVisitingPrefixTreeQuery { double newMinY = bbox.getMinY() - distErr; double newMaxY = bbox.getMaxY() + distErr; if (ctx.isGeo()) { - if (newMinY < -90) - newMinY = -90; - if (newMaxY > 90) - newMaxY = 90; - if (newMinY == -90 || newMaxY == 90 || bbox.getWidth() + 2*distErr > 360) { + if (newMinY < -90) newMinY = -90; + if (newMaxY > 90) newMaxY = 90; + if (newMinY == -90 || newMaxY == 90 || bbox.getWidth() + 2 * distErr > 360) { newMinX = -180; newMaxX = 180; } else { @@ -133,7 +140,7 @@ public class WithinPrefixTreeQuery extends AbstractVisitingPrefixTreeQuery { newMaxX = DistanceUtils.normLonDEG(newMaxX); } } else { - //restrict to world bounds + // restrict to world bounds newMinX = Math.max(newMinX, ctx.getWorldBounds().getMinX()); newMaxX = Math.min(newMaxX, ctx.getWorldBounds().getMaxX()); newMinY = Math.max(newMinY, ctx.getWorldBounds().getMinY()); @@ -143,7 +150,6 @@ public class WithinPrefixTreeQuery extends AbstractVisitingPrefixTreeQuery { } } - @Override protected DocIdSet getDocIdSet(LeafReaderContext context) throws IOException { return new VisitorTemplate(context) { @@ -164,13 +170,13 @@ public class WithinPrefixTreeQuery extends AbstractVisitingPrefixTreeQuery { @Override protected CellIterator findSubCellsToVisit(Cell cell) { - //use buffered query shape instead of orig. Works with null too. + // use buffered query shape instead of orig. Works with null too. return cell.getNextLevelCells(bufferedQueryShape); } @Override protected boolean visitPrefix(Cell cell) throws IOException { - //cell.relate is based on the bufferedQueryShape; we need to examine what + // cell.relate is based on the bufferedQueryShape; we need to examine what // the relation is against the queryShape SpatialRelation visitRelation = cell.getShape().relate(queryShape); if (cell.getLevel() == detailLevel) { @@ -188,45 +194,40 @@ public class WithinPrefixTreeQuery extends AbstractVisitingPrefixTreeQuery { @Override protected void visitLeaf(Cell cell) throws IOException { - if (allCellsIntersectQuery(cell)) - collectDocs(inside); - else - collectDocs(outside); + if (allCellsIntersectQuery(cell)) collectDocs(inside); + else collectDocs(outside); } - /** Returns true if the provided cell, and all its sub-cells down to - * detailLevel all intersect the queryShape. + /** + * Returns true if the provided cell, and all its sub-cells down to detailLevel all intersect + * the queryShape. 
*/ private boolean allCellsIntersectQuery(Cell cell) { SpatialRelation relate = cell.getShape().relate(queryShape); - if (cell.getLevel() == detailLevel) - return relate.intersects(); - if (relate == SpatialRelation.WITHIN) - return true; - if (relate == SpatialRelation.DISJOINT) - return false; + if (cell.getLevel() == detailLevel) return relate.intersects(); + if (relate == SpatialRelation.WITHIN) return true; + if (relate == SpatialRelation.DISJOINT) return false; // Note: Generating all these cells just to determine intersection is not ideal. // The real solution is LUCENE-4869. CellIterator subCells = cell.getNextLevelCells(null); while (subCells.hasNext()) { Cell subCell = subCells.next(); - if (!allCellsIntersectQuery(subCell))//recursion - return false; + if (!allCellsIntersectQuery(subCell)) // recursion + return false; } return true; } @Override protected void visitScanned(Cell cell) throws IOException { - visitLeaf(cell);//collects as we want, even if not a leaf -// if (cell.isLeaf()) { -// visitLeaf(cell); -// } else { -// visitPrefix(cell); -// } + visitLeaf(cell); + // collects as we want, even if not a leaf + // if (cell.isLeaf()) { + // visitLeaf(cell); + // } else { + // visitPrefix(cell); + // } } - }.getDocIdSet(); } - } diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/package-info.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/package-info.java index b35f8ee6773..4e7f74ce5d8 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/package-info.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/package-info.java @@ -15,7 +15,5 @@ * limitations under the License. */ -/** - * Prefix Tree Strategy. - */ +/** Prefix Tree Strategy. */ package org.apache.lucene.spatial.prefix; diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/Cell.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/Cell.java index f4bc45856e7..2d0dd9e8a10 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/Cell.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/Cell.java @@ -16,34 +16,43 @@ */ package org.apache.lucene.spatial.prefix.tree; +import org.apache.lucene.util.BytesRef; import org.locationtech.spatial4j.shape.Shape; import org.locationtech.spatial4j.shape.SpatialRelation; -import org.apache.lucene.util.BytesRef; /** * Represents a grid cell. Cell instances are generally very transient and may be re-used - * internally. To get an instance, you could start with {@link SpatialPrefixTree#getWorldCell()}. - * And from there you could either traverse down the tree with {@link #getNextLevelCells(org.locationtech.spatial4j.shape.Shape)}, - * or you could read an indexed term via {@link SpatialPrefixTree#readCell(org.apache.lucene.util.BytesRef,Cell)}. - * When a cell is read from a term, it is comprised of just the base bytes plus optionally a leaf flag. + * internally. To get an instance, you could start with {@link SpatialPrefixTree#getWorldCell()}. + * And from there you could either traverse down the tree with {@link + * #getNextLevelCells(org.locationtech.spatial4j.shape.Shape)}, or you could read an indexed term + * via {@link SpatialPrefixTree#readCell(org.apache.lucene.util.BytesRef,Cell)}. When a cell is read + * from a term, it is comprised of just the base bytes plus optionally a leaf flag. 
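// Illustrative sketch of the two entry points described above, reusing one scratch Cell
// across terms as the interface is designed for. All names here are assumptions.
Cell cell = grid.getWorldCell();
for (BytesRef term : indexedTerms) {
  cell = grid.readCell(term, cell); // may reuse the scratch instance internally
  boolean leaf = cell.isLeaf();     // leaf flag decoded from the term bytes, if present
}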
* * @lucene.experimental */ public interface Cell { -// If we bring this back; perhaps do so as a method that un-shares its internal state: void unshare(); -// /** Resets the state of this cell such that it is identical to {@code source}. This can be used for -// * cloning a cell to have a safe copy, and it also might be used to position this cell -// * before calling {@link #readCell(org.apache.lucene.util.BytesRef)} in a loop if you know the first term -// * is going to be close to some other cell, thereby saving some computations. */ -// void copyFrom(Cell source); + // If we bring this back; perhaps do so as a method that un-shares its internal state: void + // unshare(); + // /** Resets the state of this cell such that it is identical to {@code source}. This can be + // used for + // * cloning a cell to have a safe copy, and it also might be used to position this cell + // * before calling {@link #readCell(org.apache.lucene.util.BytesRef)} in a loop if you know the + // first term + // * is going to be close to some other cell, thereby saving some computations. */ + // void copyFrom(Cell source); - /** Gets the relationship this cell has with the shape from which it was filtered from, assuming it came from a - * {@link CellIterator}. Arguably it belongs there but it's very convenient here. */ + /** + * Gets the relationship this cell has with the shape from which it was filtered from, assuming it + * came from a {@link CellIterator}. Arguably it belongs there but it's very convenient here. + */ SpatialRelation getShapeRel(); - /** See {@link #getShapeRel()}. - * @lucene.internal */ + /** + * See {@link #getShapeRel()}. + * + * @lucene.internal + */ void setShapeRel(SpatialRelation rel); /** @@ -53,41 +62,46 @@ public interface Cell { */ boolean isLeaf(); - /** Set this cell to be a leaf. Warning: never call on a cell - * initialized to reference the same bytes from termsEnum, which should be treated as immutable. - * Note: not supported at level 0. - * @lucene.internal */ + /** + * Set this cell to be a leaf. Warning: never call on a cell initialized to reference the same + * bytes from termsEnum, which should be treated as immutable. Note: not supported at level 0. + * + * @lucene.internal + */ void setLeaf(); /** - * Returns the bytes for this cell, with a leaf byte if this is a leaf cell. - * The result param is used to save object allocation, though its bytes aren't used. + * Returns the bytes for this cell, with a leaf byte if this is a leaf cell. The result + * param is used to save object allocation, though its bytes aren't used. + * * @param result where the result goes, or null to create new */ BytesRef getTokenBytesWithLeaf(BytesRef result); /** - * Returns the bytes for this cell, without a leaf set. The bytes should sort before - * {@link #getTokenBytesWithLeaf(org.apache.lucene.util.BytesRef)}. - * The result param is used to save object allocation, though its bytes aren't used. + * Returns the bytes for this cell, without a leaf set. The bytes should sort before {@link + * #getTokenBytesWithLeaf(org.apache.lucene.util.BytesRef)}. The result param is used to save + * object allocation, though its bytes aren't used. + * * @param result where the result goes, or null to create new */ BytesRef getTokenBytesNoLeaf(BytesRef result); - /** Level 0 is the world (and has no parent), from then on a higher level means a smaller - * cell than the level before it. 
+ /** + * Level 0 is the world (and has no parent), from then on a higher level means a smaller cell than + * the level before it. */ int getLevel(); /** - * Gets the cells at the next grid cell level underneath this one, optionally filtered by - * {@code shapeFilter}. The returned cells should have {@link #getShapeRel()} set to - * their relation with {@code shapeFilter}. In addition, for non-points {@link #isLeaf()} - * must be true when that relation is WITHIN. - *
    - * IMPORTANT: Cells returned from this iterator can be shared, as well as the bytes. - *
    - * Precondition: Never called when getLevel() == maxLevel. + * Gets the cells at the next grid cell level underneath this one, optionally filtered by {@code + * shapeFilter}. The returned cells should have {@link #getShapeRel()} set to their relation with + * {@code shapeFilter}. In addition, for non-points {@link #isLeaf()} must be true when that + * relation is WITHIN. + * + *
    IMPORTANT: Cells returned from this iterator can be shared, as well as the bytes. + * + *
    Precondition: Never called when getLevel() == maxLevel. * * @param shapeFilter an optional filter for the returned cells. * @return A set of cells (no dups), sorted. Not Modifiable. @@ -98,12 +112,15 @@ public interface Cell { Shape getShape(); /** - * Returns if the target term is within/underneath this cell; not necessarily a direct - * descendant. + * Returns if the target term is within/underneath this cell; not necessarily a direct descendant. + * * @param c the term */ boolean isPrefixOf(Cell c); - /** Equivalent to {@code this.getTokenBytesNoLeaf(null).compareTo(fromCell.getTokenBytesNoLeaf(null))}. */ + /** + * Equivalent to {@code + * this.getTokenBytesNoLeaf(null).compareTo(fromCell.getTokenBytesNoLeaf(null))}. + */ int compareToNoLeaf(Cell fromCell); } diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/CellCanPrune.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/CellCanPrune.java index 33bd904d52f..faff421cce0 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/CellCanPrune.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/CellCanPrune.java @@ -20,13 +20,14 @@ package org.apache.lucene.spatial.prefix.tree; import org.locationtech.spatial4j.shape.Shape; /** - * Grid cells that share nothing with other cells when calling {@link #getNextLevelCells(Shape)} - * might implement this interface. Children cells for this cell will be eligible for pruning via - * {@link org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy#setPruneLeafyBranches(boolean)}. + * Grid cells that share nothing with other cells when calling {@link #getNextLevelCells(Shape)} + * might implement this interface. Children cells for this cell will be eligible for pruning via + * {@link + * org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy#setPruneLeafyBranches(boolean)}. * * @lucene.experimental */ -public interface CellCanPrune extends Cell{ +public interface CellCanPrune extends Cell { /** * Returns the number of children for this cell. diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/CellIterator.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/CellIterator.java index 1cef37a8578..64129b7cdb4 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/CellIterator.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/CellIterator.java @@ -26,11 +26,11 @@ import java.util.NoSuchElementException; */ public abstract class CellIterator implements Iterator { - //note: nextCell or thisCell can be non-null but neither at the same time. That's + // note: nextCell or thisCell can be non-null but neither at the same time. That's // because they might return the same instance when re-used! - protected Cell nextCell;//to be returned by next(), and null'ed after - protected Cell thisCell;//see next() & thisCell(). Should be cleared in hasNext(). + protected Cell nextCell; // to be returned by next(), and null'ed after + protected Cell thisCell; // see next() & thisCell(). Should be cleared in hasNext(). /** Returns the cell last returned from {@link #next()}. It's cleared by hasNext(). 
*/ public Cell thisCell() { @@ -39,25 +39,28 @@ public abstract class CellIterator implements Iterator { } // Arguably this belongs here and not on Cell - //public SpatialRelation getShapeRel() + // public SpatialRelation getShapeRel() /** - * Gets the next cell that is >= {@code fromCell}, compared using non-leaf bytes. If it returns null then - * the iterator is exhausted. + * Gets the next cell that is >= {@code fromCell}, compared using non-leaf bytes. If it returns + * null then the iterator is exhausted. */ public Cell nextFrom(Cell fromCell) { while (true) { - if (!hasNext()) + if (!hasNext()) { return null; - Cell c = next();//will update thisCell + } + Cell c = next(); // will update thisCell if (c.compareToNoLeaf(fromCell) >= 0) { return c; } } } - /** This prevents sub-cells (those underneath the current cell) from being iterated to, - * if applicable, otherwise a NO-OP. */ + /** + * This prevents sub-cells (those underneath the current cell) from being iterated to, if + * applicable, otherwise a NO-OP. + */ @Override public void remove() { assert thisCell != null; @@ -66,8 +69,7 @@ public abstract class CellIterator implements Iterator { @Override public Cell next() { if (nextCell == null) { - if (!hasNext()) - throw new NoSuchElementException(); + if (!hasNext()) throw new NoSuchElementException(); } thisCell = nextCell; nextCell = null; diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/DateRangePrefixTree.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/DateRangePrefixTree.java index 9ac042810e7..72ee20b5258 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/DateRangePrefixTree.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/DateRangePrefixTree.java @@ -23,18 +23,19 @@ import java.util.Date; import java.util.GregorianCalendar; import java.util.Locale; import java.util.TimeZone; - import org.locationtech.spatial4j.shape.Shape; /** - * A PrefixTree for date ranges in which the levels of the tree occur at natural periods of time (e.g. years, - * months, ...). You pass in {@link Calendar} objects with the desired fields set and the unspecified - * fields unset, which conveys the precision. The implementation makes some optimization assumptions about a - * {@link java.util.GregorianCalendar}; others could probably be supported easily. - *
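// Illustrative sketch: precision is conveyed by which Calendar fields are set. Leaving
// DAY_OF_MONTH and finer fields unset below yields a month-precision shape.
DateRangePrefixTree tree = new DateRangePrefixTree(DateRangePrefixTree.DEFAULT_CAL);
Calendar cal = tree.newCal();
cal.set(Calendar.YEAR, 2014);
cal.set(Calendar.MONTH, Calendar.OCTOBER);
UnitNRShape monthShape = tree.toShape(cal);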
    - * Warning: If you construct a Calendar and then get something from the object like a field (e.g. year) or - * milliseconds, then every field is fully set by side-effect. So after setting the fields, pass it to this - * API first. + * A PrefixTree for date ranges in which the levels of the tree occur at natural periods of time + * (e.g. years, months, ...). You pass in {@link Calendar} objects with the desired fields set and + * the unspecified fields unset, which conveys the precision. The implementation makes some + * optimization assumptions about a {@link java.util.GregorianCalendar}; others could probably be + * supported easily. + * + *
    Warning: If you construct a Calendar and then get something from the object like a field (e.g. + * year) or milliseconds, then every field is fully set by side-effect. So after setting the fields, + * pass it to this API first. + * * @lucene.experimental */ public class DateRangePrefixTree extends NumberRangePrefixTree { @@ -60,49 +61,61 @@ public class DateRangePrefixTree extends NumberRangePrefixTree { private static final TimeZone UTC = TimeZone.getTimeZone("UTC"); /** - * The Java platform default {@link Calendar} with UTC & ROOT Locale. Generally a {@link GregorianCalendar}. - * Do not modify this! + * The Java platform default {@link Calendar} with UTC & ROOT Locale. Generally a {@link + * GregorianCalendar}. Do not modify this! */ - public static final Calendar DEFAULT_CAL;//template + public static final Calendar DEFAULT_CAL; // template + static { DEFAULT_CAL = Calendar.getInstance(UTC, Locale.ROOT); DEFAULT_CAL.clear(); } /** - * A Calendar instance compatible with {@link java.time.ZonedDateTime} as seen from - * {@link GregorianCalendar#from(ZonedDateTime)}. - * Do not modify this! + * A Calendar instance compatible with {@link java.time.ZonedDateTime} as seen from {@link + * GregorianCalendar#from(ZonedDateTime)}. Do not modify this! */ public static final Calendar JAVA_UTIL_TIME_COMPAT_CAL; + static { // see source of GregorianCalendar.from(ZonedDateTime) GregorianCalendar cal = new GregorianCalendar(UTC, Locale.ROOT); cal.setGregorianChange(new Date(Long.MIN_VALUE)); - cal.setFirstDayOfWeek(Calendar.MONDAY);// might not matter? - cal.setMinimalDaysInFirstWeek(4);// might not matter + cal.setFirstDayOfWeek(Calendar.MONDAY); // might not matter? + cal.setMinimalDaysInFirstWeek(4); // might not matter cal.clear(); JAVA_UTIL_TIME_COMPAT_CAL = cal; } private static final int[] FIELD_BY_LEVEL = { - -1/*unused*/, -1, -1, Calendar.YEAR, Calendar.MONTH, Calendar.DAY_OF_MONTH, - Calendar.HOUR_OF_DAY, Calendar.MINUTE, Calendar.SECOND, Calendar.MILLISECOND}; + -1 /*unused*/, + -1, + -1, + Calendar.YEAR, + Calendar.MONTH, + Calendar.DAY_OF_MONTH, + Calendar.HOUR_OF_DAY, + Calendar.MINUTE, + Calendar.SECOND, + Calendar.MILLISECOND + }; private static final int YEAR_LEVEL = 3; - //how many million years are there? - private static final int NUM_MYEARS = 586;// we assert how this was computed in the constructor + // how many million years are there? + private static final int NUM_MYEARS = 586; // we assert how this was computed in the constructor - /** An instanced based on {@link Calendar#getInstance(TimeZone, Locale)} with UTC and Locale.Root. This - * will (always?) be a {@link GregorianCalendar} with a so-called "Gregorian Change Date" of 1582. + /** + * An instanced based on {@link Calendar#getInstance(TimeZone, Locale)} with UTC and Locale.Root. + * This will (always?) be a {@link GregorianCalendar} with a so-called "Gregorian Change Date" of + * 1582. */ @Deprecated public static final DateRangePrefixTree INSTANCE = new DateRangePrefixTree(DEFAULT_CAL); // Instance fields: (all are final) - private final Calendar CAL_TMP;//template + private final Calendar CAL_TMP; // template private final Calendar MINCAL; private final Calendar MAXCAL; @@ -117,39 +130,44 @@ public class DateRangePrefixTree extends NumberRangePrefixTree { private final UnitNRShape minLV, maxLV; private final UnitNRShape gregorianChangeDateLV; - /** Constructs with the specified calendar used as a template to be cloned whenever a new - * Calendar needs to be created. 
See {@link #DEFAULT_CAL} and {@link #JAVA_UTIL_TIME_COMPAT_CAL}. */ + /** + * Constructs with the specified calendar used as a template to be cloned whenever a new Calendar + * needs to be created. See {@link #DEFAULT_CAL} and {@link #JAVA_UTIL_TIME_COMPAT_CAL}. + */ public DateRangePrefixTree(Calendar templateCal) { - super(new int[]{//sublevels by level - NUM_MYEARS, - 1000,//1 thousand thousand-years in a million years - 1000,//1 thousand years in a thousand-year - calFieldLen(templateCal, Calendar.MONTH), - calFieldLen(templateCal, Calendar.DAY_OF_MONTH), - calFieldLen(templateCal, Calendar.HOUR_OF_DAY), - calFieldLen(templateCal, Calendar.MINUTE), - calFieldLen(templateCal, Calendar.SECOND), - calFieldLen(templateCal, Calendar.MILLISECOND), - }); - CAL_TMP = (Calendar) templateCal.clone();// defensive copy + super( + new int[] { // sublevels by level + NUM_MYEARS, + 1000, // 1 thousand thousand-years in a million years + 1000, // 1 thousand years in a thousand-year + calFieldLen(templateCal, Calendar.MONTH), + calFieldLen(templateCal, Calendar.DAY_OF_MONTH), + calFieldLen(templateCal, Calendar.HOUR_OF_DAY), + calFieldLen(templateCal, Calendar.MINUTE), + calFieldLen(templateCal, Calendar.SECOND), + calFieldLen(templateCal, Calendar.MILLISECOND), + }); + CAL_TMP = (Calendar) templateCal.clone(); // defensive copy MINCAL = (Calendar) CAL_TMP.clone(); MINCAL.setTimeInMillis(Long.MIN_VALUE); MAXCAL = (Calendar) CAL_TMP.clone(); MAXCAL.setTimeInMillis(Long.MAX_VALUE); - //BC years are decreasing, remember. Yet ActualMaximum is the numerically high value, ActualMinimum is 1. + // BC years are decreasing, remember. Yet ActualMaximum is the numerically high value, + // ActualMinimum is 1. BC_FIRSTYEAR = MINCAL.getActualMaximum(Calendar.YEAR); BC_LASTYEAR = MINCAL.getActualMinimum(Calendar.YEAR); // 1 BC_YEARS = BC_FIRSTYEAR - BC_LASTYEAR + 1; AD_FIRSTYEAR = MAXCAL.getActualMinimum(Calendar.YEAR); // 1 AD_LASTYEAR = MAXCAL.getActualMaximum(Calendar.YEAR); - AD_YEAR_BASE = (((BC_YEARS-1) / 1000_000)+1) * 1000_000; // align year 0 at an even # of million years + AD_YEAR_BASE = + (((BC_YEARS - 1) / 1000_000) + 1) * 1000_000; // align year 0 at an even # of million years assert BC_LASTYEAR == 1 && AD_FIRSTYEAR == 1; assert NUM_MYEARS == (AD_YEAR_BASE + AD_LASTYEAR) / 1000_000 + 1; - maxLV = toShape((Calendar)MAXCAL.clone()); - minLV = toShape((Calendar)MINCAL.clone()); + maxLV = toShape((Calendar) MAXCAL.clone()); + minLV = toShape((Calendar) MINCAL.clone()); if (MAXCAL instanceof GregorianCalendar) { - GregorianCalendar gCal = (GregorianCalendar)MAXCAL; + GregorianCalendar gCal = (GregorianCalendar) MAXCAL; gregorianChangeDateLV = toUnitShape(gCal.getGregorianChange()); } else { gregorianChangeDateLV = null; @@ -164,13 +182,13 @@ public class DateRangePrefixTree extends NumberRangePrefixTree { public int getNumSubCells(UnitNRShape lv) { int cmp = comparePrefix(lv, maxLV); assert cmp <= 0; - if (cmp == 0)//edge case (literally!) - return maxLV.getValAtLevel(lv.getLevel()+1) + 1; + if (cmp == 0) // edge case (literally!) + return maxLV.getValAtLevel(lv.getLevel() + 1) + 1; // if using GregorianCalendar and we're after the "Gregorian change date" then we'll compute // the sub-cells ourselves more efficiently without the need to construct a Calendar. cmp = gregorianChangeDateLV != null ? 
comparePrefix(lv, gregorianChangeDateLV) : -1; - //TODO consider also doing fast-path if field is <= hours even if before greg change date + // TODO consider also doing fast-path if field is <= hours even if before greg change date if (cmp >= 0) { int result = fastSubCells(lv); assert result == slowSubCells(lv) : "fast/slow numSubCells inconsistency"; @@ -181,7 +199,7 @@ public class DateRangePrefixTree extends NumberRangePrefixTree { } private int fastSubCells(UnitNRShape lv) { - if (lv.getLevel() == YEAR_LEVEL + 1) {//month + if (lv.getLevel() == YEAR_LEVEL + 1) { // month switch (lv.getValAtLevel(lv.getLevel())) { case Calendar.SEPTEMBER: case Calendar.APRIL: @@ -189,41 +207,49 @@ public class DateRangePrefixTree extends NumberRangePrefixTree { case Calendar.NOVEMBER: return 30; case Calendar.FEBRUARY: - //get the year (negative numbers for BC) + // get the year (negative numbers for BC) int yearAdj = lv.getValAtLevel(1) * 1_000_000; yearAdj += lv.getValAtLevel(2) * 1000; yearAdj += lv.getValAtLevel(3); int year = yearAdj - AD_YEAR_BASE; - if (year % 4 == 0 && !(year % 100 == 0 && year % 400 != 0) )//leap year + if (year % 4 == 0 && !(year % 100 == 0 && year % 400 != 0)) { + // leap year return 29; - else + } else { return 28; + } default: return 31; } - } else {//typical: + } else { // typical: return super.getNumSubCells(lv); } } private int slowSubCells(UnitNRShape lv) { - int field = FIELD_BY_LEVEL[lv.getLevel()+1]; - //short-circuit optimization (GregorianCalendar assumptions) - if (field == -1 || field == Calendar.YEAR || field >= Calendar.HOUR_OF_DAY)//TODO make configurable - return super.getNumSubCells(lv); - Calendar cal = toCalendar(lv);//somewhat heavyweight op; ideally should be stored on UnitNRShape somehow + int field = FIELD_BY_LEVEL[lv.getLevel() + 1]; + // short-circuit optimization (GregorianCalendar assumptions) + if (field == -1 + || field == Calendar.YEAR + || field >= Calendar.HOUR_OF_DAY) // TODO make configurable + return super.getNumSubCells(lv); + // somewhat heavyweight op; ideally should be stored on UnitNRShape somehow + Calendar cal = toCalendar(lv); return cal.getActualMaximum(field) - cal.getActualMinimum(field) + 1; } - /** Calendar utility method: - * Returns a clone of the {@link Calendar} passed to the constructor with all fields cleared. */ + /** + * Calendar utility method: Returns a clone of the {@link Calendar} passed to the constructor with + * all fields cleared. + */ public Calendar newCal() { return (Calendar) CAL_TMP.clone(); } - /** Calendar utility method: - * Returns the spatial prefix tree level for the corresponding {@link java.util.Calendar} field, such as - * {@link java.util.Calendar#YEAR}. If there's no match, the next greatest level is returned as a negative value. + /** + * Calendar utility method: Returns the spatial prefix tree level for the corresponding {@link + * java.util.Calendar} field, such as {@link java.util.Calendar#YEAR}. If there's no match, the + * next greatest level is returned as a negative value. */ public int getTreeLevelForCalendarField(int calField) { for (int i = YEAR_LEVEL; i < FIELD_BY_LEVEL.length; i++) { @@ -236,35 +262,41 @@ public class DateRangePrefixTree extends NumberRangePrefixTree { throw new IllegalArgumentException("Bad calendar field?: " + calField); } - /** Calendar utility method: - * Gets the Calendar field code of the last field that is set prior to an unset field. It only - * examines fields relevant to the prefix tree. If no fields are set, it returns -1. 
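// Worked check of the leap-year rule in fastSubCells above:
//   2024 % 4 == 0 and 2024 % 100 != 0           -> 29 days in February
//   2000 % 4 == 0, % 100 == 0, but % 400 == 0   -> 29 days
//   1900 % 4 == 0, % 100 == 0, and % 400 != 0   -> 28 days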
*/ + /** + * Calendar utility method: Gets the Calendar field code of the last field that is set prior to an + * unset field. It only examines fields relevant to the prefix tree. If no fields are set, it + * returns -1. + */ public int getCalPrecisionField(Calendar cal) { int lastField = -1; for (int level = YEAR_LEVEL; level < FIELD_BY_LEVEL.length; level++) { int field = FIELD_BY_LEVEL[level]; - if (!cal.isSet(field)) - break; + if (!cal.isSet(field)) break; lastField = field; } return lastField; } - /** Calendar utility method: - * Calls {@link Calendar#clear(int)} for every field after {@code field}. Beware of Calendar underflow. */ + /** + * Calendar utility method: Calls {@link Calendar#clear(int)} for every field after {@code field}. + * Beware of Calendar underflow. + */ public void clearFieldsAfter(Calendar cal, int field) { int assertEra = -1; - assert (assertEra = (((Calendar)cal.clone()).get(Calendar.ERA))) >= 0;//a trick to only get this if assert enabled - //note: Calendar.ERA == 0; + assert (assertEra = (((Calendar) cal.clone()).get(Calendar.ERA))) + >= 0; // a trick to only get this if assert enabled + // note: Calendar.ERA == 0; for (int f = field + 1; f <= Calendar.MILLISECOND; f++) { cal.clear(f); } - assert field + 1 == Calendar.ERA || ((Calendar)cal.clone()).get(Calendar.ERA) == assertEra : "Calendar underflow"; + assert field + 1 == Calendar.ERA || ((Calendar) cal.clone()).get(Calendar.ERA) == assertEra + : "Calendar underflow"; } - /** Converts {@code value} from a {@link Calendar} or {@link Date} to a {@link Shape}. Other arguments - * result in a {@link java.lang.IllegalArgumentException}. - * If a Calendar is passed in, there might be problems if it is not created via {@link #newCal()}. + /** + * Converts {@code value} from a {@link Calendar} or {@link Date} to a {@link Shape}. Other + * arguments result in a {@link java.lang.IllegalArgumentException}. If a Calendar is passed in, + * there might be problems if it is not created via {@link #newCal()}. */ @Override public UnitNRShape toUnitShape(Object value) { @@ -272,58 +304,61 @@ public class DateRangePrefixTree extends NumberRangePrefixTree { return toShape((Calendar) value); } else if (value instanceof Date) { Calendar cal = newCal(); - cal.setTime((Date)value); + cal.setTime((Date) value); return toShape(cal); } - throw new IllegalArgumentException("Expecting Calendar or Date but got: "+value.getClass()); + throw new IllegalArgumentException("Expecting Calendar or Date but got: " + value.getClass()); } - /** Converts the Calendar into a Shape. - * The isSet() state of the Calendar is re-instated when done. + /** + * Converts the Calendar into a Shape. The isSet() state of the Calendar is re-instated when done. * If a Calendar is passed in, there might be problems if it is not created via {@link #newCal()}. */ public UnitNRShape toShape(Calendar cal) { // Convert a Calendar into a stack of cell numbers - final int calPrecField = getCalPrecisionField(cal);//must call first; getters set all fields + final int calPrecField = getCalPrecisionField(cal); // must call first; getters set all fields try { - int[] valStack = new int[maxLevels];//starts at level 1, not 0 + int[] valStack = new int[maxLevels]; // starts at level 1, not 0 int len = 0; - if (calPrecField >= Calendar.YEAR) {//year or better precision + if (calPrecField >= Calendar.YEAR) { // year or better precision int year = cal.get(Calendar.YEAR); int yearAdj = cal.get(Calendar.ERA) == 0 ? 
AD_YEAR_BASE - (year - 1) : AD_YEAR_BASE + year; valStack[len++] = yearAdj / 1000_000; - yearAdj -= valStack[len-1] * 1000_000; + yearAdj -= valStack[len - 1] * 1000_000; valStack[len++] = yearAdj / 1000; - yearAdj -= valStack[len-1] * 1000; + yearAdj -= valStack[len - 1] * 1000; valStack[len++] = yearAdj; - for (int level = YEAR_LEVEL +1; level < FIELD_BY_LEVEL.length; level++) { + for (int level = YEAR_LEVEL + 1; level < FIELD_BY_LEVEL.length; level++) { int field = FIELD_BY_LEVEL[level]; - if (field > calPrecField) - break; + if (field > calPrecField) break; valStack[len++] = cal.get(field) - cal.getActualMinimum(field); } } return toShape(valStack, len); } finally { - clearFieldsAfter(cal, calPrecField);//restore precision state modified by get() + clearFieldsAfter(cal, calPrecField); // restore precision state modified by get() } } - /** Calls {@link #toCalendar(org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree.UnitNRShape)}. */ + /** + * Calls {@link + * #toCalendar(org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree.UnitNRShape)}. + */ @Override public Object toObject(UnitNRShape shape) { return toCalendar(shape); } - /** Converts the {@link org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree.UnitNRShape} shape to a - * corresponding Calendar that is cleared below its level. */ + /** + * Converts the {@link org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree.UnitNRShape} + * shape to a corresponding Calendar that is cleared below its level. + */ public Calendar toCalendar(UnitNRShape lv) { - if (lv.getLevel() == 0) - return newCal(); - if (comparePrefix(lv, minLV) <= 0) {//shouldn't typically happen; sometimes in a debugger - return (Calendar) MINCAL.clone();//full precision; truncation would cause underflow + if (lv.getLevel() == 0) return newCal(); + if (comparePrefix(lv, minLV) <= 0) { // shouldn't typically happen; sometimes in a debugger + return (Calendar) MINCAL.clone(); // full precision; truncation would cause underflow } assert comparePrefix(lv, maxLV) <= 0; Calendar cal = newCal(); @@ -337,16 +372,17 @@ public class DateRangePrefixTree extends NumberRangePrefixTree { } if (yearAdj > AD_YEAR_BASE) { cal.set(Calendar.ERA, 1); - cal.set(Calendar.YEAR, yearAdj - AD_YEAR_BASE);//setting the year resets the era + cal.set(Calendar.YEAR, yearAdj - AD_YEAR_BASE); // setting the year resets the era } else { - cal.set(Calendar.ERA, 0);//we assert this "sticks" at the end + cal.set(Calendar.ERA, 0); // we assert this "sticks" at the end cal.set(Calendar.YEAR, (AD_YEAR_BASE - yearAdj) + 1); } for (int level = YEAR_LEVEL + 1; level <= lv.getLevel(); level++) { int field = FIELD_BY_LEVEL[level]; cal.set(field, lv.getValAtLevel(level) + cal.getActualMinimum(field)); } - assert yearAdj > AD_YEAR_BASE || ((Calendar)cal.clone()).get(Calendar.ERA) == 0 : "ERA / YEAR underflow"; + assert yearAdj > AD_YEAR_BASE || ((Calendar) cal.clone()).get(Calendar.ERA) == 0 + : "ERA / YEAR underflow"; return cal; } @@ -355,17 +391,17 @@ public class DateRangePrefixTree extends NumberRangePrefixTree { return toString(toCalendar(lv)); } - /** Calendar utility method consistent with {@link java.time.format.DateTimeFormatter#ISO_INSTANT} except - * has no trailing 'Z', and will be truncated to the units given according to - * {@link Calendar#isSet(int)}. - * A fully cleared calendar will yield the string "*". - * The isSet() state of the Calendar is re-instated when done. 
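// Illustrative sketch: the truncated ISO-8601 form described above round-trips through
// parseCalendar (which throws ParseException on malformed input).
Calendar cal = tree.parseCalendar("2014-10");    // only ERA/YEAR and MONTH become set
assert tree.toString(cal).equals("2014-10");     // day and finer units were never set
assert tree.toString(tree.newCal()).equals("*"); // a fully cleared calendar prints "*"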
*/ + /** + * Calendar utility method consistent with {@link java.time.format.DateTimeFormatter#ISO_INSTANT} + * except has no trailing 'Z', and will be truncated to the units given according to {@link + * Calendar#isSet(int)}. A fully cleared calendar will yield the string "*". The isSet() state of + * the Calendar is re-instated when done. + */ public String toString(Calendar cal) { - final int calPrecField = getCalPrecisionField(cal);//must call first; getters set all fields - if (calPrecField == -1) - return "*"; + final int calPrecField = getCalPrecisionField(cal); // must call first; getters set all fields + if (calPrecField == -1) return "*"; try { - StringBuilder builder = new StringBuilder("yyyy-MM-dd'T'HH:mm:ss.SSS".length());//typical + StringBuilder builder = new StringBuilder("yyyy-MM-dd'T'HH:mm:ss.SSS".length()); // typical int year = cal.get(Calendar.YEAR); // within the era (thus always positve). >= 1. if (cal.get(Calendar.ERA) == 0) { // BC year -= 1; // 1BC should be "0000", so shift by one @@ -396,14 +432,15 @@ public class DateRangePrefixTree extends NumberRangePrefixTree { builder.append(':'); appendPadded(builder, cal.get(Calendar.SECOND), (short) 2); } - if (calPrecField >= Calendar.MILLISECOND && cal.get(Calendar.MILLISECOND) > 0) { // only if non-zero + if (calPrecField >= Calendar.MILLISECOND + && cal.get(Calendar.MILLISECOND) > 0) { // only if non-zero builder.append('.'); - appendPadded(builder, cal.get(Calendar.MILLISECOND), (short) 3); + appendPadded(builder, cal.get(Calendar.MILLISECOND), (short) 3); } return builder.toString(); } finally { - clearFieldsAfter(cal, calPrecField);//restore precision state modified by get() + clearFieldsAfter(cal, calPrecField); // restore precision state modified by get() } } @@ -431,105 +468,99 @@ public class DateRangePrefixTree extends NumberRangePrefixTree { return toShape(parseCalendar(str)); } - /** Calendar utility method: - * The reverse of {@link #toString(java.util.Calendar)}. It will only set the fields found, leaving - * the remainder in an un-set state. A leading '-' or '+' is optional (positive assumed), and a - * trailing 'Z' is also optional. + /** + * Calendar utility method: The reverse of {@link #toString(java.util.Calendar)}. It will only set + * the fields found, leaving the remainder in an un-set state. A leading '-' or '+' is optional + * (positive assumed), and a trailing 'Z' is also optional. + * * @param str not null and not empty * @return not null */ public Calendar parseCalendar(String str) throws ParseException { // example: +2014-10-23T21:22:33.159Z - if (str == null || str.isEmpty()) - throw new IllegalArgumentException("str is null or blank"); + if (str == null || str.isEmpty()) throw new IllegalArgumentException("str is null or blank"); Calendar cal = newCal(); - if (str.equals("*")) - return cal; - int offset = 0;//a pointer + if (str.equals("*")) return cal; + int offset = 0; // a pointer int parsedVal = 0; try { - //year & era: - int lastOffset = str.charAt(str.length()-1) == 'Z' ? str.length() - 1 : str.length(); - int hyphenIdx = str.indexOf('-', 1);//look past possible leading hyphen - if (hyphenIdx < 0) - hyphenIdx = lastOffset; + // year & era: + int lastOffset = str.charAt(str.length() - 1) == 'Z' ? str.length() - 1 : str.length(); + int hyphenIdx = str.indexOf('-', 1); // look past possible leading hyphen + if (hyphenIdx < 0) hyphenIdx = lastOffset; int year = Integer.parseInt(str.substring(offset, hyphenIdx)); cal.set(Calendar.ERA, year <= 0 ? 
0 : 1); - cal.set(Calendar.YEAR, year <= 0 ? -1*year + 1 : year); + cal.set(Calendar.YEAR, year <= 0 ? -1 * year + 1 : year); offset = hyphenIdx + 1; - if (lastOffset < offset) - return cal; + if (lastOffset < offset) return cal; - //NOTE: We aren't validating separator chars, and we unintentionally accept leading +/-. + // NOTE: We aren't validating separator chars, and we unintentionally accept leading +/-. // The str.substring()'s hopefully get optimized to be stack-allocated. - //month: - parsedVal = parseAndCheck( str, offset, 1, 12); - cal.set(Calendar.MONTH, parsedVal - 1);//starts at 0 + // month: + parsedVal = parseAndCheck(str, offset, 1, 12); + cal.set(Calendar.MONTH, parsedVal - 1); // starts at 0 offset += 3; - if (lastOffset < offset) - return cal; - //day: - checkDelimeter(str, offset-1, '-'); + if (lastOffset < offset) return cal; + // day: + checkDelimeter(str, offset - 1, '-'); - parsedVal = parseAndCheck( str, offset, 1, 31); + parsedVal = parseAndCheck(str, offset, 1, 31); cal.set(Calendar.DAY_OF_MONTH, parsedVal); offset += 3; - if (lastOffset < offset) - return cal; - checkDelimeter(str, offset-1, 'T'); - //hour: + if (lastOffset < offset) return cal; + checkDelimeter(str, offset - 1, 'T'); + // hour: - parsedVal = parseAndCheck( str, offset, 0, 24); + parsedVal = parseAndCheck(str, offset, 0, 24); cal.set(Calendar.HOUR_OF_DAY, parsedVal); offset += 3; - if (lastOffset < offset) - return cal; - checkDelimeter(str, offset-1, ':'); - //minute: + if (lastOffset < offset) return cal; + checkDelimeter(str, offset - 1, ':'); + // minute: - parsedVal = parseAndCheck( str, offset, 0, 59); + parsedVal = parseAndCheck(str, offset, 0, 59); cal.set(Calendar.MINUTE, parsedVal); offset += 3; - if (lastOffset < offset) - return cal; - checkDelimeter(str, offset-1, ':'); - //second: + if (lastOffset < offset) return cal; + checkDelimeter(str, offset - 1, ':'); + // second: - parsedVal = parseAndCheck( str, offset, 0, 59); + parsedVal = parseAndCheck(str, offset, 0, 59); cal.set(Calendar.SECOND, parsedVal); offset += 3; - if (lastOffset < offset) - return cal; - checkDelimeter(str, offset-1, '.'); - //ms: + if (lastOffset < offset) return cal; + checkDelimeter(str, offset - 1, '.'); + // ms: int maxOffset = lastOffset - offset; // assume remaining is all digits to compute milliseconds // we truncate off > millisecond precision (3 digits only) - int millis = (int) (Integer.parseInt(str.substring(offset, offset + maxOffset)) / Math.pow(10, maxOffset - 3)); + int millis = + (int) + (Integer.parseInt(str.substring(offset, offset + maxOffset)) + / Math.pow(10, maxOffset - 3)); cal.set(Calendar.MILLISECOND, millis); return cal; } catch (Exception e) { - ParseException pe = new ParseException("Improperly formatted datetime: "+str, offset); + ParseException pe = new ParseException("Improperly formatted datetime: " + str, offset); pe.initCause(e); throw pe; } } - private void checkDelimeter(String str, int offset, char delim) { + private void checkDelimeter(String str, int offset, char delim) { if (str.charAt(offset) != delim) { - throw new IllegalArgumentException("Invalid delimeter: '"+str.charAt(offset)+ - "', expecting '"+delim+"'"); + throw new IllegalArgumentException( + "Invalid delimeter: '" + str.charAt(offset) + "', expecting '" + delim + "'"); } } - private int parseAndCheck(String str, int offset, int min, int max) { - int val = Integer.parseInt(str.substring(offset, offset+2)); - if (val < min || val > max) { - throw new IllegalArgumentException("Invalid value: "+val+"," + - " 
expecting from "+min+" to "+max+"]"); + private int parseAndCheck(String str, int offset, int min, int max) { + int val = Integer.parseInt(str.substring(offset, offset + 2)); + if (val < min || val > max) { + throw new IllegalArgumentException( + "Invalid value: " + val + "," + " expecting from " + min + " to " + max + "]"); } return val; } - } diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/FilterCellIterator.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/FilterCellIterator.java index ef170071876..00991573a22 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/FilterCellIterator.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/FilterCellIterator.java @@ -16,14 +16,13 @@ */ package org.apache.lucene.spatial.prefix.tree; +import java.util.Iterator; import org.locationtech.spatial4j.shape.Shape; import org.locationtech.spatial4j.shape.SpatialRelation; -import java.util.Iterator; - /** - * A filtering iterator of Cells. Those not matching the provided shape (disjoint) are - * skipped. If {@code shapeFilter} is null then all cells are returned. + * A filtering iterator of Cells. Those not matching the provided shape (disjoint) are skipped. If + * {@code shapeFilter} is null then all cells are returned. * * @lucene.internal */ @@ -39,8 +38,8 @@ class FilterCellIterator extends CellIterator { @Override public boolean hasNext() { thisCell = null; - if (nextCell != null)//calling hasNext twice in a row - return true; + if (nextCell != null) // calling hasNext twice in a row + return true; while (baseIter.hasNext()) { nextCell = baseIter.next(); if (shapeFilter == null) { @@ -49,13 +48,11 @@ class FilterCellIterator extends CellIterator { SpatialRelation rel = nextCell.getShape().relate(shapeFilter); if (rel.intersects()) { nextCell.setShapeRel(rel); - if (rel == SpatialRelation.WITHIN) - nextCell.setLeaf(); + if (rel == SpatialRelation.WITHIN) nextCell.setLeaf(); return true; } } } return false; } - } diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/GeohashPrefixTree.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/GeohashPrefixTree.java index 237d26a5f9f..a95d17d8370 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/GeohashPrefixTree.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/GeohashPrefixTree.java @@ -19,26 +19,23 @@ package org.apache.lucene.spatial.prefix.tree; import java.util.ArrayList; import java.util.Collection; import java.util.List; - +import org.apache.lucene.util.BytesRef; import org.locationtech.spatial4j.context.SpatialContext; import org.locationtech.spatial4j.io.GeohashUtils; import org.locationtech.spatial4j.shape.Point; import org.locationtech.spatial4j.shape.Rectangle; import org.locationtech.spatial4j.shape.Shape; -import org.apache.lucene.util.BytesRef; /** - * A {@link SpatialPrefixTree} based on - * Geohashes. - * Uses {@link GeohashUtils} to do all the geohash work. + * A {@link SpatialPrefixTree} based on Geohashes. Uses {@link GeohashUtils} to do all + * the geohash work. 
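// Illustrative sketch: picking a geohash level for a target accuracy in degrees.
// getLevelForDistance clamps into [1, maxLevels]; a distance of 0 short-circuits to
// maxLevels, as shown in the implementation below.
GeohashPrefixTree grid =
    new GeohashPrefixTree(SpatialContext.GEO, GeohashPrefixTree.getMaxLevelsPossible());
int level = grid.getLevelForDistance(0.1); // ~0.1 degrees of precision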
* * @lucene.experimental */ public class GeohashPrefixTree extends LegacyPrefixTree { - /** - * Factory for creating {@link GeohashPrefixTree} instances with useful defaults - */ + /** Factory for creating {@link GeohashPrefixTree} instances with useful defaults */ public static class Factory extends SpatialPrefixTreeFactory { @Override @@ -49,8 +46,8 @@ public class GeohashPrefixTree extends LegacyPrefixTree { @Override protected SpatialPrefixTree newSPT() { - return new GeohashPrefixTree(ctx, - maxLevels != null ? maxLevels : GeohashPrefixTree.getMaxLevelsPossible()); + return new GeohashPrefixTree( + ctx, maxLevels != null ? maxLevels : GeohashPrefixTree.getMaxLevelsPossible()); } } @@ -58,10 +55,11 @@ public class GeohashPrefixTree extends LegacyPrefixTree { super(ctx, maxLevels); Rectangle bounds = ctx.getWorldBounds(); if (bounds.getMinX() != -180) - throw new IllegalArgumentException("Geohash only supports lat-lon world bounds. Got "+bounds); + throw new IllegalArgumentException( + "Geohash only supports lat-lon world bounds. Got " + bounds); int MAXP = getMaxLevelsPossible(); if (maxLevels <= 0 || maxLevels > MAXP) - throw new IllegalArgumentException("maxLevels must be [1-"+MAXP+"] but got "+ maxLevels); + throw new IllegalArgumentException("maxLevels must be [1-" + MAXP + "] but got " + maxLevels); } /** Any more than this and there's no point (double lat and lon are the same). */ @@ -76,19 +74,19 @@ public class GeohashPrefixTree extends LegacyPrefixTree { @Override public int getLevelForDistance(double dist) { - if (dist == 0) - return maxLevels;//short circuit + if (dist == 0) return maxLevels; // short circuit final int level = GeohashUtils.lookupHashLenForWidthHeight(dist, dist); return Math.max(Math.min(level, maxLevels), 1); } @Override protected Cell getCell(Point p, int level) { - return new GhCell(GeohashUtils.encodeLatLon(p.getY(), p.getX(), level));//args are lat,lon (y,x) + return new GhCell( + GeohashUtils.encodeLatLon(p.getY(), p.getX(), level)); // args are lat,lon (y,x) } private static byte[] stringToBytesPlus1(String token) { - //copy ASCII token to byte array with one extra spot for eventual LEAF_BYTE if needed + // copy ASCII token to byte array with one extra spot for eventual LEAF_BYTE if needed byte[] bytes = new byte[token.length() + 1]; for (int i = 0; i < token.length(); i++) { bytes[i] = (byte) token.charAt(i); @@ -98,13 +96,14 @@ public class GeohashPrefixTree extends LegacyPrefixTree { private class GhCell extends LegacyCell { - private String geohash;//cache; never has leaf byte, simply a geohash + private String geohash; // cache; never has leaf byte, simply a geohash GhCell(String geohash) { super(stringToBytesPlus1(geohash), 0, geohash.length()); this.geohash = geohash; - if (isLeaf() && getLevel() < getMaxLevels())//we don't have a leaf byte at max levels (an opt) - this.geohash = geohash.substring(0, geohash.length() - 1); + if (isLeaf() + && getLevel() < getMaxLevels()) // we don't have a leaf byte at max levels (an opt) + this.geohash = geohash.substring(0, geohash.length() - 1); } GhCell(byte[] bytes, int off, int len) { @@ -112,10 +111,14 @@ public class GeohashPrefixTree extends LegacyPrefixTree { } @Override - protected GeohashPrefixTree getGrid() { return GeohashPrefixTree.this; } + protected GeohashPrefixTree getGrid() { + return GeohashPrefixTree.this; + } @Override - protected int getMaxLevels() { return maxLevels; } + protected int getMaxLevels() { + return maxLevels; + } @Override protected void readCell(BytesRef bytesRef) { @@ 
-125,7 +128,7 @@ public class GeohashPrefixTree extends LegacyPrefixTree { @Override public Collection getSubCells() { - String[] hashes = GeohashUtils.getSubGeohashes(getGeohash());//sorted + String[] hashes = GeohashUtils.getSubGeohashes(getGeohash()); // sorted List cells = new ArrayList<>(hashes.length); for (String hash : hashes) { cells.add(new GhCell(hash)); @@ -135,12 +138,12 @@ public class GeohashPrefixTree extends LegacyPrefixTree { @Override public int getSubCellsSize() { - return 32;//8x4 + return 32; // 8x4 } @Override protected GhCell getSubCell(Point p) { - return (GhCell) getGrid().getCell(p, getLevel() + 1);//not performant! + return (GhCell) getGrid().getCell(p, getLevel() + 1); // not performant! } @Override @@ -152,11 +155,8 @@ public class GeohashPrefixTree extends LegacyPrefixTree { } private String getGeohash() { - if (geohash == null) - geohash = getTokenBytesNoLeaf(null).utf8ToString(); + if (geohash == null) geohash = getTokenBytesNoLeaf(null).utf8ToString(); return geohash; } - - }//class GhCell - + } // class GhCell } diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/LegacyCell.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/LegacyCell.java index bcc1557eebc..8ffb2c852f4 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/LegacyCell.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/LegacyCell.java @@ -17,40 +17,44 @@ package org.apache.lucene.spatial.prefix.tree; import java.util.Collection; - +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.StringHelper; import org.locationtech.spatial4j.shape.Point; import org.locationtech.spatial4j.shape.Shape; import org.locationtech.spatial4j.shape.SpatialRelation; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.StringHelper; -/** The base for the original two SPT's: Geohash and Quad. Don't subclass this for new SPTs. - * @lucene.internal */ -//public for RPT pruneLeafyBranches code +/** + * The base for the original two SPT's: Geohash and Quad. Don't subclass this for new SPTs. + * + * @lucene.internal + */ +// public for RPT pruneLeafyBranches code public abstract class LegacyCell implements CellCanPrune { // Important: A LegacyCell doesn't share state for getNextLevelCells(), and // LegacySpatialPrefixTree assumes this in its simplify tree logic. - private static final byte LEAF_BYTE = '+';//NOTE: must sort before letters & numbers + private static final byte LEAF_BYTE = '+'; // NOTE: must sort before letters & numbers - //Arguably we could simply use a BytesRef, using an extra Object. - protected byte[] bytes;//generally bigger to potentially hold a leaf + // Arguably we could simply use a BytesRef, using an extra Object. + protected byte[] bytes; // generally bigger to potentially hold a leaf protected int b_off; - protected int b_len;//doesn't reflect leaf; same as getLevel() + protected int b_len; // doesn't reflect leaf; same as getLevel() protected boolean isLeaf; /** - * When set via getSubCells(filter), it is the relationship between this cell - * and the given shape filter. Doesn't participate in shape equality. + * When set via getSubCells(filter), it is the relationship between this cell and the given shape + * filter. Doesn't participate in shape equality. */ protected SpatialRelation shapeRel; - protected Shape shape;//cached + protected Shape shape; // cached - /** Warning: Refers to the same bytes (no copy). 
If {@link #setLeaf()} is subsequently called then it - * may modify bytes. */ + /** + * Warning: Refers to the same bytes (no copy). If {@link #setLeaf()} is subsequently called then + * it may modify bytes. + */ protected LegacyCell(byte[] bytes, int off, int len) { this.bytes = bytes; this.b_off = off; @@ -69,10 +73,8 @@ public abstract class LegacyCell implements CellCanPrune { protected void readLeafAdjust() { isLeaf = (b_len > 0 && bytes[b_off + b_len - 1] == LEAF_BYTE); - if (isLeaf) - b_len--; - if (getLevel() == getMaxLevels()) - isLeaf = true; + if (isLeaf) b_len--; + if (getLevel() == getMaxLevels()) isLeaf = true; } protected abstract SpatialPrefixTree getGrid(); @@ -102,8 +104,7 @@ public abstract class LegacyCell implements CellCanPrune { @Override public BytesRef getTokenBytesWithLeaf(BytesRef result) { result = getTokenBytesNoLeaf(result); - if (!isLeaf || getLevel() == getMaxLevels()) - return result; + if (!isLeaf || getLevel() == getMaxLevels()) return result; if (result.bytes.length < result.offset + result.length + 1) { assert false : "Not supposed to happen; performance bug"; byte[] copy = new byte[result.length + 1]; @@ -117,8 +118,7 @@ public abstract class LegacyCell implements CellCanPrune { @Override public BytesRef getTokenBytesNoLeaf(BytesRef result) { - if (result == null) - return new BytesRef(bytes, b_off, b_len); + if (result == null) return new BytesRef(bytes, b_off, b_len); result.bytes = bytes; result.offset = b_off; result.length = b_len; @@ -143,17 +143,19 @@ public abstract class LegacyCell implements CellCanPrune { } /** - * Performant implementations are expected to implement this efficiently by - * considering the current cell's boundary. - *

<p> - * Precondition: Never called when getLevel() == maxLevel. - * Precondition: this.getShape().relate(p) != DISJOINT. + * Performant implementations are expected to implement this efficiently by considering the + * current cell's boundary. + * + * <ul> + *   <li>Precondition: Never called when getLevel() == maxLevel. + *   <li>Precondition: this.getShape().relate(p) != DISJOINT. + * </ul>
    */ protected abstract LegacyCell getSubCell(Point p); /** - * Gets the cells at the next grid cell level that covers this cell. - * Precondition: Never called when getLevel() == maxLevel. + * Gets the cells at the next grid cell level that covers this cell. Precondition: Never called + * when getLevel() == maxLevel. * * @return A set of cells (no dups), sorted, modifiable, not empty, not null. */ @@ -161,17 +163,25 @@ public abstract class LegacyCell implements CellCanPrune { @Override public boolean isPrefixOf(Cell c) { - //Note: this only works when each level uses a whole number of bytes. - LegacyCell cell = (LegacyCell)c; + // Note: this only works when each level uses a whole number of bytes. + LegacyCell cell = (LegacyCell) c; boolean result = sliceEquals(cell.bytes, cell.b_off, cell.b_len, bytes, b_off, b_len); - assert result == StringHelper.startsWith(c.getTokenBytesNoLeaf(null), getTokenBytesNoLeaf(null)); + assert result + == StringHelper.startsWith(c.getTokenBytesNoLeaf(null), getTokenBytesNoLeaf(null)); return result; } - /** Copied from {@link org.apache.lucene.util.StringHelper#startsWith(BytesRef, BytesRef)} - * which calls this. This is to avoid creating a BytesRef. */ - private static boolean sliceEquals(byte[] sliceToTest_bytes, int sliceToTest_offset, int sliceToTest_length, - byte[] other_bytes, int other_offset, int other_length) { + /** + * Copied from {@link org.apache.lucene.util.StringHelper#startsWith(BytesRef, BytesRef)} which + * calls this. This is to avoid creating a BytesRef. + */ + private static boolean sliceEquals( + byte[] sliceToTest_bytes, + int sliceToTest_offset, + int sliceToTest_length, + byte[] other_bytes, + int other_offset, + int other_length) { if (sliceToTest_length < other_length) { return false; } @@ -194,11 +204,11 @@ public abstract class LegacyCell implements CellCanPrune { return compare(bytes, b_off, b_len, b.bytes, b.b_off, b.b_len); } - /** Copied from {@link BytesRef#compareTo(BytesRef)}. - * This is to avoid creating a BytesRef. */ - protected static int compare(byte[] aBytes, int aUpto, int a_length, byte[] bBytes, int bUpto, int b_length) { + /** Copied from {@link BytesRef#compareTo(BytesRef)}. This is to avoid creating a BytesRef. 
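The private sliceEquals above duplicates StringHelper.startsWith directly on byte slices so that no BytesRef is allocated per comparison. A standalone sketch of the same prefix test (hypothetical names, not Lucene API):

import java.nio.charset.StandardCharsets;

public class SlicePrefixDemo {
  // does the first slice start with the second? mirrors sliceEquals above
  static boolean sliceStartsWith(
      byte[] bytes, int off, int len, byte[] prefix, int prefixOff, int prefixLen) {
    if (len < prefixLen) {
      return false; // shorter than the prefix; cannot match
    }
    for (int i = 0; i < prefixLen; i++) {
      if (bytes[off + i] != prefix[prefixOff + i]) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    byte[] cell = "9q8yy".getBytes(StandardCharsets.US_ASCII);
    byte[] parent = "9q8".getBytes(StandardCharsets.US_ASCII);
    System.out.println(sliceStartsWith(cell, 0, cell.length, parent, 0, parent.length)); // true
  }
}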
*/ + protected static int compare( + byte[] aBytes, int aUpto, int a_length, byte[] bBytes, int bUpto, int b_length) { final int aStop = aUpto + Math.min(a_length, b_length); - while(aUpto < aStop) { + while (aUpto < aStop) { int aByte = aBytes[aUpto++] & 0xff; int bByte = bBytes[bUpto++] & 0xff; @@ -214,7 +224,7 @@ public abstract class LegacyCell implements CellCanPrune { @Override public boolean equals(Object obj) { - //this method isn't "normally" called; just in asserts/tests + // this method isn't "normally" called; just in asserts/tests if (obj instanceof Cell) { Cell cell = (Cell) obj; return getTokenBytesWithLeaf(null).equals(cell.getTokenBytesWithLeaf(null)); @@ -230,8 +240,7 @@ public abstract class LegacyCell implements CellCanPrune { @Override public String toString() { - //this method isn't "normally" called; just in asserts/tests + // this method isn't "normally" called; just in asserts/tests return getTokenBytesWithLeaf(null).utf8ToString(); } - } diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/LegacyPrefixTree.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/LegacyPrefixTree.java index 1a3afcc8740..2bba941681c 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/LegacyPrefixTree.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/LegacyPrefixTree.java @@ -17,15 +17,17 @@ package org.apache.lucene.spatial.prefix.tree; import java.util.Arrays; - +import org.apache.lucene.util.BytesRef; import org.locationtech.spatial4j.context.SpatialContext; import org.locationtech.spatial4j.shape.Point; import org.locationtech.spatial4j.shape.Rectangle; import org.locationtech.spatial4j.shape.Shape; -import org.apache.lucene.util.BytesRef; -/** The base for the original two SPT's: Geohash and Quad. Don't subclass this for new SPTs. - * @lucene.internal */ +/** + * The base for the original two SPT's: Geohash and Quad. Don't subclass this for new SPTs. + * + * @lucene.internal + */ abstract class LegacyPrefixTree extends SpatialPrefixTree { public LegacyPrefixTree(SpatialContext ctx, int maxLevels) { super(ctx, maxLevels); @@ -34,51 +36,48 @@ abstract class LegacyPrefixTree extends SpatialPrefixTree { public double getDistanceForLevel(int level) { if (level < 1 || level > getMaxLevels()) throw new IllegalArgumentException("Level must be in 1 to maxLevels range"); - //TODO cache for each level + // TODO cache for each level Cell cell = getCell(ctx.getWorldBounds().getCenter(), level); Rectangle bbox = cell.getShape().getBoundingBox(); double width = bbox.getWidth(); double height = bbox.getHeight(); - //Use standard cartesian hypotenuse. For geospatial, this answer is larger + // Use standard cartesian hypotenuse. For geospatial, this answer is larger // than the correct one but it's okay to over-estimate. return Math.sqrt(width * width + height * height); } - /** - * Returns the cell containing point {@code p} at the specified {@code level}. - */ + /** Returns the cell containing point {@code p} at the specified {@code level}. 
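getDistanceForLevel above measures a level by the diagonal of one of its cells, using the plain cartesian hypotenuse even for geodetic data, since over-estimating is safe here. The arithmetic, sketched with made-up cell dimensions:

public class CellDiagonalDemo {
  public static void main(String[] args) {
    double width = 1.4, height = 0.7; // hypothetical cell bounding box, in degrees
    // cartesian hypotenuse; larger than the true geodesic answer, which is acceptable
    System.out.println(Math.sqrt(width * width + height * height)); // ~1.565
  }
}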
*/ protected abstract Cell getCell(Point p, int level); @Override public Cell readCell(BytesRef term, Cell scratch) { LegacyCell cell = (LegacyCell) scratch; - if (cell == null) - cell = (LegacyCell) getWorldCell(); + if (cell == null) cell = (LegacyCell) getWorldCell(); cell.readCell(term); return cell; } @Override public CellIterator getTreeCellIterator(Shape shape, int detailLevel) { - if (!(shape instanceof Point)) - return super.getTreeCellIterator(shape, detailLevel); + if (!(shape instanceof Point)) return super.getTreeCellIterator(shape, detailLevel); - //This specialization is here because the legacy implementations don't have a fast implementation of - // cell.getSubCells(point). It's fastest here to encode the full bytes for detailLevel, and create + // This specialization is here because the legacy implementations don't have a fast + // implementation of + // cell.getSubCells(point). It's fastest here to encode the full bytes for detailLevel, and + // create // subcells from the bytesRef in a loop. This avoids an O(N^2) encode, and we have O(N) instead. Cell cell = getCell((Point) shape, detailLevel); assert cell instanceof LegacyCell; BytesRef fullBytes = cell.getTokenBytesNoLeaf(null); - //fill in reverse order to be sorted + // fill in reverse order to be sorted Cell[] cells = new Cell[detailLevel]; for (int i = 1; i < detailLevel; i++) { fullBytes.length = i; Cell parentCell = readCell(fullBytes, null); - cells[i-1] = parentCell; + cells[i - 1] = parentCell; } - cells[detailLevel-1] = cell; - return new FilterCellIterator(Arrays.asList(cells).iterator(), null);//null filter + cells[detailLevel - 1] = cell; + return new FilterCellIterator(Arrays.asList(cells).iterator(), null); // null filter } - } diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/NumberRangePrefixTree.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/NumberRangePrefixTree.java index 72b689bc018..32e08b807ea 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/NumberRangePrefixTree.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/NumberRangePrefixTree.java @@ -17,7 +17,8 @@ package org.apache.lucene.spatial.prefix.tree; import java.text.ParseException; - +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.StringHelper; import org.locationtech.spatial4j.context.SpatialContext; import org.locationtech.spatial4j.context.SpatialContextFactory; import org.locationtech.spatial4j.shape.Point; @@ -25,29 +26,32 @@ import org.locationtech.spatial4j.shape.Rectangle; import org.locationtech.spatial4j.shape.Shape; import org.locationtech.spatial4j.shape.SpatialRelation; import org.locationtech.spatial4j.shape.impl.RectangleImpl; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.StringHelper; /** - * A SpatialPrefixTree for single-dimensional numbers and number ranges of fixed precision values (not floating point). - * Despite its name, the indexed values (and queries) need not actually be ranges, they can be unit instance/values. - *

<p> - * Why might you use this instead of Lucene's built-in integer/long support? Here are some reasons with features based - * on code in this class, or are possible based on this class but require a subclass to fully realize it. + * A SpatialPrefixTree for single-dimensional numbers and number ranges of fixed precision values + * (not floating point). Despite its name, the indexed values (and queries) need not actually be + * ranges, they can be unit instance/values. + * + * <p>Why might you use this instead of Lucene's built-in integer/long support? Here are some + * reasons with features based on code in this class, or are possible based on this class but + * require a subclass to fully realize it. + * - * <ul> - * <li>Index ranges, not just unit instances. This is especially useful when the requirement calls for a - * multi-valued range.</li> - * <li>Instead of a fixed "precisionStep", this prefixTree can have a customizable number of child values for any - * prefix (up to 32768). This allows exact alignment of the prefix-tree with typical/expected values, which - * results in better performance. For example in a Date implementation, every month can get its own dedicated prefix, - * every day, etc., even though months vary in duration.</li> - * <li>Arbitrary precision, like {@link java.math.BigDecimal}.</li> - * <li>Standard Lucene integer/long indexing always indexes the full precision of those data types but this one - * is customizable.</li> + * <ul> + *   <li>Index ranges, not just unit instances. This is especially useful when the requirement calls + *       for a multi-valued range. + *   <li>Instead of a fixed "precisionStep", this prefixTree can have a customizable number of child + *       values for any prefix (up to 32768). This allows exact alignment of the prefix-tree with + *       typical/expected values, which results in better performance. For example in a Date + *       implementation, every month can get its own dedicated prefix, every day, etc., even though + *       months vary in duration. + *   <li>Arbitrary precision, like {@link java.math.BigDecimal}. + *   <li>Standard Lucene integer/long indexing always indexes the full precision of those data types + *       but this one is customizable. + * </ul>
* - * Unlike "normal" spatial components in this module, this special-purpose one only works with {@link Shape}s - * created by the methods on this class, not from any {@link org.locationtech.spatial4j.context.SpatialContext}. + * Unlike "normal" spatial components in this module, this special-purpose one only works with + * {@link Shape}s created by the methods on this class, not from any {@link + * org.locationtech.spatial4j.context.SpatialContext}. * * @see org.apache.lucene.spatial.prefix.NumberRangePrefixTreeStrategy * @see <a href="https://issues.apache.org/jira/browse/LUCENE-5648">LUCENE-5648</a> @@ -60,23 +64,30 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { // private static final SpatialContext DUMMY_CTX; + static { SpatialContextFactory factory = new SpatialContextFactory(); factory.geo = false; - factory.worldBounds = new RectangleImpl(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, 0L, 0L, null); + factory.worldBounds = + new RectangleImpl(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, 0L, 0L, null); DUMMY_CTX = factory.newSpatialContext(); } - /** Base interface for {@link Shape}s this prefix tree supports. It extends {@link Shape} (Spatial4j) for compatibility - * with the spatial API even though it doesn't intermix with conventional 2D shapes. + /** + * Base interface for {@link Shape}s this prefix tree supports. It extends {@link Shape} + * (Spatial4j) for compatibility with the spatial API even though it doesn't intermix with + * conventional 2D shapes. + * + * @lucene.experimental + */ public static interface NRShape extends Shape, Cloneable { /** The result should be parseable by {@link #parseShape(String)}. */ abstract String toString(); - /** Returns this shape rounded to the target level. If we are already more coarse than the level then the shape is - * simply returned. The result may refer to internal state of the argument so you may want to clone it. + /** + * Returns this shape rounded to the target level. If we are already more coarse than the level + * then the shape is simply returned. The result may refer to internal state of the argument so + * you may want to clone it. */ public NRShape roundToLevel(int targetLevel); } @@ -85,38 +96,46 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { // Factory / Conversions / parsing relating to NRShapes // - /** Converts the value to a unit shape. Doesn't parse strings; see {@link #parseShape(String)} for - * that. This is the reverse of {@link #toObject(org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree.UnitNRShape)}. */ + /** + * Converts the value to a unit shape. Doesn't parse strings; see {@link #parseShape(String)} for + * that. This is the reverse of {@link + * #toObject(org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree.UnitNRShape)}. + */ public abstract UnitNRShape toUnitShape(Object value); - /** Returns a shape that represents the continuous range between {@code start} and {@code end}. It will - * be normalized, and so sometimes a {@link org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree.UnitNRShape} - * will be returned, other times a - * {@link org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree.SpanUnitsNRShape} will be. + /** + * Returns a shape that represents the continuous range between {@code start} and {@code end}.
It + * will be normalized, and so sometimes a {@link + * org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree.UnitNRShape} will be returned, + * other times a {@link + * org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree.SpanUnitsNRShape} will be. * - * @throws IllegalArgumentException if the arguments are in the wrong order, or if either contains the other (yet they - * aren't equal). + * @throws IllegalArgumentException if the arguments are in the wrong order, or if either contains + * the other (yet they aren't equal). */ public NRShape toRangeShape(UnitNRShape startUnit, UnitNRShape endUnit) { - //note: this normalization/optimization process is actually REQUIRED based on assumptions elsewhere. - //Normalize start & end - startUnit = startUnit.getShapeAtLevel(truncateStartVals(startUnit, 0)); // chops off trailing min-vals (zeroes) + // note: this normalization/optimization process is actually REQUIRED based on assumptions + // elsewhere. + // Normalize start & end + startUnit = + startUnit.getShapeAtLevel( + truncateStartVals(startUnit, 0)); // chops off trailing min-vals (zeroes) endUnit = endUnit.getShapeAtLevel(truncateEndVals(endUnit, 0)); // chops off trailing max-vals - //Optimize to just start or end if it's equivalent, e.g. April to April 1st is April 1st. + // Optimize to just start or end if it's equivalent, e.g. April to April 1st is April 1st. int cmp = comparePrefix(startUnit, endUnit); if (cmp > 0) { - throw new IllegalArgumentException("Wrong order: "+ startUnit +" TO "+ endUnit); + throw new IllegalArgumentException("Wrong order: " + startUnit + " TO " + endUnit); } - if (cmp == 0) {//one is a prefix of the other + if (cmp == 0) { // one is a prefix of the other if (startUnit.getLevel() == endUnit.getLevel()) { - //same + // same return startUnit; } else if (endUnit.getLevel() > startUnit.getLevel()) { // e.g. April to April 1st if (truncateStartVals(endUnit, startUnit.getLevel()) == startUnit.getLevel()) { return endUnit; } - } else {//minLV level > maxLV level + } else { // minLV level > maxLV level // e.g. April 30 to April if (truncateEndVals(startUnit, endUnit.getLevel()) == endUnit.getLevel()) { return startUnit; @@ -126,11 +145,13 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { return new SpanUnitsNRShape(startUnit, endUnit); } - /** From lv.getLevel on up, it returns the first Level seen with val != 0. It doesn't check past endLevel. */ + /** + * From lv.getLevel on up, it returns the first Level seen with val != 0. It doesn't check past + * endLevel. + */ private int truncateStartVals(UnitNRShape lv, int endLevel) { for (int level = lv.getLevel(); level > endLevel; level--) { - if (lv.getValAtLevel(level) != 0) - return level; + if (lv.getValAtLevel(level) != 0) return level; } return endLevel; } @@ -138,18 +159,21 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { private int truncateEndVals(UnitNRShape lv, int endLevel) { for (int level = lv.getLevel(); level > endLevel; level--) { int max = getNumSubCells(lv.getShapeAtLevel(level - 1)) - 1; - if (lv.getValAtLevel(level) != max) - return level; + if (lv.getValAtLevel(level) != max) return level; } return endLevel; } - /** Converts a UnitNRShape shape to the corresponding type supported by this class, such as a Calendar/BigDecimal. - * This is the reverse of {@link #toUnitShape(Object)}. + /** + * Converts a UnitNRShape shape to the corresponding type supported by this class, such as a + * Calendar/BigDecimal. 
This is the reverse of {@link #toUnitShape(Object)}. */ public abstract Object toObject(UnitNRShape shape); - /** A string representation of the UnitNRShape that is parse-able by {@link #parseUnitShape(String)}. */ + /** + * A string representation of the UnitNRShape that is parse-able by {@link + * #parseUnitShape(String)}. + */ protected abstract String toString(UnitNRShape lv); protected static String toStringUnitRaw(UnitNRShape lv) { @@ -158,16 +182,19 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { for (int level = 1; level <= lv.getLevel(); level++) { buf.append(lv.getValAtLevel(level)).append(','); } - buf.setLength(buf.length()-1);//chop off ',' + buf.setLength(buf.length() - 1); // chop off ',' buf.append(']'); return buf.toString(); } - /** Detects a range pattern and parses it, otherwise it's parsed as one shape via - * {@link #parseUnitShape(String)}. The range pattern looks like this BNF: + /** + * Detects a range pattern and parses it, otherwise it's parsed as one shape via {@link + * #parseUnitShape(String)}. The range pattern looks like this BNF: + * *
<pre> *   '[' + parseShapeLV + ' TO ' + parseShapeLV + ']' * </pre>
    + * * It's the same thing as the toString() of the range shape, notwithstanding range optimization. * * @param str not null or empty @@ -175,19 +202,18 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { * @throws java.text.ParseException If there is a problem */ public NRShape parseShape(String str) throws ParseException { - if (str == null || str.isEmpty()) - throw new IllegalArgumentException("str is null or blank"); + if (str == null || str.isEmpty()) throw new IllegalArgumentException("str is null or blank"); if (str.charAt(0) == '[') { - if (str.charAt(str.length()-1) != ']') - throw new ParseException("If starts with [ must end with ]; got "+str, str.length()-1); + if (str.charAt(str.length() - 1) != ']') + throw new ParseException("If starts with [ must end with ]; got " + str, str.length() - 1); int middle = str.indexOf(" TO "); if (middle < 0) - throw new ParseException("If starts with [ must contain ' TO '; got "+str, -1); + throw new ParseException("If starts with [ must contain ' TO '; got " + str, -1); String leftStr = str.substring(1, middle); - String rightStr = str.substring(middle + " TO ".length(), str.length()-1); + String rightStr = str.substring(middle + " TO ".length(), str.length() - 1); return toRangeShape(parseUnitShape(leftStr), parseUnitShape(rightStr)); } else if (str.charAt(0) == '{') { - throw new ParseException("Exclusive ranges not supported; got "+str, 0); + throw new ParseException("Exclusive ranges not supported; got " + str, 0); } else { return parseUnitShape(str); } @@ -196,25 +222,30 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { /** Parse a String to a UnitNRShape. "*" should be the full-range (level 0 shape). */ protected abstract UnitNRShape parseUnitShape(String str) throws ParseException; - // // UnitNRShape // /** - * A unit value Shape implemented as a stack of numbers, one for each level in the prefix tree. It directly - * corresponds to a {@link Cell}. Spatially speaking, it's analogous to a Point but 1D and has some precision width. + * A unit value Shape implemented as a stack of numbers, one for each level in the prefix tree. It + * directly corresponds to a {@link Cell}. Spatially speaking, it's analogous to a Point but 1D + * and has some precision width. + * * @lucene.experimental */ public static interface UnitNRShape extends NRShape, Comparable { - //note: formerly known as LevelledValue; thus some variables still use 'lv' + // note: formerly known as LevelledValue; thus some variables still use 'lv' /** Get the prefix tree level, the higher the more precise. 0 means the world (universe). */ int getLevel(); - /** Gets the value at the specified level of this unit. level must be >= 0 and <= getLevel(). */ + /** + * Gets the value at the specified level of this unit. level must be >= 0 and <= + * getLevel(). + */ int getValAtLevel(int level); /** Gets an ancestor at the specified level. It shares state, so you may want to clone() it. */ UnitNRShape getShapeAtLevel(int level); + @Override UnitNRShape roundToLevel(int targetLevel); @@ -222,45 +253,54 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { UnitNRShape clone(); } - /** Compares a to b, returning less than 0, 0, or greater than 0, if a is less than, equal to, or - * greater than b, respectively, up to their common prefix (i.e. only min(a.levels,b.levels) are compared). 
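To make the range grammar above concrete, here is a sketch using the date-based subclass of this tree (DateRangePrefixTree, not part of this patch hunk), assuming lucene-spatial-extras on the classpath:

import java.text.ParseException;
import org.apache.lucene.spatial.prefix.tree.DateRangePrefixTree;
import org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree.NRShape;

public class ParseShapeDemo {
  public static void main(String[] args) throws ParseException {
    DateRangePrefixTree tree = DateRangePrefixTree.INSTANCE;
    NRShape range = tree.parseShape("[2014-03 TO 2014-05-21]"); // '[' + unit + ' TO ' + unit + ']'
    System.out.println(range); // [2014-03 TO 2014-05-21]
    // the normalization noted at toRangeShape: "April to April 1st is April 1st"
    System.out.println(tree.parseShape("[2014-04 TO 2014-04-01]")); // 2014-04-01
  }
}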
- * @lucene.internal */ + /** + * Compares a to b, returning less than 0, 0, or greater than 0, if a is less than, equal to, or + * greater than b, respectively, up to their common prefix (i.e. only min(a.levels,b.levels) are + * compared). + * + * @lucene.internal + */ protected static int comparePrefix(UnitNRShape a, UnitNRShape b) { int minLevel = Math.min(a.getLevel(), b.getLevel()); for (int level = 1; level <= minLevel; level++) { int diff = a.getValAtLevel(level) - b.getValAtLevel(level); - if (diff != 0) - return diff; + if (diff != 0) return diff; } return 0; } - // // SpanUnitsNRShape // - /** A range Shape; based on a pair of {@link org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree.UnitNRShape}. - * Spatially speaking, it's analogous to a Rectangle but 1D. It might have been named with Range in the name but it - * may be confusing since even the {@link org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree.UnitNRShape} - * is in some sense a range. - * @lucene.experimental */ + /** + * A range Shape; based on a pair of {@link + * org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree.UnitNRShape}. Spatially speaking, + * it's analogous to a Rectangle but 1D. It might have been named with Range in the name but it + * may be confusing since even the {@link + * org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree.UnitNRShape} is in some sense a + * range. + * + * @lucene.experimental + */ public class SpanUnitsNRShape implements NRShape { private final UnitNRShape minLV, maxLV; - private final int lastLevelInCommon;//computed; not part of identity + private final int lastLevelInCommon; // computed; not part of identity - /** Don't call directly; see - * {@link #toRangeShape(org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree.UnitNRShape, org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree.UnitNRShape)}. */ + /** + * Don't call directly; see {@link + * #toRangeShape(org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree.UnitNRShape, + * org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree.UnitNRShape)}. + */ private SpanUnitsNRShape(UnitNRShape minLV, UnitNRShape maxLV) { this.minLV = minLV; this.maxLV = maxLV; - //calc lastLevelInCommon + // calc lastLevelInCommon int level = 1; for (; level <= minLV.getLevel() && level <= maxLV.getLevel(); level++) { - if (minLV.getValAtLevel(level) != maxLV.getValAtLevel(level)) - break; + if (minLV.getValAtLevel(level) != maxLV.getValAtLevel(level)) break; } lastLevelInCommon = level - 1; } @@ -270,12 +310,18 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { return DUMMY_CTX; } - public UnitNRShape getMinUnit() { return minLV; } + public UnitNRShape getMinUnit() { + return minLV; + } - public UnitNRShape getMaxUnit() { return maxLV; } + public UnitNRShape getMaxUnit() { + return maxLV; + } /** How many levels are in common between minUnit and maxUnit, not including level 0. 
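comparePrefix above orders two unit shapes only over the levels they have in common, which is what lets a coarse unit such as [2021] compare as equal to any of its descendants. The same logic as a standalone sketch over plain int arrays:

public class ComparePrefixDemo {
  static int comparePrefix(int[] a, int[] b) {
    int minLevels = Math.min(a.length, b.length);
    for (int i = 0; i < minLevels; i++) {
      int diff = a[i] - b[i];
      if (diff != 0) {
        return diff; // first differing shared level decides the order
      }
    }
    return 0; // equal up to the common prefix
  }

  public static void main(String[] args) {
    System.out.println(comparePrefix(new int[] {2021}, new int[] {2021, 5})); // 0 (prefix)
    System.out.println(comparePrefix(new int[] {2021, 4}, new int[] {2021, 5})); // negative
  }
}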
*/ - private int getLevelsInCommon() { return lastLevelInCommon; } + private int getLevelsInCommon() { + return lastLevelInCommon; + } @Override public NRShape roundToLevel(int targetLevel) { @@ -284,49 +330,62 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { @Override public SpatialRelation relate(Shape shape) { -// if (shape instanceof UnitNRShape) -// return relate((UnitNRShape)shape); - if (shape instanceof SpanUnitsNRShape) - return relate((SpanUnitsNRShape) shape); - return shape.relate(this).transpose();//probably a UnitNRShape + // if (shape instanceof UnitNRShape) + // return relate((UnitNRShape)shape); + if (shape instanceof SpanUnitsNRShape) return relate((SpanUnitsNRShape) shape); + return shape.relate(this).transpose(); // probably a UnitNRShape } public SpatialRelation relate(SpanUnitsNRShape ext) { - //This logic somewhat mirrors RectangleImpl.relate_range() + // This logic somewhat mirrors RectangleImpl.relate_range() int extMin_intMax = comparePrefix(ext.getMinUnit(), getMaxUnit()); - if (extMin_intMax > 0) - return SpatialRelation.DISJOINT; + if (extMin_intMax > 0) return SpatialRelation.DISJOINT; int extMax_intMin = comparePrefix(ext.getMaxUnit(), getMinUnit()); - if (extMax_intMin < 0) - return SpatialRelation.DISJOINT; + if (extMax_intMin < 0) return SpatialRelation.DISJOINT; int extMin_intMin = comparePrefix(ext.getMinUnit(), getMinUnit()); int extMax_intMax = comparePrefix(ext.getMaxUnit(), getMaxUnit()); - if ((extMin_intMin > 0 || extMin_intMin == 0 && ext.getMinUnit().getLevel() >= getMinUnit().getLevel()) - && (extMax_intMax < 0 || extMax_intMax == 0 && ext.getMaxUnit().getLevel() >= getMaxUnit().getLevel())) + if ((extMin_intMin > 0 + || extMin_intMin == 0 && ext.getMinUnit().getLevel() >= getMinUnit().getLevel()) + && (extMax_intMax < 0 + || extMax_intMax == 0 && ext.getMaxUnit().getLevel() >= getMaxUnit().getLevel())) return SpatialRelation.CONTAINS; - if ((extMin_intMin < 0 || extMin_intMin == 0 && ext.getMinUnit().getLevel() <= getMinUnit().getLevel()) - && (extMax_intMax > 0 || extMax_intMax == 0 && ext.getMaxUnit().getLevel() <= getMaxUnit().getLevel())) + if ((extMin_intMin < 0 + || extMin_intMin == 0 && ext.getMinUnit().getLevel() <= getMinUnit().getLevel()) + && (extMax_intMax > 0 + || extMax_intMax == 0 && ext.getMaxUnit().getLevel() <= getMaxUnit().getLevel())) return SpatialRelation.WITHIN; return SpatialRelation.INTERSECTS; } @Override - public Rectangle getBoundingBox() { throw new UnsupportedOperationException(); } + public Rectangle getBoundingBox() { + throw new UnsupportedOperationException(); + } @Override - public boolean hasArea() { return true; } + public boolean hasArea() { + return true; + } @Override - public double getArea(SpatialContext spatialContext) { throw new UnsupportedOperationException(); } + public double getArea(SpatialContext spatialContext) { + throw new UnsupportedOperationException(); + } @Override - public Point getCenter() { throw new UnsupportedOperationException(); } + public Point getCenter() { + throw new UnsupportedOperationException(); + } @Override - public Shape getBuffered(double v, SpatialContext spatialContext) { throw new UnsupportedOperationException(); } + public Shape getBuffered(double v, SpatialContext spatialContext) { + throw new UnsupportedOperationException(); + } @Override - public boolean isEmpty() { return false; } + public boolean isEmpty() { + return false; + } /** A deep clone. 
*/ @Override @@ -336,8 +395,11 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { @Override public String toString() { - return "[" + NumberRangePrefixTree.this.toString(minLV) + " TO " - + NumberRangePrefixTree.this.toString(maxLV) + "]"; + return "[" + + NumberRangePrefixTree.this.toString(minLV) + + " TO " + + NumberRangePrefixTree.this.toString(maxLV) + + "]"; } @Override @@ -359,7 +421,7 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { result = 31 * result + maxLV.hashCode(); return result; } - }// class SpanUnitsNRShape + } // class SpanUnitsNRShape // // NumberRangePrefixTree @@ -377,33 +439,33 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { // Fill termLenByLevel this.termLenByLevel = new int[maxLevels + 1]; termLenByLevel[0] = 0; - final int MAX_STATES = 1 << 15;//1 bit less than 2 bytes + final int MAX_STATES = 1 << 15; // 1 bit less than 2 bytes for (int level = 1; level <= maxLevels; level++) { final int states = maxSubCellsByLevel[level - 1]; if (states >= MAX_STATES || states <= 1) { - throw new IllegalArgumentException("Max states is "+MAX_STATES+", given "+states+" at level "+level); + throw new IllegalArgumentException( + "Max states is " + MAX_STATES + ", given " + states + " at level " + level); } boolean twoBytes = states >= 256; - termLenByLevel[level] = termLenByLevel[level-1] + (twoBytes ? 2 : 1); + termLenByLevel[level] = termLenByLevel[level - 1] + (twoBytes ? 2 : 1); } - maxTermLen = termLenByLevel[maxLevels] + 1;// + 1 for leaf byte + maxTermLen = termLenByLevel[maxLevels] + 1; // + 1 for leaf byte // Fill levelByTermLen levelByTermLen = new int[maxTermLen]; levelByTermLen[0] = 0; for (int level = 1; level < termLenByLevel.length; level++) { int termLen = termLenByLevel[level]; - int prevTermLen = termLenByLevel[level-1]; - if (termLen - prevTermLen == 2) {//2 byte delta - //if the term doesn't completely cover this cell then it must be a leaf of the prior. - levelByTermLen[termLen-1] = -1;//won't be used; otherwise erroneous + int prevTermLen = termLenByLevel[level - 1]; + if (termLen - prevTermLen == 2) { // 2 byte delta + // if the term doesn't completely cover this cell then it must be a leaf of the prior. + levelByTermLen[termLen - 1] = -1; // won't be used; otherwise erroneous levelByTermLen[termLen] = level; - } else {//1 byte delta + } else { // 1 byte delta assert termLen - prevTermLen == 1; levelByTermLen[termLen] = level; } } - } @Override @@ -413,22 +475,24 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { @Override public int getLevelForDistance(double dist) { - //note: it might be useful to compute which level has a raw width (counted in + // note: it might be useful to compute which level has a raw width (counted in // bottom units, e.g. milliseconds), that covers the provided dist in those units? - return maxLevels; // thus always use full precision. We don't do approximations in this tree/strategy. - //throw new UnsupportedOperationException("Not applicable."); + // + // thus always use full precision. We don't do approximations in this tree/strategy. + return maxLevels; + // throw new UnsupportedOperationException("Not applicable."); } @Override public double getDistanceForLevel(int level) { - //note: we could compute this... should we? + // note: we could compute this... should we? 
throw new UnsupportedOperationException("Not applicable."); } protected UnitNRShape toShape(int[] valStack, int len) { final NRCell[] cellStack = newCellStack(len); for (int i = 0; i < len; i++) { - cellStack[i+1].resetCellWithCellNum(valStack[i]); + cellStack[i + 1].resetCellWithCellNum(valStack[i]); } return cellStack[len]; } @@ -442,34 +506,32 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { final NRCell[] cellsByLevel = new NRCell[levels + 1]; final BytesRef term = new BytesRef(maxTermLen); for (int level = 0; level <= levels; level++) { - cellsByLevel[level] = new NRCell(cellsByLevel,term,level); + cellsByLevel[level] = new NRCell(cellsByLevel, term, level); } return cellsByLevel; } @Override public Cell readCell(BytesRef term, Cell scratch) { - if (scratch == null) - scratch = getWorldCell(); + if (scratch == null) scratch = getWorldCell(); - //We decode level #, leaf boolean, and populate bytes by reference. We don't decode the stack. + // We decode level #, leaf boolean, and populate bytes by reference. We don't decode the stack. - //reverse lookup term length to the level and hence the cell + // reverse lookup term length to the level and hence the cell NRCell[] cellsByLevel = ((NRCell) scratch).cellsByLevel; boolean isLeaf = term.bytes[term.offset + term.length - 1] == 0; int lenNoLeaf = isLeaf ? term.length - 1 : term.length; NRCell result = cellsByLevel[levelByTermLen[lenNoLeaf]]; if (cellsByLevel[0].termBuf == null) - cellsByLevel[0].termBuf = result.term.bytes;//a kluge; see cell.ensureOwnTermBytes() + cellsByLevel[0].termBuf = result.term.bytes; // a kluge; see cell.ensureOwnTermBytes() result.term.bytes = term.bytes; result.term.offset = term.offset; - result.term.length = lenNoLeaf;//technically this isn't used but may help debugging + result.term.length = lenNoLeaf; // technically this isn't used but may help debugging result.reset(); - if (isLeaf) - result.setLeaf(); + if (isLeaf) result.setLeaf(); - result.cellNumber = -1;//lazy decode flag + result.cellNumber = -1; // lazy decode flag return result; } @@ -483,26 +545,29 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { // NRCell // - /** Most of the PrefixTree implementation is in this one class, which is both - * the Cell, the CellIterator, and the Shape to reduce object allocation. It's implemented as a re-used array/stack - * of Cells at adjacent levels, that all have a reference back to the cell array to traverse. They also share a common - * BytesRef for the term. - * @lucene.internal */ + /** + * Most of the PrefixTree implementation is in this one class, which is both the Cell, the + * CellIterator, and the Shape to reduce object allocation. It's implemented as a re-used + * array/stack of Cells at adjacent levels, that all have a reference back to the cell array to + * traverse. They also share a common BytesRef for the term. + * + * @lucene.internal + */ protected class NRCell extends CellIterator implements Cell, UnitNRShape { - //Shared: (TODO put this in a new class) + // Shared: (TODO put this in a new class) final NRCell[] cellsByLevel; - final BytesRef term;//AKA the token - byte[] termBuf;// see ensureOwnTermBytes(), only for cell0 + final BytesRef term; // AKA the token + byte[] termBuf; // see ensureOwnTermBytes(), only for cell0 - //Cell state... + // Cell state... final int cellLevel; // assert levelStack[cellLevel] == this - int cellNumber; //relative to parent cell. It's unused for level 0. Starts at 0. 
+ int cellNumber; // relative to parent cell. It's unused for level 0. Starts at 0. SpatialRelation cellShapeRel; boolean cellIsLeaf; - //CellIterator state is defined further below + // CellIterator state is defined further below NRCell(NRCell[] cellsByLevel, BytesRef term, int cellLevel) { this.cellsByLevel = cellsByLevel; @@ -513,12 +578,13 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { assert cellsByLevel[cellLevel] == null; } - /** Ensure we own term.bytes so that it's safe to modify. We detect via a kluge in which cellsByLevel[0].termBuf - * is non-null, which is a pre-allocated for use to replace term.bytes. */ + /** + * Ensure we own term.bytes so that it's safe to modify. We detect via a kluge in which + * cellsByLevel[0].termBuf is non-null, which is a pre-allocated for use to replace term.bytes. + */ void ensureOwnTermBytes() { NRCell cell0 = cellsByLevel[0]; - if (cell0.termBuf == null) - return;//we already own the bytes + if (cell0.termBuf == null) return; // we already own the bytes System.arraycopy(term.bytes, term.offset, cell0.termBuf, 0, term.length); term.bytes = cell0.termBuf; term.offset = 0; @@ -533,39 +599,38 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { private void resetCellWithCellNum(int cellNumber) { reset(); - //update bytes + // update bytes // note: see lazyInitCellNumsFromBytes() for the reverse - if (cellNumber >= 0) {//valid + if (cellNumber >= 0) { // valid ensureOwnTermBytes(); int termLen = termLenByLevel[getLevel()]; - boolean twoBytes = (termLen - termLenByLevel[getLevel()-1]) > 1; + boolean twoBytes = (termLen - termLenByLevel[getLevel() - 1]) > 1; if (twoBytes) { - //right 7 bits, plus 1 (may overflow to 8th bit which is okay) - term.bytes[termLen-2] = (byte) (cellNumber >> 7); - term.bytes[termLen-1] = (byte) ((cellNumber & 0x7F) + 1); + // right 7 bits, plus 1 (may overflow to 8th bit which is okay) + term.bytes[termLen - 2] = (byte) (cellNumber >> 7); + term.bytes[termLen - 1] = (byte) ((cellNumber & 0x7F) + 1); } else { - term.bytes[termLen-1] = (byte) (cellNumber+1); + term.bytes[termLen - 1] = (byte) (cellNumber + 1); } - assert term.bytes[termLen-1] != 0; + assert term.bytes[termLen - 1] != 0; term.length = termLen; } this.cellNumber = cellNumber; } private void ensureDecoded() { - if (cellNumber >= 0) - return; - //Decode cell numbers from bytes. This is the inverse of resetCellWithCellNum(). + if (cellNumber >= 0) return; + // Decode cell numbers from bytes. This is the inverse of resetCellWithCellNum(). 
for (int level = 1; level <= getLevel(); level++) { NRCell cell = cellsByLevel[level]; int termLen = termLenByLevel[level]; - boolean twoBytes = (termLen - termLenByLevel[level-1]) > 1; + boolean twoBytes = (termLen - termLenByLevel[level - 1]) > 1; if (twoBytes) { int byteH = (term.bytes[term.offset + termLen - 2] & 0xFF); int byteL = (term.bytes[term.offset + termLen - 1] & 0xFF); - assert byteL - 1 < (1<<7); - cell.cellNumber = (byteH << 7) + (byteL-1); - assert cell.cellNumber < 1<<15; + assert byteL - 1 < (1 << 7); + cell.cellNumber = (byteH << 7) + (byteL - 1); + assert cell.cellNumber < 1 << 15; } else { cell.cellNumber = (term.bytes[term.offset + termLen - 1] & 0xFF) - 1; assert cell.cellNumber < 255; @@ -611,8 +676,7 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { @Override public BytesRef getTokenBytesNoLeaf(BytesRef result) { - if (result == null) - result = new BytesRef(); + if (result == null) result = new BytesRef(); result.bytes = term.bytes; result.offset = term.offset; result.length = termLenByLevel[cellLevel]; @@ -622,7 +686,7 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { @Override public BytesRef getTokenBytesWithLeaf(BytesRef result) { - ensureOwnTermBytes();//normally shouldn't do anything + ensureOwnTermBytes(); // normally shouldn't do anything result = getTokenBytesNoLeaf(result); if (isLeaf()) { result.bytes[result.length++] = 0; @@ -634,7 +698,7 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { public boolean isPrefixOf(Cell c) { NRCell otherCell = (NRCell) c; assert term != otherCell.term; - //trick to re-use bytesref; provided that we re-instate it + // trick to re-use bytesref; provided that we re-instate it int myLastLen = term.length; term.length = termLenByLevel[getLevel()]; int otherLastLen = otherCell.term.length; @@ -649,7 +713,7 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { public int compareToNoLeaf(Cell fromCell) { final NRCell nrCell = (NRCell) fromCell; assert term != nrCell.term; - //trick to re-use bytesref; provided that we re-instate it + // trick to re-use bytesref; provided that we re-instate it int myLastLen = term.length; int otherLastLen = nrCell.term.length; term.length = termLenByLevel[getLevel()]; @@ -668,9 +732,9 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { return subCell; } - //----------- CellIterator + // ----------- CellIterator - Shape iterFilter;//UnitNRShape or NRShape + Shape iterFilter; // UnitNRShape or NRShape boolean iterFirstIsIntersects; boolean iterLastIsIntersects; int iterFirstCellNumber; @@ -679,14 +743,14 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { private void initIter(Shape filter) { cellNumber = -1; if (filter instanceof UnitNRShape && ((UnitNRShape) filter).getLevel() == 0) - filter = null;//world means everything -- no filter + filter = null; // world means everything -- no filter iterFilter = filter; NRCell parent = getShapeAtLevel(getLevel() - 1); // Initialize iter* members. 
- //no filter means all subcells + // no filter means all subcells if (filter == null) { iterFirstCellNumber = 0; iterFirstIsIntersects = false; @@ -697,7 +761,7 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { final UnitNRShape minLV; final UnitNRShape maxLV; - final int lastLevelInCommon;//between minLV & maxLV + final int lastLevelInCommon; // between minLV & maxLV if (filter instanceof SpanUnitsNRShape) { SpanUnitsNRShape spanShape = (SpanUnitsNRShape) iterFilter; minLV = spanShape.getMinUnit(); @@ -709,11 +773,13 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { lastLevelInCommon = minLV.getLevel(); } - //fast path optimization that is usually true, but never first level - if (iterFilter == parent.iterFilter && - (getLevel() <= lastLevelInCommon || parent.iterFirstCellNumber != parent.iterLastCellNumber)) { - //TODO benchmark if this optimization pays off. We avoid two comparePrefixLV calls. - if (parent.iterFirstIsIntersects && parent.cellNumber == parent.iterFirstCellNumber + // fast path optimization that is usually true, but never first level + if (iterFilter == parent.iterFilter + && (getLevel() <= lastLevelInCommon + || parent.iterFirstCellNumber != parent.iterLastCellNumber)) { + // TODO benchmark if this optimization pays off. We avoid two comparePrefixLV calls. + if (parent.iterFirstIsIntersects + && parent.cellNumber == parent.iterFirstCellNumber && minLV.getLevel() >= getLevel()) { iterFirstCellNumber = minLV.getValAtLevel(getLevel()); iterFirstIsIntersects = (minLV.getLevel() > getLevel()); @@ -721,7 +787,8 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { iterFirstCellNumber = 0; iterFirstIsIntersects = false; } - if (parent.iterLastIsIntersects && parent.cellNumber == parent.iterLastCellNumber + if (parent.iterLastIsIntersects + && parent.cellNumber == parent.iterLastCellNumber && maxLV.getLevel() >= getLevel()) { iterLastCellNumber = maxLV.getValAtLevel(getLevel()); iterLastIsIntersects = (maxLV.getLevel() > getLevel()); @@ -730,34 +797,32 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { iterLastIsIntersects = false; } if (iterFirstCellNumber == iterLastCellNumber) { - if (iterLastIsIntersects) - iterFirstIsIntersects = true; - else if (iterFirstIsIntersects) - iterLastIsIntersects = true; + if (iterLastIsIntersects) iterFirstIsIntersects = true; + else if (iterFirstIsIntersects) iterLastIsIntersects = true; } return; } - //not common to get here, except for level 1 which always happens + // not common to get here, except for level 1 which always happens int startCmp = comparePrefix(minLV, parent); - if (startCmp > 0) {//start comes after this cell + if (startCmp > 0) { // start comes after this cell iterFirstCellNumber = 0; iterFirstIsIntersects = false; - iterLastCellNumber = -1;//so ends early (no cells) + iterLastCellNumber = -1; // so ends early (no cells) iterLastIsIntersects = false; return; } - int endCmp = comparePrefix(maxLV, parent);//compare to end cell - if (endCmp < 0) {//end comes before this cell + int endCmp = comparePrefix(maxLV, parent); // compare to end cell + if (endCmp < 0) { // end comes before this cell iterFirstCellNumber = 0; iterFirstIsIntersects = false; - iterLastCellNumber = -1;//so ends early (no cells) + iterLastCellNumber = -1; // so ends early (no cells) iterLastIsIntersects = false; return; } if (startCmp < 0 || minLV.getLevel() < getLevel()) { - //start comes before... + // start comes before... 
iterFirstCellNumber = 0; iterFirstIsIntersects = false; } else { @@ -765,7 +830,7 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { iterFirstIsIntersects = (minLV.getLevel() > getLevel()); } if (endCmp > 0 || maxLV.getLevel() < getLevel()) { - //end comes after... + // end comes after... iterLastCellNumber = getNumSubCells(parent) - 1; iterLastIsIntersects = false; } else { @@ -773,21 +838,18 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { iterLastIsIntersects = (maxLV.getLevel() > getLevel()); } if (iterFirstCellNumber == iterLastCellNumber) { - if (iterLastIsIntersects) - iterFirstIsIntersects = true; - else if (iterFirstIsIntersects) - iterLastIsIntersects = true; + if (iterLastIsIntersects) iterFirstIsIntersects = true; + else if (iterFirstIsIntersects) iterLastIsIntersects = true; } } @Override public boolean hasNext() { thisCell = null; - if (nextCell != null)//calling hasNext twice in a row - return true; + if (nextCell != null) // calling hasNext twice in a row + return true; - if (cellNumber >= iterLastCellNumber) - return false; + if (cellNumber >= iterLastCellNumber) return false; resetCellWithCellNum(cellNumber < iterFirstCellNumber ? iterFirstCellNumber : cellNumber + 1); @@ -808,14 +870,14 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { return true; } - //TODO override nextFrom to be more efficient + // TODO override nextFrom to be more efficient - //----------- UnitNRShape + // ----------- UnitNRShape @Override public int getValAtLevel(int level) { final int result = cellsByLevel[level].cellNumber; - assert result >= 0;//initialized (decoded) + assert result >= 0; // initialized (decoded) return result; } @@ -837,50 +899,43 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { @Override public SpatialRelation relate(Shape shape) { assertDecoded(); - if (shape == iterFilter && cellShapeRel != null) - return cellShapeRel; - if (shape instanceof UnitNRShape) - return relate((UnitNRShape)shape); - if (shape instanceof SpanUnitsNRShape) - return relate((SpanUnitsNRShape)shape); + if (shape == iterFilter && cellShapeRel != null) return cellShapeRel; + if (shape instanceof UnitNRShape) return relate((UnitNRShape) shape); + if (shape instanceof SpanUnitsNRShape) return relate((SpanUnitsNRShape) shape); return shape.relate(this).transpose(); } public SpatialRelation relate(UnitNRShape lv) { assertDecoded(); int cmp = comparePrefix(this, lv); - if (cmp != 0) - return SpatialRelation.DISJOINT; - if (getLevel() > lv.getLevel()) - return SpatialRelation.WITHIN; - return SpatialRelation.CONTAINS;//or equals - //no INTERSECTS; that won't happen. + if (cmp != 0) return SpatialRelation.DISJOINT; + if (getLevel() > lv.getLevel()) return SpatialRelation.WITHIN; + return SpatialRelation.CONTAINS; // or equals + // no INTERSECTS; that won't happen. 
} public SpatialRelation relate(SpanUnitsNRShape spanShape) { assertDecoded(); int startCmp = comparePrefix(spanShape.getMinUnit(), this); - if (startCmp > 0) {//start comes after this cell + if (startCmp > 0) { // start comes after this cell return SpatialRelation.DISJOINT; } int endCmp = comparePrefix(spanShape.getMaxUnit(), this); - if (endCmp < 0) {//end comes before this cell + if (endCmp < 0) { // end comes before this cell return SpatialRelation.DISJOINT; } int nrMinLevel = spanShape.getMinUnit().getLevel(); int nrMaxLevel = spanShape.getMaxUnit().getLevel(); if ((startCmp < 0 || startCmp == 0 && nrMinLevel <= getLevel()) && (endCmp > 0 || endCmp == 0 && nrMaxLevel <= getLevel())) - return SpatialRelation.WITHIN;//or equals - //At this point it's Contains or Within. - if (startCmp != 0 || endCmp != 0) - return SpatialRelation.INTERSECTS; - //if min or max Level is less, it might be on the equivalent edge. - for (;nrMinLevel < getLevel(); nrMinLevel++) { - if (getValAtLevel(nrMinLevel + 1) != 0) - return SpatialRelation.INTERSECTS; + return SpatialRelation.WITHIN; // or equals + // At this point it's Contains or Within. + if (startCmp != 0 || endCmp != 0) return SpatialRelation.INTERSECTS; + // if min or max Level is less, it might be on the equivalent edge. + for (; nrMinLevel < getLevel(); nrMinLevel++) { + if (getValAtLevel(nrMinLevel + 1) != 0) return SpatialRelation.INTERSECTS; } - for (;nrMaxLevel < getLevel(); nrMaxLevel++) { + for (; nrMaxLevel < getLevel(); nrMaxLevel++) { if (getValAtLevel(nrMaxLevel + 1) != getNumSubCells(getShapeAtLevel(nrMaxLevel)) - 1) return SpatialRelation.INTERSECTS; } @@ -889,7 +944,7 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { @Override public UnitNRShape clone() { - //no leaf distinction; this is purely based on UnitNRShape + // no leaf distinction; this is purely based on UnitNRShape NRCell cell = (NRCell) readCell(getTokenBytesNoLeaf(null), null); cell.ensureOwnTermBytes(); return cell.getShape(); @@ -898,7 +953,7 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { @Override public int compareTo(UnitNRShape o) { assertDecoded(); - //no leaf distinction; this is purely based on UnitNRShape + // no leaf distinction; this is purely based on UnitNRShape int cmp = comparePrefix(this, o); if (cmp != 0) { return cmp; @@ -928,27 +983,27 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { } @Override - public Shape getBuffered(double distance, SpatialContext ctx) { throw new UnsupportedOperationException(); } + public Shape getBuffered(double distance, SpatialContext ctx) { + throw new UnsupportedOperationException(); + } @Override public boolean isEmpty() { return false; } - //------- Object + // ------- Object @Override public boolean equals(Object obj) { if (!(obj instanceof NRCell)) { return false; } - if (this == obj) - return true; + if (this == obj) return true; NRCell nrCell = (NRCell) obj; assert term != nrCell.term; - if (getLevel() != nrCell.getLevel()) - return false; - //trick to re-use bytesref; provided that we re-instate it + if (getLevel() != nrCell.getLevel()) return false; + // trick to re-use bytesref; provided that we re-instate it int myLastLen = term.length; int otherLastLen = nrCell.term.length; boolean answer = getTokenBytesNoLeaf(term).equals(nrCell.getTokenBytesNoLeaf(nrCell.term)); @@ -964,7 +1019,7 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { @Override public int hashCode() { - //trick to re-use bytesref; provided that 
we re-instate it + // trick to re-use bytesref; provided that we re-instate it int myLastLen = term.length; int result = getTokenBytesNoLeaf(term).hashCode(); term.length = myLastLen; @@ -979,11 +1034,8 @@ public abstract class NumberRangePrefixTree extends SpatialPrefixTree { /** Configure your IDE to use this. */ public String toStringDebug() { String pretty = toString(); - if (getLevel() == 0) - return pretty; + if (getLevel() == 0) return pretty; return toStringUnitRaw(this) + (isLeaf() ? "•" : "") + " " + pretty; } - } // END OF NRCell - } diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/PackedQuadPrefixTree.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/PackedQuadPrefixTree.java index dbb40549097..b74f702c9aa 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/PackedQuadPrefixTree.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/PackedQuadPrefixTree.java @@ -20,7 +20,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.NoSuchElementException; - import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Version; import org.locationtech.spatial4j.context.SpatialContext; @@ -33,7 +32,8 @@ import org.locationtech.spatial4j.shape.impl.RectangleImpl; /** * Uses a compact binary representation of 8 bytes to encode a spatial quad trie. * - * The binary representation is as follows: + *
<p>The binary representation is as follows:
 + *
  * <pre>
  * CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCDDDDDL
  *
@@ -42,10 +42,10 @@ import org.locationtech.spatial4j.shape.impl.RectangleImpl;
  *       L = isLeaf bit
  * </pre>
    * - * It includes a built-in "pruneLeafyBranches" setting (true by default) similar to - * {@link org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy#setPruneLeafyBranches(boolean)} although - * this one only prunes at the target detail level (where it has the most effect). Usually you should disable RPT's - * prune, since it is very memory in-efficient. + * It includes a built-in "pruneLeafyBranches" setting (true by default) similar to {@link + * org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy#setPruneLeafyBranches(boolean)} + * although this one only prunes at the target detail level (where it has the most effect). Usually + * you should disable RPT's prune, since it is very memory in-efficient. * * @lucene.experimental */ @@ -55,13 +55,12 @@ public class PackedQuadPrefixTree extends QuadPrefixTree { protected boolean leafyPrune = true; - /** - * Factory for creating {@link PackedQuadPrefixTree} instances with useful defaults. - */ + /** Factory for creating {@link PackedQuadPrefixTree} instances with useful defaults. */ public static class Factory extends QuadPrefixTree.Factory { @Override protected SpatialPrefixTree newSPT() { - PackedQuadPrefixTree tree = new PackedQuadPrefixTree(ctx, maxLevels != null ? maxLevels : MAX_LEVELS_POSSIBLE); + PackedQuadPrefixTree tree = + new PackedQuadPrefixTree(ctx, maxLevels != null ? maxLevels : MAX_LEVELS_POSSIBLE); @SuppressWarnings("deprecation") Version lucene830 = Version.LUCENE_8_3_0; tree.robust = getVersion().onOrAfter(lucene830); @@ -72,13 +71,21 @@ public class PackedQuadPrefixTree extends QuadPrefixTree { public PackedQuadPrefixTree(SpatialContext ctx, int maxLevels) { super(ctx, maxLevels); if (maxLevels > MAX_LEVELS_POSSIBLE) { - throw new IllegalArgumentException("maxLevels of " + maxLevels + " exceeds limit of " + MAX_LEVELS_POSSIBLE); + throw new IllegalArgumentException( + "maxLevels of " + maxLevels + " exceeds limit of " + MAX_LEVELS_POSSIBLE); } } @Override public String toString() { - return getClass().getSimpleName() + "(maxLevels:" + maxLevels + ",ctx:" + ctx + ",prune:" + leafyPrune + ")"; + return getClass().getSimpleName() + + "(maxLevels:" + + maxLevels + + ",ctx:" + + ctx + + ",prune:" + + leafyPrune + + ")"; } @Override @@ -90,9 +97,10 @@ public class PackedQuadPrefixTree extends QuadPrefixTree { public Cell getCell(Point p, int level) { if (!robust) { // old method List cells = new ArrayList<>(1); - buildNotRobustly(xmid, ymid, 0, cells, 0x0L, ctx.getShapeFactory().pointXY(p.getX(), p.getY()), level); + buildNotRobustly( + xmid, ymid, 0, cells, 0x0L, ctx.getShapeFactory().pointXY(p.getX(), p.getY()), level); if (!cells.isEmpty()) { - return cells.get(0);//note cells could be longer if p on edge + return cells.get(0); // note cells could be longer if p on edge } } @@ -100,14 +108,14 @@ public class PackedQuadPrefixTree extends QuadPrefixTree { double currentYmid = ymid; double xp = p.getX(); double yp = p.getY(); - long term = 0L; + long term = 0L; int levelLimit = level > maxLevels ? 
maxLevels : level; SpatialRelation rel = SpatialRelation.CONTAINS; - for (int lvl = 0; lvl < levelLimit; lvl++){ + for (int lvl = 0; lvl < levelLimit; lvl++) { int quad = battenberg(currentXmid, currentYmid, xp, yp); double halfWidth = levelW[lvl + 1]; double halfHeight = levelH[lvl + 1]; - switch(quad){ + switch (quad) { case 0: currentXmid -= halfWidth; currentYmid += halfHeight; @@ -127,14 +135,15 @@ public class PackedQuadPrefixTree extends QuadPrefixTree { default: } // set bits for next level - term |= (((long)(quad))<<(64-((lvl + 1)<<1))); + term |= (((long) (quad)) << (64 - ((lvl + 1) << 1))); // increment level - term = ((term>>>1)+1)<<1; + term = ((term >>> 1) + 1) << 1; } return new PackedQuadCell(term, rel); } - protected void buildNotRobustly(double x, double y, int level, List matches, long term, Shape shape, int maxLevel) { + protected void buildNotRobustly( + double x, double y, int level, List matches, long term, Shape shape, int maxLevel) { double w = levelW[level] / 2; double h = levelH[level] / 2; @@ -146,11 +155,17 @@ public class PackedQuadPrefixTree extends QuadPrefixTree { checkBattenbergNotRobustly(QUAD[3], x + w, y - h, level, matches, term, shape, maxLevel); } - protected void checkBattenbergNotRobustly(byte quad, double cx, double cy, int level, List matches, - long term, Shape shape, int maxLevel) { + protected void checkBattenbergNotRobustly( + byte quad, + double cx, + double cy, + int level, + List matches, + long term, + Shape shape, + int maxLevel) { // short-circuit if we find a match for the point (no need to continue recursion) - if (shape instanceof Point && !matches.isEmpty()) - return; + if (shape instanceof Point && !matches.isEmpty()) return; double w = levelW[level] / 2; double h = levelH[level] / 2; @@ -161,13 +176,13 @@ public class PackedQuadPrefixTree extends QuadPrefixTree { } // set bits for next level - term |= (((long)(quad))<<(64-(++level<<1))); + term |= (((long) (quad)) << (64 - (++level << 1))); // increment level - term = ((term>>>1)+1)<<1; + term = ((term >>> 1) + 1) << 1; if (SpatialRelation.CONTAINS == v || (level >= maxLevel)) { matches.add(new PackedQuadCell(term, v.transpose())); - } else {// SpatialRelation.WITHIN, SpatialRelation.INTERSECTS + } else { // SpatialRelation.WITHIN, SpatialRelation.INTERSECTS buildNotRobustly(cx, cy, level, matches, term, shape, maxLevel); } } @@ -175,8 +190,7 @@ public class PackedQuadPrefixTree extends QuadPrefixTree { @Override public Cell readCell(BytesRef term, Cell scratch) { PackedQuadCell cell = (PackedQuadCell) scratch; - if (cell == null) - cell = (PackedQuadCell) getWorldCell(); + if (cell == null) cell = (PackedQuadCell) getWorldCell(); cell.readCell(term); return cell; } @@ -184,7 +198,8 @@ public class PackedQuadPrefixTree extends QuadPrefixTree { @Override public CellIterator getTreeCellIterator(Shape shape, int detailLevel) { if (detailLevel > maxLevels) { - throw new IllegalArgumentException("detailLevel:" + detailLevel +" exceed max: " + maxLevels); + throw new IllegalArgumentException( + "detailLevel:" + detailLevel + " exceed max: " + maxLevels); } return new PrefixTreeIterator(shape, (short) detailLevel); } @@ -193,9 +208,12 @@ public class PackedQuadPrefixTree extends QuadPrefixTree { return leafyPrune; } - /** Like {@link org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy#setPruneLeafyBranches(boolean)} - * but more memory efficient and only applies to the detailLevel, where it has the most effect. 
*/ - public void setPruneLeafyBranches( boolean pruneLeafyBranches ) { + /** + * Like {@link + * org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy#setPruneLeafyBranches(boolean)} + * but more memory efficient and only applies to the detailLevel, where it has the most effect. + */ + public void setPruneLeafyBranches(boolean pruneLeafyBranches) { this.leafyPrune = pruneLeafyBranches; } @@ -229,37 +247,38 @@ public class PackedQuadPrefixTree extends QuadPrefixTree { } private final int getShiftForLevel(final int level) { - return 64 - (level<<1); + return 64 - (level << 1); } public boolean isEnd(final int level, final int shift) { - return (term != 0x0L && ((((0x1L<<(level<<1))-1)-(term>>>shift)) == 0x0L)); + return (term != 0x0L && ((((0x1L << (level << 1)) - 1) - (term >>> shift)) == 0x0L)); } /** - * Get the next cell in the tree without using recursion. descend parameter requests traversal to the child nodes, - * setting this to false will step to the next sibling. - * Note: This complies with lexicographical ordering, once you've moved to the next sibling there is no backtracking. + * Get the next cell in the tree without using recursion. descend parameter requests traversal + * to the child nodes, setting this to false will step to the next sibling. Note: This complies + * with lexicographical ordering, once you've moved to the next sibling there is no + * backtracking. */ public PackedQuadCell nextCell(boolean descend) { final int level = getLevel(); final int shift = getShiftForLevel(level); // base case: can't go further - if ( (!descend && isEnd(level, shift)) || isEnd(maxLevels, getShiftForLevel(maxLevels))) { + if ((!descend && isEnd(level, shift)) || isEnd(maxLevels, getShiftForLevel(maxLevels))) { return null; } long newTerm; - final boolean isLeaf = (term&0x1L)==0x1L; + final boolean isLeaf = (term & 0x1L) == 0x1L; // if descend requested && we're not at the maxLevel if ((descend && !isLeaf && (level != maxLevels)) || level == 0) { // simple case: increment level bits (next level) - newTerm = ((term>>>1)+0x1L)<<1; - } else { // we're not descending or we can't descend - newTerm = term + (0x1L<>> 1) + 0x1L) << 1; + } else { // we're not descending or we can't descend + newTerm = term + (0x1L << shift); // we're at the last sibling...force descend - if (((term>>>shift)&0x3L) == 0x3L) { + if (((term >>> shift) & 0x3L) == 0x3L) { // adjust level for number popping up - newTerm = ((newTerm>>>1) - (Long.numberOfTrailingZeros(newTerm>>>shift)>>>1))<<1; + newTerm = ((newTerm >>> 1) - (Long.numberOfTrailingZeros(newTerm >>> shift) >>> 1)) << 1; } } return new PackedQuadCell(newTerm); @@ -267,7 +286,7 @@ public class PackedQuadPrefixTree extends QuadPrefixTree { @Override protected void readLeafAdjust() { - isLeaf = ((0x1L)&term) == 0x1L; + isLeaf = ((0x1L) & term) == 0x1L; if (getLevel() == getMaxLevels()) { isLeaf = true; } @@ -300,26 +319,33 @@ public class PackedQuadPrefixTree extends QuadPrefixTree { @Override public int compareToNoLeaf(Cell fromCell) { PackedQuadCell b = (PackedQuadCell) fromCell; - //TODO clear last bit without the condition - final long thisTerm = (((0x1L)&term) == 0x1L) ? term-1 : term; - final long fromTerm = (((0x1L)&b.term) == 0x1L) ? b.term-1 : b.term; + // TODO clear last bit without the condition + final long thisTerm = (((0x1L) & term) == 0x1L) ? term - 1 : term; + final long fromTerm = (((0x1L) & b.term) == 0x1L) ? 
b.term - 1 : b.term; final int result = Long.compareUnsigned(thisTerm, fromTerm); assert Math.signum(result) - == Math.signum(compare(longToByteArray(thisTerm, new byte[8]), 0, 8, longToByteArray(fromTerm, new byte[8]), 0, 8)); // TODO remove + == Math.signum( + compare( + longToByteArray(thisTerm, new byte[8]), + 0, + 8, + longToByteArray(fromTerm, new byte[8]), + 0, + 8)); // TODO remove return result; } @Override public int getLevel() { - int l = (int)((term >>> 1)&0x1FL); + int l = (int) ((term >>> 1) & 0x1FL); return l; } @Override protected Collection getSubCells() { List cells = new ArrayList<>(4); - PackedQuadCell pqc = (new PackedQuadCell(((term&0x1)==0x1) ? this.term-1 : this.term)) - .nextCell(true); + PackedQuadCell pqc = + (new PackedQuadCell(((term & 0x1) == 0x1) ? this.term - 1 : this.term)).nextCell(true); cells.add(pqc); cells.add((pqc = pqc.nextCell(false))); cells.add((pqc = pqc.nextCell(false))); @@ -329,28 +355,27 @@ public class PackedQuadPrefixTree extends QuadPrefixTree { @Override protected QuadCell getSubCell(Point p) { - return (PackedQuadCell) PackedQuadPrefixTree.this.getCell(p, getLevel() + 1);//not performant! + return (PackedQuadCell) + PackedQuadPrefixTree.this.getCell(p, getLevel() + 1); // not performant! } @Override public boolean isPrefixOf(Cell c) { - PackedQuadCell cell = (PackedQuadCell)c; + PackedQuadCell cell = (PackedQuadCell) c; return (this.term == 0x0L) || isInternalPrefix(cell); } protected boolean isInternalPrefix(PackedQuadCell c) { - final int shift = 64 - (getLevel()<<1); - return ((term>>>shift)-(c.term>>>shift)) == 0x0L; + final int shift = 64 - (getLevel() << 1); + return ((term >>> shift) - (c.term >>> shift)) == 0x0L; } protected long concat(byte postfix) { // extra leaf bit - return this.term | (((long)(postfix))<<((getMaxLevels()-getLevel()<<1)+6)); + return this.term | (((long) (postfix)) << ((getMaxLevels() - getLevel() << 1) + 6)); } - /** - * Constructs a bounding box shape out of the encoded cell - */ + /** Constructs a bounding box shape out of the encoded cell */ @Override protected Rectangle makeShape() { double xmin = PackedQuadPrefixTree.this.xmin; @@ -358,8 +383,8 @@ public class PackedQuadPrefixTree extends QuadPrefixTree { int level = getLevel(); byte b; - for (short l=0, i=1; l>>(64-(i<<1))) & 0x3L); + for (short l = 0, i = 1; l < level; ++l, ++i) { + b = (byte) ((term >>> (64 - (i << 1))) & 0x3L); switch (b) { case 0x00: @@ -370,7 +395,7 @@ public class PackedQuadPrefixTree extends QuadPrefixTree { ymin += levelH[l]; break; case 0x02: - break;//nothing really + break; // nothing really case 0x03: xmin += levelW[l]; break; @@ -391,14 +416,19 @@ public class PackedQuadPrefixTree extends QuadPrefixTree { } private long fromBytes(byte b1, byte b2, byte b3, byte b4, byte b5, byte b6, byte b7, byte b8) { - return ((long)b1 & 255L) << 56 | ((long)b2 & 255L) << 48 | ((long)b3 & 255L) << 40 - | ((long)b4 & 255L) << 32 | ((long)b5 & 255L) << 24 | ((long)b6 & 255L) << 16 - | ((long)b7 & 255L) << 8 | (long)b8 & 255L; + return ((long) b1 & 255L) << 56 + | ((long) b2 & 255L) << 48 + | ((long) b3 & 255L) << 40 + | ((long) b4 & 255L) << 32 + | ((long) b5 & 255L) << 24 + | ((long) b6 & 255L) << 16 + | ((long) b7 & 255L) << 8 + | (long) b8 & 255L; } private byte[] longToByteArray(long value, byte[] result) { - for(int i = 7; i >= 0; --i) { - result[i] = (byte)((int)(value & 255L)); + for (int i = 7; i >= 0; --i) { + result[i] = (byte) ((int) (value & 255L)); value >>= 8; } return result; @@ -406,13 +436,18 @@ public class 
PackedQuadPrefixTree extends QuadPrefixTree { private long longFromByteArray(byte[] bytes, int ofs) { assert bytes.length >= 8; - return fromBytes(bytes[0+ofs], bytes[1+ofs], bytes[2+ofs], bytes[3+ofs], - bytes[4+ofs], bytes[5+ofs], bytes[6+ofs], bytes[7+ofs]); + return fromBytes( + bytes[0 + ofs], + bytes[1 + ofs], + bytes[2 + ofs], + bytes[3 + ofs], + bytes[4 + ofs], + bytes[5 + ofs], + bytes[6 + ofs], + bytes[7 + ofs]); } - /** - * Used for debugging, this will print the bits of the cell - */ + /** Used for debugging, this will print the bits of the cell */ @Override public String toString() { StringBuilder s = new StringBuilder(64); @@ -420,14 +455,15 @@ public class PackedQuadPrefixTree extends QuadPrefixTree { for (int i = 0; i < numberOfLeadingZeros; i++) { s.append('0'); } - if (term != 0) - s.append(Long.toBinaryString(term)); + if (term != 0) s.append(Long.toBinaryString(term)); return s.toString(); } } // PackedQuadCell - /** This is a streamlined version of TreeCellIterator, with built-in support to prune at detailLevel - * (but not recursively upwards). */ + /** + * This is a streamlined version of TreeCellIterator, with built-in support to prune at + * detailLevel (but not recursively upwards). + */ protected class PrefixTreeIterator extends CellIterator { private Shape shape; private PackedQuadCell thisCell; @@ -439,7 +475,7 @@ public class PackedQuadPrefixTree extends QuadPrefixTree { PrefixTreeIterator(Shape shape, short detailLevel) { this.shape = shape; - this.thisCell = ((PackedQuadCell)(getWorldCell())).nextCell(true); + this.thisCell = ((PackedQuadCell) (getWorldCell())).nextCell(true); this.detailLevel = detailLevel; this.nextCell = null; } @@ -461,7 +497,7 @@ public class PackedQuadPrefixTree extends QuadPrefixTree { if (rel == SpatialRelation.WITHIN) { thisCell.setLeaf(); thisCell = thisCell.nextCell(false); - } else { // intersects || contains + } else { // intersects || contains level = (short) (thisCell.getLevel()); if (level == detailLevel || pruned(rel)) { thisCell.setLeaf(); @@ -484,7 +520,10 @@ public class PackedQuadPrefixTree extends QuadPrefixTree { private boolean pruned(SpatialRelation rel) { int leaves; if (rel == SpatialRelation.INTERSECTS && leafyPrune && level == detailLevel - 1) { - for (leaves=0, pruneIter=thisCell.getNextLevelCells(shape); pruneIter.hasNext(); pruneIter.next(), ++leaves); + for (leaves = 0, pruneIter = thisCell.getNextLevelCells(shape); + pruneIter.hasNext(); + pruneIter.next(), ++leaves) + ; return leaves == 4; } return false; @@ -505,7 +544,7 @@ public class PackedQuadPrefixTree extends QuadPrefixTree { @Override public void remove() { - //no-op + // no-op } } } diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/QuadPrefixTree.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/QuadPrefixTree.java index 89cf5e1e6f1..2e72745d07a 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/QuadPrefixTree.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/QuadPrefixTree.java @@ -22,27 +22,23 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.Locale; - +import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Version; import org.locationtech.spatial4j.context.SpatialContext; import org.locationtech.spatial4j.shape.Point; import org.locationtech.spatial4j.shape.Rectangle; import org.locationtech.spatial4j.shape.Shape; import 
org.locationtech.spatial4j.shape.SpatialRelation; -import org.apache.lucene.util.BytesRef; /** - * A {@link SpatialPrefixTree} which uses a - * quad tree in which an - * indexed term will be generated for each cell, 'A', 'B', 'C', 'D'. + * A {@link SpatialPrefixTree} which uses a quad + * tree in which an indexed term will be generated for each cell, 'A', 'B', 'C', 'D'. * * @lucene.experimental */ public class QuadPrefixTree extends LegacyPrefixTree { - /** - * Factory for creating {@link QuadPrefixTree} instances with useful defaults - */ + /** Factory for creating {@link QuadPrefixTree} instances with useful defaults */ public static class Factory extends SpatialPrefixTreeFactory { @Override protected int getLevelForDistance(double degrees) { @@ -51,8 +47,8 @@ public class QuadPrefixTree extends LegacyPrefixTree { @Override protected SpatialPrefixTree newSPT() { - QuadPrefixTree tree = new QuadPrefixTree(ctx, - maxLevels != null ? maxLevels : MAX_LEVELS_POSSIBLE); + QuadPrefixTree tree = + new QuadPrefixTree(ctx, maxLevels != null ? maxLevels : MAX_LEVELS_POSSIBLE); @SuppressWarnings("deprecation") Version LUCENE_8_3_0 = Version.LUCENE_8_3_0; tree.robust = getVersion().onOrAfter(LUCENE_8_3_0); @@ -60,7 +56,7 @@ public class QuadPrefixTree extends LegacyPrefixTree { } } - public static final int MAX_LEVELS_POSSIBLE = 50;//not really sure how big this should be + public static final int MAX_LEVELS_POSSIBLE = 50; // not really sure how big this should be public static final int DEFAULT_MAX_LEVELS = 12; protected final double xmin; @@ -76,10 +72,10 @@ public class QuadPrefixTree extends LegacyPrefixTree { final double[] levelW; final double[] levelH; - protected boolean robust = true; // for backward compatibility, use the old method if user specified old version. + protected boolean robust = + true; // for backward compatibility, use the old method if user specified old version. 
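
For illustration, here is a minimal, self-contained sketch of the per-level cell walk that the reformatted getCell(Point, int) in the hunk below performs. It is not part of this patch: the class and method names are hypothetical, it assumes the default geo world bounds (-180..180 longitude, -90..90 latitude), and it skips the robust/legacy split and the BytesRef handling of the real code.

```java
// Hypothetical sketch of QuadPrefixTree's token construction; not patch code.
public final class QuadTokenSketch {

  /** Returns a Z-order quadrant [0-3] of the point relative to the cell midpoint. */
  static int battenberg(double xmid, double ymid, double xp, double yp) {
    if (ymid <= yp) {
      return (xmid >= xp) ? 0 : 1; // upper half: 'A' or 'B'
    }
    return (xmid >= xp) ? 2 : 3; // lower half: 'C' or 'D'
  }

  /** Builds the 'A'..'D' token for a point down to the given level. */
  static String token(double xp, double yp, int levels) {
    double xmid = 0.0; // midpoint of the world cell
    double ymid = 0.0;
    double halfW = 90.0; // half extents of a level-1 cell (levelW[1])
    double halfH = 45.0; // (levelH[1])
    StringBuilder sb = new StringBuilder(levels);
    for (int lvl = 0; lvl < levels; lvl++) {
      int q = battenberg(xmid, ymid, xp, yp);
      // shift the midpoint into the chosen quadrant, as getCell()'s switch does
      xmid += (q == 1 || q == 3) ? halfW : -halfW;
      ymid += (q == 0 || q == 1) ? halfH : -halfH;
      halfW /= 2.0;
      halfH /= 2.0;
      sb.append((char) ('A' + q));
    }
    return sb.toString();
  }

  public static void main(String[] args) {
    System.out.println(token(-100.0, 40.0, 4)); // prints "ACBB"
  }
}
```

The same per-level walk underlies PackedQuadPrefixTree.getCell, which packs the two quadrant bits per level into a long term instead of appending 'A' + c bytes.
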
- public QuadPrefixTree( - SpatialContext ctx, Rectangle bounds, int maxLevels) { + public QuadPrefixTree(SpatialContext ctx, Rectangle bounds, int maxLevels) { super(ctx, maxLevels); this.xmin = bounds.getMinX(); this.xmax = bounds.getMaxX(); @@ -91,10 +87,10 @@ public class QuadPrefixTree extends LegacyPrefixTree { gridW = xmax - xmin; gridH = ymax - ymin; - this.xmid = xmin + gridW/2.0; - this.ymid = ymin + gridH/2.0; - levelW[0] = gridW/2.0; - levelH[0] = gridH/2.0; + this.xmid = xmin + gridW / 2.0; + this.ymid = ymin + gridH / 2.0; + levelW[0] = gridW / 2.0; + levelH[0] = gridH / 2.0; for (int i = 1; i < levelW.length; i++) { levelW[i] = levelW[i - 1] / 2.0; @@ -106,8 +102,7 @@ public class QuadPrefixTree extends LegacyPrefixTree { this(ctx, DEFAULT_MAX_LEVELS); } - public QuadPrefixTree( - SpatialContext ctx, int maxLevels) { + public QuadPrefixTree(SpatialContext ctx, int maxLevels) { this(ctx, ctx.getWorldBounds(), maxLevels); } @@ -129,12 +124,12 @@ public class QuadPrefixTree extends LegacyPrefixTree { @Override public int getLevelForDistance(double dist) { - if (dist == 0)//short circuit - return maxLevels; - for (int i = 0; i < maxLevels-1; i++) { - //note: level[i] is actually a lookup for level i+1 - if(dist > levelW[i] && dist > levelH[i]) { - return i+1; + if (dist == 0) // short circuit + return maxLevels; + for (int i = 0; i < maxLevels - 1; i++) { + // note: level[i] is actually a lookup for level i+1 + if (dist > levelW[i] && dist > levelH[i]) { + return i + 1; } } return maxLevels; @@ -144,9 +139,16 @@ public class QuadPrefixTree extends LegacyPrefixTree { public Cell getCell(Point p, int level) { if (!robust) { // old method List cells = new ArrayList<>(1); - buildNotRobustly(xmid, ymid, 0, cells, new BytesRef(maxLevels+1), ctx.getShapeFactory().pointXY(p.getX(),p.getY()), level); + buildNotRobustly( + xmid, + ymid, + 0, + cells, + new BytesRef(maxLevels + 1), + ctx.getShapeFactory().pointXY(p.getX(), p.getY()), + level); if (!cells.isEmpty()) { - return cells.get(0);//note cells could be longer if p on edge + return cells.get(0); // note cells could be longer if p on edge } } @@ -154,14 +156,14 @@ public class QuadPrefixTree extends LegacyPrefixTree { double currentYmid = ymid; double xp = p.getX(); double yp = p.getY(); - BytesRef str = new BytesRef(maxLevels+1); + BytesRef str = new BytesRef(maxLevels + 1); int levelLimit = level > maxLevels ? 
maxLevels : level; SpatialRelation rel = SpatialRelation.CONTAINS; - for (int lvl = 0; lvl < levelLimit; lvl++){ + for (int lvl = 0; lvl < levelLimit; lvl++) { int c = battenberg(currentXmid, currentYmid, xp, yp); double halfWidth = levelW[lvl + 1]; double halfHeight = levelH[lvl + 1]; - switch(c){ + switch (c) { case 0: currentXmid -= halfWidth; currentYmid += halfHeight; @@ -180,19 +182,13 @@ public class QuadPrefixTree extends LegacyPrefixTree { break; default: } - str.bytes[str.length++] = (byte)('A' + c); + str.bytes[str.length++] = (byte) ('A' + c); } return new QuadCell(str, rel); } private void buildNotRobustly( - double x, - double y, - int level, - List matches, - BytesRef str, - Shape shape, - int maxLevel) { + double x, double y, int level, List matches, BytesRef str, Shape shape, int maxLevel) { assert str.length == level; double w = levelW[level] / 2; double h = levelH[level] / 2; @@ -228,17 +224,17 @@ public class QuadPrefixTree extends LegacyPrefixTree { Rectangle rectangle = ctx.getShapeFactory().rect(cx - w, cx + w, cy - h, cy + h); SpatialRelation v = shape.relate(rectangle); if (SpatialRelation.CONTAINS == v) { - str.bytes[str.length++] = (byte)c;//append - //str.append(SpatialPrefixGrid.COVER); + str.bytes[str.length++] = (byte) c; // append + // str.append(SpatialPrefixGrid.COVER); matches.add(new QuadCell(BytesRef.deepCopyOf(str), v.transpose())); } else if (SpatialRelation.DISJOINT == v) { // nothing } else { // SpatialRelation.WITHIN, SpatialRelation.INTERSECTS - str.bytes[str.length++] = (byte)c;//append + str.bytes[str.length++] = (byte) c; // append - int nextLevel = level+1; + int nextLevel = level + 1; if (nextLevel >= maxLevel) { - //str.append(SpatialPrefixGrid.INTERSECTS); + // str.append(SpatialPrefixGrid.INTERSECTS); matches.add(new QuadCell(BytesRef.deepCopyOf(str), v.transpose())); } else { buildNotRobustly(cx, cy, nextLevel, matches, str, shape, maxLevel); @@ -248,7 +244,7 @@ public class QuadPrefixTree extends LegacyPrefixTree { } /** Returns a Z-Order quadrant [0-3]. 
*/ - protected int battenberg(double xmid, double ymid, double xp, double yp){ + protected int battenberg(double xmid, double ymid, double xp, double yp) { // http://en.wikipedia.org/wiki/Z-order_%28curve%29 if (ymid <= yp) { if (xmid >= xp) { @@ -280,25 +276,29 @@ public class QuadPrefixTree extends LegacyPrefixTree { } @Override - protected QuadPrefixTree getGrid() { return QuadPrefixTree.this; } + protected QuadPrefixTree getGrid() { + return QuadPrefixTree.this; + } @Override - protected int getMaxLevels() { return maxLevels; } + protected int getMaxLevels() { + return maxLevels; + } @Override protected Collection getSubCells() { BytesRef source = getTokenBytesNoLeaf(null); List cells = new ArrayList<>(4); - cells.add(new QuadCell(concat(source, (byte)'A'), null)); - cells.add(new QuadCell(concat(source, (byte)'B'), null)); - cells.add(new QuadCell(concat(source, (byte)'C'), null)); - cells.add(new QuadCell(concat(source, (byte)'D'), null)); + cells.add(new QuadCell(concat(source, (byte) 'A'), null)); + cells.add(new QuadCell(concat(source, (byte) 'B'), null)); + cells.add(new QuadCell(concat(source, (byte) 'C'), null)); + cells.add(new QuadCell(concat(source, (byte) 'D'), null)); return cells; } protected BytesRef concat(BytesRef source, byte b) { - //+2 for new char + potential leaf + // +2 for new char + potential leaf final byte[] buffer = new byte[source.length + 2]; System.arraycopy(source.bytes, source.offset, buffer, 0, source.length); BytesRef target = new BytesRef(buffer); @@ -314,13 +314,12 @@ public class QuadPrefixTree extends LegacyPrefixTree { @Override protected QuadCell getSubCell(Point p) { - return (QuadCell) QuadPrefixTree.this.getCell(p, getLevel() + 1);//not performant! + return (QuadCell) QuadPrefixTree.this.getCell(p, getLevel() + 1); // not performant! 
} @Override public Shape getShape() { - if (shape == null) - shape = makeShape(); + if (shape == null) shape = makeShape(); return shape; } @@ -340,7 +339,7 @@ public class QuadPrefixTree extends LegacyPrefixTree { ymin += levelH[i]; break; case 'C': - break;//nothing really + break; // nothing really case 'D': xmin += levelW[i]; break; @@ -351,13 +350,13 @@ public class QuadPrefixTree extends LegacyPrefixTree { int len = token.length; double width, height; if (len > 0) { - width = levelW[len-1]; - height = levelH[len-1]; + width = levelW[len - 1]; + height = levelH[len - 1]; } else { width = gridW; height = gridH; } return ctx.getShapeFactory().rect(xmin, xmin + width, ymin, ymin + height); } - }//QuadCell + } // QuadCell } diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/S2PrefixTree.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/S2PrefixTree.java index f77c578e553..4fc1ceaa354 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/S2PrefixTree.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/S2PrefixTree.java @@ -17,12 +17,11 @@ package org.apache.lucene.spatial.prefix.tree; -import java.util.ArrayList; -import java.util.List; - import com.google.common.geometry.S2CellId; import com.google.common.geometry.S2LatLng; import com.google.common.geometry.S2Projections; +import java.util.ArrayList; +import java.util.List; import org.apache.lucene.util.BytesRef; import org.locationtech.spatial4j.context.SpatialContext; import org.locationtech.spatial4j.distance.DistanceUtils; @@ -30,128 +29,128 @@ import org.locationtech.spatial4j.shape.Point; import org.locationtech.spatial4j.shape.Shape; /** - * Spatial prefix tree for S2 Geometry. Shape factories - * for the given {@link SpatialContext} must implement the interface {@link S2ShapeFactory}. + * Spatial prefix tree for S2 Geometry. Shape factories for the + * given {@link SpatialContext} must implement the interface {@link S2ShapeFactory}. * - * The tree can be configured on how it divided itself by providing an arity. The default arity is 1 - * which divided every sub-cell in 4 (except the first level that is always divided by 6) . Arity 2 - * divides sub-cells in 16 and arity 3 in 64 sub-cells. + *
<p>
    The tree can be configured on how it divided itself by providing an arity. The default arity + * is 1 which divided every sub-cell in 4 (except the first level that is always divided by 6) . + * Arity 2 divides sub-cells in 16 and arity 3 in 64 sub-cells. * * @lucene.experimental */ public class S2PrefixTree extends SpatialPrefixTree { + /** Factory for creating {@link S2PrefixTree} instances with useful defaults */ + protected static class Factory extends SpatialPrefixTreeFactory { - /** - * Factory for creating {@link S2PrefixTree} instances with useful defaults - */ - protected static class Factory extends SpatialPrefixTreeFactory { - - @Override - protected int getLevelForDistance(double degrees) { - S2PrefixTree grid = new S2PrefixTree(ctx, S2PrefixTree.getMaxLevels(1)); - return grid.getLevelForDistance(degrees); - } - - @Override - protected SpatialPrefixTree newSPT() { - return new S2PrefixTree(ctx, - maxLevels != null ? maxLevels : S2PrefixTree.getMaxLevels(1)); - } - - } - - //factory to generate S2 cell shapes - protected final S2ShapeFactory s2ShapeFactory; - protected final int arity; - - /** - * Creates a S2 spatial tree with arity 1. - * - * @param ctx The provided spatial context. The shape factor of the spatial context - * must implement {@link S2ShapeFactory} - * @param maxLevels The provided maximum level for this tree. - */ - public S2PrefixTree(SpatialContext ctx, int maxLevels) { - this(ctx, maxLevels, 1); - } - - /** - * Creates a S2 spatial tree with provided arity. - * - * @param ctx The provided spatial context. The shape factor of the spatial context - * must implement {@link S2ShapeFactory} - * @param maxLevels The provided maximum level for this tree. - * @param arity The arity of the tree. - */ - public S2PrefixTree(SpatialContext ctx, int maxLevels, int arity) { - super(ctx, maxLevels); - if (!(ctx.getShapeFactory() instanceof S2ShapeFactory)) { - throw new IllegalArgumentException("Spatial context does not support S2 spatial index."); - } - this.s2ShapeFactory = (S2ShapeFactory) ctx.getShapeFactory(); - if (arity <1 || arity > 3) { - throw new IllegalArgumentException("Invalid value for S2 tree arity. Possible values are 1, 2 or 3. Provided value is " + arity + "."); - } - this.arity = arity; - } - - /** - * Get max levels for this spatial tree. - * - * @param arity The arity of the tree. - * @return The maximum number of levels by the provided arity. - */ - public static int getMaxLevels(int arity) { - return S2CellId.MAX_LEVEL/arity + 1; + @Override + protected int getLevelForDistance(double degrees) { + S2PrefixTree grid = new S2PrefixTree(ctx, S2PrefixTree.getMaxLevels(1)); + return grid.getLevelForDistance(degrees); } @Override - public int getLevelForDistance(double dist) { - if (dist == 0){ - return maxLevels; - } - int level = S2Projections.MAX_WIDTH.getMinLevel(dist * DistanceUtils.DEGREES_TO_RADIANS); - int roundLevel = level % arity != 0 ? 1 : 0; - level = level/arity + roundLevel; - return Math.min(maxLevels, level + 1); + protected SpatialPrefixTree newSPT() { + return new S2PrefixTree(ctx, maxLevels != null ? 
maxLevels : S2PrefixTree.getMaxLevels(1)); } + } - @Override - public double getDistanceForLevel(int level) { - if (level == 0) { - return 180; - } - return S2Projections.MAX_WIDTH.getValue(arity * (level - 1)) * DistanceUtils.RADIANS_TO_DEGREES; - } + // factory to generate S2 cell shapes + protected final S2ShapeFactory s2ShapeFactory; + protected final int arity; - @Override - public Cell getWorldCell() { - return new S2PrefixTreeCell(this, null); - } + /** + * Creates a S2 spatial tree with arity 1. + * + * @param ctx The provided spatial context. The shape factor of the spatial context must implement + * {@link S2ShapeFactory} + * @param maxLevels The provided maximum level for this tree. + */ + public S2PrefixTree(SpatialContext ctx, int maxLevels) { + this(ctx, maxLevels, 1); + } - @Override - public Cell readCell(BytesRef term, Cell scratch) { - S2PrefixTreeCell cell = (S2PrefixTreeCell) scratch; - if (cell == null) { - cell = (S2PrefixTreeCell) getWorldCell(); - } - cell.readCell(this, term); - return cell; + /** + * Creates a S2 spatial tree with provided arity. + * + * @param ctx The provided spatial context. The shape factor of the spatial context must implement + * {@link S2ShapeFactory} + * @param maxLevels The provided maximum level for this tree. + * @param arity The arity of the tree. + */ + public S2PrefixTree(SpatialContext ctx, int maxLevels, int arity) { + super(ctx, maxLevels); + if (!(ctx.getShapeFactory() instanceof S2ShapeFactory)) { + throw new IllegalArgumentException("Spatial context does not support S2 spatial index."); } + this.s2ShapeFactory = (S2ShapeFactory) ctx.getShapeFactory(); + if (arity < 1 || arity > 3) { + throw new IllegalArgumentException( + "Invalid value for S2 tree arity. Possible values are 1, 2 or 3. Provided value is " + + arity + + "."); + } + this.arity = arity; + } - @Override - public CellIterator getTreeCellIterator(Shape shape, int detailLevel) { - if (!(shape instanceof Point)) { - return super.getTreeCellIterator(shape, detailLevel); - } - Point p = (Point) shape; - S2CellId id = S2CellId.fromLatLng(S2LatLng.fromDegrees(p.getY(), p.getX())).parent(arity * (detailLevel - 1)); - List cells = new ArrayList<>(detailLevel); - for (int i=0; i < detailLevel - 1; i++) { - cells.add(new S2PrefixTreeCell(this, id.parent(i * arity))); - } - cells.add(new S2PrefixTreeCell(this, id)); - return new FilterCellIterator(cells.iterator(), null); + /** + * Get max levels for this spatial tree. + * + * @param arity The arity of the tree. + * @return The maximum number of levels by the provided arity. + */ + public static int getMaxLevels(int arity) { + return S2CellId.MAX_LEVEL / arity + 1; + } + + @Override + public int getLevelForDistance(double dist) { + if (dist == 0) { + return maxLevels; } -} \ No newline at end of file + int level = S2Projections.MAX_WIDTH.getMinLevel(dist * DistanceUtils.DEGREES_TO_RADIANS); + int roundLevel = level % arity != 0 ? 
1 : 0; + level = level / arity + roundLevel; + return Math.min(maxLevels, level + 1); + } + + @Override + public double getDistanceForLevel(int level) { + if (level == 0) { + return 180; + } + return S2Projections.MAX_WIDTH.getValue(arity * (level - 1)) * DistanceUtils.RADIANS_TO_DEGREES; + } + + @Override + public Cell getWorldCell() { + return new S2PrefixTreeCell(this, null); + } + + @Override + public Cell readCell(BytesRef term, Cell scratch) { + S2PrefixTreeCell cell = (S2PrefixTreeCell) scratch; + if (cell == null) { + cell = (S2PrefixTreeCell) getWorldCell(); + } + cell.readCell(this, term); + return cell; + } + + @Override + public CellIterator getTreeCellIterator(Shape shape, int detailLevel) { + if (!(shape instanceof Point)) { + return super.getTreeCellIterator(shape, detailLevel); + } + Point p = (Point) shape; + S2CellId id = + S2CellId.fromLatLng(S2LatLng.fromDegrees(p.getY(), p.getX())) + .parent(arity * (detailLevel - 1)); + List cells = new ArrayList<>(detailLevel); + for (int i = 0; i < detailLevel - 1; i++) { + cells.add(new S2PrefixTreeCell(this, id.parent(i * arity))); + } + cells.add(new S2PrefixTreeCell(this, id)); + return new FilterCellIterator(cells.iterator(), null); + } +} diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/S2PrefixTreeCell.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/S2PrefixTreeCell.java index e9b5818e16d..ee283141921 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/S2PrefixTreeCell.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/S2PrefixTreeCell.java @@ -17,13 +17,12 @@ package org.apache.lucene.spatial.prefix.tree; +import com.google.common.geometry.S2CellId; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; - -import com.google.common.geometry.S2CellId; import org.apache.lucene.util.BytesRef; import org.locationtech.spatial4j.shape.Shape; import org.locationtech.spatial4j.shape.SpatialRelation; @@ -35,263 +34,264 @@ import org.locationtech.spatial4j.shape.SpatialRelation; */ class S2PrefixTreeCell implements CellCanPrune { - //Faces of S2 Geometry - private static S2CellId[] FACES = new S2CellId[6]; + // Faces of S2 Geometry + private static S2CellId[] FACES = new S2CellId[6]; - static { - FACES[0] = S2CellId.fromFacePosLevel(0, 0, 0); - FACES[1] = S2CellId.fromFacePosLevel(1, 0, 0); - FACES[2] = S2CellId.fromFacePosLevel(2, 0, 0); - FACES[3] = S2CellId.fromFacePosLevel(3, 0, 0); - FACES[4] = S2CellId.fromFacePosLevel(4, 0, 0); - FACES[5] = S2CellId.fromFacePosLevel(5, 0, 0); + static { + FACES[0] = S2CellId.fromFacePosLevel(0, 0, 0); + FACES[1] = S2CellId.fromFacePosLevel(1, 0, 0); + FACES[2] = S2CellId.fromFacePosLevel(2, 0, 0); + FACES[3] = S2CellId.fromFacePosLevel(3, 0, 0); + FACES[4] = S2CellId.fromFacePosLevel(4, 0, 0); + FACES[5] = S2CellId.fromFacePosLevel(5, 0, 0); + } + + /*Special character to define a cell leaf*/ + private static final byte LEAF = '+'; + /*Tokens are used to serialize cells*/ + private static final byte[] TOKENS = { + '.', '/', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', + 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', + 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', + 't', 'u', 'v', 'w', 'x', 'y', 'z' + }; + /*Map containing mapping between tokens and integer values*/ + private 
static final Map PIXELS; + + static { + PIXELS = new HashMap<>(TOKENS.length); + for (int i = 0; i < TOKENS.length; i++) { + PIXELS.put(TOKENS[i], i); } + } - /*Special character to define a cell leaf*/ - private static final byte LEAF = '+'; - /*Tokens are used to serialize cells*/ - private static final byte[] TOKENS = {'.', '/', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', - 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', - 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'}; - /*Map containing mapping between tokens and integer values*/ - private static final Map PIXELS; + S2CellId cellId; + int level; // cache level + S2PrefixTree tree; - static { - PIXELS = new HashMap<>(TOKENS.length); - for (int i = 0; i < TOKENS.length; i++) { - PIXELS.put(TOKENS[i], i); - } + SpatialRelation shapeRel = null; + boolean isLeaf; + Shape shape = null; + + S2PrefixTreeCell(S2PrefixTree tree, S2CellId cellId) { + this.cellId = cellId; + this.tree = tree; + setLevel(); + if (getLevel() == tree.getMaxLevels()) { + setLeaf(); } + } - S2CellId cellId; - int level; //cache level - S2PrefixTree tree; - - SpatialRelation shapeRel = null; - boolean isLeaf; - Shape shape = null; - - S2PrefixTreeCell(S2PrefixTree tree, S2CellId cellId) { - this.cellId = cellId; - this.tree = tree; - setLevel(); - if (getLevel() == tree.getMaxLevels()) { - setLeaf(); - } + void readCell(S2PrefixTree tree, BytesRef ref) { + isLeaf = false; + shape = null; + shapeRel = null; + this.tree = tree; + cellId = getS2CellIdFromBytesRef(ref); + setLevel(); + if (isLeaf(ref) || getLevel() == tree.getMaxLevels()) { + setLeaf(); } + } - void readCell(S2PrefixTree tree, BytesRef ref) { - isLeaf = false; - shape = null; - shapeRel = null; - this.tree = tree; - cellId = getS2CellIdFromBytesRef(ref); - setLevel(); - if (isLeaf(ref) || getLevel() == tree.getMaxLevels()) { - setLeaf(); - } - } + @Override + public SpatialRelation getShapeRel() { + return shapeRel; + } - @Override - public SpatialRelation getShapeRel() { - return shapeRel; - } + @Override + public void setShapeRel(SpatialRelation rel) { + shapeRel = rel; + } - @Override - public void setShapeRel(SpatialRelation rel) { - shapeRel = rel; - } + @Override + public boolean isLeaf() { + return isLeaf; + } - @Override - public boolean isLeaf() { - return isLeaf; - } + @Override + public void setLeaf() { + isLeaf = true; + } - @Override - public void setLeaf() { - isLeaf = true; + @Override + public BytesRef getTokenBytesWithLeaf(BytesRef result) { + result = getTokenBytesNoLeaf(result); + // max levels do not have leaf + if (isLeaf() && !(getLevel() == tree.getMaxLevels())) { + // Add leaf byte + result.bytes[result.offset + result.length] = LEAF; + result.length++; } + return result; + } - @Override - public BytesRef getTokenBytesWithLeaf(BytesRef result) { - result = getTokenBytesNoLeaf(result); - //max levels do not have leaf - if (isLeaf() && !(getLevel() == tree.getMaxLevels())) { - //Add leaf byte - result.bytes[result.offset + result.length] = LEAF; - result.length++; - } - return result; + @Override + public BytesRef getTokenBytesNoLeaf(BytesRef result) { + if (result == null) { + result = new BytesRef(); } + getBytesRefFromS2CellId(cellId, result); + return result; + } - @Override - public BytesRef getTokenBytesNoLeaf(BytesRef result) { - if (result == null) { - result = new BytesRef(); - } - 
getBytesRefFromS2CellId(cellId, result); - return result; - } + @Override + public int getLevel() { + return this.level; + } - @Override - public int getLevel() { - return this.level; + /** Cache level of cell. */ + private void setLevel() { + if (this.cellId == null) { + this.level = 0; + } else { + assert cellId.level() % tree.arity == 0; + this.level = (this.cellId.level() / tree.arity) + 1; } + } - /** - * Cache level of cell. - */ - private void setLevel() { - if (this.cellId == null) { - this.level = 0; - } else { - assert cellId.level() % tree.arity == 0; - this.level = (this.cellId.level() / tree.arity) + 1; - } + @Override + public CellIterator getNextLevelCells(Shape shapeFilter) { + S2CellId[] children; + if (cellId == null) { // this is the world cell + children = FACES; + } else { + int nChildren = (int) Math.pow(4, tree.arity); + children = new S2CellId[nChildren]; + children[0] = cellId.childBegin(cellId.level() + tree.arity); + for (int i = 1; i < nChildren; i++) { + children[i] = children[i - 1].next(); + } } + List cells = new ArrayList<>(children.length); + for (S2CellId pixel : children) { + cells.add(new S2PrefixTreeCell(tree, pixel)); + } + return new FilterCellIterator(cells.iterator(), shapeFilter); + } - @Override - public CellIterator getNextLevelCells(Shape shapeFilter) { - S2CellId[] children; - if (cellId == null) { // this is the world cell - children = FACES; - } else { - int nChildren = (int) Math.pow(4, tree.arity); - children = new S2CellId[nChildren]; - children[0] = cellId.childBegin(cellId.level() + tree.arity); - for (int i = 1; i < nChildren; i++) { - children[i] = children[i - 1].next(); - } - } - List cells = new ArrayList<>(children.length); - for (S2CellId pixel : children) { - cells.add(new S2PrefixTreeCell(tree, pixel)); - } - return new FilterCellIterator(cells.iterator(), shapeFilter); + @Override + public Shape getShape() { + if (shape == null) { + if (cellId == null) { // World cell + shape = tree.getSpatialContext().getWorldBounds(); + } else { + shape = tree.s2ShapeFactory.getS2CellShape(cellId); + } } + return shape; + } - @Override - public Shape getShape() { - if (shape == null) { - if (cellId == null) { //World cell - shape = tree.getSpatialContext().getWorldBounds(); - } else { - shape = tree.s2ShapeFactory.getS2CellShape(cellId); - } - } - return shape; + @Override + public boolean isPrefixOf(Cell c) { + if (cellId == null) { + return true; } + S2PrefixTreeCell cell = (S2PrefixTreeCell) c; + return cellId.contains(cell.cellId); + } - @Override - public boolean isPrefixOf(Cell c) { - if (cellId == null) { - return true; - } - S2PrefixTreeCell cell = (S2PrefixTreeCell) c; - return cellId.contains(cell.cellId); + @Override + public int compareToNoLeaf(Cell fromCell) { + if (cellId == null) { + return 1; } + S2PrefixTreeCell cell = (S2PrefixTreeCell) fromCell; + return cellId.compareTo(cell.cellId); + } - @Override - public int compareToNoLeaf(Cell fromCell) { - if (cellId == null) { - return 1; - } - S2PrefixTreeCell cell = (S2PrefixTreeCell) fromCell; - return cellId.compareTo(cell.cellId); - } + /** + * Check if a cell is a leaf. + * + * @param ref The Byteref of the leaf + * @return true if it is a leaf, e.g last byte is the special Character. + */ + private boolean isLeaf(BytesRef ref) { + return (ref.bytes[ref.offset + ref.length - 1] == LEAF); + } - /** - * Check if a cell is a leaf. - * - * @param ref The Byteref of the leaf - * @return true if it is a leaf, e.g last byte is the special Character. 
- */ - private boolean isLeaf(BytesRef ref) { - return (ref.bytes[ref.offset + ref.length - 1] == LEAF); + /** + * Get the {@link S2CellId} from the {@link BytesRef} representation. + * + * @param ref The bytes. + * @return the corresponding S2 cell. + */ + private S2CellId getS2CellIdFromBytesRef(BytesRef ref) { + int length = ref.length; + if (isLeaf(ref)) { + length--; } + if (length == 0) { + return null; // world cell + } + int face = PIXELS.get(ref.bytes[ref.offset]); + S2CellId cellId = FACES[face]; + long id = cellId.id(); + for (int i = ref.offset + 1; i < ref.offset + length; i++) { + int thisLevel = i - ref.offset; + int pos = PIXELS.get(ref.bytes[i]); + // first child at level + id = id - (id & -id) + (1L << (2 * (S2CellId.MAX_LEVEL - thisLevel * tree.arity))); + // next until pos + id = id + pos * ((id & -id) << 1); + } + return new S2CellId(id); + } - /** - * Get the {@link S2CellId} from the {@link BytesRef} representation. - * - * @param ref The bytes. - * @return the corresponding S2 cell. - */ - private S2CellId getS2CellIdFromBytesRef(BytesRef ref) { - int length = ref.length; - if (isLeaf(ref)) { - length--; - } - if (length == 0) { - return null; //world cell - } - int face = PIXELS.get(ref.bytes[ref.offset]); - S2CellId cellId = FACES[face]; - long id = cellId.id(); - for (int i = ref.offset + 1; i < ref.offset + length; i++) { - int thisLevel = i - ref.offset; - int pos = PIXELS.get(ref.bytes[i]); - // first child at level - id = id - (id & -id) + (1L << (2 * (S2CellId.MAX_LEVEL - thisLevel * tree.arity))); - // next until pos - id = id + pos * ((id & -id) << 1); - } - return new S2CellId(id); + /** + * Codify a {@link S2CellId} into its {@link BytesRef} representation. + * + * @param cellId The S2 Cell id to codify. + * @param bref The byteref representation. + */ + private void getBytesRefFromS2CellId(S2CellId cellId, BytesRef bref) { + if (cellId == null) { // world cell + bref.length = 0; + return; } + int length = getLevel() + 1; + byte[] b = bref.bytes.length >= length ? bref.bytes : new byte[length]; + b[0] = TOKENS[cellId.face()]; + for (int i = 1; i < getLevel(); i++) { + int offset = 0; + int level = tree.arity * i; + for (int j = 1; j < tree.arity; j++) { + offset = 4 * offset + cellId.childPosition(level - tree.arity + j); + } + b[i] = TOKENS[4 * offset + cellId.childPosition(level)]; + } + bref.bytes = b; + bref.length = getLevel(); + bref.offset = 0; + } - /** - * Codify a {@link S2CellId} into its {@link BytesRef} representation. - * - * @param cellId The S2 Cell id to codify. - * @param bref The byteref representation. - */ - private void getBytesRefFromS2CellId(S2CellId cellId, BytesRef bref) { - if (cellId == null) {//world cell - bref.length = 0; - return; - } - int length = getLevel() + 1; - byte[] b = bref.bytes.length >= length ? 
bref.bytes : new byte[length]; - b[0] = TOKENS[cellId.face()]; - for (int i = 1; i < getLevel(); i++) { - int offset = 0; - int level = tree.arity * i; - for (int j = 1; j < tree.arity; j++) { - offset = 4 * offset + cellId.childPosition(level - tree.arity + j); - } - b[i] = TOKENS[4 * offset + cellId.childPosition(level)]; - } - bref.bytes = b; - bref.length = getLevel(); - bref.offset = 0; + @Override + public int getSubCellsSize() { + if (cellId == null) { // root node + return 6; } + return (int) Math.pow(4, tree.arity); + } - @Override - public int getSubCellsSize() { - if (cellId == null) { //root node - return 6; - } - return (int) Math.pow(4, tree.arity); + @Override + public int hashCode() { + if (cellId == null) { + return super.hashCode(); } + return this.cellId.hashCode(); + } - @Override - public int hashCode() { - if (cellId == null) { - return super.hashCode(); - } - return this.cellId.hashCode(); - } + @Override + public boolean equals(Object o) { + S2PrefixTreeCell cell = (S2PrefixTreeCell) o; + return Objects.equals(cellId, cell.cellId); + } - @Override - public boolean equals(Object o) { - S2PrefixTreeCell cell = (S2PrefixTreeCell) o; - return Objects.equals(cellId, cell.cellId); + @Override + public String toString() { + if (cellId == null) { + return "0"; } - - @Override - public String toString() { - if (cellId == null) { - return "0"; - } - return cellId.toString(); - } -} \ No newline at end of file + return cellId.toString(); + } +} diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/S2ShapeFactory.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/S2ShapeFactory.java index 1306f601676..70bfc023180 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/S2ShapeFactory.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/S2ShapeFactory.java @@ -17,18 +17,17 @@ package org.apache.lucene.spatial.prefix.tree; - import com.google.common.geometry.S2CellId; import org.locationtech.spatial4j.shape.Shape; import org.locationtech.spatial4j.shape.ShapeFactory; /** - * Shape factory for Spatial contexts that support S2 geometry. It is an extension of - * Spatial4j {@link ShapeFactory}. + * Shape factory for Spatial contexts that support S2 geometry. It is an extension of Spatial4j + * {@link ShapeFactory}. * * @lucene.experimental */ -public interface S2ShapeFactory extends ShapeFactory{ +public interface S2ShapeFactory extends ShapeFactory { /** * Factory method for S2 cell shapes. 
diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/SingletonCellIterator.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/SingletonCellIterator.java index 177b431ba79..96259d4d227 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/SingletonCellIterator.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/SingletonCellIterator.java @@ -24,7 +24,7 @@ package org.apache.lucene.spatial.prefix.tree; class SingletonCellIterator extends CellIterator { SingletonCellIterator(Cell cell) { - this.nextCell = cell;//preload nextCell + this.nextCell = cell; // preload nextCell } @Override @@ -32,5 +32,4 @@ class SingletonCellIterator extends CellIterator { thisCell = null; return nextCell != null; } - } diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/SpatialPrefixTree.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/SpatialPrefixTree.java index ae2fe83d20e..abf96829070 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/SpatialPrefixTree.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/SpatialPrefixTree.java @@ -16,18 +16,16 @@ */ package org.apache.lucene.spatial.prefix.tree; +import org.apache.lucene.util.BytesRef; import org.locationtech.spatial4j.context.SpatialContext; import org.locationtech.spatial4j.shape.Shape; -import org.apache.lucene.util.BytesRef; /** - * A spatial Prefix Tree, or Trie, which decomposes shapes into prefixed strings - * at variable lengths corresponding to variable precision. Each string - * corresponds to a rectangular spatial region. This approach is - * also referred to "Grids", "Tiles", and "Spatial Tiers". - *
<p>
    - * Implementations of this class should be thread-safe and immutable once - * initialized. + * A spatial Prefix Tree, or Trie, which decomposes shapes into prefixed strings at variable lengths + * corresponding to variable precision. Each string corresponds to a rectangular spatial region. + * This approach is also referred to "Grids", "Tiles", and "Spatial Tiers". + * + *
<p>
    Implementations of this class should be thread-safe and immutable once initialized. * * @lucene.experimental */ @@ -57,11 +55,10 @@ public abstract class SpatialPrefixTree { } /** - * Returns the level of the largest grid in which its longest side is less - * than or equal to the provided distance (in degrees). Consequently {@code - * dist} acts as an error epsilon declaring the amount of detail needed in the - * grid, such that you can get a grid with just the right amount of - * precision. + * Returns the level of the largest grid in which its longest side is less than or equal to the + * provided distance (in degrees). Consequently {@code dist} acts as an error epsilon declaring + * the amount of detail needed in the grid, such that you can get a grid with just the right + * amount of precision. * * @param dist {@code >= 0} * @return level [1 to maxLevels] @@ -69,9 +66,8 @@ public abstract class SpatialPrefixTree { public abstract int getLevelForDistance(double dist); /** - * Given a cell having the specified level, returns the distance from opposite - * corners. Since this might vary depending on where the cell is, this method - * may over-estimate. + * Given a cell having the specified level, returns the distance from opposite corners. Since this + * might vary depending on where the cell is, this method may over-estimate. * * @param level [1 to maxLevels] * @return {@code > 0} @@ -79,31 +75,29 @@ public abstract class SpatialPrefixTree { public abstract double getDistanceForLevel(int level); /** - * Returns the level 0 cell which encompasses all spatial data. Equivalent to {@link #readCell(BytesRef,Cell)} - * with no bytes. + * Returns the level 0 cell which encompasses all spatial data. Equivalent to {@link + * #readCell(BytesRef,Cell)} with no bytes. */ - public abstract Cell getWorldCell(); //another possible name: getTopCell + public abstract Cell getWorldCell(); // another possible name: getTopCell /** - * This creates a new Cell (or re-using {@code scratch} if provided), initialized to the state as read - * by the bytes. - * Warning: An implementation may refer to the same byte array (no copy). If {@link Cell#setLeaf()} is - * subsequently called, it would then modify these bytes. + * This creates a new Cell (or re-using {@code scratch} if provided), initialized to the state as + * read by the bytes. Warning: An implementation may refer to the same byte array (no copy). If + * {@link Cell#setLeaf()} is subsequently called, it would then modify these bytes. */ public abstract Cell readCell(BytesRef term, Cell scratch); /** - * Gets the intersecting cells for the specified shape, without exceeding - * detail level. If a cell is within the query shape then it's marked as a - * leaf and none of its children are added. For cells at detailLevel, they are marked as - * leaves too, unless it's a point. - *
<p>
    - * IMPORTANT: Cells returned from the iterator can be re-used for cells at the same level. So you can't simply - * iterate to subsequent cells and still refer to the former cell nor the bytes returned from the former cell, unless - * you know the former cell is a parent. + * Gets the intersecting cells for the specified shape, without exceeding detail level. If a cell + * is within the query shape then it's marked as a leaf and none of its children are added. For + * cells at detailLevel, they are marked as leaves too, unless it's a point. * - * @param shape the shape; possibly null but the caller should liberally call - * {@code remove()} if so. + *
<p>
    IMPORTANT: Cells returned from the iterator can be re-used for cells at the same level. So + * you can't simply iterate to subsequent cells and still refer to the former cell nor the bytes + * returned from the former cell, unless you know the former cell is a parent. + * + * @param shape the shape; possibly null but the caller should liberally call {@code remove()} if + * so. * @param detailLevel the maximum detail level to get cells for * @return the matching cells */ @@ -113,5 +107,4 @@ public abstract class SpatialPrefixTree { } return new TreeCellIterator(shape, detailLevel, getWorldCell()); } - } diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/SpatialPrefixTreeFactory.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/SpatialPrefixTreeFactory.java index 0c4c659160c..de963574e74 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/SpatialPrefixTreeFactory.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/SpatialPrefixTreeFactory.java @@ -18,20 +18,19 @@ package org.apache.lucene.spatial.prefix.tree; import java.text.ParseException; import java.util.Map; - import org.apache.lucene.util.Version; import org.locationtech.spatial4j.context.SpatialContext; import org.locationtech.spatial4j.distance.DistanceUtils; /** - * Abstract Factory for creating {@link SpatialPrefixTree} instances with useful - * defaults and passed on configurations defined in a Map. + * Abstract Factory for creating {@link SpatialPrefixTree} instances with useful defaults and passed + * on configurations defined in a Map. * * @lucene.experimental */ public abstract class SpatialPrefixTreeFactory { - private static final double DEFAULT_GEO_MAX_DETAIL_KM = 0.001;//1m + private static final double DEFAULT_GEO_MAX_DETAIL_KM = 0.001; // 1m public static final String PREFIX_TREE = "prefixTree"; public static final String MAX_LEVELS = "maxLevels"; public static final String MAX_DIST_ERR = "maxDistErr"; @@ -43,24 +42,21 @@ public abstract class SpatialPrefixTreeFactory { private Version version; /** - * The factory is looked up via "prefixTree" in args, expecting "geohash" or "quad". - * If it's neither of these, then "geohash" is chosen for a geo context, otherwise "quad" is chosen. - * The "version" arg, if present, is parsed with {@link Version} and the prefix tree might be sensitive to it. + * The factory is looked up via "prefixTree" in args, expecting "geohash" or "quad". If it's + * neither of these, then "geohash" is chosen for a geo context, otherwise "quad" is chosen. The + * "version" arg, if present, is parsed with {@link Version} and the prefix tree might be + * sensitive to it. */ - public static SpatialPrefixTree makeSPT(Map args, ClassLoader classLoader, SpatialContext ctx) { - //TODO refactor to use Java SPI like how Lucene already does for codecs/postingsFormats, etc + public static SpatialPrefixTree makeSPT( + Map args, ClassLoader classLoader, SpatialContext ctx) { + // TODO refactor to use Java SPI like how Lucene already does for codecs/postingsFormats, etc SpatialPrefixTreeFactory instance; String cname = args.get(PREFIX_TREE); - if (cname == null) - cname = ctx.isGeo() ? 
"geohash" : "quad"; - if ("geohash".equalsIgnoreCase(cname)) - instance = new GeohashPrefixTree.Factory(); - else if ("quad".equalsIgnoreCase(cname)) - instance = new QuadPrefixTree.Factory(); - else if ("packedQuad".equalsIgnoreCase(cname)) - instance = new PackedQuadPrefixTree.Factory(); - else if ("s2".equalsIgnoreCase(cname)) - instance = new S2PrefixTree.Factory(); + if (cname == null) cname = ctx.isGeo() ? "geohash" : "quad"; + if ("geohash".equalsIgnoreCase(cname)) instance = new GeohashPrefixTree.Factory(); + else if ("quad".equalsIgnoreCase(cname)) instance = new QuadPrefixTree.Factory(); + else if ("packedQuad".equalsIgnoreCase(cname)) instance = new PackedQuadPrefixTree.Factory(); + else if ("s2".equalsIgnoreCase(cname)) instance = new S2PrefixTree.Factory(); else { try { Class c = classLoader.loadClass(cname); @@ -100,25 +96,22 @@ public abstract class SpatialPrefixTreeFactory { String maxDetailDistStr = args.get(MAX_DIST_ERR); if (maxDetailDistStr == null) { if (!ctx.isGeo()) { - return;//let default to max + return; // let default to max } - degrees = DistanceUtils.dist2Degrees(DEFAULT_GEO_MAX_DETAIL_KM, DistanceUtils.EARTH_MEAN_RADIUS_KM); + degrees = + DistanceUtils.dist2Degrees(DEFAULT_GEO_MAX_DETAIL_KM, DistanceUtils.EARTH_MEAN_RADIUS_KM); } else { degrees = Double.parseDouble(maxDetailDistStr); } maxLevels = getLevelForDistance(degrees); } - /** - * Set the version of Lucene this tree should mimic the behavior for for analysis. - */ + /** Set the version of Lucene this tree should mimic the behavior for for analysis. */ public void setVersion(Version v) { version = v; } - /** - * Return the version of Lucene this tree will mimic the behavior of for analysis. - */ + /** Return the version of Lucene this tree will mimic the behavior of for analysis. */ public Version getVersion() { return version; } @@ -127,5 +120,4 @@ public abstract class SpatialPrefixTreeFactory { protected abstract int getLevelForDistance(double degrees); protected abstract SpatialPrefixTree newSPT(); - } diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/TreeCellIterator.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/TreeCellIterator.java index 39c8068d8f9..5ee7ccc4c12 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/TreeCellIterator.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/TreeCellIterator.java @@ -20,19 +20,21 @@ import org.locationtech.spatial4j.shape.Point; import org.locationtech.spatial4j.shape.Shape; /** - * Navigates a {@link org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree} from a given cell (typically the world - * cell) down to a maximum number of configured levels, filtered by a given shape. Intermediate non-leaf cells are - * returned. It supports {@link #remove()} for skipping traversal of subcells of the current cell. + * Navigates a {@link org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree} from a given cell + * (typically the world cell) down to a maximum number of configured levels, filtered by a given + * shape. Intermediate non-leaf cells are returned. It supports {@link #remove()} for skipping + * traversal of subcells of the current cell. * * @lucene.internal */ class TreeCellIterator extends CellIterator { - //This class uses a stack approach, which is more efficient than creating linked nodes. And it might more easily + // This class uses a stack approach, which is more efficient than creating linked nodes. 
And it + // might more easily // pave the way for re-using Cell & CellIterator at a given level in the future. - private final Shape shapeFilter;//possibly null - private final CellIterator[] iterStack;//starts at level 1 - private int stackIdx;//-1 when done + private final Shape shapeFilter; // possibly null + private final CellIterator[] iterStack; // starts at level 1 + private int stackIdx; // -1 when done private boolean descend; public TreeCellIterator(Shape shapeFilter, int detailLevel, Cell parentCell) { @@ -40,39 +42,40 @@ class TreeCellIterator extends CellIterator { assert parentCell.getLevel() == 0; iterStack = new CellIterator[detailLevel]; iterStack[0] = parentCell.getNextLevelCells(shapeFilter); - stackIdx = 0;//always points to an iter (non-null) - //note: not obvious but needed to visit the first cell before trying to descend + stackIdx = 0; // always points to an iter (non-null) + // note: not obvious but needed to visit the first cell before trying to descend descend = false; } @Override public boolean hasNext() { - if (nextCell != null) - return true; + if (nextCell != null) return true; while (true) { - if (stackIdx == -1)//the only condition in which we return false - return false; - //If we can descend... - if (descend && !(stackIdx == iterStack.length - 1 || iterStack[stackIdx].thisCell().isLeaf())) { + if (stackIdx == -1) // the only condition in which we return false + return false; + // If we can descend... + if (descend + && !(stackIdx == iterStack.length - 1 || iterStack[stackIdx].thisCell().isLeaf())) { CellIterator nextIter = iterStack[stackIdx].thisCell().getNextLevelCells(shapeFilter); - //push stack + // push stack iterStack[++stackIdx] = nextIter; } - //Get sibling... + // Get sibling... if (iterStack[stackIdx].hasNext()) { nextCell = iterStack[stackIdx].next(); - //at detailLevel - if (stackIdx == iterStack.length - 1 && !(shapeFilter instanceof Point)) //point check is a kludge - nextCell.setLeaf();//because at bottom + // at detailLevel + if (stackIdx == iterStack.length - 1 + && !(shapeFilter instanceof Point)) // point check is a kludge + nextCell.setLeaf(); // because at bottom break; } - //Couldn't get next; go up... - //pop stack + // Couldn't get next; go up... + // pop stack iterStack[stackIdx--] = null; - descend = false;//so that we don't re-descend where we just were + descend = false; // so that we don't re-descend where we just were } assert nextCell != null; - descend = true;//reset + descend = true; // reset return true; } @@ -82,6 +85,6 @@ class TreeCellIterator extends CellIterator { descend = false; } - //TODO implement a smart nextFrom() that looks at the parent's bytes first + // TODO implement a smart nextFrom() that looks at the parent's bytes first } diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/package-info.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/package-info.java index 20a4a0e8590..c6bb34dbb41 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/package-info.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/package-info.java @@ -16,15 +16,16 @@ */ /** - * This package is about SpatialPrefixTree and any supporting classes. - * A SpatialPrefixTree supports spatial indexing by index-time tokens - * where adding characters to a string gives greater resolution. - *
- * <p>
- * Potential Implementations include:
+ * This package is about SpatialPrefixTree and any supporting classes. A SpatialPrefixTree supports
+ * spatial indexing by index-time tokens where adding characters to a string gives greater
+ * resolution.
+ *
+ * <p>Potential Implementations include:
+ *
 * <ul>
- * <li>http://en.wikipedia.org/wiki/Quadtree
- * <li>http://en.wikipedia.org/wiki/Geohash
- * <li>http://healpix.jpl.nasa.gov/
+ *   <li>http://en.wikipedia.org/wiki/Quadtree
+ *   <li>http://en.wikipedia.org/wiki/Geohash
+ *   <li>http://healpix.jpl.nasa.gov/
 * </ul>
    */ -package org.apache.lucene.spatial.prefix.tree; \ No newline at end of file +package org.apache.lucene.spatial.prefix.tree; diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/query/SpatialArgs.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/query/SpatialArgs.java index 37a5503e16a..fa3fecf565c 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/query/SpatialArgs.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/query/SpatialArgs.java @@ -22,8 +22,8 @@ import org.locationtech.spatial4j.shape.Rectangle; import org.locationtech.spatial4j.shape.Shape; /** - * Principally holds the query {@link Shape} and the {@link SpatialOperation}. - * It's used as an argument to some methods on {@link org.apache.lucene.spatial.SpatialStrategy}. + * Principally holds the query {@link Shape} and the {@link SpatialOperation}. It's used as an + * argument to some methods on {@link org.apache.lucene.spatial.SpatialStrategy}. * * @lucene.experimental */ @@ -44,9 +44,8 @@ public class SpatialArgs { } /** - * Computes the distance given a shape and the {@code distErrPct}. The - * algorithm is the fraction of the distance from the center of the query - * shape to its closest bounding box corner. + * Computes the distance given a shape and the {@code distErrPct}. The algorithm is the fraction + * of the distance from the center of the query shape to its closest bounding box corner. * * @param shape Mandatory. * @param distErrPct 0 to 0.5 @@ -55,13 +54,14 @@ public class SpatialArgs { */ public static double calcDistanceFromErrPct(Shape shape, double distErrPct, SpatialContext ctx) { if (distErrPct < 0 || distErrPct > 0.5) { - throw new IllegalArgumentException("distErrPct " + distErrPct + " must be between [0 to 0.5]"); + throw new IllegalArgumentException( + "distErrPct " + distErrPct + " must be between [0 to 0.5]"); } if (distErrPct == 0 || shape instanceof Point) { return 0; } Rectangle bbox = shape.getBoundingBox(); - //Compute the distance from the center to a corner. Because the distance + // Compute the distance from the center to a corner. Because the distance // to a bottom corner vs a top corner can vary in a geospatial scenario, // take the closest one (greater precision). Point ctr = bbox.getCenter(); @@ -71,15 +71,14 @@ public class SpatialArgs { } /** - * Gets the error distance that specifies how precise the query shape is. This - * looks at {@link #getDistErr()}, {@link #getDistErrPct()}, and {@code - * defaultDistErrPct}. + * Gets the error distance that specifies how precise the query shape is. This looks at {@link + * #getDistErr()}, {@link #getDistErrPct()}, and {@code defaultDistErrPct}. + * * @param defaultDistErrPct 0 to 0.5 * @return {@code >= 0} */ public double resolveDistErr(SpatialContext ctx, double defaultDistErrPct) { - if (distErr != null) - return distErr; + if (distErr != null) return distErr; double distErrPct = (this.distErrPct != null ? 
this.distErrPct : defaultDistErrPct); return calcDistanceFromErrPct(shape, distErrPct, ctx); } @@ -95,9 +94,9 @@ public class SpatialArgs { return SpatialArgsParser.writeSpatialArgs(this); } - //------------------------------------------------ + // ------------------------------------------------ // Getters & Setters - //------------------------------------------------ + // ------------------------------------------------ public SpatialOperation getOperation() { return operation; @@ -116,25 +115,24 @@ public class SpatialArgs { } /** - * A measure of acceptable error of the shape as a fraction. This effectively - * inflates the size of the shape but should not shrink it. + * A measure of acceptable error of the shape as a fraction. This effectively inflates the size of + * the shape but should not shrink it. * * @return 0 to 0.5 * @see #calcDistanceFromErrPct(org.locationtech.spatial4j.shape.Shape, double, - * org.locationtech.spatial4j.context.SpatialContext) + * org.locationtech.spatial4j.context.SpatialContext) */ public Double getDistErrPct() { return distErrPct; } public void setDistErrPct(Double distErrPct) { - if (distErrPct != null) - this.distErrPct = distErrPct; + if (distErrPct != null) this.distErrPct = distErrPct; } /** - * The acceptable error of the shape. This effectively inflates the - * size of the shape but should not shrink it. + * The acceptable error of the shape. This effectively inflates the size of the shape but should + * not shrink it. * * @return {@code >= 0} */ diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/query/SpatialArgsParser.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/query/SpatialArgsParser.java index bd3916ca0b2..7f4d1bea8c2 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/query/SpatialArgsParser.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/query/SpatialArgsParser.java @@ -16,29 +16,31 @@ */ package org.apache.lucene.spatial.query; -import org.locationtech.spatial4j.context.SpatialContext; -import org.locationtech.spatial4j.exception.InvalidShapeException; -import org.locationtech.spatial4j.shape.Shape; - import java.text.ParseException; import java.util.HashMap; import java.util.Locale; import java.util.Map; import java.util.StringTokenizer; +import org.locationtech.spatial4j.context.SpatialContext; +import org.locationtech.spatial4j.exception.InvalidShapeException; +import org.locationtech.spatial4j.shape.Shape; /** - * Parses a string that usually looks like "OPERATION(SHAPE)" into a {@link SpatialArgs} - * object. The set of operations supported are defined in {@link SpatialOperation}, such - * as "Intersects" being a common one. The shape portion is defined by WKT {@link org.locationtech.spatial4j.io.WktShapeParser}, - * but it can be overridden/customized via {@link #parseShape(String, org.locationtech.spatial4j.context.SpatialContext)}. - * There are some optional name-value pair parameters that follow the closing parenthesis. Example: + * Parses a string that usually looks like "OPERATION(SHAPE)" into a {@link SpatialArgs} object. The + * set of operations supported are defined in {@link SpatialOperation}, such as "Intersects" being a + * common one. The shape portion is defined by WKT {@link + * org.locationtech.spatial4j.io.WktShapeParser}, but it can be overridden/customized via {@link + * #parseShape(String, org.locationtech.spatial4j.context.SpatialContext)}. There are some optional + * name-value pair parameters that follow the closing parenthesis. 
Example:
+ *
 * <pre>
 *   Intersects(ENVELOPE(-10,-8,22,20)) distErrPct=0.025
 * </pre>
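
For orientation while reading this hunk: the javadoc example above can be exercised as follows. A minimal sketch, not part of the patch; the class name ParseDemo is hypothetical, and it assumes the stock spatial4j GEO context that SpatialArgsParser's WKT handling works with.

    import org.apache.lucene.spatial.query.SpatialArgs;
    import org.apache.lucene.spatial.query.SpatialArgsParser;
    import org.apache.lucene.spatial.query.SpatialOperation;
    import org.locationtech.spatial4j.context.SpatialContext;

    public class ParseDemo { // hypothetical driver, not in the patch
      public static void main(String[] argv) throws Exception {
        SpatialContext ctx = SpatialContext.GEO;
        // Operation name before the parenthesis, a WKT shape inside it,
        // and optional name=value pairs after the closing parenthesis.
        SpatialArgs args =
            new SpatialArgsParser().parse("Intersects(ENVELOPE(-10,-8,22,20)) distErrPct=0.025", ctx);
        System.out.println(args.getOperation() == SpatialOperation.Intersects); // true
        System.out.println(args.getDistErrPct()); // 0.025
      }
    }
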
- * <p>
- * In the future it would be good to support something at least semi-standardized like a
- * variant of
- * [E]CQL.
+ *
+ * <p>
    In the future it would be good to support something at least semi-standardized like a variant + * of + * [E]CQL. * * @lucene.experimental */ @@ -54,9 +56,9 @@ public class SpatialArgsParser { str.append('('); str.append(args.getShape().toString()); if (args.getDistErrPct() != null) - str.append(" distErrPct=").append(String.format(Locale.ROOT, "%.2f%%", args.getDistErrPct() * 100d)); - if (args.getDistErr() != null) - str.append(" distErr=").append(args.getDistErr()); + str.append(" distErrPct=") + .append(String.format(Locale.ROOT, "%.2f%%", args.getDistErrPct() * 100d)); + if (args.getDistErr() != null) str.append(" distErr=").append(args.getDistErr()); str.append(')'); return str.toString(); } @@ -64,14 +66,16 @@ public class SpatialArgsParser { /** * Parses a string such as "Intersects(ENVELOPE(-10,-8,22,20)) distErrPct=0.025". * - * @param v The string to parse. Mandatory. + * @param v The string to parse. Mandatory. * @param ctx The spatial context. Mandatory. * @return Not null. - * @throws IllegalArgumentException if the parameters don't make sense or an add-on parameter is unknown + * @throws IllegalArgumentException if the parameters don't make sense or an add-on parameter is + * unknown * @throws ParseException If there is a problem parsing the string * @throws InvalidShapeException When the coordinates are invalid for the shape */ - public SpatialArgs parse(String v, SpatialContext ctx) throws ParseException, InvalidShapeException { + public SpatialArgs parse(String v, SpatialContext ctx) + throws ParseException, InvalidShapeException { int idx = v.indexOf('('); int edx = v.lastIndexOf(')'); @@ -113,7 +117,7 @@ public class SpatialArgsParser { } protected Shape parseShape(String str, SpatialContext ctx) throws ParseException { - //return ctx.readShape(str);//still in Spatial4j 0.4 but will be deleted + // return ctx.readShape(str);//still in Spatial4j 0.4 but will be deleted return ctx.readShapeFromWkt(str); } @@ -125,8 +129,10 @@ public class SpatialArgsParser { return v == null ? defaultValue : Boolean.parseBoolean(v); } - /** Parses "a=b zScaling=d f" (whitespace separated) into name-value pairs. If there - * is no '=' as in 'f' above then it's short for f=f. */ + /** + * Parses "a=b zScaling=d f" (whitespace separated) into name-value pairs. If there is no '=' as + * in 'f' above then it's short for f=f. + */ protected static Map parseMap(String body) { Map map = new HashMap<>(); StringTokenizer st = new StringTokenizer(body, " \n\t"); diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/query/SpatialOperation.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/query/SpatialOperation.java index 1eeb4bc08d3..b865ea2be24 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/query/SpatialOperation.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/query/SpatialOperation.java @@ -16,121 +16,135 @@ */ package org.apache.lucene.spatial.query; -import org.locationtech.spatial4j.shape.Rectangle; -import org.locationtech.spatial4j.shape.Shape; -import org.locationtech.spatial4j.shape.SpatialRelation; - import java.io.Serializable; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; +import org.locationtech.spatial4j.shape.Rectangle; +import org.locationtech.spatial4j.shape.Shape; +import org.locationtech.spatial4j.shape.SpatialRelation; /** * A predicate that compares a stored geometry to a supplied geometry. It's enum-like. 
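
Ahead of the predicate definitions below, a usage sketch of the enum-like lookup and evaluation this class provides. Not part of the patch; PredicateDemo is a hypothetical name, and the shapes are built with the stock spatial4j GEO context.

    import org.apache.lucene.spatial.query.SpatialOperation;
    import org.locationtech.spatial4j.context.SpatialContext;
    import org.locationtech.spatial4j.shape.Shape;

    public class PredicateDemo { // hypothetical driver, not in the patch
      public static void main(String[] argv) {
        SpatialContext ctx = SpatialContext.GEO;
        Shape indexed = ctx.getShapeFactory().rect(-10, 10, -10, 10); // minX, maxX, minY, maxY
        Shape query = ctx.getShapeFactory().pointXY(0, 0);
        // "Covers" is a registered alias; it resolves to the same predicate as "Contains".
        SpatialOperation op = SpatialOperation.get("Covers");
        System.out.println(op.evaluate(indexed, query)); // true: the rectangle covers the point
      }
    }
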
For more - * explanation of each predicate, consider looking at the source implementation - * of {@link #evaluate(org.locationtech.spatial4j.shape.Shape, org.locationtech.spatial4j.shape.Shape)}. It's important - * to be aware that Lucene-spatial makes no distinction of shape boundaries, unlike many standardized - * definitions. Nor does it make dimensional distinctions (e.g. line vs polygon). - * You can lookup a predicate by "Covers" or "Contains", for example, and you will get the - * same underlying predicate implementation. + * explanation of each predicate, consider looking at the source implementation of {@link + * #evaluate(org.locationtech.spatial4j.shape.Shape, org.locationtech.spatial4j.shape.Shape)}. It's + * important to be aware that Lucene-spatial makes no distinction of shape boundaries, unlike many + * standardized definitions. Nor does it make dimensional distinctions (e.g. line vs polygon). You + * can lookup a predicate by "Covers" or "Contains", for example, and you will get the same + * underlying predicate implementation. * * @see DE-9IM at Wikipedia, based on OGC specs * @see - * ESRIs docs on spatial relations - * + * ESRIs docs on spatial relations * @lucene.experimental */ public abstract class SpatialOperation implements Serializable { - //TODO rename to SpatialPredicate. Use enum? LUCENE-5771 + // TODO rename to SpatialPredicate. Use enum? LUCENE-5771 // Private registry - private static final Map registry = new HashMap<>();//has aliases + private static final Map registry = new HashMap<>(); // has aliases private static final List list = new ArrayList<>(); // Geometry Operations /** Bounding box of the *indexed* shape, then {@link #Intersects}. */ - public static final SpatialOperation BBoxIntersects = new SpatialOperation("BBoxIntersects") { - @Override - public boolean evaluate(Shape indexedShape, Shape queryShape) { - return indexedShape.getBoundingBox().relate(queryShape).intersects(); - } - }; + public static final SpatialOperation BBoxIntersects = + new SpatialOperation("BBoxIntersects") { + @Override + public boolean evaluate(Shape indexedShape, Shape queryShape) { + return indexedShape.getBoundingBox().relate(queryShape).intersects(); + } + }; /** Bounding box of the *indexed* shape, then {@link #IsWithin}. */ - public static final SpatialOperation BBoxWithin = new SpatialOperation("BBoxWithin") { - { - register("BBoxCoveredBy");//alias -- the better name - } - @Override - public boolean evaluate(Shape indexedShape, Shape queryShape) { - Rectangle bbox = indexedShape.getBoundingBox(); - return bbox.relate(queryShape) == SpatialRelation.WITHIN || bbox.equals(queryShape); - } - }; + public static final SpatialOperation BBoxWithin = + new SpatialOperation("BBoxWithin") { + { + register("BBoxCoveredBy"); // alias -- the better name + } + + @Override + public boolean evaluate(Shape indexedShape, Shape queryShape) { + Rectangle bbox = indexedShape.getBoundingBox(); + return bbox.relate(queryShape) == SpatialRelation.WITHIN || bbox.equals(queryShape); + } + }; /** Meets the "Covers" OGC definition (boundary-neutral). 
*/ - public static final SpatialOperation Contains = new SpatialOperation("Contains") { - { - register("Covers");//alias -- the better name - } - @Override - public boolean evaluate(Shape indexedShape, Shape queryShape) { - return indexedShape.relate(queryShape) == SpatialRelation.CONTAINS || indexedShape.equals(queryShape); - } - }; + public static final SpatialOperation Contains = + new SpatialOperation("Contains") { + { + register("Covers"); // alias -- the better name + } + + @Override + public boolean evaluate(Shape indexedShape, Shape queryShape) { + return indexedShape.relate(queryShape) == SpatialRelation.CONTAINS + || indexedShape.equals(queryShape); + } + }; /** Meets the "Intersects" OGC definition. */ - public static final SpatialOperation Intersects = new SpatialOperation("Intersects") { - @Override - public boolean evaluate(Shape indexedShape, Shape queryShape) { - return indexedShape.relate(queryShape).intersects(); - } - }; + public static final SpatialOperation Intersects = + new SpatialOperation("Intersects") { + @Override + public boolean evaluate(Shape indexedShape, Shape queryShape) { + return indexedShape.relate(queryShape).intersects(); + } + }; /** Meets the "Equals" OGC definition. */ - public static final SpatialOperation IsEqualTo = new SpatialOperation("Equals") { - { - register("IsEqualTo");//alias (deprecated) - } - @Override - public boolean evaluate(Shape indexedShape, Shape queryShape) { - return indexedShape.equals(queryShape); - } - }; + public static final SpatialOperation IsEqualTo = + new SpatialOperation("Equals") { + { + register("IsEqualTo"); // alias (deprecated) + } + + @Override + public boolean evaluate(Shape indexedShape, Shape queryShape) { + return indexedShape.equals(queryShape); + } + }; /** Meets the "Disjoint" OGC definition. */ - public static final SpatialOperation IsDisjointTo = new SpatialOperation("Disjoint") { - { - register("IsDisjointTo");//alias (deprecated) - } - @Override - public boolean evaluate(Shape indexedShape, Shape queryShape) { - return ! indexedShape.relate(queryShape).intersects(); - } - }; + public static final SpatialOperation IsDisjointTo = + new SpatialOperation("Disjoint") { + { + register("IsDisjointTo"); // alias (deprecated) + } + + @Override + public boolean evaluate(Shape indexedShape, Shape queryShape) { + return !indexedShape.relate(queryShape).intersects(); + } + }; /** Meets the "CoveredBy" OGC definition (boundary-neutral). */ - public static final SpatialOperation IsWithin = new SpatialOperation("Within") { - { - register("IsWithin");//alias (deprecated) - register("CoveredBy");//alias -- the more appropriate name. - } - @Override - public boolean evaluate(Shape indexedShape, Shape queryShape) { - return indexedShape.relate(queryShape) == SpatialRelation.WITHIN || indexedShape.equals(queryShape); - } - }; + public static final SpatialOperation IsWithin = + new SpatialOperation("Within") { + { + register("IsWithin"); // alias (deprecated) + register("CoveredBy"); // alias -- the more appropriate name. + } + + @Override + public boolean evaluate(Shape indexedShape, Shape queryShape) { + return indexedShape.relate(queryShape) == SpatialRelation.WITHIN + || indexedShape.equals(queryShape); + } + }; /** Almost meets the "Overlaps" OGC definition, but boundary-neutral (boundary==interior). 
*/ - public static final SpatialOperation Overlaps = new SpatialOperation("Overlaps") { - @Override - public boolean evaluate(Shape indexedShape, Shape queryShape) { - return indexedShape.relate(queryShape) == SpatialRelation.INTERSECTS;//not Contains or Within or Disjoint - } - }; + public static final SpatialOperation Overlaps = + new SpatialOperation("Overlaps") { + @Override + public boolean evaluate(Shape indexedShape, Shape queryShape) { + return indexedShape.relate(queryShape) + == SpatialRelation.INTERSECTS; // not Contains or Within or Disjoint + } + }; private final String name; protected SpatialOperation(String name) { this.name = name; register(name); - list.add( this ); + list.add(this); } protected void register(String name) { @@ -138,13 +152,13 @@ public abstract class SpatialOperation implements Serializable { registry.put(name.toUpperCase(Locale.ROOT), this); } - public static SpatialOperation get( String v ) { - SpatialOperation op = registry.get( v ); - if( op == null ) { + public static SpatialOperation get(String v) { + SpatialOperation op = registry.get(v); + if (op == null) { op = registry.get(v.toUpperCase(Locale.ROOT)); } - if( op == null ) { - throw new IllegalArgumentException("Unknown Operation: " + v ); + if (op == null) { + throw new IllegalArgumentException("Unknown Operation: " + v); } return op; } @@ -153,9 +167,9 @@ public abstract class SpatialOperation implements Serializable { return list; } - public static boolean is( SpatialOperation op, SpatialOperation ... tst ) { - for( SpatialOperation t : tst ) { - if( op == t ) { + public static boolean is(SpatialOperation op, SpatialOperation... tst) { + for (SpatialOperation t : tst) { + if (op == t) { return true; } } @@ -163,8 +177,8 @@ public abstract class SpatialOperation implements Serializable { } /** - * Returns whether the relationship between indexedShape and queryShape is - * satisfied by this operation. + * Returns whether the relationship between indexedShape and queryShape is satisfied by this + * operation. */ public abstract boolean evaluate(Shape indexedShape, Shape queryShape); diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/query/UnsupportedSpatialOperation.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/query/UnsupportedSpatialOperation.java index d6cb152b4c1..e6046ea662c 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/query/UnsupportedSpatialOperation.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/query/UnsupportedSpatialOperation.java @@ -17,7 +17,9 @@ package org.apache.lucene.spatial.query; /** - * Exception thrown when the {@link org.apache.lucene.spatial.SpatialStrategy} cannot implement the requested operation. + * Exception thrown when the {@link org.apache.lucene.spatial.SpatialStrategy} cannot implement the + * requested operation. + * * @lucene.experimental */ public class UnsupportedSpatialOperation extends UnsupportedOperationException { diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/query/package-info.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/query/package-info.java index a7946a4dc41..bd94c15a898 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/query/package-info.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/query/package-info.java @@ -15,7 +15,5 @@ * limitations under the License. 
*/ -/** - * Spatial Query options useful for client side requests - */ +/** Spatial Query options useful for client side requests */ package org.apache.lucene.spatial.query; diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/serialized/SerializedDVStrategy.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/serialized/SerializedDVStrategy.java index da70f0d39eb..b5d5fa5744a 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/serialized/SerializedDVStrategy.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/serialized/SerializedDVStrategy.java @@ -22,7 +22,6 @@ import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.FilterOutputStream; import java.io.IOException; - import org.apache.lucene.document.BinaryDocValuesField; import org.apache.lucene.document.Field; import org.apache.lucene.index.BinaryDocValues; @@ -51,81 +50,77 @@ import org.locationtech.spatial4j.io.BinaryCodec; import org.locationtech.spatial4j.shape.Point; import org.locationtech.spatial4j.shape.Shape; - /** - * A SpatialStrategy based on serializing a Shape stored into BinaryDocValues. - * This is not at all fast; it's designed to be used in conjunction with another index based - * SpatialStrategy that is approximated (like {@link org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy}) - * to add precision or eventually make more specific / advanced calculations on the per-document - * geometry. + * A SpatialStrategy based on serializing a Shape stored into BinaryDocValues. This is not at all + * fast; it's designed to be used in conjunction with another index based SpatialStrategy that is + * approximated (like {@link org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy}) to add + * precision or eventually make more specific / advanced calculations on the per-document geometry. * The serialization uses Spatial4j's {@link org.locationtech.spatial4j.io.BinaryCodec}. * * @lucene.experimental */ public class SerializedDVStrategy extends SpatialStrategy { - /** - * A cache heuristic for the buf size based on the last shape size. - */ - //TODO do we make this non-volatile since it's merely a heuristic? - private volatile int indexLastBufSize = 8 * 1024;//8KB default on first run + /** A cache heuristic for the buf size based on the last shape size. */ + // TODO do we make this non-volatile since it's merely a heuristic? + private volatile int indexLastBufSize = 8 * 1024; // 8KB default on first run - /** - * Constructs the spatial strategy with its mandatory arguments. - */ + /** Constructs the spatial strategy with its mandatory arguments. 
*/ public SerializedDVStrategy(SpatialContext ctx, String fieldName) { super(ctx, fieldName); } @Override public Field[] createIndexableFields(Shape shape) { - int bufSize = Math.max(128, (int) (this.indexLastBufSize * 1.5));//50% headroom over last + int bufSize = Math.max(128, (int) (this.indexLastBufSize * 1.5)); // 50% headroom over last ByteArrayOutputStream byteStream = new ByteArrayOutputStream(bufSize); - final BytesRef bytesRef = new BytesRef();//receiver of byteStream's bytes + final BytesRef bytesRef = new BytesRef(); // receiver of byteStream's bytes try { ctx.getBinaryCodec().writeShape(new DataOutputStream(byteStream), shape); - //this is a hack to avoid redundant byte array copying by byteStream.toByteArray() - byteStream.writeTo(new FilterOutputStream(null/*not used*/) { - @Override - public void write(byte[] b, int off, int len) throws IOException { - bytesRef.bytes = b; - bytesRef.offset = off; - bytesRef.length = len; - } - }); + // this is a hack to avoid redundant byte array copying by byteStream.toByteArray() + byteStream.writeTo( + new FilterOutputStream(null /*not used*/) { + @Override + public void write(byte[] b, int off, int len) throws IOException { + bytesRef.bytes = b; + bytesRef.offset = off; + bytesRef.length = len; + } + }); } catch (IOException e) { throw new RuntimeException(e); } - this.indexLastBufSize = bytesRef.length;//cache heuristic - return new Field[]{new BinaryDocValuesField(getFieldName(), bytesRef)}; + this.indexLastBufSize = bytesRef.length; // cache heuristic + return new Field[] {new BinaryDocValuesField(getFieldName(), bytesRef)}; } @Override public DoubleValuesSource makeDistanceValueSource(Point queryPoint, double multiplier) { - //TODO if makeShapeValueSource gets lifted to the top; this could become a generic impl. + // TODO if makeShapeValueSource gets lifted to the top; this could become a generic impl. return new DistanceToShapeValueSource(makeShapeValueSource(), queryPoint, multiplier, ctx); } /** - * Returns a Query that should be used in a random-access fashion. - * Use in another manner will be SLOW. + * Returns a Query that should be used in a random-access fashion. Use in another manner will be + * SLOW. */ @Override public Query makeQuery(SpatialArgs args) { ShapeValuesSource shapeValueSource = makeShapeValueSource(); - ShapeValuesPredicate predicateValueSource = new ShapeValuesPredicate(shapeValueSource, args.getOperation(), args.getShape()); + ShapeValuesPredicate predicateValueSource = + new ShapeValuesPredicate(shapeValueSource, args.getOperation(), args.getShape()); return new PredicateValueSourceQuery(predicateValueSource); } - /** - * Provides access to each shape per document - */ //TODO raise to SpatialStrategy + /** Provides access to each shape per document */ + // TODO raise to SpatialStrategy public ShapeValuesSource makeShapeValueSource() { return new ShapeDocValueSource(getFieldName(), ctx.getBinaryCodec()); } - /** Warning: don't iterate over the results of this query; it's designed for use in a random-access fashion - * by {@link TwoPhaseIterator}. + /** + * Warning: don't iterate over the results of this query; it's designed for use in a random-access + * fashion by {@link TwoPhaseIterator}. 
*/ static class PredicateValueSourceQuery extends Query { private final ShapeValuesPredicate predicateValueSource; @@ -135,7 +130,8 @@ public class SerializedDVStrategy extends SpatialStrategy { } @Override - public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) + throws IOException { return new ConstantScoreWeight(this, boost) { @Override public Scorer scorer(LeafReaderContext context) throws IOException { @@ -148,7 +144,6 @@ public class SerializedDVStrategy extends SpatialStrategy { public boolean isCacheable(LeafReaderContext ctx) { return predicateValueSource.isCacheable(ctx); } - }; } @@ -159,8 +154,8 @@ public class SerializedDVStrategy extends SpatialStrategy { @Override public boolean equals(Object other) { - return sameClassAs(other) && - predicateValueSource.equals(((PredicateValueSourceQuery) other).predicateValueSource); + return sameClassAs(other) + && predicateValueSource.equals(((PredicateValueSourceQuery) other).predicateValueSource); } @Override @@ -170,20 +165,19 @@ public class SerializedDVStrategy extends SpatialStrategy { @Override public String toString(String field) { - return "PredicateValueSourceQuery(" + - predicateValueSource.toString() + - ")"; + return "PredicateValueSourceQuery(" + predicateValueSource.toString() + ")"; } - }//PredicateValueSourceQuery + } // PredicateValueSourceQuery /** * Implements a ShapeValueSource by deserializing a Shape from BinaryDocValues using BinaryCodec. + * * @see #makeShapeValueSource() */ static class ShapeDocValueSource extends ShapeValuesSource { private final String fieldName; - private final BinaryCodec binaryCodec;//spatial4j + private final BinaryCodec binaryCodec; // spatial4j private ShapeDocValueSource(String fieldName, BinaryCodec binaryCodec) { this.fieldName = fieldName; @@ -203,11 +197,11 @@ public class SerializedDVStrategy extends SpatialStrategy { @Override public Shape value() throws IOException { BytesRef bytesRef = docValues.binaryValue(); - DataInputStream dataInput - = new DataInputStream(new ByteArrayInputStream(bytesRef.bytes, bytesRef.offset, bytesRef.length)); + DataInputStream dataInput = + new DataInputStream( + new ByteArrayInputStream(bytesRef.bytes, bytesRef.offset, bytesRef.length)); return binaryCodec.readShape(dataInput); } - }; } @@ -238,5 +232,5 @@ public class SerializedDVStrategy extends SpatialStrategy { public String toString() { return "shapeDocVal(" + fieldName + ")"; } - }//ShapeDocValueSource + } // ShapeDocValueSource } diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/serialized/package-info.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/serialized/package-info.java index 8f88a73cd8b..58b2a445ac2 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/serialized/package-info.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/serialized/package-info.java @@ -15,7 +15,5 @@ * limitations under the License. */ -/** - * Strategies that serialize the shape (non-indexed). - */ -package org.apache.lucene.spatial.serialized; \ No newline at end of file +/** Strategies that serialize the shape (non-indexed). 
*/ +package org.apache.lucene.spatial.serialized; diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dBinaryCodec.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dBinaryCodec.java index 6754e746b70..2ccb5d3d9f0 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dBinaryCodec.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dBinaryCodec.java @@ -22,7 +22,6 @@ import java.io.DataOutput; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; - import org.apache.lucene.spatial3d.geom.GeoAreaShape; import org.apache.lucene.spatial3d.geom.GeoBBox; import org.apache.lucene.spatial3d.geom.GeoCircle; @@ -54,12 +53,14 @@ public class Geo3dBinaryCodec extends BinaryCodec { @Override public Shape readShape(DataInput dataInput) throws IOException { - SerializableObject serializableObject = SerializableObject.readObject(planetModel, (InputStream) dataInput); + SerializableObject serializableObject = + SerializableObject.readObject(planetModel, (InputStream) dataInput); if (serializableObject instanceof GeoAreaShape) { GeoAreaShape shape = (GeoAreaShape) serializableObject; return new Geo3dShape<>(shape, ctx); } - throw new IllegalArgumentException("trying to read a not supported shape: " + serializableObject.getClass()); + throw new IllegalArgumentException( + "trying to read a not supported shape: " + serializableObject.getClass()); } @Override @@ -68,18 +69,21 @@ public class Geo3dBinaryCodec extends BinaryCodec { Geo3dShape geoAreaShape = (Geo3dShape) s; SerializableObject.writeObject((OutputStream) dataOutput, geoAreaShape.shape); } else { - throw new IllegalArgumentException("trying to write a not supported shape: " + s.getClass().getName()); + throw new IllegalArgumentException( + "trying to write a not supported shape: " + s.getClass().getName()); } } @Override public Point readPoint(DataInput dataInput) throws IOException { - SerializableObject serializableObject = SerializableObject.readObject(planetModel, (InputStream) dataInput); + SerializableObject serializableObject = + SerializableObject.readObject(planetModel, (InputStream) dataInput); if (serializableObject instanceof GeoPointShape) { GeoPointShape shape = (GeoPointShape) serializableObject; return new Geo3dPointShape(shape, ctx); } - throw new IllegalArgumentException("trying to read a not supported point shape: " + serializableObject.getClass()); + throw new IllegalArgumentException( + "trying to read a not supported point shape: " + serializableObject.getClass()); } @Override @@ -89,12 +93,14 @@ public class Geo3dBinaryCodec extends BinaryCodec { @Override public Rectangle readRect(DataInput dataInput) throws IOException { - SerializableObject serializableObject = SerializableObject.readObject(planetModel, (InputStream) dataInput); + SerializableObject serializableObject = + SerializableObject.readObject(planetModel, (InputStream) dataInput); if (serializableObject instanceof GeoBBox) { GeoBBox shape = (GeoBBox) serializableObject; return new Geo3dRectangleShape(shape, ctx); } - throw new IllegalArgumentException("trying to read a not supported rectangle shape: " + serializableObject.getClass()); + throw new IllegalArgumentException( + "trying to read a not supported rectangle shape: " + serializableObject.getClass()); } @Override @@ -104,12 +110,14 @@ public class Geo3dBinaryCodec extends BinaryCodec { @Override public Circle readCircle(DataInput dataInput) throws IOException { 
- SerializableObject serializableObject = SerializableObject.readObject(planetModel, (InputStream) dataInput); + SerializableObject serializableObject = + SerializableObject.readObject(planetModel, (InputStream) dataInput); if (serializableObject instanceof GeoCircle) { GeoCircle shape = (GeoCircle) serializableObject; return new Geo3dCircleShape(shape, ctx); } - throw new IllegalArgumentException("trying to read a not supported circle shape: " + serializableObject.getClass()); + throw new IllegalArgumentException( + "trying to read a not supported circle shape: " + serializableObject.getClass()); } @Override diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dCircleShape.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dCircleShape.java index d01e2b8e74d..f459138d63e 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dCircleShape.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dCircleShape.java @@ -38,10 +38,12 @@ public class Geo3dCircleShape extends Geo3dShape implements Circle { @Override public void reset(double x, double y, double radiusDEG) { - shape = GeoCircleFactory.makeGeoCircle(shape.getPlanetModel(), - y * DistanceUtils.DEGREES_TO_RADIANS, - x * DistanceUtils.DEGREES_TO_RADIANS, - radiusDEG * DistanceUtils.DEGREES_TO_RADIANS); + shape = + GeoCircleFactory.makeGeoCircle( + shape.getPlanetModel(), + y * DistanceUtils.DEGREES_TO_RADIANS, + x * DistanceUtils.DEGREES_TO_RADIANS, + radiusDEG * DistanceUtils.DEGREES_TO_RADIANS); center = null; boundingBox = null; } @@ -53,13 +55,15 @@ public class Geo3dCircleShape extends Geo3dShape implements Circle { @Override public Point getCenter() { - Point center = this.center;//volatile read once + Point center = this.center; // volatile read once if (center == null) { - center = new Geo3dPointShape( - GeoPointShapeFactory.makeGeoPointShape(shape.getPlanetModel(), - shape.getCenter().getLatitude(), - shape.getCenter().getLongitude()), - spatialcontext); + center = + new Geo3dPointShape( + GeoPointShapeFactory.makeGeoPointShape( + shape.getPlanetModel(), + shape.getCenter().getLatitude(), + shape.getCenter().getLongitude()), + spatialcontext); this.center = center; } return center; diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dDistanceCalculator.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dDistanceCalculator.java index 8fdb4813dd5..461f05c095d 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dDistanceCalculator.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dDistanceCalculator.java @@ -45,7 +45,8 @@ public class Geo3dDistanceCalculator implements DistanceCalculator { if (from instanceof Geo3dPointShape && to instanceof Geo3dPointShape) { GeoPointShape pointShape1 = ((Geo3dPointShape) from).shape; GeoPointShape pointShape2 = ((Geo3dPointShape) to).shape; - return planetModel.surfaceDistance(pointShape1.getCenter(), pointShape2.getCenter()) * DistanceUtils.RADIANS_TO_DEGREES; + return planetModel.surfaceDistance(pointShape1.getCenter(), pointShape2.getCenter()) + * DistanceUtils.RADIANS_TO_DEGREES; } return distance(from, to.getX(), to.getY()); } @@ -56,13 +57,17 @@ public class Geo3dDistanceCalculator implements DistanceCalculator { if (from instanceof Geo3dPointShape) { fromGeoPoint = (((Geo3dPointShape) from).shape).getCenter(); } else { - fromGeoPoint = new 
GeoPoint(planetModel, - from.getY() * DistanceUtils.DEGREES_TO_RADIANS, - from.getX() * DistanceUtils.DEGREES_TO_RADIANS); + fromGeoPoint = + new GeoPoint( + planetModel, + from.getY() * DistanceUtils.DEGREES_TO_RADIANS, + from.getX() * DistanceUtils.DEGREES_TO_RADIANS); } - GeoPoint toGeoPoint = new GeoPoint(planetModel, - toY * DistanceUtils.DEGREES_TO_RADIANS, - toX * DistanceUtils.DEGREES_TO_RADIANS); + GeoPoint toGeoPoint = + new GeoPoint( + planetModel, + toY * DistanceUtils.DEGREES_TO_RADIANS, + toX * DistanceUtils.DEGREES_TO_RADIANS); return planetModel.surfaceDistance(fromGeoPoint, toGeoPoint) * DistanceUtils.RADIANS_TO_DEGREES; } @@ -72,7 +77,8 @@ public class Geo3dDistanceCalculator implements DistanceCalculator { } @Override - public Point pointOnBearing(Point from, double distDEG, double bearingDEG, SpatialContext ctx, Point reuse) { + public Point pointOnBearing( + Point from, double distDEG, double bearingDEG, SpatialContext ctx, Point reuse) { Geo3dPointShape geoFrom = (Geo3dPointShape) from; GeoPoint point = (GeoPoint) geoFrom.shape; double dist = DistanceUtils.DEGREES_TO_RADIANS * distDEG; @@ -83,14 +89,14 @@ public class Geo3dDistanceCalculator implements DistanceCalculator { if (reuse != null) { reuse.reset(newLon, newLat); return reuse; - } - else { + } else { return ctx.getShapeFactory().pointXY(newLon, newLat); } } @Override - public Rectangle calcBoxByDistFromPt(Point from, double distDEG, SpatialContext ctx, Rectangle reuse) { + public Rectangle calcBoxByDistFromPt( + Point from, double distDEG, SpatialContext ctx, Rectangle reuse) { Circle circle = ctx.getShapeFactory().circle(from, distDEG); return circle.getBoundingBox(); } diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dPointShape.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dPointShape.java index c0d127df914..909c71bd118 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dPointShape.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dPointShape.java @@ -39,9 +39,11 @@ public class Geo3dPointShape extends Geo3dShape implements Point @Override public void reset(double x, double y) { - shape = GeoPointShapeFactory.makeGeoPointShape(shape.getPlanetModel(), - y * DistanceUtils.DEGREES_TO_RADIANS, - x * DistanceUtils.DEGREES_TO_RADIANS); + shape = + GeoPointShapeFactory.makeGeoPointShape( + shape.getPlanetModel(), + y * DistanceUtils.DEGREES_TO_RADIANS, + x * DistanceUtils.DEGREES_TO_RADIANS); center = this; boundingBox = null; } @@ -58,7 +60,7 @@ public class Geo3dPointShape extends Geo3dShape implements Point @Override public Rectangle getBoundingBox() { - Rectangle bbox = this.boundingBox;//volatile read once + Rectangle bbox = this.boundingBox; // volatile read once if (bbox == null) { bbox = new Geo3dRectangleShape(shape, spatialcontext); this.boundingBox = bbox; diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dRectangleShape.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dRectangleShape.java index d354dcc698a..720c12c0d5c 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dRectangleShape.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dRectangleShape.java @@ -41,12 +41,13 @@ public class Geo3dRectangleShape extends Geo3dShape implements Rectangl private double minY; private double maxY; - public Geo3dRectangleShape(final GeoBBox shape, - final 
SpatialContext spatialcontext, - double minX, - double maxX, - double minY, - double maxY) { + public Geo3dRectangleShape( + final GeoBBox shape, + final SpatialContext spatialcontext, + double minX, + double maxX, + double minY, + double maxY) { super(shape, spatialcontext); this.minX = minX; this.maxX = maxX; @@ -59,29 +60,38 @@ public class Geo3dRectangleShape extends Geo3dShape implements Rectangl setBoundsFromshape(); } - - /** - * Set the bounds from the wrapped GeoBBox. - */ + /** Set the bounds from the wrapped GeoBBox. */ private void setBoundsFromshape() { LatLonBounds bounds = new LatLonBounds(); shape.getBounds(bounds); - minX = bounds.checkNoLongitudeBound() ? -180.0 : bounds.getLeftLongitude() * DistanceUtils.RADIANS_TO_DEGREES; - minY = bounds.checkNoBottomLatitudeBound() ? -90.0 : bounds.getMinLatitude() * DistanceUtils.RADIANS_TO_DEGREES; - maxX = bounds.checkNoLongitudeBound() ? 180.0 : bounds.getRightLongitude() * DistanceUtils.RADIANS_TO_DEGREES; - maxY = bounds.checkNoTopLatitudeBound() ? 90.0 : bounds.getMaxLatitude() * DistanceUtils.RADIANS_TO_DEGREES; + minX = + bounds.checkNoLongitudeBound() + ? -180.0 + : bounds.getLeftLongitude() * DistanceUtils.RADIANS_TO_DEGREES; + minY = + bounds.checkNoBottomLatitudeBound() + ? -90.0 + : bounds.getMinLatitude() * DistanceUtils.RADIANS_TO_DEGREES; + maxX = + bounds.checkNoLongitudeBound() + ? 180.0 + : bounds.getRightLongitude() * DistanceUtils.RADIANS_TO_DEGREES; + maxY = + bounds.checkNoTopLatitudeBound() + ? 90.0 + : bounds.getMaxLatitude() * DistanceUtils.RADIANS_TO_DEGREES; } @Override public Point getCenter() { - Point center = this.center;//volatile read once + Point center = this.center; // volatile read once if (center == null) { GeoPoint point = shape.getCenter(); - center = new Geo3dPointShape( - GeoPointShapeFactory.makeGeoPointShape(shape.getPlanetModel(), - point.getLatitude(), - point.getLongitude()), - spatialcontext); + center = + new Geo3dPointShape( + GeoPointShapeFactory.makeGeoPointShape( + shape.getPlanetModel(), point.getLatitude(), point.getLongitude()), + spatialcontext); this.center = center; } return center; @@ -89,11 +99,13 @@ public class Geo3dRectangleShape extends Geo3dShape implements Rectangl @Override public void reset(double minX, double maxX, double minY, double maxY) { - shape = GeoBBoxFactory.makeGeoBBox(shape.getPlanetModel(), - maxY * DistanceUtils.DEGREES_TO_RADIANS, - minY * DistanceUtils.DEGREES_TO_RADIANS, - minX * DistanceUtils.DEGREES_TO_RADIANS, - maxX * DistanceUtils.DEGREES_TO_RADIANS); + shape = + GeoBBoxFactory.makeGeoBBox( + shape.getPlanetModel(), + maxY * DistanceUtils.DEGREES_TO_RADIANS, + minY * DistanceUtils.DEGREES_TO_RADIANS, + minX * DistanceUtils.DEGREES_TO_RADIANS, + maxX * DistanceUtils.DEGREES_TO_RADIANS); center = null; boundingBox = null; } @@ -140,7 +152,6 @@ public class Geo3dRectangleShape extends Geo3dShape implements Rectangl @Override public boolean getCrossesDateLine() { return (getMaxX() > 0 && getMinX() < 0); - } @Override diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dShape.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dShape.java index 327ac8f581f..299a6cd1ace 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dShape.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dShape.java @@ -32,14 +32,12 @@ import org.locationtech.spatial4j.shape.Shape; import org.locationtech.spatial4j.shape.SpatialRelation; /** - * A Spatial4j 
Shape wrapping a {@link GeoAreaShape} ("Geo3D") -- a 3D planar geometry - * based Spatial4j Shape implementation. - * Geo3D implements shapes on the surface of a sphere or ellipsoid. + * A Spatial4j Shape wrapping a {@link GeoAreaShape} ("Geo3D") -- a 3D planar geometry based + * Spatial4j Shape implementation. Geo3D implements shapes on the surface of a sphere or ellipsoid. * * @param is the type of {@link GeoAreaShape} * @lucene.experimental */ - public class Geo3dShape implements Shape { protected final SpatialContext spatialcontext; @@ -63,7 +61,8 @@ public class Geo3dShape implements Shape { } else if (other instanceof Point) { relationship = relate((Point) other); } else { - throw new RuntimeException("Unimplemented shape relationship determination: " + other.getClass()); + throw new RuntimeException( + "Unimplemented shape relationship determination: " + other.getClass()); } switch (relationship) { @@ -86,19 +85,23 @@ public class Geo3dShape implements Shape { private int relate(Rectangle r) { // Construct the right kind of GeoArea first - GeoArea geoArea = GeoAreaFactory.makeGeoArea(shape.getPlanetModel(), - r.getMaxY() * DistanceUtils.DEGREES_TO_RADIANS, - r.getMinY() * DistanceUtils.DEGREES_TO_RADIANS, - r.getMinX() * DistanceUtils.DEGREES_TO_RADIANS, - r.getMaxX() * DistanceUtils.DEGREES_TO_RADIANS); + GeoArea geoArea = + GeoAreaFactory.makeGeoArea( + shape.getPlanetModel(), + r.getMaxY() * DistanceUtils.DEGREES_TO_RADIANS, + r.getMinY() * DistanceUtils.DEGREES_TO_RADIANS, + r.getMinX() * DistanceUtils.DEGREES_TO_RADIANS, + r.getMaxX() * DistanceUtils.DEGREES_TO_RADIANS); return geoArea.getRelationship(shape); } private int relate(Point p) { - GeoPoint point = new GeoPoint(shape.getPlanetModel(), - p.getY() * DistanceUtils.DEGREES_TO_RADIANS, - p.getX() * DistanceUtils.DEGREES_TO_RADIANS); + GeoPoint point = + new GeoPoint( + shape.getPlanetModel(), + p.getY() * DistanceUtils.DEGREES_TO_RADIANS, + p.getX() * DistanceUtils.DEGREES_TO_RADIANS); if (shape.isWithin(point)) { return GeoArea.WITHIN; @@ -108,7 +111,7 @@ public class Geo3dShape implements Shape { @Override public Rectangle getBoundingBox() { - Rectangle bbox = this.boundingBox;//volatile read once + Rectangle bbox = this.boundingBox; // volatile read once if (bbox == null) { LatLonBounds bounds = new LatLonBounds(); shape.getBounds(bounds); @@ -131,7 +134,7 @@ public class Geo3dShape implements Shape { @Override public Point getCenter() { - Point center = this.center;//volatile read once + Point center = this.center; // volatile read once if (center == null) { center = getBoundingBox().getCenter(); this.center = center; @@ -161,8 +164,7 @@ public class Geo3dShape implements Shape { @Override public boolean equals(Object o) { - if (!(o instanceof Geo3dShape)) - return false; + if (!(o instanceof Geo3dShape)) return false; final Geo3dShape other = (Geo3dShape) o; return (other.spatialcontext.equals(spatialcontext) && other.shape.equals(shape)); } diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dShapeFactory.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dShapeFactory.java index ee30d62a291..75b7460b62a 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dShapeFactory.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dShapeFactory.java @@ -17,12 +17,11 @@ package org.apache.lucene.spatial.spatial4j; -import java.util.ArrayList; -import java.util.List; - import com.google.common.geometry.S2Cell; 
import com.google.common.geometry.S2CellId; import com.google.common.geometry.S2Point; +import java.util.ArrayList; +import java.util.List; import org.apache.lucene.spatial.prefix.tree.S2ShapeFactory; import org.apache.lucene.spatial3d.geom.GeoBBox; import org.apache.lucene.spatial3d.geom.GeoBBoxFactory; @@ -60,10 +59,11 @@ public class Geo3dShapeFactory implements S2ShapeFactory { private PlanetModel planetModel; /** - * Default accuracy for circles when not using the unit sphere. - * It is equivalent to ~10m on the surface of the earth. + * Default accuracy for circles when not using the unit sphere. It is equivalent to ~10m on the + * surface of the earth. */ private static final double DEFAULT_CIRCLE_ACCURACY = 1e-4; + private double circleAccuracy = DEFAULT_CIRCLE_ACCURACY; @SuppressWarnings("unchecked") @@ -79,8 +79,8 @@ public class Geo3dShapeFactory implements S2ShapeFactory { } /** - * Set the accuracy for circles in decimal degrees. Note that accuracy has no effect - * when the planet model is a sphere. In that case, circles are always fully precise. + * Set the accuracy for circles in decimal degrees. Note that accuracy has no effect when the + * planet model is a sphere. In that case, circles are always fully precise. * * @param circleAccuracy the provided accuracy in decimal degrees. */ @@ -133,25 +133,26 @@ public class Geo3dShapeFactory implements S2ShapeFactory { } @Override - public void verifyZ(double v) { - } + public void verifyZ(double v) {} @Override public Point pointXY(double x, double y) { - GeoPointShape point = GeoPointShapeFactory.makeGeoPointShape(planetModel, - y * DistanceUtils.DEGREES_TO_RADIANS, - x * DistanceUtils.DEGREES_TO_RADIANS); + GeoPointShape point = + GeoPointShapeFactory.makeGeoPointShape( + planetModel, + y * DistanceUtils.DEGREES_TO_RADIANS, + x * DistanceUtils.DEGREES_TO_RADIANS); return new Geo3dPointShape(point, context); } @Override public Point pointXYZ(double x, double y, double z) { GeoPoint point = new GeoPoint(x, y, z); - GeoPointShape pointShape = GeoPointShapeFactory.makeGeoPointShape(planetModel, - point.getLatitude(), - point.getLongitude()); + GeoPointShape pointShape = + GeoPointShapeFactory.makeGeoPointShape( + planetModel, point.getLatitude(), point.getLongitude()); return new Geo3dPointShape(pointShape, context); - //throw new UnsupportedOperationException(); + // throw new UnsupportedOperationException(); } @Override @@ -161,11 +162,13 @@ public class Geo3dShapeFactory implements S2ShapeFactory { @Override public Rectangle rect(double minX, double maxX, double minY, double maxY) { - GeoBBox bBox = GeoBBoxFactory.makeGeoBBox(planetModel, - maxY * DistanceUtils.DEGREES_TO_RADIANS, - minY * DistanceUtils.DEGREES_TO_RADIANS, - minX * DistanceUtils.DEGREES_TO_RADIANS, - maxX * DistanceUtils.DEGREES_TO_RADIANS); + GeoBBox bBox = + GeoBBoxFactory.makeGeoBBox( + planetModel, + maxY * DistanceUtils.DEGREES_TO_RADIANS, + minY * DistanceUtils.DEGREES_TO_RADIANS, + minX * DistanceUtils.DEGREES_TO_RADIANS, + maxX * DistanceUtils.DEGREES_TO_RADIANS); return new Geo3dRectangleShape(bBox, context, minX, maxX, minY, maxY); } @@ -173,20 +176,22 @@ public class Geo3dShapeFactory implements S2ShapeFactory { public Circle circle(double x, double y, double distance) { GeoCircle circle; if (planetModel.isSphere()) { - circle = GeoCircleFactory.makeGeoCircle(planetModel, - y * DistanceUtils.DEGREES_TO_RADIANS, - x * DistanceUtils.DEGREES_TO_RADIANS, - distance * DistanceUtils.DEGREES_TO_RADIANS); - } - else { - //accuracy is defined as a linear 
distance in this class. At tiny distances, linear distance - //can be approximated to surface distance in radians. - circle = GeoCircleFactory.makeExactGeoCircle(planetModel, - y * DistanceUtils.DEGREES_TO_RADIANS, - x * DistanceUtils.DEGREES_TO_RADIANS, - distance * DistanceUtils.DEGREES_TO_RADIANS, - circleAccuracy * DistanceUtils.DEGREES_TO_RADIANS); - + circle = + GeoCircleFactory.makeGeoCircle( + planetModel, + y * DistanceUtils.DEGREES_TO_RADIANS, + x * DistanceUtils.DEGREES_TO_RADIANS, + distance * DistanceUtils.DEGREES_TO_RADIANS); + } else { + // accuracy is defined as a linear distance in this class. At tiny distances, linear distance + // can be approximated to surface distance in radians. + circle = + GeoCircleFactory.makeExactGeoCircle( + planetModel, + y * DistanceUtils.DEGREES_TO_RADIANS, + x * DistanceUtils.DEGREES_TO_RADIANS, + distance * DistanceUtils.DEGREES_TO_RADIANS, + circleAccuracy * DistanceUtils.DEGREES_TO_RADIANS); } return new Geo3dCircleShape(circle, context); } @@ -250,7 +255,8 @@ public class Geo3dShapeFactory implements S2ShapeFactory { GeoPoint point2 = getGeoPoint(cell.getVertexRaw(1)); GeoPoint point3 = getGeoPoint(cell.getVertexRaw(2)); GeoPoint point4 = getGeoPoint(cell.getVertexRaw(3)); - return new Geo3dShape<>(GeoS2ShapeFactory.makeGeoS2Shape(planetModel, point1, point2, point3, point4), context); + return new Geo3dShape<>( + GeoS2ShapeFactory.makeGeoS2Shape(planetModel, point1, point2, point3, point4), context); } private GeoPoint getGeoPoint(S2Point point) { @@ -258,8 +264,8 @@ public class Geo3dShapeFactory implements S2ShapeFactory { } /** - * Geo3d implementation of {@link org.locationtech.spatial4j.shape.ShapeFactory.PointsBuilder} interface to - * generate {@link GeoPoint}. + * Geo3d implementation of {@link org.locationtech.spatial4j.shape.ShapeFactory.PointsBuilder} + * interface to generate {@link GeoPoint}. * * @param is normally this object */ @@ -270,7 +276,11 @@ public class Geo3dShapeFactory implements S2ShapeFactory { @SuppressWarnings("unchecked") @Override public T pointXY(double x, double y) { - GeoPoint point = new GeoPoint(planetModel, y * DistanceUtils.DEGREES_TO_RADIANS, x * DistanceUtils.DEGREES_TO_RADIANS); + GeoPoint point = + new GeoPoint( + planetModel, + y * DistanceUtils.DEGREES_TO_RADIANS, + x * DistanceUtils.DEGREES_TO_RADIANS); points.add(point); return (T) this; } @@ -287,10 +297,11 @@ public class Geo3dShapeFactory implements S2ShapeFactory { } /** - * Geo3d implementation of {@link org.locationtech.spatial4j.shape.ShapeFactory.LineStringBuilder} to generate - * line strings. + * Geo3d implementation of {@link org.locationtech.spatial4j.shape.ShapeFactory.LineStringBuilder} + * to generate line strings. */ - private class Geo3dLineStringBuilder extends Geo3dPointBuilder implements LineStringBuilder { + private class Geo3dLineStringBuilder extends Geo3dPointBuilder + implements LineStringBuilder { double distance = 0; @@ -302,16 +313,19 @@ public class Geo3dShapeFactory implements S2ShapeFactory { @Override public Shape build() { - GeoPath path = GeoPathFactory.makeGeoPath(planetModel, distance, points.toArray(new GeoPoint[points.size()])); + GeoPath path = + GeoPathFactory.makeGeoPath( + planetModel, distance, points.toArray(new GeoPoint[points.size()])); return new Geo3dShape<>(path, context); } } /** - * Geo3d implementation of {@link org.locationtech.spatial4j.shape.ShapeFactory.PolygonBuilder} to generate - * polygons. 
+ * Geo3d implementation of {@link org.locationtech.spatial4j.shape.ShapeFactory.PolygonBuilder} to + * generate polygons. */ - private class Geo3dPolygonBuilder extends Geo3dPointBuilder implements PolygonBuilder { + private class Geo3dPolygonBuilder extends Geo3dPointBuilder + implements PolygonBuilder { List polyHoles = new ArrayList<>(); @@ -320,7 +334,8 @@ public class Geo3dShapeFactory implements S2ShapeFactory { return new Geo3dHoleBuilder(); } - class Geo3dHoleBuilder extends Geo3dPointBuilder implements PolygonBuilder.HoleBuilder { + class Geo3dHoleBuilder extends Geo3dPointBuilder + implements PolygonBuilder.HoleBuilder { @Override public PolygonBuilder endHole() { polyHoles.add(new GeoPolygonFactory.PolygonDescription(points)); @@ -331,7 +346,8 @@ public class Geo3dShapeFactory implements S2ShapeFactory { @SuppressWarnings("unchecked") @Override public Shape build() { - GeoPolygonFactory.PolygonDescription description = new GeoPolygonFactory.PolygonDescription(points, polyHoles); + GeoPolygonFactory.PolygonDescription description = + new GeoPolygonFactory.PolygonDescription(points, polyHoles); GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(planetModel, description); if (polygon == null) { throw new InvalidShapeException("Invalid polygon, all points are coplanar"); @@ -345,13 +361,16 @@ public class Geo3dShapeFactory implements S2ShapeFactory { } } - private class Geo3dMultiPointBuilder extends Geo3dPointBuilder implements MultiPointBuilder { + private class Geo3dMultiPointBuilder extends Geo3dPointBuilder + implements MultiPointBuilder { @Override public Shape build() { GeoCompositeAreaShape areaShape = new GeoCompositeAreaShape(planetModel); for (GeoPoint point : points) { - GeoPointShape pointShape = GeoPointShapeFactory.makeGeoPointShape(planetModel, point.getLatitude(), point.getLongitude()); + GeoPointShape pointShape = + GeoPointShapeFactory.makeGeoPointShape( + planetModel, point.getLatitude(), point.getLongitude()); areaShape.addShape(pointShape); } return new Geo3dShape<>(areaShape, context); @@ -359,8 +378,8 @@ public class Geo3dShapeFactory implements S2ShapeFactory { } /** - * Geo3d implementation of {@link org.locationtech.spatial4j.shape.ShapeFactory.MultiLineStringBuilder} to generate - * multi-lines + * Geo3d implementation of {@link + * org.locationtech.spatial4j.shape.ShapeFactory.MultiLineStringBuilder} to generate multi-lines */ private class Geo3dMultiLineBuilder implements MultiLineStringBuilder { @@ -390,9 +409,9 @@ public class Geo3dShapeFactory implements S2ShapeFactory { } /** - * Geo3d implementation of {@link org.locationtech.spatial4j.shape.ShapeFactory.MultiPolygonBuilder} to generate - * multi-polygons. We have chosen to use a composite shape but - * it might be possible to use GeoComplexPolygon. + * Geo3d implementation of {@link + * org.locationtech.spatial4j.shape.ShapeFactory.MultiPolygonBuilder} to generate multi-polygons. + * We have chosen to use a composite shape but it might be possible to use GeoComplexPolygon. */ private class Geo3dMultiPolygonBuilder implements MultiPolygonBuilder { @@ -422,8 +441,8 @@ public class Geo3dShapeFactory implements S2ShapeFactory { } /** - * Geo3d implementation of {@link org.locationtech.spatial4j.shape.ShapeFactory.MultiShapeBuilder} to generate - * geometry collections. + * Geo3d implementation of {@link org.locationtech.spatial4j.shape.ShapeFactory.MultiShapeBuilder} + * to generate geometry collections. * * @param is the type of shapes. 
*/ diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dSpatialContextFactory.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dSpatialContextFactory.java index 6124b90222c..9c6421abf66 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dSpatialContextFactory.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dSpatialContextFactory.java @@ -18,7 +18,6 @@ package org.apache.lucene.spatial.spatial4j; import java.util.Map; - import org.apache.lucene.spatial3d.geom.PlanetModel; import org.locationtech.spatial4j.context.SpatialContext; import org.locationtech.spatial4j.context.SpatialContextFactory; @@ -30,19 +29,13 @@ import org.locationtech.spatial4j.context.SpatialContextFactory; */ public class Geo3dSpatialContextFactory extends SpatialContextFactory { - /** - * The default planet model - */ + /** The default planet model */ private static final PlanetModel DEFAULT_PLANET_MODEL = PlanetModel.SPHERE; - /** - * The planet model - */ + /** The planet model */ public PlanetModel planetModel; - /** - * Empty Constructor. - */ + /** Empty Constructor. */ public Geo3dSpatialContextFactory() { this.binaryCodecClass = Geo3dBinaryCodec.class; this.shapeFactoryClass = Geo3dShapeFactory.class; diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/package-info.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/package-info.java index 7815318b530..c06ead93d64 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/package-info.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/package-info.java @@ -16,4 +16,4 @@ */ /** Spatial4j stuff that ideally belongs in Spatial4j (isn't related to Lucene). */ -package org.apache.lucene.spatial.spatial4j; \ No newline at end of file +package org.apache.lucene.spatial.spatial4j; diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/CachingDoubleValueSource.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/CachingDoubleValueSource.java index 7a63f3444c6..8ee189bfa90 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/CachingDoubleValueSource.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/CachingDoubleValueSource.java @@ -19,7 +19,6 @@ package org.apache.lucene.spatial.util; import java.io.IOException; import java.util.HashMap; import java.util.Map; - import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DoubleValues; import org.apache.lucene.search.DoubleValuesSource; @@ -27,8 +26,8 @@ import org.apache.lucene.search.Explanation; import org.apache.lucene.search.IndexSearcher; /** - * Caches the doubleVal of another value source in a HashMap - * so that it is computed only once. + * Caches the doubleVal of another value source in a HashMap so that it is computed only once. 
+ * * @lucene.internal */ public class CachingDoubleValueSource extends DoubleValuesSource { @@ -43,11 +42,12 @@ public class CachingDoubleValueSource extends DoubleValuesSource { @Override public String toString() { - return "Cached["+source.toString()+"]"; + return "Cached[" + source.toString() + "]"; } @Override - public DoubleValues getValues(LeafReaderContext readerContext, DoubleValues scores) throws IOException { + public DoubleValues getValues(LeafReaderContext readerContext, DoubleValues scores) + throws IOException { final int base = readerContext.docBase; final DoubleValues vals = source.getValues(readerContext, scores); return new DoubleValues() { @@ -70,7 +70,6 @@ public class CachingDoubleValueSource extends DoubleValuesSource { } int doc = -1; - }; } @@ -85,7 +84,8 @@ public class CachingDoubleValueSource extends DoubleValuesSource { } @Override - public Explanation explain(LeafReaderContext ctx, int docId, Explanation scoreExplanation) throws IOException { + public Explanation explain(LeafReaderContext ctx, int docId, Explanation scoreExplanation) + throws IOException { return source.explain(ctx, docId, scoreExplanation); } diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/DistanceToShapeValueSource.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/DistanceToShapeValueSource.java index 12d49a8801c..bc55b71f071 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/DistanceToShapeValueSource.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/DistanceToShapeValueSource.java @@ -17,7 +17,6 @@ package org.apache.lucene.spatial.util; import java.io.IOException; - import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DoubleValues; import org.apache.lucene.search.DoubleValuesSource; @@ -41,11 +40,11 @@ public class DistanceToShapeValueSource extends DoubleValuesSource { private final double multiplier; private final DistanceCalculator distCalc; - //TODO if DoubleValues returns NaN; will things be ok? + // TODO if DoubleValues returns NaN; will things be ok? 
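As an illustrative aside (a minimal sketch, not part of this patch): CachingDoubleValueSource, shown above, is typically wrapped around another DoubleValuesSource, such as a strategy's distance source, so that each document's value is computed only once even when it is read for both filtering and sorting. The sketch assumes the one-argument constructor implied by the "source" field above; the class name and the strategy/queryPoint arguments are hypothetical.

import org.apache.lucene.search.DoubleValuesSource;
import org.apache.lucene.search.Sort;
import org.apache.lucene.spatial.SpatialStrategy;
import org.apache.lucene.spatial.util.CachingDoubleValueSource;
import org.locationtech.spatial4j.shape.Point;

class CachingDistanceSortSketch {
  // Build an ascending distance sort; the caching wrapper avoids recomputing the
  // per-document distance when the same source is consulted more than once.
  static Sort distanceSort(SpatialStrategy strategy, Point queryPoint) {
    DoubleValuesSource distance = strategy.makeDistanceValueSource(queryPoint);
    DoubleValuesSource cached = new CachingDoubleValueSource(distance);
    return new Sort(cached.getSortField(false)); // false = ascending distance
  }
}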
private final double nullValue; - public DistanceToShapeValueSource(ShapeValuesSource shapeValueSource, Point queryPoint, - double multiplier, SpatialContext ctx) { + public DistanceToShapeValueSource( + ShapeValuesSource shapeValueSource, Point queryPoint, double multiplier, SpatialContext ctx) { this.shapeValueSource = shapeValueSource; this.queryPoint = queryPoint; this.multiplier = multiplier; @@ -55,25 +54,34 @@ public class DistanceToShapeValueSource extends DoubleValuesSource { @Override public String toString() { - return "distance(" + queryPoint + " to " + shapeValueSource.toString() + ")*" + multiplier + ")"; + return "distance(" + + queryPoint + + " to " + + shapeValueSource.toString() + + ")*" + + multiplier + + ")"; } @Override - public DoubleValues getValues(LeafReaderContext readerContext, DoubleValues scores) throws IOException { + public DoubleValues getValues(LeafReaderContext readerContext, DoubleValues scores) + throws IOException { final ShapeValues shapeValues = shapeValueSource.getValues(readerContext); - return DoubleValues.withDefault(new DoubleValues() { - @Override - public double doubleValue() throws IOException { - return distCalc.distance(queryPoint, shapeValues.value().getCenter()) * multiplier; - } + return DoubleValues.withDefault( + new DoubleValues() { + @Override + public double doubleValue() throws IOException { + return distCalc.distance(queryPoint, shapeValues.value().getCenter()) * multiplier; + } - @Override - public boolean advanceExact(int doc) throws IOException { - return shapeValues.advanceExact(doc); - } - }, nullValue); + @Override + public boolean advanceExact(int doc) throws IOException { + return shapeValues.advanceExact(doc); + } + }, + nullValue); } @Override diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/ReciprocalDoubleValuesSource.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/ReciprocalDoubleValuesSource.java index 4475a42ae32..3a0206f99c3 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/ReciprocalDoubleValuesSource.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/ReciprocalDoubleValuesSource.java @@ -19,16 +19,13 @@ package org.apache.lucene.spatial.util; import java.io.IOException; import java.util.Objects; - import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DoubleValues; import org.apache.lucene.search.DoubleValuesSource; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.IndexSearcher; -/** - * Transforms a DoubleValuesSource using the formula v = k / (v + k) - */ +/** Transforms a DoubleValuesSource using the formula v = k / (v + k) */ public class ReciprocalDoubleValuesSource extends DoubleValuesSource { private final double distToEdge; @@ -36,8 +33,9 @@ public class ReciprocalDoubleValuesSource extends DoubleValuesSource { /** * Creates a ReciprocalDoubleValuesSource - * @param distToEdge the value k in v = k / (v + k) - * @param input the input DoubleValuesSource to transform + * + * @param distToEdge the value k in v = k / (v + k) + * @param input the input DoubleValuesSource to transform */ public ReciprocalDoubleValuesSource(double distToEdge, DoubleValuesSource input) { this.distToEdge = distToEdge; @@ -75,10 +73,13 @@ public class ReciprocalDoubleValuesSource extends DoubleValuesSource { } @Override - public Explanation explain(LeafReaderContext ctx, int docId, Explanation scoreExplanation) throws IOException { + public Explanation explain(LeafReaderContext ctx, int 
docId, Explanation scoreExplanation) + throws IOException { Explanation expl = input.explain(ctx, docId, scoreExplanation); - return Explanation.match(recip(expl.getValue().doubleValue()), - distToEdge + " / (v + " + distToEdge + "), computed from:", expl); + return Explanation.match( + recip(expl.getValue().doubleValue()), + distToEdge + " / (v + " + distToEdge + "), computed from:", + expl); } @Override @@ -91,8 +92,7 @@ public class ReciprocalDoubleValuesSource extends DoubleValuesSource { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; ReciprocalDoubleValuesSource that = (ReciprocalDoubleValuesSource) o; - return Double.compare(that.distToEdge, distToEdge) == 0 && - Objects.equals(input, that.input); + return Double.compare(that.distToEdge, distToEdge) == 0 && Objects.equals(input, that.input); } @Override diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/ShapeAreaValueSource.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/ShapeAreaValueSource.java index 3cac762e603..d499e777fa5 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/ShapeAreaValueSource.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/ShapeAreaValueSource.java @@ -17,7 +17,6 @@ package org.apache.lucene.spatial.util; import java.io.IOException; - import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DoubleValues; import org.apache.lucene.search.DoubleValuesSource; @@ -31,16 +30,17 @@ import org.locationtech.spatial4j.shape.Shape; * The area of a Shape retrieved from an ShapeValuesSource * * @see Shape#getArea(org.locationtech.spatial4j.context.SpatialContext) - * * @lucene.experimental */ public class ShapeAreaValueSource extends DoubleValuesSource { private final ShapeValuesSource shapeValueSource; - private final SpatialContext ctx;//not part of identity; should be associated with shapeValueSource indirectly + // not part of identity; should be associated with shapeValueSource indirectly + private final SpatialContext ctx; private final boolean geoArea; private double multiplier; - public ShapeAreaValueSource(ShapeValuesSource shapeValueSource, SpatialContext ctx, boolean geoArea, double multiplier) { + public ShapeAreaValueSource( + ShapeValuesSource shapeValueSource, SpatialContext ctx, boolean geoArea, double multiplier) { this.shapeValueSource = shapeValueSource; this.ctx = ctx; this.geoArea = geoArea; @@ -53,19 +53,22 @@ public class ShapeAreaValueSource extends DoubleValuesSource { } @Override - public DoubleValues getValues(LeafReaderContext readerContext, DoubleValues scores) throws IOException { + public DoubleValues getValues(LeafReaderContext readerContext, DoubleValues scores) + throws IOException { final ShapeValues shapeValues = shapeValueSource.getValues(readerContext); - return DoubleValues.withDefault(new DoubleValues() { - @Override - public double doubleValue() throws IOException { - return shapeValues.value().getArea(geoArea ? ctx : null) * multiplier; - } + return DoubleValues.withDefault( + new DoubleValues() { + @Override + public double doubleValue() throws IOException { + return shapeValues.value().getArea(geoArea ? 
ctx : null) * multiplier; + } - @Override - public boolean advanceExact(int doc) throws IOException { - return shapeValues.advanceExact(doc); - } - }, 0); + @Override + public boolean advanceExact(int doc) throws IOException { + return shapeValues.advanceExact(doc); + } + }, + 0); } @Override @@ -102,5 +105,4 @@ public class ShapeAreaValueSource extends DoubleValuesSource { result = 31 * result + (geoArea ? 1 : 0); return result; } - } diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/ShapeFieldCache.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/ShapeFieldCache.java index e24cd683210..1d77b285d80 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/ShapeFieldCache.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/ShapeFieldCache.java @@ -16,17 +16,17 @@ */ package org.apache.lucene.spatial.util; -import org.locationtech.spatial4j.shape.Shape; - import java.util.ArrayList; import java.util.List; +import org.locationtech.spatial4j.shape.Shape; /** - * Bounded Cache of Shapes associated with docIds. Note, multiple Shapes can be - * associated with a given docId. - *
<p>
- * WARNING: This class holds the data in an extremely inefficient manner as all Points are in memory as objects and they - * are stored in many ArrayLists (one per document). So it works but doesn't scale. It will be replaced in the future. + * Bounded Cache of Shapes associated with docIds. Note, multiple Shapes can be associated with a + * given docId. + *
+ * <p>
    WARNING: This class holds the data in an extremely inefficient manner as all Points are in + * memory as objects and they are stored in many ArrayLists (one per document). So it works but + * doesn't scale. It will be replaced in the future. * * @lucene.internal */ @@ -35,20 +35,20 @@ public class ShapeFieldCache { public final int defaultLength; @SuppressWarnings({"unchecked", "rawtypes"}) - public ShapeFieldCache( int length, int defaultLength ) { + public ShapeFieldCache(int length, int defaultLength) { cache = new List[length]; - this.defaultLength= defaultLength; + this.defaultLength = defaultLength; } - public void add( int docid, T s ) { + public void add(int docid, T s) { List list = cache[docid]; - if( list == null ) { + if (list == null) { list = cache[docid] = new ArrayList<>(defaultLength); } - list.add( s ); + list.add(s); } - public List getShapes( int docid ) { + public List getShapes(int docid) { return cache[docid]; } } diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/ShapeFieldCacheDistanceValueSource.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/ShapeFieldCacheDistanceValueSource.java index 66ac3436605..25dc26dada0 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/ShapeFieldCacheDistanceValueSource.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/ShapeFieldCacheDistanceValueSource.java @@ -18,7 +18,6 @@ package org.apache.lucene.spatial.util; import java.io.IOException; import java.util.List; - import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DoubleValues; import org.apache.lucene.search.DoubleValuesSource; @@ -28,10 +27,9 @@ import org.locationtech.spatial4j.distance.DistanceCalculator; import org.locationtech.spatial4j.shape.Point; /** - * A DoubleValuesSource that returns the spatial distance - * between an input point and a document's points in - * {@link ShapeFieldCacheProvider}. The shortest distance is returned if a - * document has more than one point. + * A DoubleValuesSource that returns the spatial distance between an input point and a document's + * points in {@link ShapeFieldCacheProvider}. The shortest distance is returned if a document has + * more than one point. * * @lucene.internal */ @@ -42,8 +40,8 @@ public class ShapeFieldCacheDistanceValueSource extends DoubleValuesSource { private final ShapeFieldCacheProvider provider; private final double multiplier; - public ShapeFieldCacheDistanceValueSource(SpatialContext ctx, - ShapeFieldCacheProvider provider, Point from, double multiplier) { + public ShapeFieldCacheDistanceValueSource( + SpatialContext ctx, ShapeFieldCacheProvider provider, Point from, double multiplier) { this.ctx = ctx; this.from = from; this.provider = provider; @@ -52,37 +50,39 @@ public class ShapeFieldCacheDistanceValueSource extends DoubleValuesSource { @Override public String toString() { - return getClass().getSimpleName()+"("+provider+", "+from+")"; + return getClass().getSimpleName() + "(" + provider + ", " + from + ")"; } @Override - public DoubleValues getValues(LeafReaderContext readerContext, DoubleValues scores) throws IOException { + public DoubleValues getValues(LeafReaderContext readerContext, DoubleValues scores) + throws IOException { final double nullValue = (ctx.isGeo() ? 
180 * multiplier : Double.MAX_VALUE); - return DoubleValues.withDefault(new DoubleValues() { - private final ShapeFieldCache cache = - provider.getCache(readerContext.reader()); - private final Point from = ShapeFieldCacheDistanceValueSource.this.from; - private final DistanceCalculator calculator = ctx.getDistCalc(); + return DoubleValues.withDefault( + new DoubleValues() { + private final ShapeFieldCache cache = provider.getCache(readerContext.reader()); + private final Point from = ShapeFieldCacheDistanceValueSource.this.from; + private final DistanceCalculator calculator = ctx.getDistCalc(); - private List currentVals; + private List currentVals; - @Override - public double doubleValue() throws IOException { - double v = calculator.distance(from, currentVals.get(0)); - for (int i = 1; i < currentVals.size(); i++) { - v = Math.min(v, calculator.distance(from, currentVals.get(i))); - } - return v * multiplier; - } + @Override + public double doubleValue() throws IOException { + double v = calculator.distance(from, currentVals.get(0)); + for (int i = 1; i < currentVals.size(); i++) { + v = Math.min(v, calculator.distance(from, currentVals.get(i))); + } + return v * multiplier; + } - @Override - public boolean advanceExact(int doc) throws IOException { - currentVals = cache.getShapes(doc); - return currentVals != null; - } - }, nullValue); + @Override + public boolean advanceExact(int doc) throws IOException { + currentVals = cache.getShapes(doc); + return currentVals != null; + } + }, + nullValue); } @Override diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/ShapeFieldCacheProvider.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/ShapeFieldCacheProvider.java index bca73ccf4dc..11c725554c9 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/ShapeFieldCacheProvider.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/ShapeFieldCacheProvider.java @@ -16,21 +16,20 @@ */ package org.apache.lucene.spatial.util; -import org.locationtech.spatial4j.shape.Shape; -import org.apache.lucene.index.*; -import org.apache.lucene.search.DocIdSetIterator; -import org.apache.lucene.util.BytesRef; - import java.io.IOException; import java.util.WeakHashMap; import java.util.logging.Logger; +import org.apache.lucene.index.*; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.util.BytesRef; +import org.locationtech.spatial4j.shape.Shape; /** - * Provides access to a {@link ShapeFieldCache} for a given {@link org.apache.lucene.index.LeafReader}. + * Provides access to a {@link ShapeFieldCache} for a given {@link + * org.apache.lucene.index.LeafReader}. * - * If a Cache does not exist for the Reader, then it is built by iterating over - * the all terms for a given field, reconstructing the Shape from them, and adding - * them to the Cache. + *
<p>
    If a Cache does not exist for the Reader, then it is built by iterating over the all terms for + * a given field, reconstructing the Shape from them, and adding them to the Cache. * * @lucene.internal */ @@ -48,7 +47,7 @@ public abstract class ShapeFieldCacheProvider { this.defaultSize = defaultSize; } - protected abstract T readShape( BytesRef term ); + protected abstract T readShape(BytesRef term); public synchronized ShapeFieldCache getCache(LeafReader reader) throws IOException { ShapeFieldCache idx = sidx.get(reader); @@ -58,7 +57,7 @@ public abstract class ShapeFieldCacheProvider { long startTime = System.currentTimeMillis(); log.fine("Building Cache [" + reader.maxDoc() + "]"); - idx = new ShapeFieldCache<>(reader.maxDoc(),defaultSize); + idx = new ShapeFieldCache<>(reader.maxDoc(), defaultSize); int count = 0; PostingsEnum docs = null; Terms terms = reader.terms(shapeField); @@ -67,11 +66,11 @@ public abstract class ShapeFieldCacheProvider { BytesRef term = te.next(); while (term != null) { T shape = readShape(term); - if( shape != null ) { + if (shape != null) { docs = te.postings(docs, PostingsEnum.NONE); Integer docid = docs.nextDoc(); while (docid != DocIdSetIterator.NO_MORE_DOCS) { - idx.add( docid, shape ); + idx.add(docid, shape); docid = docs.nextDoc(); count++; } diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/ShapeValuesPredicate.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/ShapeValuesPredicate.java index 035c74e4bb1..a2c342afb31 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/ShapeValuesPredicate.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/ShapeValuesPredicate.java @@ -17,7 +17,6 @@ package org.apache.lucene.spatial.util; import java.io.IOException; - import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.SegmentCacheable; @@ -28,29 +27,29 @@ import org.apache.lucene.spatial.query.SpatialOperation; import org.locationtech.spatial4j.shape.Shape; /** - * Compares a shape from a provided {@link ShapeValuesSource} with a given Shape and sees - * if it matches a given {@link SpatialOperation} (the predicate). + * Compares a shape from a provided {@link ShapeValuesSource} with a given Shape and sees if it + * matches a given {@link SpatialOperation} (the predicate). * - * Consumers should call {@link #iterator(LeafReaderContext, DocIdSetIterator)} to obtain a - * {@link TwoPhaseIterator} over a particular {@link DocIdSetIterator}. The initial DocIdSetIterator + *
<p>
    Consumers should call {@link #iterator(LeafReaderContext, DocIdSetIterator)} to obtain a + * {@link TwoPhaseIterator} over a particular {@link DocIdSetIterator}. The initial DocIdSetIterator * will be used as the approximation, and the {@link SpatialOperation} comparison will only be * performed in {@link TwoPhaseIterator#matches()} * * @lucene.experimental */ public class ShapeValuesPredicate implements SegmentCacheable { - private final ShapeValuesSource shapeValuesource;//the left hand side + private final ShapeValuesSource shapeValuesource; // the left hand side private final SpatialOperation op; - private final Shape queryShape;//the right hand side (constant) + private final Shape queryShape; // the right hand side (constant) /** - * * @param shapeValuesource Must yield {@link Shape} instances from its objectVal(doc). If null - * then the result is false. This is the left-hand (indexed) side. + * then the result is false. This is the left-hand (indexed) side. * @param op the predicate * @param queryShape The shape on the right-hand (query) side. */ - public ShapeValuesPredicate(ShapeValuesSource shapeValuesource, SpatialOperation op, Shape queryShape) { + public ShapeValuesPredicate( + ShapeValuesSource shapeValuesource, SpatialOperation op, Shape queryShape) { this.shapeValuesource = shapeValuesource; this.op = op; this.queryShape = queryShape; @@ -61,12 +60,14 @@ public class ShapeValuesPredicate implements SegmentCacheable { return shapeValuesource + " " + op + " " + queryShape; } - public TwoPhaseIterator iterator(LeafReaderContext ctx, DocIdSetIterator approximation) throws IOException { + public TwoPhaseIterator iterator(LeafReaderContext ctx, DocIdSetIterator approximation) + throws IOException { final ShapeValues shapeValues = shapeValuesource.getValues(ctx); return new TwoPhaseIterator(approximation) { @Override public boolean matches() throws IOException { - return shapeValues.advanceExact(approximation.docID()) && op.evaluate(shapeValues.value(), queryShape); + return shapeValues.advanceExact(approximation.docID()) + && op.evaluate(shapeValues.value(), queryShape); } @Override diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/package-info.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/package-info.java index 21f90eba387..a167ede858f 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/package-info.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/package-info.java @@ -16,4 +16,4 @@ */ /** Advanced spatial utilities. */ -package org.apache.lucene.spatial.util; \ No newline at end of file +package org.apache.lucene.spatial.util; diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/vector/DistanceValueSource.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/vector/DistanceValueSource.java index 80d61f9fa12..31cfa810ab6 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/vector/DistanceValueSource.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/vector/DistanceValueSource.java @@ -17,7 +17,6 @@ package org.apache.lucene.spatial.vector; import java.io.IOException; - import org.apache.lucene.index.DocValues; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; @@ -40,9 +39,7 @@ public class DistanceValueSource extends DoubleValuesSource { private final double multiplier; private final double nullValue; - /** - * Constructor. - */ + /** Constructor. 
*/ public DistanceValueSource(PointVectorStrategy strategy, Point from, double multiplier) { this.strategy = strategy; this.from = from; @@ -50,42 +47,40 @@ public class DistanceValueSource extends DoubleValuesSource { this.nullValue = 180 * multiplier; } - /** - * Returns the ValueSource description. - */ + /** Returns the ValueSource description. */ @Override public String toString() { - return "DistanceValueSource("+strategy+", "+from+")"; + return "DistanceValueSource(" + strategy + ", " + from + ")"; } - /** - * Returns the FunctionValues used by the function query. - */ + /** Returns the FunctionValues used by the function query. */ @Override - public DoubleValues getValues(LeafReaderContext readerContext, DoubleValues scores) throws IOException { + public DoubleValues getValues(LeafReaderContext readerContext, DoubleValues scores) + throws IOException { LeafReader reader = readerContext.reader(); final NumericDocValues ptX = DocValues.getNumeric(reader, strategy.getFieldNameX()); final NumericDocValues ptY = DocValues.getNumeric(reader, strategy.getFieldNameY()); - return DoubleValues.withDefault(new DoubleValues() { + return DoubleValues.withDefault( + new DoubleValues() { - private final Point from = DistanceValueSource.this.from; - private final DistanceCalculator calculator = strategy.getSpatialContext().getDistCalc(); + private final Point from = DistanceValueSource.this.from; + private final DistanceCalculator calculator = strategy.getSpatialContext().getDistCalc(); - @Override - public double doubleValue() throws IOException { - double x = Double.longBitsToDouble(ptX.longValue()); - double y = Double.longBitsToDouble(ptY.longValue()); - return calculator.distance(from, x, y) * multiplier; - } + @Override + public double doubleValue() throws IOException { + double x = Double.longBitsToDouble(ptX.longValue()); + double y = Double.longBitsToDouble(ptY.longValue()); + return calculator.distance(from, x, y) * multiplier; + } - @Override - public boolean advanceExact(int doc) throws IOException { - return ptX.advanceExact(doc) && ptY.advanceExact(doc); - } - - }, nullValue); + @Override + public boolean advanceExact(int doc) throws IOException { + return ptX.advanceExact(doc) && ptY.advanceExact(doc); + } + }, + nullValue); } @Override diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/vector/PointVectorStrategy.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/vector/PointVectorStrategy.java index 845c4b4992b..376b39ecddf 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/vector/PointVectorStrategy.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/vector/PointVectorStrategy.java @@ -18,7 +18,6 @@ package org.apache.lucene.spatial.vector; import java.io.IOException; import java.util.Objects; - import org.apache.lucene.document.DoubleDocValuesField; import org.apache.lucene.document.DoublePoint; import org.apache.lucene.document.Field; @@ -53,53 +52,47 @@ import org.locationtech.spatial4j.shape.Rectangle; import org.locationtech.spatial4j.shape.Shape; /** - * Simple {@link SpatialStrategy} which represents Points in two numeric fields. - * The Strategy's best feature is decent distance sort. + * Simple {@link SpatialStrategy} which represents Points in two numeric fields. The Strategy's best + * feature is decent distance sort. + * + *

<p><b>Characteristics:</b>
+ *
 *
- * <p>
- * <b>Characteristics:</b>
- * <br>
 * <ul>
- * <li>Only indexes points; just one per field value.</li>
- * <li>Can query by a rectangle or circle.</li>
- * <li>{@link
- * org.apache.lucene.spatial.query.SpatialOperation#Intersects} and {@link
- * SpatialOperation#IsWithin} is supported.</li>
- * <li>Requires DocValues for
- * {@link #makeDistanceValueSource(org.locationtech.spatial4j.shape.Point)} and for
- * searching with a Circle.</li>
+ *   <li>Only indexes points; just one per field value.
+ *   <li>Can query by a rectangle or circle.
+ *   <li>{@link org.apache.lucene.spatial.query.SpatialOperation#Intersects} and {@link
+ *       SpatialOperation#IsWithin} is supported.
+ *   <li>Requires DocValues for {@link
+ *       #makeDistanceValueSource(org.locationtech.spatial4j.shape.Point)} and for searching with a
+ *       Circle.
 * </ul>
 *
- * <p>
- * <b>Implementation:</b>
- * <p>
- * This is a simple Strategy. Search works with a pair of range queries on two {@link DoublePoint}s representing
- * x & y fields. A Circle query does the same bbox query but adds a
- * ValueSource filter on
- * {@link #makeDistanceValueSource(org.locationtech.spatial4j.shape.Point)}.
- *
- * One performance shortcoming with this strategy is that a scenario involving
- * both a search using a Circle and sort will result in calculations for the
- * spatial distance being done twice -- once for the filter and second for the
- * sort.
+ * <p><b>Implementation:</b>
+ *
+ * <p>This is a simple Strategy. Search works with a pair of range queries on two {@link
+ * DoublePoint}s representing x & y fields. A Circle query does the same bbox query but adds a
+ * ValueSource filter on {@link #makeDistanceValueSource(org.locationtech.spatial4j.shape.Point)}.
+ *
+ * <p>
    One performance shortcoming with this strategy is that a scenario involving both a search + * using a Circle and sort will result in calculations for the spatial distance being done twice -- + * once for the filter and second for the sort. * * @lucene.experimental */ public class PointVectorStrategy extends SpatialStrategy { - // note: we use a FieldType to articulate the options we want on the field. We don't use it as-is with a Field, we + // note: we use a FieldType to articulate the options we want on the field. We don't use it as-is + // with a Field, we // create more than one Field. - /** - * pointValues, docValues, and nothing else. - */ + /** pointValues, docValues, and nothing else. */ public static FieldType DEFAULT_FIELDTYPE; static { // Default: pointValues + docValues FieldType type = new FieldType(); - type.setDimensions(1, Double.BYTES);//pointValues (assume Double) - type.setDocValuesType(DocValuesType.NUMERIC);//docValues + type.setDimensions(1, Double.BYTES); // pointValues (assume Double) + type.setDocValuesType(DocValuesType.NUMERIC); // docValues type.setStored(false); type.freeze(); DEFAULT_FIELDTYPE = type; @@ -117,21 +110,22 @@ public class PointVectorStrategy extends SpatialStrategy { private final boolean hasPointVals; /** - * Create a new {@link PointVectorStrategy} instance that uses {@link DoublePoint} and {@link DoublePoint#newRangeQuery} + * Create a new {@link PointVectorStrategy} instance that uses {@link DoublePoint} and {@link + * DoublePoint#newRangeQuery} */ public static PointVectorStrategy newInstance(SpatialContext ctx, String fieldNamePrefix) { return new PointVectorStrategy(ctx, fieldNamePrefix, DEFAULT_FIELDTYPE); } /** - * Create a new instance configured with the provided FieldType options. See {@link #DEFAULT_FIELDTYPE}. - * a field type is used to articulate the desired options (namely pointValues, docValues, stored). Legacy numerics - * is configurable this way too. + * Create a new instance configured with the provided FieldType options. See {@link + * #DEFAULT_FIELDTYPE}. a field type is used to articulate the desired options (namely + * pointValues, docValues, stored). Legacy numerics is configurable this way too. */ public PointVectorStrategy(SpatialContext ctx, String fieldNamePrefix, FieldType fieldType) { super(ctx, fieldNamePrefix); - this.fieldNameX = fieldNamePrefix+SUFFIX_X; - this.fieldNameY = fieldNamePrefix+SUFFIX_Y; + this.fieldNameX = fieldNamePrefix + SUFFIX_X; + this.fieldNameY = fieldNamePrefix + SUFFIX_Y; int numPairs = 0; if ((this.hasStored = fieldType.stored())) { @@ -146,7 +140,6 @@ public class PointVectorStrategy extends SpatialStrategy { this.fieldsLen = numPairs * 2; } - String getFieldNameX() { return fieldNameX; } @@ -157,8 +150,7 @@ public class PointVectorStrategy extends SpatialStrategy { @Override public Field[] createIndexableFields(Shape shape) { - if (shape instanceof Point) - return createIndexableFields((Point) shape); + if (shape instanceof Point) return createIndexableFields((Point) shape); throw new UnsupportedOperationException("Can only index Point, not " + shape); } @@ -189,35 +181,36 @@ public class PointVectorStrategy extends SpatialStrategy { @Override public Query makeQuery(SpatialArgs args) { - if(! 
SpatialOperation.is( args.getOperation(), - SpatialOperation.Intersects, - SpatialOperation.IsWithin )) + if (!SpatialOperation.is( + args.getOperation(), SpatialOperation.Intersects, SpatialOperation.IsWithin)) throw new UnsupportedSpatialOperation(args.getOperation()); Shape shape = args.getShape(); if (shape instanceof Rectangle) { Rectangle bbox = (Rectangle) shape; return new ConstantScoreQuery(makeWithin(bbox)); } else if (shape instanceof Circle) { - Circle circle = (Circle)shape; + Circle circle = (Circle) shape; Rectangle bbox = circle.getBoundingBox(); - return new DistanceRangeQuery(makeWithin(bbox), makeDistanceValueSource(circle.getCenter()), circle.getRadius()); + return new DistanceRangeQuery( + makeWithin(bbox), makeDistanceValueSource(circle.getCenter()), circle.getRadius()); } else { - throw new UnsupportedOperationException("Only Rectangles and Circles are currently supported, " + - "found [" + shape.getClass() + "]");//TODO + throw new UnsupportedOperationException( + "Only Rectangles and Circles are currently supported, " + + "found [" + + shape.getClass() + + "]"); // TODO } } - /** - * Constructs a query to retrieve documents that fully contain the input envelope. - */ + /** Constructs a query to retrieve documents that fully contain the input envelope. */ private Query makeWithin(Rectangle bbox) { BooleanQuery.Builder bq = new BooleanQuery.Builder(); BooleanClause.Occur MUST = BooleanClause.Occur.MUST; if (bbox.getCrossesDateLine()) { - //use null as performance trick since no data will be beyond the world bounds - bq.add(rangeQuery(fieldNameX, null/*-180*/, bbox.getMaxX()), BooleanClause.Occur.SHOULD ); - bq.add(rangeQuery(fieldNameX, bbox.getMinX(), null/*+180*/), BooleanClause.Occur.SHOULD ); - bq.setMinimumNumberShouldMatch(1);//must match at least one of the SHOULD + // use null as performance trick since no data will be beyond the world bounds + bq.add(rangeQuery(fieldNameX, null /*-180*/, bbox.getMaxX()), BooleanClause.Occur.SHOULD); + bq.add(rangeQuery(fieldNameX, bbox.getMinX(), null /*+180*/), BooleanClause.Occur.SHOULD); + bq.setMinimumNumberShouldMatch(1); // must match at least one of the SHOULD } else { bq.add(rangeQuery(fieldNameX, bbox.getMinX(), bbox.getMaxX()), MUST); } @@ -226,8 +219,8 @@ public class PointVectorStrategy extends SpatialStrategy { } /** - * Returns a numeric range query based on FieldType - * {@link DoublePoint#newRangeQuery} is used for indexes created using {@link DoublePoint} fields + * Returns a numeric range query based on FieldType {@link DoublePoint#newRangeQuery} is used for + * indexes created using {@link DoublePoint} fields */ private Query rangeQuery(String fieldName, Double min, Double max) { if (hasPointVals) { @@ -240,9 +233,8 @@ public class PointVectorStrategy extends SpatialStrategy { } return DoublePoint.newRangeQuery(fieldName, min, max); - } - //TODO try doc-value range query? + // TODO try doc-value range query? 
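// Reached only when the FieldType was configured without pointValues; a
// doc-values-only range query is not implemented here (see the TODO above).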
throw new UnsupportedOperationException("An index is required for this operation."); } @@ -261,8 +253,7 @@ public class PointVectorStrategy extends SpatialStrategy { @Override public Query rewrite(IndexReader reader) throws IOException { Query rewritten = inner.rewrite(reader); - if (rewritten == inner) - return this; + if (rewritten == inner) return this; return new DistanceRangeQuery(rewritten, distanceSource, limit); } @@ -272,27 +263,28 @@ public class PointVectorStrategy extends SpatialStrategy { } @Override - public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) + throws IOException { Weight w = inner.createWeight(searcher, scoreMode, 1f); return new ConstantScoreWeight(this, boost) { @Override public Scorer scorer(LeafReaderContext context) throws IOException { Scorer in = w.scorer(context); - if (in == null) - return null; + if (in == null) return null; DoubleValues v = distanceSource.getValues(context, DoubleValuesSource.fromScorer(in)); DocIdSetIterator approximation = in.iterator(); - TwoPhaseIterator twoPhase = new TwoPhaseIterator(approximation) { - @Override - public boolean matches() throws IOException { - return v.advanceExact(approximation.docID()) && v.doubleValue() <= limit; - } + TwoPhaseIterator twoPhase = + new TwoPhaseIterator(approximation) { + @Override + public boolean matches() throws IOException { + return v.advanceExact(approximation.docID()) && v.doubleValue() <= limit; + } - @Override - public float matchCost() { - return 100; // distance calculation can be heavy! - } - }; + @Override + public float matchCost() { + return 100; // distance calculation can be heavy! + } + }; return new ConstantScoreScorer(this, score(), scoreMode, twoPhase); } @@ -300,13 +292,18 @@ public class PointVectorStrategy extends SpatialStrategy { public boolean isCacheable(LeafReaderContext ctx) { return distanceSource.isCacheable(ctx); } - }; } @Override public String toString(String field) { - return "DistanceRangeQuery(" + inner.toString(field) + "; " + distanceSource.toString() + " < " + limit + ")"; + return "DistanceRangeQuery(" + + inner.toString(field) + + "; " + + distanceSource.toString() + + " < " + + limit + + ")"; } @Override @@ -314,8 +311,9 @@ public class PointVectorStrategy extends SpatialStrategy { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; DistanceRangeQuery that = (DistanceRangeQuery) o; - return Objects.equals(inner, that.inner) && - Objects.equals(distanceSource, that.distanceSource) && limit == that.limit; + return Objects.equals(inner, that.inner) + && Objects.equals(distanceSource, that.distanceSource) + && limit == that.limit; } @Override diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/vector/package-info.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/vector/package-info.java index f8dffe25dd1..993e6b3f07f 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/vector/package-info.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/vector/package-info.java @@ -15,7 +15,5 @@ * limitations under the License. */ -/** - * Spatial strategy that uses two fields. - */ +/** Spatial strategy that uses two fields. 
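 * (The two fields hold a point's x and y coordinates; see PointVectorStrategy in this package.)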
*/ package org.apache.lucene.spatial.vector; diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/SpatialExample.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/SpatialExample.java index f01c8199a39..f406a7b4946 100644 --- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/SpatialExample.java +++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/SpatialExample.java @@ -17,7 +17,6 @@ package org.apache.lucene.spatial; import java.io.IOException; - import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.NumericDocValuesField; @@ -42,20 +41,16 @@ import org.apache.lucene.spatial.query.SpatialArgsParser; import org.apache.lucene.spatial.query.SpatialOperation; import org.apache.lucene.store.ByteBuffersDirectory; import org.apache.lucene.store.Directory; - import org.apache.lucene.util.LuceneTestCase; import org.locationtech.spatial4j.context.SpatialContext; import org.locationtech.spatial4j.distance.DistanceUtils; import org.locationtech.spatial4j.shape.Point; import org.locationtech.spatial4j.shape.Shape; -/** - * This class serves as example code to show how to use the Lucene spatial - * module. - */ +/** This class serves as example code to show how to use the Lucene spatial module. */ public class SpatialExample extends LuceneTestCase { - //Note: Test invoked via TestTestFramework.spatialExample() + // Note: Test invoked via TestTestFramework.spatialExample() public static void main(String[] args) throws Exception { new SpatialExample().test(); @@ -68,32 +63,31 @@ public class SpatialExample extends LuceneTestCase { } /** - * The Spatial4j {@link SpatialContext} is a sort of global-ish singleton - * needed by Lucene spatial. It's a facade to the rest of Spatial4j, acting - * as a factory for {@link Shape}s and provides access to reading and writing - * them from Strings. + * The Spatial4j {@link SpatialContext} is a sort of global-ish singleton needed by Lucene + * spatial. It's a facade to the rest of Spatial4j, acting as a factory for {@link Shape}s and + * provides access to reading and writing them from Strings. */ - private SpatialContext ctx;//"ctx" is the conventional variable name + private SpatialContext ctx; // "ctx" is the conventional variable name /** - * The Lucene spatial {@link SpatialStrategy} encapsulates an approach to - * indexing and searching shapes, and providing distance values for them. - * It's a simple API to unify different approaches. You might use more than - * one strategy for a shape as each strategy has its strengths and weaknesses. - *
<p>
- * Note that these are initialized with a field name. + * The Lucene spatial {@link SpatialStrategy} encapsulates an approach to indexing and searching + * shapes, and providing distance values for them. It's a simple API to unify different + * approaches. You might use more than one strategy for a shape as each strategy has its strengths + * and weaknesses. + *
+ * <p>
    Note that these are initialized with a field name. */ private SpatialStrategy strategy; private Directory directory; protected void init() { - //Typical geospatial context + // Typical geospatial context // These can also be constructed from SpatialContextFactory this.ctx = SpatialContext.GEO; - int maxLevels = 11;//results in sub-meter precision for geohash - //TODO demo lookup by detail distance + int maxLevels = 11; // results in sub-meter precision for geohash + // TODO demo lookup by detail distance // This can also be constructed from SpatialPrefixTreeFactory SpatialPrefixTree grid = new GeohashPrefixTree(ctx, maxLevels); @@ -106,16 +100,16 @@ public class SpatialExample extends LuceneTestCase { IndexWriterConfig iwConfig = new IndexWriterConfig(null); IndexWriter indexWriter = new IndexWriter(directory, iwConfig); - //Spatial4j is x-y order for arguments - indexWriter.addDocument(newSampleDocument( - 2, ctx.getShapeFactory().pointXY(-80.93, 33.77))); + // Spatial4j is x-y order for arguments + indexWriter.addDocument(newSampleDocument(2, ctx.getShapeFactory().pointXY(-80.93, 33.77))); - //Spatial4j has a WKT parser which is also "x y" order - indexWriter.addDocument(newSampleDocument( - 4, ctx.readShapeFromWkt("POINT(60.9289094 -50.7693246)"))); + // Spatial4j has a WKT parser which is also "x y" order + indexWriter.addDocument( + newSampleDocument(4, ctx.readShapeFromWkt("POINT(60.9289094 -50.7693246)"))); - indexWriter.addDocument(newSampleDocument( - 20, ctx.getShapeFactory().pointXY(0.1,0.1), ctx.getShapeFactory().pointXY(0, 0))); + indexWriter.addDocument( + newSampleDocument( + 20, ctx.getShapeFactory().pointXY(0.1, 0.1), ctx.getShapeFactory().pointXY(0, 0))); indexWriter.close(); } @@ -124,16 +118,16 @@ public class SpatialExample extends LuceneTestCase { Document doc = new Document(); doc.add(new StoredField("id", id)); doc.add(new NumericDocValuesField("id", id)); - //Potentially more than one shape in this field is supported by some + // Potentially more than one shape in this field is supported by some // strategies; see the javadocs of the SpatialStrategy impl to see. 
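// A single shape can also expand to several low-level fields (PointVectorStrategy,
// for instance, emits separate x and y fields per point), so add every Field
// the strategy returns.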
for (Shape shape : shapes) { for (Field f : strategy.createIndexableFields(shape)) { doc.add(f); } - //store it too; the format is up to you + // store it too; the format is up to you // (assume point in this example) Point pt = (Point) shape; - doc.add(new StoredField(strategy.getFieldName(), pt.getX()+" "+pt.getY())); + doc.add(new StoredField(strategy.getFieldName(), pt.getX() + " " + pt.getY())); } return doc; @@ -144,60 +138,70 @@ public class SpatialExample extends LuceneTestCase { IndexSearcher indexSearcher = new IndexSearcher(indexReader); Sort idSort = new Sort(new SortField("id", SortField.Type.INT)); - //--Filter by circle (<= distance from a point) + // --Filter by circle (<= distance from a point) { - //Search with circle - //note: SpatialArgs can be parsed from a string - SpatialArgs args = new SpatialArgs(SpatialOperation.Intersects, - ctx.getShapeFactory().circle(-80.0, 33.0, DistanceUtils.dist2Degrees(200, DistanceUtils.EARTH_MEAN_RADIUS_KM))); + // Search with circle + // note: SpatialArgs can be parsed from a string + SpatialArgs args = + new SpatialArgs( + SpatialOperation.Intersects, + ctx.getShapeFactory() + .circle( + -80.0, + 33.0, + DistanceUtils.dist2Degrees(200, DistanceUtils.EARTH_MEAN_RADIUS_KM))); Query query = strategy.makeQuery(args); TopDocs docs = indexSearcher.search(query, 10, idSort); assertDocMatchedIds(indexSearcher, docs, 2); - //Now, lets get the distance for the 1st doc via computing from stored point value: + // Now, lets get the distance for the 1st doc via computing from stored point value: // (this computation is usually not redundant) Document doc1 = indexSearcher.doc(docs.scoreDocs[0].doc); String doc1Str = doc1.getField(strategy.getFieldName()).stringValue(); - //assume doc1Str is "x y" as written in newSampleDocument() + // assume doc1Str is "x y" as written in newSampleDocument() int spaceIdx = doc1Str.indexOf(' '); double x = Double.parseDouble(doc1Str.substring(0, spaceIdx)); - double y = Double.parseDouble(doc1Str.substring(spaceIdx+1)); + double y = Double.parseDouble(doc1Str.substring(spaceIdx + 1)); double doc1DistDEG = ctx.calcDistance(args.getShape().getCenter(), x, y); - assertEquals(121.6d, DistanceUtils.degrees2Dist(doc1DistDEG, DistanceUtils.EARTH_MEAN_RADIUS_KM), 0.1); - //or more simply: + assertEquals( + 121.6d, DistanceUtils.degrees2Dist(doc1DistDEG, DistanceUtils.EARTH_MEAN_RADIUS_KM), 0.1); + // or more simply: assertEquals(121.6d, doc1DistDEG * DistanceUtils.DEG_TO_KM, 0.1); } - //--Match all, order by distance ascending + // --Match all, order by distance ascending { Point pt = ctx.getShapeFactory().pointXY(60, -50); - DoubleValuesSource valueSource = strategy.makeDistanceValueSource(pt, DistanceUtils.DEG_TO_KM);//the distance (in km) - Sort distSort = new Sort(valueSource.getSortField(false)).rewrite(indexSearcher);//false=asc dist + DoubleValuesSource valueSource = + strategy.makeDistanceValueSource(pt, DistanceUtils.DEG_TO_KM); // the distance (in km) + Sort distSort = + new Sort(valueSource.getSortField(false)).rewrite(indexSearcher); // false=asc dist TopDocs docs = indexSearcher.search(new MatchAllDocsQuery(), 10, distSort); assertDocMatchedIds(indexSearcher, docs, 4, 20, 2); - //To get the distance, we could compute from stored values like earlier. + // To get the distance, we could compute from stored values like earlier. // However in this example we sorted on it, and the distance will get // computed redundantly. If the distance is only needed for the top-X // search results then that's not a big deal. 
Alternatively, try wrapping // the ValueSource with CachingDoubleValueSource then retrieve the value // from the ValueSource now. See LUCENE-4541 for an example. } - //demo arg parsing + // demo arg parsing { - SpatialArgs args = new SpatialArgs(SpatialOperation.Intersects, - ctx.getShapeFactory().circle(-80.0, 33.0, 1)); + SpatialArgs args = + new SpatialArgs( + SpatialOperation.Intersects, ctx.getShapeFactory().circle(-80.0, 33.0, 1)); SpatialArgs args2 = new SpatialArgsParser().parse("Intersects(BUFFER(POINT(-80 33),1))", ctx); - assertEquals(args.toString(),args2.toString()); + assertEquals(args.toString(), args2.toString()); } indexReader.close(); } - private void assertDocMatchedIds(IndexSearcher indexSearcher, TopDocs docs, int... ids) throws IOException { + private void assertDocMatchedIds(IndexSearcher indexSearcher, TopDocs docs, int... ids) + throws IOException { assert docs.totalHits.relation == Relation.EQUAL_TO; int[] gotIds = new int[Math.toIntExact(docs.totalHits.value)]; for (int i = 0; i < gotIds.length; i++) { gotIds[i] = indexSearcher.doc(docs.scoreDocs[i].doc).getField("id").numericValue().intValue(); } - assertArrayEquals(ids,gotIds); + assertArrayEquals(ids, gotIds); } - } diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/SpatialMatchConcern.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/SpatialMatchConcern.java index e995ee18987..336a4c34fe3 100644 --- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/SpatialMatchConcern.java +++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/SpatialMatchConcern.java @@ -18,14 +18,15 @@ package org.apache.lucene.spatial; public class SpatialMatchConcern { public final boolean orderIsImportant; - public final boolean resultsAreSuperset; // if the strategy can not give exact answers, but used to limit results + public final boolean + resultsAreSuperset; // if the strategy can not give exact answers, but used to limit results - private SpatialMatchConcern( boolean order, boolean superset ) { + private SpatialMatchConcern(boolean order, boolean superset) { this.orderIsImportant = order; this.resultsAreSuperset = superset; } - public static final SpatialMatchConcern EXACT = new SpatialMatchConcern( true, false ); - public static final SpatialMatchConcern FILTER = new SpatialMatchConcern( false, false ); - public static final SpatialMatchConcern SUPERSET = new SpatialMatchConcern( false, true ); + public static final SpatialMatchConcern EXACT = new SpatialMatchConcern(true, false); + public static final SpatialMatchConcern FILTER = new SpatialMatchConcern(false, false); + public static final SpatialMatchConcern SUPERSET = new SpatialMatchConcern(false, true); } diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/SpatialTestCase.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/SpatialTestCase.java index 3d33fc38ff3..3a3e69a0b6f 100644 --- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/SpatialTestCase.java +++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/SpatialTestCase.java @@ -16,11 +16,12 @@ */ package org.apache.lucene.spatial; +import static com.carrotsearch.randomizedtesting.RandomizedTest.*; + import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.logging.Logger; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; @@ -39,10 +40,6 @@ import org.locationtech.spatial4j.distance.DistanceUtils; 
import org.locationtech.spatial4j.shape.Point; import org.locationtech.spatial4j.shape.Rectangle; -import static com.carrotsearch.randomizedtesting.RandomizedTest.randomDouble; -import static com.carrotsearch.randomizedtesting.RandomizedTest.randomGaussian; -import static com.carrotsearch.randomizedtesting.RandomizedTest.randomIntBetween; - /** A base test class for spatial lucene. It's mostly Lucene generic. */ @SuppressSysoutChecks(bugUrl = "These tests use JUL extensively.") public abstract class SpatialTestCase extends LuceneTestCase { @@ -55,14 +52,16 @@ public abstract class SpatialTestCase extends LuceneTestCase { private Analyzer analyzer; protected IndexSearcher indexSearcher; - protected SpatialContext ctx;//subclass must initialize + protected SpatialContext ctx; // subclass must initialize @Override public void setUp() throws Exception { super.setUp(); directory = newDirectory(); analyzer = new MockAnalyzer(random()); - indexWriter = new RandomIndexWriter(random(), directory, LuceneTestCase.newIndexWriterConfig(random(), analyzer)); + indexWriter = + new RandomIndexWriter( + random(), directory, LuceneTestCase.newIndexWriterConfig(random(), analyzer)); indexReader = indexWriter.getReader(); indexSearcher = newSearcher(indexReader); } @@ -118,9 +117,10 @@ public abstract class SpatialTestCase extends LuceneTestCase { protected Point randomPoint() { final Rectangle WB = ctx.getWorldBounds(); - return ctx.getShapeFactory().pointXY( - randomIntBetween((int) WB.getMinX(), (int) WB.getMaxX()), - randomIntBetween((int) WB.getMinY(), (int) WB.getMaxY())); + return ctx.getShapeFactory() + .pointXY( + randomIntBetween((int) WB.getMinX(), (int) WB.getMaxX()), + randomIntBetween((int) WB.getMinY(), (int) WB.getMaxY())); } protected Rectangle randomRectangle() { @@ -152,11 +152,12 @@ public abstract class SpatialTestCase extends LuceneTestCase { int intBoundLen = intBoundEnd - intBoundStart; int newLen = (int) randomGaussianMeanMax(intBoundLen / 16.0, intBoundLen); int newStart = intBoundStart + randomIntBetween(0, intBoundLen - newLen); - return new double[]{newStart, newLen}; + return new double[] {newStart, newLen}; } else { // (no int rounding) double newLen = randomGaussianMeanMax(boundLen / 16, boundLen); - double newStart = boundStart + (boundLen - newLen == 0 ? 0 : (randomDouble() % (boundLen - newLen))); - return new double[]{newStart, newLen}; + double newStart = + boundStart + (boundLen - newLen == 0 ? 0 : (randomDouble() % (boundLen - newLen))); + return new double[] {newStart, newLen}; } } @@ -166,11 +167,10 @@ public abstract class SpatialTestCase extends LuceneTestCase { } /** - * Within one standard deviation (68% of the time) the result is "close" to - * mean. By "close": when greater than mean, it's the lesser of 2*mean or half - * way to max, when lesser than mean, it's the greater of max-2*mean or half - * way to 0. The other 32% of the time it's in the rest of the range, touching - * either 0 or max but never exceeding. + * Within one standard deviation (68% of the time) the result is "close" to mean. By "close": when + * greater than mean, it's the lesser of 2*mean or half way to max, when lesser than mean, it's + * the greater of max-2*mean or half way to 0. The other 32% of the time it's in the rest of the + * range, touching either 0 or max but never exceeding. 
*/ private double randomGaussianMeanMax(double mean, double max) { // DWS: I verified the results empirically @@ -186,16 +186,19 @@ public abstract class SpatialTestCase extends LuceneTestCase { // pivot is the distance from mean2 towards max where the boundary of // 1 standard deviation alters the calculation double pivotMax = max - mean2; - double pivot = Math.min(mean2, pivotMax / 2);//from 0 to max-mean2 + double pivot = Math.min(mean2, pivotMax / 2); // from 0 to max-mean2 assert pivot >= 0 && pivotMax >= pivot && g >= 0; double pivotResult; - if (g <= 1) + if (g <= 1) { pivotResult = pivot * g; - else + } else { pivotResult = Math.min(pivotMax, (g - 1) * (pivotMax - pivot) + pivot); + } double result = mean + flip * pivotResult; - return (result < 0 || result > max) ? mean : result; // due this due to computational numerical precision + return (result < 0 || result > max) + ? mean + : result; // we do this due to computational numerical precision } protected static class SearchResults { @@ -211,7 +214,7 @@ public abstract class SpatialTestCase extends LuceneTestCase { public StringBuilder toDebugString() { StringBuilder str = new StringBuilder(); str.append("found: ").append(numFound).append('['); - for(SearchResult r : results) { + for (SearchResult r : results) { String id = r.getId(); str.append(id).append(", "); } @@ -221,7 +224,7 @@ @Override public String toString() { - return "[found:"+numFound+" "+results+"]"; + return "[found:" + numFound + " " + results + "]"; } } @@ -241,7 +244,7 @@ @Override public String toString() { - return "["+score+"="+document+"]"; + return "[" + score + "=" + document + "]"; } } } diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/SpatialTestData.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/SpatialTestData.java index 06a68ce72e9..6ef5fa9e596 100644 --- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/SpatialTestData.java +++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/SpatialTestData.java @@ -16,9 +16,6 @@ */ package org.apache.lucene.spatial; -import org.locationtech.spatial4j.context.SpatialContext; -import org.locationtech.spatial4j.shape.Shape; - import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; @@ -28,6 +25,8 @@ import java.text.ParseException; import java.util.ArrayList; import java.util.Iterator; import java.util.List; +import org.locationtech.spatial4j.context.SpatialContext; +import org.locationtech.spatial4j.shape.Shape; // This class is modelled after SpatialTestQuery. // Before Lucene 4.7, this was a bit different in Spatial4j as SampleData & SampleDataReader. @@ -37,23 +36,25 @@ public class SpatialTestData { public String name; public Shape shape; - /** Reads the stream, consuming a format that is a tab-separated values of 3 columns: - * an "id", a "name" and the "shape". Empty lines and lines starting with a '#' are skipped. - * The stream is closed. + /** + * Reads the stream, consuming a format that is tab-separated values of 3 columns: an "id", a + * "name" and the "shape". Empty lines and lines starting with a '#' are skipped. The stream is + * closed.
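(For illustration of the three-column format this javadoc describes — not part of the patch; the name and WKT coordinates below are invented, and the id merely reuses one that appears later in TestTestFramework:)

public class SpatialTestDataLineSketch {
  public static void main(String[] args) {
    // hypothetical line: id <TAB> name <TAB> shape (here as WKT, parsed via the SpatialContext)
    String line = "G5391959\tSan Francisco\tPOINT(-122.42 37.78)";
    String[] vals = line.split("\t"); // the same split getTestData performs below
    assert vals.length == 3; // otherwise getTestData throws "bad format; ..."
    System.out.println("id=" + vals[0] + ", name=" + vals[1] + ", shape=" + vals[2]);
  }
}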
*/ - public static Iterator<SpatialTestData> getTestData(InputStream in, SpatialContext ctx) throws IOException { + public static Iterator<SpatialTestData> getTestData(InputStream in, SpatialContext ctx) + throws IOException { List<SpatialTestData> results = new ArrayList<>(); BufferedReader bufInput = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8)); try { String line; while ((line = bufInput.readLine()) != null) { - if (line.length() == 0 || line.charAt(0) == '#') - continue; + if (line.length() == 0 || line.charAt(0) == '#') continue; SpatialTestData data = new SpatialTestData(); String[] vals = line.split("\t"); if (vals.length != 3) - throw new RuntimeException("bad format; expecting 3 tab-separated values for line: "+line); + throw new RuntimeException( + "bad format; expecting 3 tab-separated values for line: " + line); data.id = vals[0]; data.name = vals[1]; try { diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/SpatialTestQuery.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/SpatialTestQuery.java index 47e9c120eec..b0febafe9f6 100644 --- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/SpatialTestQuery.java +++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/SpatialTestQuery.java @@ -16,11 +16,6 @@ */ package org.apache.lucene.spatial; -import org.locationtech.spatial4j.context.SpatialContext; - -import org.apache.lucene.spatial.query.SpatialArgs; -import org.apache.lucene.spatial.query.SpatialArgsParser; - import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; @@ -30,10 +25,11 @@ import java.util.ArrayList; import java.util.Iterator; import java.util.List; import java.util.StringTokenizer; +import org.apache.lucene.spatial.query.SpatialArgs; +import org.apache.lucene.spatial.query.SpatialArgsParser; +import org.locationtech.spatial4j.context.SpatialContext; -/** - * Helper class to execute queries - */ +/** Helper class to execute queries */ public class SpatialTestQuery { public String testname; public String line; @@ -41,14 +37,13 @@ public class SpatialTestQuery { public SpatialArgs args; public List<String> ids = new ArrayList<>(); - /** - * Get Test Queries. - */ + /** Get Test Queries. The InputStream is closed.
*/ public static Iterator<SpatialTestQuery> getTestQueries( final SpatialArgsParser parser, final SpatialContext ctx, final String name, - final InputStream in ) throws IOException { + final InputStream in) + throws IOException { List<SpatialTestQuery> results = new ArrayList<>(); @@ -62,10 +57,10 @@ public class SpatialTestQuery { try { // skip a comment - if( line.startsWith( "[" ) ) { - int idx = line.indexOf( ']' ); - if( idx > 0 ) { - line = line.substring( idx+1 ); + if (line.startsWith("[")) { + int idx = line.indexOf(']'); + if (idx > 0) { + line = line.substring(idx + 1); } } @@ -76,9 +71,8 @@ public class SpatialTestQuery { } test.args = parser.parse(line.substring(idx + 1).trim(), ctx); results.add(test); - } - catch( Exception ex ) { - throw new RuntimeException( "invalid query line: "+test.line, ex ); + } catch (Exception ex) { + throw new RuntimeException("invalid query line: " + test.line, ex); } } } finally { @@ -89,8 +83,7 @@ @Override public String toString() { - if (line != null) - return line; - return args.toString()+" "+ids; + if (line != null) return line; + return args.toString() + " " + ids; } } diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/StrategyTestCase.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/StrategyTestCase.java index 2b23a656eae..295fb7cc801 100644 --- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/StrategyTestCase.java +++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/StrategyTestCase.java @@ -16,7 +16,6 @@ */ package org.apache.lucene.spatial; - import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; @@ -28,7 +27,6 @@ import java.util.Iterator; import java.util.List; import java.util.Set; import java.util.logging.Logger; - import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.StoredField; @@ -54,7 +52,7 @@ public abstract class StrategyTestCase extends SpatialTestCase { public static final String DATA_COUNTRIES_BBOX = "countries-bbox.txt"; public static final String DATA_WORLD_CITIES_POINTS = "world-cities-points.txt"; - public static final String QTEST_States_IsWithin_BBox = "states-IsWithin-BBox.txt"; + public static final String QTEST_States_IsWithin_BBox = "states-IsWithin-BBox.txt"; public static final String QTEST_States_Intersects_BBox = "states-Intersects-BBox.txt"; public static final String QTEST_Cities_Intersects_BBox = "cities-Intersects-BBox.txt"; public static final String QTEST_Simple_Queries_BBox = "simple-Queries-BBox.txt"; @@ -66,9 +64,10 @@ public abstract class StrategyTestCase extends SpatialTestCase { protected SpatialStrategy strategy; protected boolean storeShape = true; - protected void executeQueries(SpatialMatchConcern concern, String... testQueryFile) throws IOException { - log.info("testing queried for strategy "+strategy); // nowarn - for( String path : testQueryFile ) { + protected void executeQueries(SpatialMatchConcern concern, String... 
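(For orientation, not part of the patch: getTestQueries above first strips an optional leading "[comment]" block, then splits the expected ids from the SpatialArgs text at a separator whose lookup sits in unchanged context lines this hunk elides — an '@' in Lucene's bundled query files, stated here as an assumption. A hypothetical line, reusing the args string parsed in SpatialExample's demo, and the equivalent manual split:)

public class SpatialTestQueryLineSketch {
  public static void main(String[] args) {
    // "[optional comment] id1 id2 ... @ <SpatialArgs string>"
    String line = "[sample] G5391959 @ Intersects(BUFFER(POINT(-80 33),1))";
    line = line.substring(line.indexOf(']') + 1); // drop the leading comment block
    int idx = line.indexOf('@'); // separator between ids and args (assumed, see note above)
    String ids = line.substring(0, idx).trim(); // whitespace-separated expected result ids
    String argsText = line.substring(idx + 1).trim(); // handed to SpatialArgsParser.parse
    System.out.println("ids=" + ids + ", args=" + argsText);
  }
}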
testQueryFile) + throws IOException { + log.info("testing queries for strategy " + strategy); // nowarn + for (String path : testQueryFile) { Iterator<SpatialTestQuery> testQueryIterator = getTestQueries(path, ctx); runTestQueries(testQueryIterator, concern); } @@ -97,8 +96,8 @@ for (Field f : strategy.createIndexableFields(shape)) { document.add(f); } - if (storeShape)//just for diagnostics - document.add(new StoredField(strategy.getFieldName(), shape.toString())); + if (storeShape) // just for diagnostics + document.add(new StoredField(strategy.getFieldName(), shape.toString())); } documents.add(document); @@ -114,20 +113,18 @@ protected Iterator<SpatialTestData> getSampleData(String testDataFile) throws IOException { String path = "data/" + testDataFile; InputStream stream = getClass().getClassLoader().getResourceAsStream(path); - if (stream == null) - throw new FileNotFoundException("classpath resource not found: "+path); - return SpatialTestData.getTestData(stream, ctx);//closes the InputStream + if (stream == null) throw new FileNotFoundException("classpath resource not found: " + path); + return SpatialTestData.getTestData(stream, ctx); // closes the InputStream } - protected Iterator<SpatialTestQuery> getTestQueries(String testQueryFile, SpatialContext ctx) throws IOException { + protected Iterator<SpatialTestQuery> getTestQueries(String testQueryFile, SpatialContext ctx) + throws IOException { InputStream in = getClass().getClassLoader().getResourceAsStream(testQueryFile); return SpatialTestQuery.getTestQueries( - argsParser, ctx, testQueryFile, in );//closes the InputStream + argsParser, ctx, testQueryFile, in); // closes the InputStream } - public void runTestQueries( - Iterator<SpatialTestQuery> queries, - SpatialMatchConcern concern) { + public void runTestQueries(Iterator<SpatialTestQuery> queries, SpatialMatchConcern concern) { while (queries.hasNext()) { SpatialTestQuery q = queries.next(); runTestQuery(concern, q); @@ -135,10 +132,10 @@ } public void runTestQuery(SpatialMatchConcern concern, SpatialTestQuery q) { - String msg = q.toString(); //"Query: " + q.args.toString(ctx); - SearchResults got = executeQuery(makeQuery(q), Math.max(100, q.ids.size()+1)); + String msg = q.toString(); // "Query: " + q.args.toString(ctx); + SearchResults got = executeQuery(makeQuery(q), Math.max(100, q.ids.size() + 1)); if (storeShape && got.numFound > 0) { - //check stored value is there + // check stored value is there assertNotNull(got.results.get(0).document.get(strategy.getFieldName())); } if (concern.orderIsImportant) { @@ -146,7 +143,12 @@ for (SearchResult r : got.results) { String id = r.document.get("id"); if (!ids.hasNext()) { - fail(msg + " :: Did not get enough results. Expect" + q.ids + ", got: " + got.toDebugString()); + fail( + msg + + " :: Did not get enough results. Expected " + + q.ids + + ", got: " + + got.toDebugString()); } assertEquals("out of order: " + msg, ids.next(), id); } @@ -185,9 +187,10 @@ } protected void adoc(String id, String shapeStr) throws IOException, ParseException { - Shape shape = shapeStr==null ? null : ctx.readShapeFromWkt(shapeStr); + Shape shape = shapeStr == null ? 
null : ctx.readShapeFromWkt(shapeStr); addDocument(newDoc(id, shape)); } + protected void adoc(String id, Shape shape) throws IOException { addDocument(newDoc(id, shape)); } @@ -200,7 +203,9 @@ public abstract class StrategyTestCase extends SpatialTestCase { doc.add(f); } if (storeShape) - doc.add(new StoredField(strategy.getFieldName(), shape.toString()));//not to be parsed; just for debug + doc.add( + new StoredField( + strategy.getFieldName(), shape.toString())); // not to be parsed; just for debug } return doc; } @@ -210,7 +215,8 @@ public abstract class StrategyTestCase extends SpatialTestCase { } /** scores[] are in docId order */ - protected void checkValueSource(DoubleValuesSource vs, float scores[], float delta) throws IOException { + protected void checkValueSource(DoubleValuesSource vs, float scores[], float delta) + throws IOException { for (LeafReaderContext ctx : indexSearcher.getTopReaderContext().leaves()) { DoubleValues v = vs.getValues(ctx, null); @@ -221,23 +227,23 @@ public abstract class StrategyTestCase extends SpatialTestCase { assertEquals("Not equal for doc " + doc, v.doubleValue(), (double) scores[doc], delta); } } - } - protected void testOperation(Shape indexedShape, SpatialOperation operation, - Shape queryShape, boolean match) throws IOException { - assertTrue("Faulty test", - operation.evaluate(indexedShape, queryShape) == match || - indexedShape.equals(queryShape) && - (operation == SpatialOperation.Contains || operation == SpatialOperation.IsWithin)); + protected void testOperation( + Shape indexedShape, SpatialOperation operation, Shape queryShape, boolean match) + throws IOException { + assertTrue( + "Faulty test", + operation.evaluate(indexedShape, queryShape) == match + || indexedShape.equals(queryShape) + && (operation == SpatialOperation.Contains + || operation == SpatialOperation.IsWithin)); adoc("0", indexedShape); commit(); Query query = strategy.makeQuery(new SpatialArgs(operation, queryShape)); SearchResults got = executeQuery(query, 1); assert got.numFound <= 1 : "unclean test env"; - if ((got.numFound == 1) != match) - fail(operation+" I:" + indexedShape + " Q:" + queryShape); - deleteAll();//clean up after ourselves + if ((got.numFound == 1) != match) fail(operation + " I:" + indexedShape + " Q:" + queryShape); + deleteAll(); // clean up after ourselves } - } diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/TestDistanceStrategy.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/TestDistanceStrategy.java index a4b3ec397bf..e3cf440d2b1 100644 --- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/TestDistanceStrategy.java +++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/TestDistanceStrategy.java @@ -16,11 +16,10 @@ */ package org.apache.lucene.spatial; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import java.io.IOException; import java.util.ArrayList; import java.util.List; - -import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.apache.lucene.spatial.bbox.BBoxStrategy; import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy; import org.apache.lucene.spatial.prefix.TermQueryPrefixTreeStrategy; @@ -46,31 +45,31 @@ public class TestDistanceStrategy extends StrategyTestCase { SpatialPrefixTree grid; SpatialStrategy strategy; - grid = new QuadPrefixTree(ctx,25); + grid = new QuadPrefixTree(ctx, 25); strategy = new RecursivePrefixTreeStrategy(grid, "recursive_quad"); - ctorArgs.add(new Object[]{strategy.getFieldName(), 
strategy}); + ctorArgs.add(new Object[] {strategy.getFieldName(), strategy}); - grid = new GeohashPrefixTree(ctx,12); + grid = new GeohashPrefixTree(ctx, 12); strategy = new TermQueryPrefixTreeStrategy(grid, "termquery_geohash"); - ctorArgs.add(new Object[]{strategy.getFieldName(), strategy}); + ctorArgs.add(new Object[] {strategy.getFieldName(), strategy}); - grid = new PackedQuadPrefixTree(ctx,25); + grid = new PackedQuadPrefixTree(ctx, 25); strategy = new RecursivePrefixTreeStrategy(grid, "recursive_packedquad"); - ctorArgs.add(new Object[]{strategy.getFieldName(), strategy}); + ctorArgs.add(new Object[] {strategy.getFieldName(), strategy}); strategy = PointVectorStrategy.newInstance(ctx, "pointvector"); - ctorArgs.add(new Object[]{strategy.getFieldName(), strategy}); + ctorArgs.add(new Object[] {strategy.getFieldName(), strategy}); -// Can't test this without un-inverting since PVS legacy config didn't have docValues. -// However, note that Solr's tests use UninvertingReader and thus test this. -// strategy = PointVectorStrategy.newLegacyInstance(ctx, "pointvector_legacy"); -// ctorArgs.add(new Object[]{strategy.getFieldName(), strategy}); + // Can't test this without un-inverting since PVS legacy config didn't have docValues. + // However, note that Solr's tests use UninvertingReader and thus test this. + // strategy = PointVectorStrategy.newLegacyInstance(ctx, "pointvector_legacy"); + // ctorArgs.add(new Object[]{strategy.getFieldName(), strategy}); strategy = BBoxStrategy.newInstance(ctx, "bbox"); - ctorArgs.add(new Object[]{strategy.getFieldName(), strategy}); + ctorArgs.add(new Object[] {strategy.getFieldName(), strategy}); strategy = new SerializedDVStrategy(ctx, "serialized"); - ctorArgs.add(new Object[]{strategy.getFieldName(), strategy}); + ctorArgs.add(new Object[] {strategy.getFieldName(), strategy}); return ctorArgs; } @@ -85,9 +84,9 @@ public class TestDistanceStrategy extends StrategyTestCase { ShapeFactory shapeFactory = ctx.getShapeFactory(); adoc("100", shapeFactory.pointXY(2, 1)); adoc("101", shapeFactory.pointXY(-1, 4)); - adoc("103", (Shape)null);//test score for nothing + adoc("103", (Shape) null); // test score for nothing commit(); - //FYI distances are in docid order + // FYI distances are in docid order checkDistValueSource(shapeFactory.pointXY(4, 3), 2.8274937f, 5.0898066f, 180f); checkDistValueSource(shapeFactory.pointXY(0, 4), 3.6043684f, 0.9975641f, 180f); } @@ -98,13 +97,13 @@ public class TestDistanceStrategy extends StrategyTestCase { adoc("100", p100); Point p101 = ctx.getShapeFactory().pointXY(-1.001, 4.001); adoc("101", p101); - adoc("103", (Shape)null);//test score for nothing + adoc("103", (Shape) null); // test score for nothing commit(); double dist = ctx.getDistCalc().distance(p100, p101); Shape queryShape = ctx.makeCircle(2.01, 0.99, dist); - checkValueSource(strategy.makeRecipDistanceValueSource(queryShape), - new float[]{1.00f, 0.10f, 0f}, 0.09f); + checkValueSource( + strategy.makeRecipDistanceValueSource(queryShape), new float[] {1.00f, 0.10f, 0f}, 0.09f); } void checkDistValueSource(Point pt, float... 
distances) throws IOException { diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/TestPortedSolr3.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/TestPortedSolr3.java index bce5cf0bcef..b2fad42d385 100644 --- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/TestPortedSolr3.java +++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/TestPortedSolr3.java @@ -16,12 +16,11 @@ */ package org.apache.lucene.spatial; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Set; - -import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.apache.lucene.search.Query; import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy; import org.apache.lucene.spatial.prefix.TermQueryPrefixTreeStrategy; @@ -38,9 +37,7 @@ import org.locationtech.spatial4j.shape.Point; import org.locationtech.spatial4j.shape.Shape; import org.locationtech.spatial4j.shape.ShapeFactory; -/** - * Based off of Solr 3's SpatialFilterTest. - */ +/** Based off of Solr 3's SpatialFilterTest. */ public class TestPortedSolr3 extends StrategyTestCase { private ShapeFactory shapeFactory; @@ -53,23 +50,23 @@ public class TestPortedSolr3 extends StrategyTestCase { SpatialPrefixTree grid; SpatialStrategy strategy; - grid = new GeohashPrefixTree(ctx,12); + grid = new GeohashPrefixTree(ctx, 12); strategy = new RecursivePrefixTreeStrategy(grid, "recursive_geohash"); - ctorArgs.add(new Object[]{strategy.getFieldName(), strategy}); + ctorArgs.add(new Object[] {strategy.getFieldName(), strategy}); - grid = new QuadPrefixTree(ctx,25); + grid = new QuadPrefixTree(ctx, 25); strategy = new RecursivePrefixTreeStrategy(grid, "recursive_quad"); - ctorArgs.add(new Object[]{strategy.getFieldName(), strategy}); + ctorArgs.add(new Object[] {strategy.getFieldName(), strategy}); - grid = new GeohashPrefixTree(ctx,12); + grid = new GeohashPrefixTree(ctx, 12); strategy = new TermQueryPrefixTreeStrategy(grid, "termquery_geohash"); - ctorArgs.add(new Object[]{strategy.getFieldName(), strategy}); + ctorArgs.add(new Object[] {strategy.getFieldName(), strategy}); strategy = PointVectorStrategy.newInstance(ctx, "pointvector"); - ctorArgs.add(new Object[]{strategy.getFieldName(), strategy}); + ctorArgs.add(new Object[] {strategy.getFieldName(), strategy}); strategy = PointVectorStrategy.newInstance(ctx, "pointvector_legacy"); - ctorArgs.add(new Object[]{strategy.getFieldName(), strategy}); + ctorArgs.add(new Object[] {strategy.getFieldName(), strategy}); return ctorArgs; } @@ -98,29 +95,32 @@ public class TestPortedSolr3 extends StrategyTestCase { commit(); } - @Test public void testIntersections() throws Exception { setupDocs(); - //Try some edge cases - //NOTE: 2nd arg is distance in kilometers + // Try some edge cases + // NOTE: 2nd arg is distance in kilometers checkHitsCircle(shapeFactory.pointXY(1, 1), 175, 3, 5, 6, 7); checkHitsCircle(shapeFactory.pointXY(179.8, 0), 200, 2, 8, 9); - checkHitsCircle(shapeFactory.pointXY(50, 89.8), 200, 2, 10, 11);//this goes over the north pole - checkHitsCircle(shapeFactory.pointXY(50, -89.8), 200, 2, 12, 13);//this goes over the south pole - //try some normal cases + checkHitsCircle( + shapeFactory.pointXY(50, 89.8), 200, 2, 10, 11); // this goes over the north pole + checkHitsCircle( + shapeFactory.pointXY(50, -89.8), 200, 2, 12, 13); // this goes over the south pole + // try some normal cases 
checkHitsCircle(shapeFactory.pointXY(-80.0, 33.0), 300, 2); - //large distance + // large distance checkHitsCircle(shapeFactory.pointXY(1, 1), 5000, 3, 5, 6, 7); - //Because we are generating a box based on the west/east longitudes and the south/north latitudes, which then - //translates to a range query, which is slightly more inclusive. Thus, even though 0.0 is 15.725 kms away, - //it will be included, b/c of the box calculation. + // Because we are generating a box based on the west/east longitudes and the south/north + // latitudes, which then + // translates to a range query, which is slightly more inclusive. Thus, even though 0.0 is + // 15.725 kms away, + // it will be included, b/c of the box calculation. checkHitsBBox(shapeFactory.pointXY(0.1, 0.1), 15, 2, 5, 6); - //try some more + // try some more deleteAll(); adoc("14", shapeFactory.pointXY(5, 0)); adoc("15", shapeFactory.pointXY(15, 0)); - //3000KM from 0,0, see http://www.movable-type.co.uk/scripts/latlong.html + // 3000KM from 0,0, see http://www.movable-type.co.uk/scripts/latlong.html adoc("16", shapeFactory.pointXY(19.79750, 18.71111)); adoc("17", shapeFactory.pointXY(-95.436643, 44.043900)); commit(); @@ -131,7 +131,8 @@ checkHitsCircle(shapeFactory.pointXY(0, 0), 3001, 3, 14, 15, 16); checkHitsCircle(shapeFactory.pointXY(0, 0), 3000.1, 3, 14, 15, 16); - //really fine grained distance and reflects some of the vagaries of how we are calculating the box + // really fine grained distance and reflects some of the vagaries of how we are calculating the + // box checkHitsCircle(shapeFactory.pointXY(-96.789603, 43.517030), 109, 0); // falls outside of the real distance, but inside the bounding box @@ -139,27 +140,28 @@ checkHitsBBox(shapeFactory.pointXY(-96.789603, 43.517030), 110, 1, 17); } - //---- these are similar to Solr test methods + // ---- these are similar to Solr test methods private void checkHitsCircle(Point pt, double distKM, int assertNumFound, int... assertIds) { _checkHits(false, pt, distKM, assertNumFound, assertIds); } + private void checkHitsBBox(Point pt, double distKM, int assertNumFound, int... assertIds) { _checkHits(true, pt, distKM, assertNumFound, assertIds); } - private void _checkHits(boolean bbox, Point pt, double distKM, int assertNumFound, int... assertIds) { + private void _checkHits( + boolean bbox, Point pt, double distKM, int assertNumFound, int... 
assertIds) { SpatialOperation op = SpatialOperation.Intersects; double distDEG = DistanceUtils.dist2Degrees(distKM, DistanceUtils.EARTH_MEAN_RADIUS_KM); Shape shape = shapeFactory.circle(pt, distDEG); - if (bbox) - shape = shape.getBoundingBox(); + if (bbox) shape = shape.getBoundingBox(); - SpatialArgs args = new SpatialArgs(op,shape); - //args.setDistPrecision(0.025); + SpatialArgs args = new SpatialArgs(op, shape); + // args.setDistPrecision(0.025); Query query = strategy.makeQuery(args); SearchResults results = executeQuery(query, 100); - assertEquals(""+shape,assertNumFound,results.numFound); + assertEquals("" + shape, assertNumFound, results.numFound); if (assertIds != null) { Set<Integer> resultIds = new HashSet<>(); for (SearchResult result : results.results) { @@ -170,5 +172,4 @@ } } } - } diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/TestQueryEqualsHashCode.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/TestQueryEqualsHashCode.java index 4605d4d60cd..cd531663d9c 100644 --- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/TestQueryEqualsHashCode.java +++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/TestQueryEqualsHashCode.java @@ -18,7 +18,6 @@ package org.apache.lucene.spatial; import java.util.ArrayList; import java.util.Collection; - import org.apache.lucene.spatial.bbox.BBoxStrategy; import org.apache.lucene.spatial.composite.CompositeSpatialStrategy; import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy; @@ -44,17 +43,24 @@ public class TestQueryEqualsHashCode extends LuceneTestCase { @Test public void testEqualsHashCode() { - switch (random().nextInt(4)) {//0-3 - case 0: predicate = SpatialOperation.Contains; break; - case 1: predicate = SpatialOperation.IsWithin; break; + switch (random().nextInt(4)) { // 0-3 + case 0: + predicate = SpatialOperation.Contains; + break; + case 1: + predicate = SpatialOperation.IsWithin; + break; - default: predicate = SpatialOperation.Intersects; break; + default: + predicate = SpatialOperation.Intersects; + break; } - final SpatialPrefixTree gridQuad = new QuadPrefixTree(ctx,10); - final SpatialPrefixTree gridGeohash = new GeohashPrefixTree(ctx,10); + final SpatialPrefixTree gridQuad = new QuadPrefixTree(ctx, 10); + final SpatialPrefixTree gridGeohash = new GeohashPrefixTree(ctx, 10); Collection<SpatialStrategy> strategies = new ArrayList<>(); - RecursivePrefixTreeStrategy recursive_geohash = new RecursivePrefixTreeStrategy(gridGeohash, "recursive_geohash"); + RecursivePrefixTreeStrategy recursive_geohash = + new RecursivePrefixTreeStrategy(gridGeohash, "recursive_geohash"); strategies.add(recursive_geohash); strategies.add(new TermQueryPrefixTreeStrategy(gridQuad, "termquery_quad")); strategies.add(PointVectorStrategy.newInstance(ctx, "pointvector"));
strategy.makeQuery(args); + } + }); + testEqualsHashcode( + args1, + args2, + new ObjGenerator() { + @Override + public Object gen(SpatialArgs args) { + return strategy.makeDistanceValueSource(args.getShape().getCenter()); + } + }); } private void testEqualsHashcode(SpatialArgs args1, SpatialArgs args2, ObjGenerator generator) { @@ -91,13 +103,12 @@ public class TestQueryEqualsHashCode extends LuceneTestCase { } catch (UnsupportedOperationException e) { return; } - if (first == null) - return;//unsupported op? - Object second = generator.gen(args1);//should be the same + if (first == null) return; // unsupported op? + Object second = generator.gen(args1); // should be the same assertEquals(first, second); assertEquals(first.hashCode(), second.hashCode()); assertTrue(args1.equals(args2) == false); - second = generator.gen(args2);//now should be different + second = generator.gen(args2); // now should be different assertTrue(first.equals(second) == false); assertTrue(first.hashCode() != second.hashCode()); } @@ -115,5 +126,4 @@ public class TestQueryEqualsHashCode extends LuceneTestCase { interface ObjGenerator { Object gen(SpatialArgs args); } - } diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/TestSpatialArgs.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/TestSpatialArgs.java index ba771af7cfa..752497cc8a2 100644 --- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/TestSpatialArgs.java +++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/TestSpatialArgs.java @@ -27,14 +27,15 @@ public class TestSpatialArgs extends LuceneTestCase { @Test public void calcDistanceFromErrPct() { - final SpatialContext ctx = usually() ? SpatialContext.GEO : new Geo3dSpatialContextFactory().newSpatialContext(); - final double DEP = 0.5;//distErrPct + final SpatialContext ctx = + usually() ? 
SpatialContext.GEO : new Geo3dSpatialContextFactory().newSpatialContext(); + final double DEP = 0.5; // distErrPct - //the result is the diagonal distance from the center to the closest corner, + // the result is the diagonal distance from the center to the closest corner, // times distErrPct Shape superwide = ctx.makeRectangle(-180, 180, 0, 0); - //0 distErrPct means 0 distance always + // 0 distErrPct means 0 distance always assertEquals(0, SpatialArgs.calcDistanceFromErrPct(superwide, 0, ctx), 0); assertEquals(180 * DEP, SpatialArgs.calcDistanceFromErrPct(superwide, DEP, ctx), 0); diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/TestTestFramework.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/TestTestFramework.java index 6af7467c2ca..6d47b456169 100644 --- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/TestTestFramework.java +++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/TestTestFramework.java @@ -16,24 +16,20 @@ */ package org.apache.lucene.spatial; -import org.locationtech.spatial4j.context.SpatialContext; -import org.locationtech.spatial4j.shape.Rectangle; -import org.apache.lucene.spatial.query.SpatialArgsParser; -import org.apache.lucene.spatial.query.SpatialOperation; -import org.apache.lucene.util.LuceneTestCase; -import org.junit.Assert; -import org.junit.Test; - import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; import java.util.Iterator; import java.util.List; +import org.apache.lucene.spatial.query.SpatialArgsParser; +import org.apache.lucene.spatial.query.SpatialOperation; +import org.apache.lucene.util.LuceneTestCase; +import org.junit.Assert; +import org.junit.Test; +import org.locationtech.spatial4j.context.SpatialContext; +import org.locationtech.spatial4j.shape.Rectangle; - -/** - * Make sure we are reading the tests as expected - */ +/** Make sure we are reading the tests as expected */ public class TestTestFramework extends LuceneTestCase { @Test @@ -42,27 +38,27 @@ public class TestTestFramework extends LuceneTestCase { InputStream in = getClass().getClassLoader().getResourceAsStream(name); SpatialContext ctx = SpatialContext.GEO; - Iterator<SpatialTestQuery> iter = SpatialTestQuery.getTestQueries( - new SpatialArgsParser(), ctx, name, in );//closes the InputStream + Iterator<SpatialTestQuery> iter = + SpatialTestQuery.getTestQueries( + new SpatialArgsParser(), ctx, name, in); // closes the InputStream List<SpatialTestQuery> tests = new ArrayList<>(); - while( iter.hasNext() ) { - tests.add( iter.next() ); + while (iter.hasNext()) { + tests.add(iter.next()); } - Assert.assertEquals( 3, tests.size() ); + Assert.assertEquals(3, tests.size()); SpatialTestQuery sf = tests.get(0); // assert - assertEquals( 1, sf.ids.size() ); - Assert.assertTrue( sf.ids.get(0).equals( "G5391959" ) ); - Assert.assertTrue( sf.args.getShape() instanceof Rectangle); + assertEquals(1, sf.ids.size()); + Assert.assertTrue(sf.ids.get(0).equals("G5391959")); + Assert.assertTrue(sf.args.getShape() instanceof Rectangle); assertEquals(SpatialOperation.Intersects, sf.args.getOperation()); } @Test public void spatialExample() throws Exception { - //kind of a hack so that SpatialExample is tested despite + // kind of a hack so that SpatialExample is tested despite // it not starting or ending with "Test". 
SpatialExample.main(null); } - } diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/bbox/TestBBoxStrategy.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/bbox/TestBBoxStrategy.java index bca89b51591..0df4883d8a2 100644 --- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/bbox/TestBBoxStrategy.java +++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/bbox/TestBBoxStrategy.java @@ -17,7 +17,6 @@ package org.apache.lucene.spatial.bbox; import java.io.IOException; - import org.apache.lucene.document.FieldType; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.search.Query; @@ -41,7 +40,7 @@ public class TestBBoxStrategy extends RandomSpatialOpStrategyTestCase { protected Shape randomIndexedShape() { Rectangle world = ctx.getWorldBounds(); if (random().nextInt(10) == 0) // increased chance of getting one of these - return world; + return world; int worldWidth = (int) Math.round(world.getWidth()); int deltaLeft = nextIntInclusive(worldWidth); @@ -50,16 +49,19 @@ public class TestBBoxStrategy extends RandomSpatialOpStrategyTestCase { int deltaTop = nextIntInclusive(worldHeight); int deltaBottom = nextIntInclusive(worldHeight - deltaTop); if (ctx.isGeo() && (deltaLeft != 0 || deltaRight != 0)) { - //if geo & doesn't world-wrap, we shift randomly to potentially cross dateline + // if geo & doesn't world-wrap, we shift randomly to potentially cross dateline int shift = nextIntInclusive(360); - return ctx.getShapeFactory().rect( - DistanceUtils.normLonDEG(world.getMinX() + deltaLeft + shift), - DistanceUtils.normLonDEG(world.getMaxX() - deltaRight + shift), - world.getMinY() + deltaBottom, world.getMaxY() - deltaTop); + return ctx.getShapeFactory() + .rect( + DistanceUtils.normLonDEG(world.getMinX() + deltaLeft + shift), + DistanceUtils.normLonDEG(world.getMaxX() - deltaRight + shift), + world.getMinY() + deltaBottom, + world.getMaxY() - deltaTop); } else { - return ctx.getShapeFactory().rect( - world.getMinX() + deltaLeft, world.getMaxX() - deltaRight, - world.getMinY() + deltaBottom, world.getMaxY() - deltaTop); + return ctx.getShapeFactory() + .rect( + world.getMinX() + deltaLeft, world.getMaxX() - deltaRight, + world.getMinY() + deltaBottom, world.getMaxY() - deltaTop); } } @@ -67,7 +69,7 @@ public class TestBBoxStrategy extends RandomSpatialOpStrategyTestCase { private int nextIntInclusive(int toInc) { final int DIVIS = 10; if (toInc % DIVIS == 0) { - return random().nextInt(toInc/DIVIS + 1) * DIVIS; + return random().nextInt(toInc / DIVIS + 1) * DIVIS; } else { return random().nextInt(toInc + 1); } @@ -80,8 +82,8 @@ public class TestBBoxStrategy extends RandomSpatialOpStrategyTestCase { @Test public void testOperations() throws IOException { - //setup - if (random().nextInt(4) > 0) {//75% of the time choose geo (more interesting to test) + // setup + if (random().nextInt(4) > 0) { // 75% of the time choose geo (more interesting to test) this.ctx = SpatialContext.GEO; } else { SpatialContextFactory factory = new SpatialContextFactory(); @@ -90,15 +92,14 @@ public class TestBBoxStrategy extends RandomSpatialOpStrategyTestCase { this.ctx = factory.newSpatialContext(); } this.strategy = BBoxStrategy.newInstance(ctx, "bbox"); - //test we can disable docValues for predicate tests + // test we can disable docValues for predicate tests if (random().nextBoolean()) { - FieldType fieldType = new FieldType(((BBoxStrategy)strategy).getFieldType()); + FieldType fieldType = new FieldType(((BBoxStrategy) strategy).getFieldType()); 
fieldType.setDocValuesType(DocValuesType.NONE); strategy = new BBoxStrategy(ctx, strategy.getFieldName(), fieldType); } for (SpatialOperation operation : SpatialOperation.values()) { - if (operation == SpatialOperation.Overlaps) - continue;//unsupported + if (operation == SpatialOperation.Overlaps) continue; // unsupported testOperationRandomShapes(operation); deleteAll(); @@ -112,7 +113,8 @@ public class TestBBoxStrategy extends RandomSpatialOpStrategyTestCase { testOperation( ctx.getShapeFactory().rect(160, 180, -10, 10), SpatialOperation.Intersects, - ctx.getShapeFactory().rect(-180, -160, -10, 10), true); + ctx.getShapeFactory().rect(-180, -160, -10, 10), + true); } @Test @@ -121,7 +123,8 @@ public class TestBBoxStrategy extends RandomSpatialOpStrategyTestCase { testOperation( ctx.getShapeFactory().rect(-180, 180, -10, 10), SpatialOperation.Intersects, - ctx.getShapeFactory().rect(180, 180, -10, 10), true); + ctx.getShapeFactory().rect(180, 180, -10, 10), + true); } @Test @@ -130,7 +133,8 @@ public class TestBBoxStrategy extends RandomSpatialOpStrategyTestCase { testOperation( ctx.getShapeFactory().rect(180, 180, -10, 10), SpatialOperation.IsWithin, - ctx.getShapeFactory().rect(-180, -100, -10, 10), true); + ctx.getShapeFactory().rect(-180, -100, -10, 10), + true); } @Test @@ -139,7 +143,8 @@ public class TestBBoxStrategy extends RandomSpatialOpStrategyTestCase { testOperation( ctx.getShapeFactory().rect(-180, -150, -10, 10), SpatialOperation.Contains, - ctx.getShapeFactory().rect(180, 180, -10, 10), true); + ctx.getShapeFactory().rect(180, 180, -10, 10), + true); } @Test @@ -148,7 +153,8 @@ public class TestBBoxStrategy extends RandomSpatialOpStrategyTestCase { testOperation( ctx.getShapeFactory().rect(-180, 180, -10, 10), SpatialOperation.Contains, - ctx.getShapeFactory().rect(170, -170, -10, 10), true); + ctx.getShapeFactory().rect(170, -170, -10, 10), + true); } /** See https://github.com/spatial4j/spatial4j/issues/85 */ @@ -159,23 +165,22 @@ public class TestBBoxStrategy extends RandomSpatialOpStrategyTestCase { // SpatialOperation.IsWithin, // queryShape, true); - //both on dateline but expressed using opposite signs + // both on dateline but expressed using opposite signs setupGeo(); final Rectangle indexedShape = ctx.getShapeFactory().rect(180, 180, -10, 10); final Rectangle queryShape = ctx.getShapeFactory().rect(-180, -180, -20, 20); final SpatialOperation operation = SpatialOperation.IsWithin; - final boolean match = true;//yes it is within + final boolean match = true; // yes it is within - //the rest is super.testOperation without leading assert: + // the rest is super.testOperation without leading assert: adoc("0", indexedShape); commit(); Query query = strategy.makeQuery(new SpatialArgs(operation, queryShape)); SearchResults got = executeQuery(query, 1); assert got.numFound <= 1 : "unclean test env"; - if ((got.numFound == 1) != match) - fail(operation+" I:" + indexedShape + " Q:" + queryShape); - deleteAll();//clean up after ourselves + if ((got.numFound == 1) != match) fail(operation + " I:" + indexedShape + " Q:" + queryShape); + deleteAll(); // clean up after ourselves } private void setupGeo() { @@ -185,7 +190,8 @@ public class TestBBoxStrategy extends RandomSpatialOpStrategyTestCase { // OLD STATIC TESTS (worthless?) 
- @Test @Ignore("Overlaps not supported") + @Test + @Ignore("Overlaps not supported") public void testBasicOperaions() throws IOException { setupGeo(); getAddAndVerifyIndexedDocuments(DATA_SIMPLE_BBOX); @@ -227,60 +233,91 @@ public class TestBBoxStrategy extends RandomSpatialOpStrategyTestCase { } strategy = new BBoxStrategy(ctx, FIELD_PREFIX, fieldType); - return (BBoxStrategy)strategy; + return (BBoxStrategy) strategy; } public void testOverlapRatio() throws IOException { setupNeedsDocValuesOnly(); - //Simply assert null shape results in 0 + // Simply assert null shape results in 0 adoc("999", (Shape) null); commit(); BBoxStrategy bboxStrategy = (BBoxStrategy) strategy; - checkValueSource(bboxStrategy.makeOverlapRatioValueSource(randomRectangle(), 0.0), new float[]{0f}, 0f); + checkValueSource( + bboxStrategy.makeOverlapRatioValueSource(randomRectangle(), 0.0), new float[] {0f}, 0f); - //we test raw BBoxOverlapRatioValueSource without actual indexing + // we test raw BBoxOverlapRatioValueSource without actual indexing for (int SHIFT = 0; SHIFT < 360; SHIFT += 10) { - Rectangle queryBox = shiftedRect(0, 40, -20, 20, SHIFT);//40x40, 1600 area + Rectangle queryBox = shiftedRect(0, 40, -20, 20, SHIFT); // 40x40, 1600 area final boolean MSL = random().nextBoolean(); final double minSideLength = MSL ? 0.1 : 0.0; - BBoxOverlapRatioValueSource sim = new BBoxOverlapRatioValueSource(null, true, queryBox, 0.5, minSideLength); - int nudge = SHIFT == 0 ? 0 : random().nextInt(3) * 10 - 10;//-10, 0, or 10. Keep 0 on first round. + BBoxOverlapRatioValueSource sim = + new BBoxOverlapRatioValueSource(null, true, queryBox, 0.5, minSideLength); + int nudge = + SHIFT == 0 ? 0 : random().nextInt(3) * 10 - 10; // -10, 0, or 10. Keep 0 on first round. final double EPS = 0.0000001; - assertEquals("within", (200d/1600d * 0.5) + (0.5), sim.score(shiftedRect(10, 30, 0, 10, SHIFT + nudge), null), EPS); + assertEquals( + "within", + (200d / 1600d * 0.5) + (0.5), + sim.score(shiftedRect(10, 30, 0, 10, SHIFT + nudge), null), + EPS); assertEquals("in25%", 0.25, sim.score(shiftedRect(30, 70, -20, 20, SHIFT), null), EPS); - assertEquals("wrap", 0.2794117, sim.score(shiftedRect(30, 10, -20, 20, SHIFT + nudge), null), EPS); + assertEquals( + "wrap", 0.2794117, sim.score(shiftedRect(30, 10, -20, 20, SHIFT + nudge), null), EPS); - assertEquals("no intersection H", 0.0, sim.score(shiftedRect(-10, -10, -20, 20, SHIFT), null), EPS); - assertEquals("no intersection V", 0.0, sim.score(shiftedRect(0, 20, -30, -30, SHIFT), null), EPS); + assertEquals( + "no intersection H", 0.0, sim.score(shiftedRect(-10, -10, -20, 20, SHIFT), null), EPS); + assertEquals( + "no intersection V", 0.0, sim.score(shiftedRect(0, 20, -30, -30, SHIFT), null), EPS); - assertEquals("point", 0.5 + (MSL?(0.1*0.1/1600.0/2.0):0), sim.score(shiftedRect(0, 0, 0, 0, SHIFT), null), EPS); + assertEquals( + "point", + 0.5 + (MSL ? (0.1 * 0.1 / 1600.0 / 2.0) : 0), + sim.score(shiftedRect(0, 0, 0, 0, SHIFT), null), + EPS); - assertEquals("line 25% intersection", 0.25/2 + (MSL?(10.0*0.1/1600.0/2.0):0.0), sim.score(shiftedRect(-30, 10, 0, 0, SHIFT), null), EPS); + assertEquals( + "line 25% intersection", + 0.25 / 2 + (MSL ? 
(10.0 * 0.1 / 1600.0 / 2.0) : 0.0), + sim.score(shiftedRect(-30, 10, 0, 0, SHIFT), null), + EPS); - //test with point query - sim = new BBoxOverlapRatioValueSource(null, true, shiftedRect(0, 0, 0, 0, SHIFT), 0.5, minSideLength); + // test with point query + sim = + new BBoxOverlapRatioValueSource( + null, true, shiftedRect(0, 0, 0, 0, SHIFT), 0.5, minSideLength); assertEquals("same", 1.0, sim.score(shiftedRect(0, 0, 0, 0, SHIFT), null), EPS); - assertEquals("contains", 0.5 + (MSL?(0.1*0.1/(30*10)/2.0):0.0), sim.score(shiftedRect(0, 30, 0, 10, SHIFT), null), EPS); + assertEquals( + "contains", + 0.5 + (MSL ? (0.1 * 0.1 / (30 * 10) / 2.0) : 0.0), + sim.score(shiftedRect(0, 30, 0, 10, SHIFT), null), + EPS); - //test with line query (vertical this time) - sim = new BBoxOverlapRatioValueSource(null, true, shiftedRect(0, 0, 20, 40, SHIFT), 0.5, minSideLength); + // test with line query (vertical this time) + sim = + new BBoxOverlapRatioValueSource( + null, true, shiftedRect(0, 0, 20, 40, SHIFT), 0.5, minSideLength); assertEquals("line 50%", 0.5, sim.score(shiftedRect(0, 0, 10, 30, SHIFT), null), EPS); - assertEquals("point", 0.5 + (MSL?(0.1*0.1/(20*0.1)/2.0):0.0), sim.score(shiftedRect(0, 0, 30, 30, SHIFT), null), EPS); + assertEquals( + "point", + 0.5 + (MSL ? (0.1 * 0.1 / (20 * 0.1) / 2.0) : 0.0), + sim.score(shiftedRect(0, 0, 30, 30, SHIFT), null), + EPS); } - } private Rectangle shiftedRect(double minX, double maxX, double minY, double maxY, int xShift) { - return ctx.getShapeFactory().rect( - DistanceUtils.normLonDEG(minX + xShift), - DistanceUtils.normLonDEG(maxX + xShift), - minY, maxY); + return ctx.getShapeFactory() + .rect( + DistanceUtils.normLonDEG(minX + xShift), + DistanceUtils.normLonDEG(maxX + xShift), + minY, + maxY); } public void testAreaValueSource() throws IOException { @@ -289,12 +326,17 @@ public class TestBBoxStrategy extends RandomSpatialOpStrategyTestCase { adoc("100", ctx.getShapeFactory().rect(0, 20, 40, 80)); adoc("999", (Shape) null); commit(); - checkValueSource(new ShapeAreaValueSource(bboxStrategy.makeShapeValueSource(), ctx, false, 1.0), - new float[]{800f, 0f}, 0f); - checkValueSource(new ShapeAreaValueSource(bboxStrategy.makeShapeValueSource(), ctx, true, 1.0),//geo - new float[]{391.93f, 0f}, 0.01f); - checkValueSource(new ShapeAreaValueSource(bboxStrategy.makeShapeValueSource(), ctx, true, 2.0), - new float[]{783.86f, 0f}, 0.01f); // testing with a different multiplier + checkValueSource( + new ShapeAreaValueSource(bboxStrategy.makeShapeValueSource(), ctx, false, 1.0), + new float[] {800f, 0f}, + 0f); + checkValueSource( + new ShapeAreaValueSource(bboxStrategy.makeShapeValueSource(), ctx, true, 1.0), // geo + new float[] {391.93f, 0f}, + 0.01f); + checkValueSource( + new ShapeAreaValueSource(bboxStrategy.makeShapeValueSource(), ctx, true, 2.0), + new float[] {783.86f, 0f}, + 0.01f); // testing with a different multiplier } - } diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/composite/TestCompositeStrategy.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/composite/TestCompositeStrategy.java index 5b0b5347321..19abb86f7e4 100644 --- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/composite/TestCompositeStrategy.java +++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/composite/TestCompositeStrategy.java @@ -16,8 +16,11 @@ */ package org.apache.lucene.spatial.composite; -import java.io.IOException; +import static com.carrotsearch.randomizedtesting.RandomizedTest.randomBoolean; +import static 
com.carrotsearch.randomizedtesting.RandomizedTest.randomDouble; +import static com.carrotsearch.randomizedtesting.RandomizedTest.randomIntBetween; +import java.io.IOException; import org.apache.lucene.spatial.prefix.RandomSpatialOpStrategyTestCase; import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy; import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree; @@ -33,64 +36,62 @@ import org.locationtech.spatial4j.shape.Rectangle; import org.locationtech.spatial4j.shape.Shape; import org.locationtech.spatial4j.shape.impl.RectangleImpl; -import static com.carrotsearch.randomizedtesting.RandomizedTest.randomBoolean; -import static com.carrotsearch.randomizedtesting.RandomizedTest.randomDouble; -import static com.carrotsearch.randomizedtesting.RandomizedTest.randomIntBetween; - public class TestCompositeStrategy extends RandomSpatialOpStrategyTestCase { private SpatialPrefixTree grid; private RecursivePrefixTreeStrategy rptStrategy; private void setupQuadGrid(int maxLevels) { - //non-geospatial makes this test a little easier (in gridSnap), and using boundary values 2^X raises - // the prospect of edge conditions we want to test, plus makes for simpler numbers (no decimals). + // non-geospatial makes this test a little easier (in gridSnap), and using boundary values 2^X + // raises + // the prospect of edge conditions we want to test, plus makes for simpler numbers (no + // decimals). SpatialContextFactory factory = new SpatialContextFactory(); factory.geo = false; factory.worldBounds = new RectangleImpl(0, 256, -128, 128, null); this.ctx = factory.newSpatialContext(); - //A fairly shallow grid - if (maxLevels == -1) - maxLevels = randomIntBetween(1, 8);//max 64k cells (4^8), also 256*256 + // A fairly shallow grid + if (maxLevels == -1) maxLevels = randomIntBetween(1, 8); // max 64k cells (4^8), also 256*256 this.grid = new QuadPrefixTree(ctx, maxLevels); this.rptStrategy = newRPT(); } private void setupGeohashGrid(int maxLevels) { this.ctx = SpatialContext.GEO; - //A fairly shallow grid - if (maxLevels == -1) - maxLevels = randomIntBetween(1, 3);//max 16k cells (32^3) + // A fairly shallow grid + if (maxLevels == -1) maxLevels = randomIntBetween(1, 3); // max 16k cells (32^3) this.grid = new GeohashPrefixTree(ctx, maxLevels); this.rptStrategy = newRPT(); } protected RecursivePrefixTreeStrategy newRPT() { - final RecursivePrefixTreeStrategy rpt = new RecursivePrefixTreeStrategy(this.grid, - getClass().getSimpleName() + "_rpt"); - rpt.setDistErrPct(0.10);//not too many cells + final RecursivePrefixTreeStrategy rpt = + new RecursivePrefixTreeStrategy(this.grid, getClass().getSimpleName() + "_rpt"); + rpt.setDistErrPct(0.10); // not too many cells return rpt; } @Test public void testOperations() throws IOException { - //setup + // setup if (randomBoolean()) { setupQuadGrid(-1); } else { setupGeohashGrid(-1); } - SerializedDVStrategy serializedDVStrategy = new SerializedDVStrategy(ctx, getClass().getSimpleName() + "_sdv"); - this.strategy = new CompositeSpatialStrategy("composite_" + getClass().getSimpleName(), - rptStrategy, serializedDVStrategy); + SerializedDVStrategy serializedDVStrategy = + new SerializedDVStrategy(ctx, getClass().getSimpleName() + "_sdv"); + this.strategy = + new CompositeSpatialStrategy( + "composite_" + getClass().getSimpleName(), rptStrategy, serializedDVStrategy); - //Do it! + // Do it! 
for (SpatialOperation pred : SpatialOperation.values()) { if (pred == SpatialOperation.BBoxIntersects || pred == SpatialOperation.BBoxWithin) { continue; } - if (pred == SpatialOperation.IsDisjointTo) {//TODO + if (pred == SpatialOperation.IsDisjointTo) { // TODO continue; } testOperationRandomShapes(pred); @@ -113,15 +114,15 @@ public class TestCompositeStrategy extends RandomSpatialOpStrategyTestCase { return random().nextBoolean() ? randomCircle() : randomRectangle(); } - //TODO move up + // TODO move up private Shape randomCircle() { final Point point = randomPoint(); - //TODO pick using gaussian + // TODO pick using gaussian double radius; if (ctx.isGeo()) { radius = randomDouble() * 100; } else { - //find distance to closest edge + // find distance to closest edge final Rectangle worldBounds = ctx.getWorldBounds(); double maxRad = point.getX() - worldBounds.getMinX(); maxRad = Math.min(maxRad, worldBounds.getMaxX() - point.getX()); diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/RandomSpatialOpStrategyTestCase.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/RandomSpatialOpStrategyTestCase.java index 8aebafe877a..bd6ae9fa5b3 100644 --- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/RandomSpatialOpStrategyTestCase.java +++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/RandomSpatialOpStrategyTestCase.java @@ -16,26 +16,26 @@ */ package org.apache.lucene.spatial.prefix; +import static com.carrotsearch.randomizedtesting.RandomizedTest.randomIntBetween; + import java.io.IOException; import java.util.ArrayList; import java.util.LinkedHashSet; import java.util.List; import java.util.Set; - -import org.locationtech.spatial4j.shape.Shape; import org.apache.lucene.search.Query; import org.apache.lucene.spatial.StrategyTestCase; import org.apache.lucene.spatial.query.SpatialArgs; import org.apache.lucene.spatial.query.SpatialOperation; +import org.locationtech.spatial4j.shape.Shape; -import static com.carrotsearch.randomizedtesting.RandomizedTest.randomIntBetween; - -/** Base test harness, ideally for SpatialStrategy impls that have exact results - * (not grid approximated), hence "not fuzzy". +/** + * Base test harness, ideally for SpatialStrategy impls that have exact results (not grid + * approximated), hence "not fuzzy". 
*/ public abstract class RandomSpatialOpStrategyTestCase extends StrategyTestCase { - //Note: this is partially redundant with StrategyTestCase.runTestQuery & testOperation + // Note: this is partially redundant with StrategyTestCase.runTestQuery & testOperation protected void testOperationRandomShapes(final SpatialOperation operation) throws IOException { @@ -51,31 +51,35 @@ queryShapes.add(randomQueryShape()); } - testOperation(operation, indexedShapes, queryShapes, true/*havoc*/); + testOperation(operation, indexedShapes, queryShapes, true /*havoc*/); } - protected void testOperation(final SpatialOperation operation, - List<Shape> indexedShapes, List<Shape> queryShapes, boolean havoc) throws IOException { - //first show that when there's no data, a query will result in no results + protected void testOperation( + final SpatialOperation operation, + List<Shape> indexedShapes, + List<Shape> queryShapes, + boolean havoc) + throws IOException { + // first show that when there's no data, a query will result in no results { Query query = strategy.makeQuery(new SpatialArgs(operation, randomQueryShape())); SearchResults searchResults = executeQuery(query, 1); assertEquals(0, searchResults.numFound); } - //Main index loop: + // Main index loop: for (int i = 0; i < indexedShapes.size(); i++) { Shape shape = indexedShapes.get(i); - adoc(""+i, shape); + adoc("" + i, shape); if (havoc && random().nextInt(10) == 0) - commit();//intermediate commit, produces extra segments + commit(); // intermediate commit, produces extra segments } if (havoc) { - //delete some documents randomly + // delete some documents randomly for (int id = 0; id < indexedShapes.size(); id++) { if (random().nextInt(10) == 0) { - deleteDoc(""+id); + deleteDoc("" + id); indexedShapes.set(id, null); } } @@ -83,27 +87,25 @@ commit(); - //Main query loop: + // Main query loop: for (int queryIdx = 0; queryIdx < queryShapes.size(); queryIdx++) { final Shape queryShape = queryShapes.get(queryIdx); - if (havoc) - preQueryHavoc(); + if (havoc) preQueryHavoc(); - //Generate truth via brute force: + // Generate truth via brute force: // We ensure true-positive matches (if the predicate on the raw shapes match // then the search should find those same matches). 
- Set expectedIds = new LinkedHashSet<>();//true-positives + Set expectedIds = new LinkedHashSet<>(); // true-positives for (int id = 0; id < indexedShapes.size(); id++) { Shape indexedShape = indexedShapes.get(id); - if (indexedShape == null) - continue; + if (indexedShape == null) continue; if (operation.evaluate(indexedShape, queryShape)) { - expectedIds.add(""+id); + expectedIds.add("" + id); } } - //Search and verify results + // Search and verify results SpatialArgs args = new SpatialArgs(operation, queryShape); Query query = strategy.makeQuery(args); SearchResults got = executeQuery(query, 100); @@ -121,9 +123,16 @@ public abstract class RandomSpatialOpStrategyTestCase extends StrategyTestCase { } } - private void fail(String label, String id, List indexedShapes, Shape queryShape, SpatialOperation operation) { - fail("[" + operation + "] " + label - + " I#" + id + ":" + indexedShapes.get(Integer.parseInt(id)) + " Q:" + queryShape); + private void fail( + String label, + String id, + List indexedShapes, + Shape queryShape, + SpatialOperation operation) { + fail( + ("[" + operation + "] ") + + (label + " I#" + id + ":" + indexedShapes.get(Integer.parseInt(id))) + + (" Q:" + queryShape)); } protected void preQueryHavoc() { diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestDateNRStrategy.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestDateNRStrategy.java index 1f98d47e7bc..6af95f2936f 100644 --- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestDateNRStrategy.java +++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestDateNRStrategy.java @@ -16,10 +16,12 @@ */ package org.apache.lucene.spatial.prefix; -import java.io.IOException; -import java.util.Calendar; +import static com.carrotsearch.randomizedtesting.RandomizedTest.randomInt; +import static com.carrotsearch.randomizedtesting.RandomizedTest.randomIntBetween; import com.carrotsearch.randomizedtesting.annotations.Repeat; +import java.io.IOException; +import java.util.Calendar; import org.apache.lucene.spatial.prefix.tree.DateRangePrefixTree; import org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree.UnitNRShape; import org.apache.lucene.spatial.query.SpatialOperation; @@ -27,9 +29,6 @@ import org.junit.Before; import org.junit.Test; import org.locationtech.spatial4j.shape.Shape; -import static com.carrotsearch.randomizedtesting.RandomizedTest.randomInt; -import static com.carrotsearch.randomizedtesting.RandomizedTest.randomIntBetween; - public class TestDateNRStrategy extends RandomSpatialOpStrategyTestCase { static final int ITERATIONS = 10; @@ -43,7 +42,7 @@ public class TestDateNRStrategy extends RandomSpatialOpStrategyTestCase { super.setUp(); tree = new DateRangePrefixTree(DateRangePrefixTree.DEFAULT_CAL); strategy = new NumberRangePrefixTreeStrategy(tree, "dateRange"); - ((NumberRangePrefixTreeStrategy)strategy).setPointsOnly(randomInt() % 5 == 0); + ((NumberRangePrefixTreeStrategy) strategy).setPointsOnly(randomInt() % 5 == 0); Calendar tmpCal = tree.newCal(); int randomCalWindowField = randomIntBetween(Calendar.YEAR, Calendar.MILLISECOND); tmpCal.add(randomCalWindowField, 2_000); @@ -71,43 +70,43 @@ public class TestDateNRStrategy extends RandomSpatialOpStrategyTestCase { @Test public void testWithinSame() throws IOException { Shape shape = randomIndexedShape(); - testOperation( - shape, - SpatialOperation.IsWithin, - shape, true);//is within itself + testOperation(shape, SpatialOperation.IsWithin, shape, true); // is 
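The harness above reduces to one invariant: every surviving indexed shape whose raw (non-grid-approximated) relation to the query satisfies the operation must come back from the strategy's query. As a standalone reading of that truth loop, using only the SpatialOperation.evaluate call visible in this diff (the helper class and method names are hypothetical, not part of the patch):

import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import org.apache.lucene.spatial.query.SpatialOperation;
import org.locationtech.spatial4j.shape.Shape;

// Hypothetical helper mirroring the brute-force truth loop above: a doc id is
// expected iff the raw predicate matches; deleted docs are represented as null.
final class BruteForceTruth {
  static Set<String> expectedIds(SpatialOperation op, List<Shape> indexed, Shape query) {
    Set<String> expected = new LinkedHashSet<>(); // true-positives, in index order
    for (int id = 0; id < indexed.size(); id++) {
      Shape s = indexed.get(id);
      if (s == null) continue; // deleted during the havoc phase
      if (op.evaluate(s, query)) {
        expected.add("" + id);
      }
    }
    return expected;
  }
}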
diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestDateNRStrategy.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestDateNRStrategy.java
index 1f98d47e7bc..6af95f2936f 100644
--- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestDateNRStrategy.java
+++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestDateNRStrategy.java
@@ -16,10 +16,12 @@
  */
 package org.apache.lucene.spatial.prefix;
 
-import java.io.IOException;
-import java.util.Calendar;
+import static com.carrotsearch.randomizedtesting.RandomizedTest.randomInt;
+import static com.carrotsearch.randomizedtesting.RandomizedTest.randomIntBetween;
 
 import com.carrotsearch.randomizedtesting.annotations.Repeat;
+import java.io.IOException;
+import java.util.Calendar;
 import org.apache.lucene.spatial.prefix.tree.DateRangePrefixTree;
 import org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree.UnitNRShape;
 import org.apache.lucene.spatial.query.SpatialOperation;
@@ -27,9 +29,6 @@
 import org.junit.Before;
 import org.junit.Test;
 import org.locationtech.spatial4j.shape.Shape;
 
-import static com.carrotsearch.randomizedtesting.RandomizedTest.randomInt;
-import static com.carrotsearch.randomizedtesting.RandomizedTest.randomIntBetween;
-
 public class TestDateNRStrategy extends RandomSpatialOpStrategyTestCase {
 
   static final int ITERATIONS = 10;
@@ -43,7 +42,7 @@ public class TestDateNRStrategy extends RandomSpatialOpStrategyTestCase {
     super.setUp();
     tree = new DateRangePrefixTree(DateRangePrefixTree.DEFAULT_CAL);
     strategy = new NumberRangePrefixTreeStrategy(tree, "dateRange");
-    ((NumberRangePrefixTreeStrategy)strategy).setPointsOnly(randomInt() % 5 == 0);
+    ((NumberRangePrefixTreeStrategy) strategy).setPointsOnly(randomInt() % 5 == 0);
     Calendar tmpCal = tree.newCal();
     int randomCalWindowField = randomIntBetween(Calendar.YEAR, Calendar.MILLISECOND);
     tmpCal.add(randomCalWindowField, 2_000);
@@ -71,43 +70,43 @@ public class TestDateNRStrategy extends RandomSpatialOpStrategyTestCase {
   @Test
   public void testWithinSame() throws IOException {
     Shape shape = randomIndexedShape();
-    testOperation(
-        shape,
-        SpatialOperation.IsWithin,
-        shape, true);//is within itself
+    testOperation(shape, SpatialOperation.IsWithin, shape, true); // is within itself
   }
 
   @Test
   public void testWorld() throws IOException {
-    ((NumberRangePrefixTreeStrategy)strategy).setPointsOnly(false);
+    ((NumberRangePrefixTreeStrategy) strategy).setPointsOnly(false);
     testOperation(
-        tree.toShape(tree.newCal()),//world matches everything
+        tree.toShape(tree.newCal()), // world matches everything
         SpatialOperation.Contains,
-        tree.toShape(randomCalendar()), true);
+        tree.toShape(randomCalendar()),
+        true);
   }
 
   @Test
   public void testBugInitIterOptimization() throws Exception {
-    ((NumberRangePrefixTreeStrategy)strategy).setPointsOnly(false);
-    //bug due to fast path initIter() optimization
+    ((NumberRangePrefixTreeStrategy) strategy).setPointsOnly(false);
+    // bug due to fast path initIter() optimization
     testOperation(
         tree.parseShape("[2014-03-27T23 TO 2014-04-01T01]"),
         SpatialOperation.Intersects,
-        tree.parseShape("[2014-04 TO 2014-04-01T02]"), true);
+        tree.parseShape("[2014-04 TO 2014-04-01T02]"),
+        true);
   }
 
   @Test
   public void testLastMillionYearPeriod() throws Exception {
     testOperation(
-        tree.parseShape("+292220922-05-17T18:01:57.572"), // a year in the last million year period (>=292M)
+        tree.parseShape(
+            "+292220922-05-17T18:01:57.572"), // a year in the last million year period (>=292M)
         SpatialOperation.Intersects,
-        tree.parseShape("[1970 TO *]"), true
-    );
+        tree.parseShape("[1970 TO *]"),
+        true);
   }
 
   @Override
   protected Shape randomIndexedShape() {
-    if (((NumberRangePrefixTreeStrategy)strategy).isPointsOnly()) {
+    if (((NumberRangePrefixTreeStrategy) strategy).isPointsOnly()) {
       Calendar cal = tree.newCal();
       cal.setTimeInMillis(random().nextLong());
       return tree.toShape(cal);
@@ -145,10 +144,9 @@ public class TestDateNRStrategy extends RandomSpatialOpStrategyTestCase {
     Calendar cal = tree.newCal();
     cal.setTimeInMillis(random().nextLong() % randomCalWindowMs);
     try {
-      tree.clearFieldsAfter(cal, random().nextInt(Calendar.FIELD_COUNT+1)-1);
+      tree.clearFieldsAfter(cal, random().nextInt(Calendar.FIELD_COUNT + 1) - 1);
     } catch (AssertionError e) {
-      if (!e.getMessage().equals("Calendar underflow"))
-        throw e;
+      if (!e.getMessage().equals("Calendar underflow")) throw e;
     }
     return cal;
   }
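For reference, the randomCalendar() idiom this test shares with TestNumberRangeFacets can be read as the sketch below; windowMs stands in for the test's randomCalWindowMs field, and the class and parameter names are hypothetical, using only the DateRangePrefixTree calls that appear in this diff:

import java.util.Calendar;
import java.util.Random;
import org.apache.lucene.spatial.prefix.tree.DateRangePrefixTree;

// Hypothetical standalone rendering of randomCalendar() above: pick an instant
// inside the test window, then clear trailing calendar fields at a random level
// so generated shapes land on varied tree levels.
final class RandomCalendarSketch {
  static Calendar randomCalendar(DateRangePrefixTree tree, Random random, long windowMs) {
    Calendar cal = tree.newCal();
    cal.setTimeInMillis(random.nextLong() % windowMs);
    try {
      tree.clearFieldsAfter(cal, random.nextInt(Calendar.FIELD_COUNT + 1) - 1);
    } catch (AssertionError e) {
      if (!e.getMessage().equals("Calendar underflow")) throw e; // tolerated edge case
    }
    return cal;
  }
}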
diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestHeatmapFacetCounter.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestHeatmapFacetCounter.java
index 10805465b2c..6a801dd330c 100644
--- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestHeatmapFacetCounter.java
+++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestHeatmapFacetCounter.java
@@ -16,11 +16,13 @@
  */
 package org.apache.lucene.spatial.prefix;
 
+import static com.carrotsearch.randomizedtesting.RandomizedTest.atMost;
+import static com.carrotsearch.randomizedtesting.RandomizedTest.randomIntBetween;
+
+import com.carrotsearch.randomizedtesting.annotations.Repeat;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
-
-import com.carrotsearch.randomizedtesting.annotations.Repeat;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TotalHitCountCollector;
 import org.apache.lucene.spatial.StrategyTestCase;
@@ -41,9 +43,6 @@
 import org.locationtech.spatial4j.shape.ShapeFactory;
 import org.locationtech.spatial4j.shape.SpatialRelation;
 import org.locationtech.spatial4j.shape.impl.RectangleImpl;
 
-import static com.carrotsearch.randomizedtesting.RandomizedTest.atMost;
-import static com.carrotsearch.randomizedtesting.RandomizedTest.randomIntBetween;
-
 public class TestHeatmapFacetCounter extends StrategyTestCase {
 
   SpatialPrefixTree grid;
@@ -67,21 +66,23 @@ public class TestHeatmapFacetCounter extends StrategyTestCase {
 
   @After
   public void after() {
-    log.info("Validated " + cellsValidated + " cells, " + cellValidatedNonZero + " non-zero"); // nowarn
+    log.info(
+        "Validated " + cellsValidated + " cells, " + cellValidatedNonZero + " non-zero"); // nowarn
   }
 
   @Test
   public void testStatic() throws IOException {
-    //Some specific tests (static, not random).
-    adoc("0", shapeFactory.rect(179.8, -170, -90, -80));//barely crosses equator
-    adoc("1", shapeFactory.pointXY(-180, -85));//a pt within the above rect
-    adoc("2", shapeFactory.pointXY(172, -85));//a pt to left of rect
+    // Some specific tests (static, not random).
+    adoc("0", shapeFactory.rect(179.8, -170, -90, -80)); // barely crosses equator
+    adoc("1", shapeFactory.pointXY(-180, -85)); // a pt within the above rect
+    adoc("2", shapeFactory.pointXY(172, -85)); // a pt to left of rect
     commit();
 
     validateHeatmapResultLoop(shapeFactory.rect(+170, +180, -90, -85), 1, 100);
     validateHeatmapResultLoop(shapeFactory.rect(-180, -160, -89, -50), 1, 100);
-    validateHeatmapResultLoop(shapeFactory.rect(179, 179, -89, -50), 1, 100);//line
-    // We could test anything and everything at this point... I prefer we leave that to random testing and then
+    validateHeatmapResultLoop(shapeFactory.rect(179, 179, -89, -50), 1, 100); // line
+    // We could test anything and everything at this point... I prefer we leave that to random
+    // testing and then
     // add specific tests if we find a bug.
   }
 
@@ -91,12 +92,13 @@ public class TestHeatmapFacetCounter extends StrategyTestCase {
     strategy = new RecursivePrefixTreeStrategy(grid, getTestClass().getSimpleName());
     adoc("0", shapeFactory.rect(-102, -83, 43, 52));
     commit();
-    validateHeatmapResultLoop(shapeFactory.rect(179, -179, 62, 63), 2, 100);// HM crosses dateline
+    validateHeatmapResultLoop(shapeFactory.rect(179, -179, 62, 63), 2, 100); // HM crosses dateline
   }
 
   @Test
   public void testQueryCircle() throws IOException {
-    //overwrite setUp; non-geo bounds is more straight-forward; otherwise 88,88 would actually be practically north,
+    // overwrite setUp; non-geo bounds is more straight-forward; otherwise 88,88 would actually be
+    // practically north,
     final SpatialContextFactory spatialContextFactory = new SpatialContextFactory();
     spatialContextFactory.geo = false;
     spatialContextFactory.worldBounds = new RectangleImpl(-90, 90, -90, 90, null);
@@ -106,19 +108,25 @@ public class TestHeatmapFacetCounter extends StrategyTestCase {
     grid = new QuadPrefixTree(ctx, LEVEL);
     strategy = new RecursivePrefixTreeStrategy(grid, getTestClass().getSimpleName());
     Circle circle = shapeFactory.circle(0, 0, 89);
-    adoc("0", shapeFactory.pointXY(88, 88));//top-right, inside bbox of circle but not the circle
-    adoc("1", shapeFactory.pointXY(0, 0));//clearly inside; dead center in fact
+    adoc("0", shapeFactory.pointXY(88, 88)); // top-right, inside bbox of circle but not the circle
+    adoc("1", shapeFactory.pointXY(0, 0)); // clearly inside; dead center in fact
     commit();
-    final HeatmapFacetCounter.Heatmap heatmap = HeatmapFacetCounter.calcFacets(
-        (PrefixTreeStrategy) strategy, indexSearcher.getTopReaderContext(), null,
-        circle, LEVEL, 1000);
-    //assert that only one point is found, not 2
+    final HeatmapFacetCounter.Heatmap heatmap =
+        HeatmapFacetCounter.calcFacets(
+            (PrefixTreeStrategy) strategy,
+            indexSearcher.getTopReaderContext(),
+            null,
+            circle,
+            LEVEL,
+            1000);
+    // assert that only one point is found, not 2
     boolean foundOne = false;
     for (int count : heatmap.counts) {
       switch (count) {
-        case 0: break;
+        case 0:
+          break;
         case 1:
-          assertFalse(foundOne);//this is the first
+          assertFalse(foundOne); // this is the first
           foundOne = true;
           break;
         default:
@@ -128,19 +136,27 @@ public class TestHeatmapFacetCounter extends StrategyTestCase {
     assertTrue(foundOne);
   }
 
-  /** Recursively facet & validate at higher resolutions until we've seen enough. We assume there are
-   * some non-zero cells. */
-  private void validateHeatmapResultLoop(Rectangle inputRange, int facetLevel, int cellCountRecursThreshold)
-      throws IOException {
+  /**
+   * Recursively facet & validate at higher resolutions until we've seen enough. We assume there are
+   * some non-zero cells.
+   */
+  private void validateHeatmapResultLoop(
+      Rectangle inputRange, int facetLevel, int cellCountRecursThreshold) throws IOException {
     if (facetLevel > grid.getMaxLevels()) {
       return;
     }
     final int maxCells = 10_000;
-    final HeatmapFacetCounter.Heatmap heatmap = HeatmapFacetCounter.calcFacets(
-        (PrefixTreeStrategy) strategy, indexSearcher.getTopReaderContext(), null, inputRange, facetLevel, maxCells);
+    final HeatmapFacetCounter.Heatmap heatmap =
+        HeatmapFacetCounter.calcFacets(
+            (PrefixTreeStrategy) strategy,
+            indexSearcher.getTopReaderContext(),
+            null,
+            inputRange,
+            facetLevel,
+            maxCells);
     int preNonZero = cellValidatedNonZero;
     validateHeatmapResult(inputRange, facetLevel, heatmap);
-    assert cellValidatedNonZero - preNonZero > 0;//we validated more non-zero cells
+    assert cellValidatedNonZero - preNonZero > 0; // we validated more non-zero cells
     if (heatmap.counts.length < cellCountRecursThreshold) {
       validateHeatmapResultLoop(inputRange, facetLevel + 1, cellCountRecursThreshold);
     }
@@ -149,7 +165,8 @@ public class TestHeatmapFacetCounter extends StrategyTestCase {
   @Test
   @Repeat(iterations = 20)
   public void testRandom() throws IOException {
-    // Tests using random index shapes & query shapes. This has found all sorts of edge case bugs (e.g. dateline,
+    // Tests using random index shapes & query shapes. This has found all sorts of edge case bugs
+    // (e.g. dateline,
     // cell border, overflow(?)).
 
     final int numIndexedShapes = 1 + atMost(9);
@@ -158,15 +175,16 @@ public class TestHeatmapFacetCounter extends StrategyTestCase {
       indexedShapes.add(randomIndexedShape());
     }
 
-    //Main index loop:
+    // Main index loop:
     for (int i = 0; i < indexedShapes.size(); i++) {
       Shape shape = indexedShapes.get(i);
       adoc("" + i, shape);
 
-      if (random().nextInt(10) == 0)
-        commit();//intermediate commit, produces extra segments
+      if (random().nextInt(10) == 0) {
+        commit(); // intermediate commit, produces extra segments
+      }
     }
 
-    //delete some documents randomly
+    // delete some documents randomly
     for (int id = 0; id < indexedShapes.size(); id++) {
       if (random().nextInt(10) == 0) {
         deleteDoc("" + id);
@@ -182,10 +200,12 @@ public class TestHeatmapFacetCounter extends StrategyTestCase {
     // and once with dateline wrap
     if (rect.getWidth() > 0) {
       double shift = random().nextDouble() % rect.getWidth();
-      queryHeatmapRecursive(shapeFactory.rect(
+      queryHeatmapRecursive(
+          shapeFactory.rect(
               DistanceUtils.normLonDEG(rect.getMinX() - shift),
               DistanceUtils.normLonDEG(rect.getMaxX() - shift),
-              rect.getMinY(), rect.getMaxY()),
+              rect.getMinY(),
+              rect.getMaxY()),
           1);
     }
   }
@@ -193,15 +213,24 @@
 
   /** Build heatmap, validate results, then descend recursively to another facet level. */
   private boolean queryHeatmapRecursive(Rectangle inputRange, int facetLevel) throws IOException {
     if (!inputRange.hasArea()) {
-      // Don't test line inputs. It's not that we don't support it but it is more challenging to test if per-chance it
-      // coincides with a grid line due due to edge overlap issue for some grid implementations (geo & quad).
+      // Don't test line inputs. It's not that we don't support it but it is more challenging to
+      // test if per-chance it
+      // coincides with a grid line due to edge overlap issue for some grid implementations (geo
+      // & quad).
       return false;
     }
-    Bits filter = null; //FYI testing filtering of underlying PrefixTreeFacetCounter is done in another test
-    //Calculate facets
+    Bits filter =
+        null; // FYI testing filtering of underlying PrefixTreeFacetCounter is done in another test
+    // Calculate facets
     final int maxCells = 10_000;
-    final HeatmapFacetCounter.Heatmap heatmap = HeatmapFacetCounter.calcFacets(
-        (PrefixTreeStrategy) strategy, indexSearcher.getTopReaderContext(), filter, inputRange, facetLevel, maxCells);
+    final HeatmapFacetCounter.Heatmap heatmap =
+        HeatmapFacetCounter.calcFacets(
+            (PrefixTreeStrategy) strategy,
+            indexSearcher.getTopReaderContext(),
+            filter,
+            inputRange,
+            facetLevel,
+            maxCells);
 
     validateHeatmapResult(inputRange, facetLevel, heatmap);
 
@@ -213,21 +242,26 @@ public class TestHeatmapFacetCounter extends StrategyTestCase {
       }
     }
 
-    //Test again recursively to higher facetLevel (more detailed cells)
-    if (foundNonZeroCount && cellsValidated <= 500 && facetLevel != grid.getMaxLevels() && inputRange.hasArea()) {
-      for (int i = 0; i < 5; i++) {//try multiple times until we find non-zero counts
+    // Test again recursively to higher facetLevel (more detailed cells)
+    if (foundNonZeroCount
+        && cellsValidated <= 500
+        && facetLevel != grid.getMaxLevels()
+        && inputRange.hasArea()) {
+      for (int i = 0; i < 5; i++) { // try multiple times until we find non-zero counts
        if (queryHeatmapRecursive(randomRectangle(inputRange), facetLevel + 1)) {
-          break;//we found data here so we needn't try again
+          break; // we found data here so we needn't try again
        }
      }
    }
    return foundNonZeroCount;
  }
 
-  private void validateHeatmapResult(Rectangle inputRange, int facetLevel, HeatmapFacetCounter.Heatmap heatmap)
+  private void validateHeatmapResult(
+      Rectangle inputRange, int facetLevel, HeatmapFacetCounter.Heatmap heatmap)
       throws IOException {
     final Rectangle heatRect = heatmap.region;
-    assertTrue(heatRect.relate(inputRange) == SpatialRelation.CONTAINS || heatRect.equals(inputRange));
+    assertTrue(
+        heatRect.relate(inputRange) == SpatialRelation.CONTAINS || heatRect.equals(inputRange));
     final double cellWidth = heatRect.getWidth() / heatmap.columns;
     final double cellHeight = heatRect.getHeight() / heatmap.rows;
     for (int c = 0; c < heatmap.columns; c++) {
@@ -235,7 +269,7 @@ public class TestHeatmapFacetCounter extends StrategyTestCase {
         final int facetCount = heatmap.getCount(c, r);
         double x = DistanceUtils.normLonDEG(heatRect.getMinX() + c * cellWidth + cellWidth / 2);
         double y = DistanceUtils.normLatDEG(heatRect.getMinY() + r * cellHeight + cellHeight / 2);
-        Point pt =  shapeFactory.pointXY(x, y);
+        Point pt = shapeFactory.pointXY(x, y);
         assertEquals(countMatchingDocsAtLevel(pt, facetLevel), facetCount);
       }
     }
@@ -244,8 +278,9 @@ public class TestHeatmapFacetCounter extends StrategyTestCase {
 
   private int countMatchingDocsAtLevel(Point pt, int facetLevel) throws IOException {
     // we use IntersectsPrefixTreeFilter directly so that we can specify the level to go to exactly.
     RecursivePrefixTreeStrategy strategy = (RecursivePrefixTreeStrategy) this.strategy;
-    Query filter = new IntersectsPrefixTreeQuery(
-        pt, strategy.getFieldName(), grid, facetLevel, grid.getMaxLevels());
+    Query filter =
+        new IntersectsPrefixTreeQuery(
+            pt, strategy.getFieldName(), grid, facetLevel, grid.getMaxLevels());
     final TotalHitCountCollector collector = new TotalHitCountCollector();
     indexSearcher.search(filter, collector);
     cellsValidated++;
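The validation above probes each heatmap cell at its center point. A minimal sketch of that center arithmetic, using only the Heatmap members visible in this hunk (region, columns, rows, getCount); the class name is hypothetical and the printing is illustrative, not part of the test:

import org.apache.lucene.spatial.prefix.HeatmapFacetCounter;
import org.locationtech.spatial4j.distance.DistanceUtils;
import org.locationtech.spatial4j.shape.Rectangle;

// Cell (c, r) is centered half a cell in from its minimum corner, normalized
// back into degree space exactly as validateHeatmapResult does above.
final class HeatmapCells {
  static void printCellCenters(HeatmapFacetCounter.Heatmap heatmap) {
    Rectangle region = heatmap.region;
    double cellWidth = region.getWidth() / heatmap.columns;
    double cellHeight = region.getHeight() / heatmap.rows;
    for (int c = 0; c < heatmap.columns; c++) {
      for (int r = 0; r < heatmap.rows; r++) {
        double x = DistanceUtils.normLonDEG(region.getMinX() + c * cellWidth + cellWidth / 2);
        double y = DistanceUtils.normLatDEG(region.getMinY() + r * cellHeight + cellHeight / 2);
        System.out.println(
            "cell " + c + "," + r + " center=(" + x + "," + y + ") count=" + heatmap.getCount(c, r));
      }
    }
  }
}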
diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestJtsPolygon.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestJtsPolygon.java
index dc3c8e53b10..6cdb3935895 100644
--- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestJtsPolygon.java
+++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestJtsPolygon.java
@@ -16,9 +16,8 @@
  */
 package org.apache.lucene.spatial.prefix;
 
-import org.locationtech.spatial4j.context.SpatialContextFactory;
-import org.locationtech.spatial4j.shape.Point;
-import org.locationtech.spatial4j.shape.Shape;
+import java.text.ParseException;
+import java.util.HashMap;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.Field.Store;
@@ -33,43 +32,47 @@
 import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree;
 import org.apache.lucene.spatial.query.SpatialArgs;
 import org.apache.lucene.spatial.query.SpatialOperation;
 import org.junit.Test;
-
-import java.text.ParseException;
-import java.util.HashMap;
+import org.locationtech.spatial4j.context.SpatialContextFactory;
+import org.locationtech.spatial4j.shape.Point;
+import org.locationtech.spatial4j.shape.Shape;
 
 public class TestJtsPolygon extends StrategyTestCase {
 
-  private static final double LUCENE_4464_distErrPct = SpatialArgs.DEFAULT_DISTERRPCT;//DEFAULT 2.5%
+  private static final double LUCENE_4464_distErrPct =
+      SpatialArgs.DEFAULT_DISTERRPCT; // DEFAULT 2.5%
 
   public TestJtsPolygon() {
     try {
       HashMap<String, String> args = new HashMap<>();
-      args.put("spatialContextFactory",
+      args.put(
+          "spatialContextFactory",
           "org.locationtech.spatial4j.context.jts.JtsSpatialContextFactory");
       ctx = SpatialContextFactory.makeSpatialContext(args, getClass().getClassLoader());
     } catch (NoClassDefFoundError e) {
-      assumeTrue("This test requires JTS jar: "+e, false);
+      assumeTrue("This test requires JTS jar: " + e, false);
     }
 
-    GeohashPrefixTree grid = new GeohashPrefixTree(ctx, 11);//< 1 meter == 11 maxLevels
+    GeohashPrefixTree grid = new GeohashPrefixTree(ctx, 11); // < 1 meter == 11 maxLevels
     this.strategy = new RecursivePrefixTreeStrategy(grid, getClass().getSimpleName());
-    ((RecursivePrefixTreeStrategy)this.strategy).setDistErrPct(LUCENE_4464_distErrPct);//1% radius (small!)
+    ((RecursivePrefixTreeStrategy) this.strategy)
+        .setDistErrPct(LUCENE_4464_distErrPct); // 1% radius (small!)
   }
 
   @Test
   /** LUCENE-4464 */
   public void testCloseButNoMatch() throws Exception {
     getAddAndVerifyIndexedDocuments("LUCENE-4464.txt");
-    SpatialArgs args = q(
-        "POLYGON((-93.18100824442227 45.25676372469945," +
-            "-93.23182001200654 45.21421290799412," +
-            "-93.16315546122038 45.23742639412364," +
-            "-93.18100824442227 45.25676372469945))",
-        LUCENE_4464_distErrPct);
+    SpatialArgs args =
+        q(
+            "POLYGON((-93.18100824442227 45.25676372469945,"
+                + "-93.23182001200654 45.21421290799412,"
+                + "-93.16315546122038 45.23742639412364,"
+                + "-93.18100824442227 45.25676372469945))",
+            LUCENE_4464_distErrPct);
     SearchResults got = executeQuery(strategy.makeQuery(args), 100);
     assertEquals(1, got.numFound);
     assertEquals("poly2", got.results.get(0).document.get("id"));
-    //did not find poly 1 !
+    // did not find poly 1 !
   }
 
   private SpatialArgs q(String shapeStr, double distErrPct) throws ParseException {
@@ -80,13 +83,15 @@ public class TestJtsPolygon extends StrategyTestCase {
   }
 
   /**
-   * A PrefixTree pruning optimization gone bad.
-   * See LUCENE-4770.
+   * A PrefixTree pruning optimization gone bad. See LUCENE-4770.
    */
   @Test
   public void testBadPrefixTreePrune() throws Exception {
-    Shape area = ctx.readShapeFromWkt("POLYGON((-122.83 48.57, -122.77 48.56, -122.79 48.53, -122.83 48.57))");
+    Shape area =
+        ctx.readShapeFromWkt(
+            "POLYGON((-122.83 48.57, -122.77 48.56, -122.79 48.53, -122.83 48.57))");
 
     SpatialPrefixTree trie = new QuadPrefixTree(ctx, 12);
     TermQueryPrefixTreeStrategy strategy = new TermQueryPrefixTreeStrategy(trie, "geo");
@@ -102,7 +107,10 @@ public class TestJtsPolygon extends StrategyTestCase {
     Point upperleft = ctx.getShapeFactory().pointXY(-122.88, 48.54);
     Point lowerright = ctx.getShapeFactory().pointXY(-122.82, 48.62);
 
-    Query query = strategy.makeQuery(new SpatialArgs(SpatialOperation.Intersects, ctx.getShapeFactory().rect(upperleft, lowerright)));
+    Query query =
+        strategy.makeQuery(
+            new SpatialArgs(
+                SpatialOperation.Intersects, ctx.getShapeFactory().rect(upperleft, lowerright)));
 
     commit();
 
     TopDocs search = indexSearcher.search(query, 10);
@@ -113,5 +121,4 @@ public class TestJtsPolygon extends StrategyTestCase {
     assertEquals(1, search.totalHits.value);
   }
-
 }
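The q(...) helper whose body is elided above builds a query from WKT roughly along these lines. This is a hedged sketch assembled only from calls that appear in this file's diff (readShapeFromWkt, SpatialArgs, setDistErrPct, makeQuery); the class and method names are illustrative, not the patch's code:

import org.apache.lucene.search.Query;
import org.apache.lucene.spatial.SpatialStrategy;
import org.apache.lucene.spatial.query.SpatialArgs;
import org.apache.lucene.spatial.query.SpatialOperation;
import org.locationtech.spatial4j.context.SpatialContext;
import org.locationtech.spatial4j.shape.Shape;

// Read a WKT polygon, wrap it in Intersects SpatialArgs with a distance-error
// tolerance, and let the strategy turn it into a Lucene Query.
final class WktQuerySketch {
  static Query intersectsQuery(
      SpatialContext ctx, SpatialStrategy strategy, String wkt, double distErrPct)
      throws java.text.ParseException {
    Shape shape = ctx.readShapeFromWkt(wkt);
    SpatialArgs args = new SpatialArgs(SpatialOperation.Intersects, shape);
    args.setDistErrPct(distErrPct); // e.g. SpatialArgs.DEFAULT_DISTERRPCT (2.5%)
    return strategy.makeQuery(args);
  }
}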
diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestNumberRangeFacets.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestNumberRangeFacets.java
index 6de4b8228a1..1e9ca8d7794 100644
--- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestNumberRangeFacets.java
+++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestNumberRangeFacets.java
@@ -16,13 +16,14 @@
  */
 package org.apache.lucene.spatial.prefix;
 
+import static com.carrotsearch.randomizedtesting.RandomizedTest.randomIntBetween;
+
+import com.carrotsearch.randomizedtesting.annotations.Repeat;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Calendar;
 import java.util.Collections;
 import java.util.List;
-
-import com.carrotsearch.randomizedtesting.annotations.Repeat;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreMode;
@@ -42,8 +43,6 @@
 import org.junit.Before;
 import org.junit.Test;
 import org.locationtech.spatial4j.shape.Shape;
 
-import static com.carrotsearch.randomizedtesting.RandomizedTest.randomIntBetween;
-
 public class TestNumberRangeFacets extends StrategyTestCase {
 
   DateRangePrefixTree tree;
@@ -57,7 +56,8 @@ public class TestNumberRangeFacets extends StrategyTestCase {
     tree = new DateRangePrefixTree(DateRangePrefixTree.DEFAULT_CAL);
     strategy = new NumberRangePrefixTreeStrategy(tree, "dateRange");
     Calendar tmpCal = tree.newCal();
-    randomCalWindowField = randomIntBetween(1, Calendar.ZONE_OFFSET - 1);//we're not allowed to add zone offset
+    randomCalWindowField =
+        randomIntBetween(1, Calendar.ZONE_OFFSET - 1); // we're not allowed to add zone offset
     tmpCal.add(randomCalWindowField, 2_000);
     randomCalWindowMs = Math.max(2000L, tmpCal.getTimeInMillis());
   }
@@ -65,38 +65,41 @@ public class TestNumberRangeFacets extends StrategyTestCase {
   @Repeat(iterations = 20)
   @Test
   public void test() throws IOException {
-    //generate test data
+    // generate test data
     List<Shape> indexedShapes = new ArrayList<>();
     final int numIndexedShapes = random().nextInt(15);
     for (int i = 0; i < numIndexedShapes; i++) {
       indexedShapes.add(randomShape());
     }
 
-    //Main index loop:
+    // Main index loop:
     for (int i = 0; i < indexedShapes.size(); i++) {
       Shape shape = indexedShapes.get(i);
-      adoc(""+i, shape);
+      adoc("" + i, shape);
 
-      if (random().nextInt(10) == 0)
-        commit();//intermediate commit, produces extra segments
+      if (random().nextInt(10) == 0) {
+        commit(); // intermediate commit, produces extra segments
+      }
     }
 
-    //delete some documents randomly
+    // delete some documents randomly
     for (int id = 0; id < indexedShapes.size(); id++) {
       if (random().nextInt(10) == 0) {
-        deleteDoc(""+id);
+        deleteDoc("" + id);
         indexedShapes.set(id, null);
       }
     }
 
     commit();
 
-    //Main query loop:
+    // Main query loop:
     for (int queryIdx = 0; queryIdx < 10; queryIdx++) {
       preQueryHavoc();
 
-      // We need to have a facet range window to do the facets between (a start time & end time). We randomly
-      // pick a date, decide the level we want to facet on, and then pick a right end time that is up to 2 thousand
+      // We need to have a facet range window to do the facets between (a start time & end time). We
+      // randomly
+      // pick a date, decide the level we want to facet on, and then pick a right end time that is
+      // up to 2 thousand
       // values later.
       int calFieldFacet = randomCalWindowField - 1;
       if (calFieldFacet > 1 && rarely()) {
@@ -108,15 +111,16 @@ public class TestNumberRangeFacets extends StrategyTestCase {
       rightCal.add(calFieldFacet, randomIntBetween(0, 2000));
       // Pick facet detail level based on cal field.
       int detailLevel = tree.getTreeLevelForCalendarField(calFieldFacet);
-      if (detailLevel < 0) {//no exact match
+      if (detailLevel < 0) { // no exact match
         detailLevel = -1 * detailLevel;
       }
 
-      //Randomly pick a filter/acceptDocs
+      // Randomly pick a filter/acceptDocs
       Bits topAcceptDocs = null;
       List<Integer> acceptFieldIds = new ArrayList<>();
       if (usually()) {
-        //get all possible IDs into a list, random shuffle it, then randomly choose how many of the first we use to
+        // get all possible IDs into a list, random shuffle it, then randomly choose how many of the
+        // first we use to
         // replace the list.
         for (int i = 0; i < indexedShapes.size(); i++) {
           if (indexedShapes.get(i) == null) { // we deleted this one
@@ -136,24 +140,28 @@ public class TestNumberRangeFacets extends StrategyTestCase {
         }
       }
 
-      //Lets do it!
-      NumberRangePrefixTree.NRShape facetRange = tree.toRangeShape(tree.toShape(leftCal), tree.toShape(rightCal));
-      Facets facets = ((NumberRangePrefixTreeStrategy) strategy)
-          .calcFacets(indexSearcher.getTopReaderContext(), topAcceptDocs, facetRange, detailLevel);
+      // Let's do it!
+      NumberRangePrefixTree.NRShape facetRange =
+          tree.toRangeShape(tree.toShape(leftCal), tree.toShape(rightCal));
+      Facets facets =
+          ((NumberRangePrefixTreeStrategy) strategy)
+              .calcFacets(
+                  indexSearcher.getTopReaderContext(), topAcceptDocs, facetRange, detailLevel);
 
-      //System.out.println("Q: " + queryIdx + " " + facets);
+      // System.out.println("Q: " + queryIdx + " " + facets);
 
-      //Verify results. We do it by looping over indexed shapes and reducing the facet counts.
+      // Verify results. We do it by looping over indexed shapes and reducing the facet counts.
       Shape facetShapeRounded = facetRange.roundToLevel(detailLevel);
       for (int indexedShapeId = 0; indexedShapeId < indexedShapes.size(); indexedShapeId++) {
         if (topAcceptDocs != null && !acceptFieldIds.contains(indexedShapeId)) {
-          continue;// this doc was filtered out via acceptDocs
+          continue; // this doc was filtered out via acceptDocs
         }
         Shape indexedShape = indexedShapes.get(indexedShapeId);
-        if (indexedShape == null) {//was deleted
+        if (indexedShape == null) { // was deleted
           continue;
         }
-        Shape indexedShapeRounded = ((NumberRangePrefixTree.NRShape) indexedShape).roundToLevel(detailLevel);
+        Shape indexedShapeRounded =
+            ((NumberRangePrefixTree.NRShape) indexedShape).roundToLevel(detailLevel);
         if (!indexedShapeRounded.relate(facetShapeRounded).intersects()) { // no intersection at all
           continue;
         }
@@ -162,15 +170,15 @@ public class TestNumberRangeFacets extends StrategyTestCase {
         while (cellIterator.hasNext()) {
           Cell cell = cellIterator.next();
           if (!cell.getShape().relate(facetShapeRounded).intersects()) {
-            cellIterator.remove();//no intersection; prune
+            cellIterator.remove(); // no intersection; prune
             continue;
           }
           assert cell.getLevel() <= detailLevel;
 
           if (cell.getLevel() == detailLevel) {
-            //count it
+            // count it
             UnitNRShape shape = (UnitNRShape) cell.getShape();
-            final UnitNRShape parentShape = shape.getShapeAtLevel(detailLevel - 1);//get parent
+            final UnitNRShape parentShape = shape.getShapeAtLevel(detailLevel - 1); // get parent
             final Facets.FacetParentVal facetParentVal = facets.parents.get(parentShape);
             assertNotNull(facetParentVal);
             int index = shape.getValAtLevel(shape.getLevel());
@@ -179,13 +187,13 @@ public class TestNumberRangeFacets extends StrategyTestCase {
             facetParentVal.childCounts[index]--;
 
           } else if (cell.isLeaf()) {
-            //count it, and remove/prune.
+            // count it, and remove/prune.
             if (cell.getLevel() < detailLevel - 1) {
               assert facets.topLeaves > 0;
               facets.topLeaves--;
             } else {
               UnitNRShape shape = (UnitNRShape) cell.getShape();
-              final UnitNRShape parentShape = shape.getShapeAtLevel(detailLevel - 1);//get parent
+              final UnitNRShape parentShape = shape.getShapeAtLevel(detailLevel - 1); // get parent
               final Facets.FacetParentVal facetParentVal = facets.parents.get(parentShape);
               assertNotNull(facetParentVal);
               assert facetParentVal.parentLeaves > 0;
@@ -206,15 +214,16 @@ public class TestNumberRangeFacets extends StrategyTestCase {
             }
           }
         }
-
       }
     }
 
   private Bits searchForDocBits(Query query) throws IOException {
     FixedBitSet bitSet = new FixedBitSet(indexSearcher.getIndexReader().maxDoc());
-    indexSearcher.search(query,
+    indexSearcher.search(
+        query,
         new SimpleCollector() {
           int leafDocBase;
+
           @Override
           public void collect(int doc) throws IOException {
             bitSet.set(leafDocBase + doc);
@@ -265,10 +274,9 @@ public class TestNumberRangeFacets extends StrategyTestCase {
     Calendar cal = tree.newCal();
     cal.setTimeInMillis(random().nextLong() % randomCalWindowMs);
     try {
-      tree.clearFieldsAfter(cal, random().nextInt(Calendar.FIELD_COUNT+1)-1);
+      tree.clearFieldsAfter(cal, random().nextInt(Calendar.FIELD_COUNT + 1) - 1);
     } catch (AssertionError e) {
-      if (!e.getMessage().equals("Calendar underflow"))
-        throw e;
+      if (!e.getMessage().equals("Calendar underflow")) throw e;
     }
     return cal;
   }
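searchForDocBits(...) above shows only part of its anonymous collector. A self-contained sketch of the same idiom follows; the elided overrides (doSetNextReader, scoreMode) are filled in here as assumptions about what a working version needs, not as the patch's actual code:

import java.io.IOException;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.SimpleCollector;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.FixedBitSet;

// Collect every hit into a FixedBitSet keyed by global doc id
// (segment docBase + segment-local doc id).
final class DocBitsSketch {
  static Bits searchForDocBits(IndexSearcher searcher, Query query) throws IOException {
    FixedBitSet bitSet = new FixedBitSet(searcher.getIndexReader().maxDoc());
    searcher.search(
        query,
        new SimpleCollector() {
          int leafDocBase;

          @Override
          public void collect(int doc) {
            bitSet.set(leafDocBase + doc);
          }

          @Override
          protected void doSetNextReader(LeafReaderContext context) {
            leafDocBase = context.docBase; // assumption: mirrors the elided override
          }

          @Override
          public ScoreMode scoreMode() {
            return ScoreMode.COMPLETE_NO_SCORES; // scores aren't needed for a bitset
          }
        });
    return bitSet;
  }
}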
diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestRandomSpatialOpFuzzyPrefixTree.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestRandomSpatialOpFuzzyPrefixTree.java
index c6492e975ce..1556370f599 100644
--- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestRandomSpatialOpFuzzyPrefixTree.java
+++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestRandomSpatialOpFuzzyPrefixTree.java
@@ -16,6 +16,10 @@
  */
 package org.apache.lucene.spatial.prefix;
 
+import static com.carrotsearch.randomizedtesting.RandomizedTest.*;
+import static org.locationtech.spatial4j.shape.SpatialRelation.*;
+
+import com.carrotsearch.randomizedtesting.annotations.Repeat;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -27,8 +31,6 @@
 import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-
-import com.carrotsearch.randomizedtesting.annotations.Repeat;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.StoredField;
@@ -53,16 +55,10 @@
 import org.locationtech.spatial4j.shape.ShapeCollection;
 import org.locationtech.spatial4j.shape.SpatialRelation;
 import org.locationtech.spatial4j.shape.impl.RectangleImpl;
 
-import static com.carrotsearch.randomizedtesting.RandomizedTest.randomBoolean;
-import static com.carrotsearch.randomizedtesting.RandomizedTest.randomInt;
-import static com.carrotsearch.randomizedtesting.RandomizedTest.randomIntBetween;
-import static org.locationtech.spatial4j.shape.SpatialRelation.CONTAINS;
-import static org.locationtech.spatial4j.shape.SpatialRelation.DISJOINT;
-import static org.locationtech.spatial4j.shape.SpatialRelation.INTERSECTS;
-import static org.locationtech.spatial4j.shape.SpatialRelation.WITHIN;
-
-/** Randomized PrefixTree test that considers the fuzziness of the
- * results introduced by grid approximation. */
+/**
+ * Randomized PrefixTree test that considers the fuzziness of the results introduced by grid
+ * approximation.
+ */
 public class TestRandomSpatialOpFuzzyPrefixTree extends StrategyTestCase {
 
   static final int ITERATIONS = 10;
@@ -71,14 +67,12 @@ public class TestRandomSpatialOpFuzzyPrefixTree extends StrategyTestCase {
   private SpatialContext ctx2D;
 
   public void setupGrid(int maxLevels) throws IOException {
-    if (randomBoolean())
-      setupQuadGrid(maxLevels, randomBoolean());
-    else
-      setupGeohashGrid(maxLevels);
+    if (randomBoolean()) setupQuadGrid(maxLevels, randomBoolean());
+    else setupGeohashGrid(maxLevels);
     setupCtx2D(ctx);
 
     // set prune independently on strategy & grid randomly; should work
-    ((RecursivePrefixTreeStrategy)strategy).setPruneLeafyBranches(randomBoolean());
+    ((RecursivePrefixTreeStrategy) strategy).setPruneLeafyBranches(randomBoolean());
     if (this.grid instanceof PackedQuadPrefixTree) {
       ((PackedQuadPrefixTree) this.grid).setPruneLeafyBranches(randomBoolean());
     }
@@ -87,13 +81,12 @@ public class TestRandomSpatialOpFuzzyPrefixTree extends StrategyTestCase {
       ((PrefixTreeStrategy) strategy).setPointsOnly(true);
     }
 
-    log.info("Strategy: " + strategy.toString());  // nowarn
+    log.info("Strategy: " + strategy.toString()); // nowarn
   }
 
   private void setupCtx2D(SpatialContext ctx) {
-    if (!ctx.isGeo())
-      ctx2D = ctx;
-    //A non-geo version of ctx.
+    if (!ctx.isGeo()) ctx2D = ctx;
+    // A non-geo version of ctx.
     SpatialContextFactory ctxFactory = new SpatialContextFactory();
     ctxFactory.geo = false;
     ctxFactory.worldBounds = ctx.getWorldBounds();
@@ -101,15 +94,16 @@
 
   private void setupQuadGrid(int maxLevels, boolean packedQuadPrefixTree) {
-    //non-geospatial makes this test a little easier (in gridSnap), and using boundary values 2^X raises
-    // the prospect of edge conditions we want to test, plus makes for simpler numbers (no decimals).
+    // non-geospatial makes this test a little easier (in gridSnap), and using boundary values 2^X
+    // raises
+    // the prospect of edge conditions we want to test, plus makes for simpler numbers (no
+    // decimals).
     SpatialContextFactory factory = new SpatialContextFactory();
     factory.geo = false;
     factory.worldBounds = new RectangleImpl(0, 256, -128, 128, null);
     this.ctx = factory.newSpatialContext();
-    //A fairly shallow grid, and default 2.5% distErrPct
-    if (maxLevels == -1)
-      maxLevels = randomIntBetween(1, 8);//max 64k cells (4^8), also 256*256
+    // A fairly shallow grid, and default 2.5% distErrPct
+    if (maxLevels == -1) maxLevels = randomIntBetween(1, 8); // max 64k cells (4^8), also 256*256
     if (packedQuadPrefixTree) {
       this.grid = new PackedQuadPrefixTree(ctx, maxLevels);
     } else {
@@ -120,9 +114,8 @@
 
   public void setupGeohashGrid(int maxLevels) {
     this.ctx = SpatialContext.GEO;
-    //A fairly shallow grid, and default 2.5% distErrPct
-    if (maxLevels == -1)
-      maxLevels = randomIntBetween(1, 3);//max 16k cells (32^3)
+    // A fairly shallow grid, and default 2.5% distErrPct
+    if (maxLevels == -1) maxLevels = randomIntBetween(1, 3); // max 16k cells (32^3)
     this.grid = new GeohashPrefixTree(ctx, maxLevels);
     this.strategy = newRPT();
   }
@@ -172,8 +165,9 @@
     Point point = ctx.makePoint(86, -127.44362190053255);
     adoc("0", point);
     commit();
-    Query query = strategy.makeQuery(new SpatialArgs(SpatialOperation.Intersects,
-        ctx.makeRectangle(point, point)));
+    Query query =
+        strategy.makeQuery(
+            new SpatialArgs(SpatialOperation.Intersects, ctx.makeRectangle(point, point)));
     assertEquals(1, executeQuery(query, 1).numFound);
   }
 
@@ -181,10 +175,14 @@
   @Test
   public void testContainsPairOverlap() throws IOException {
     setupQuadGrid(3, randomBoolean());
-    adoc("0", new ShapePair(ctx.makeRectangle(0, 33, -128, 128), ctx.makeRectangle(33, 128, -128, 128), true));
+    adoc(
+        "0",
+        new ShapePair(
+            ctx.makeRectangle(0, 33, -128, 128), ctx.makeRectangle(33, 128, -128, 128), true));
     commit();
-    Query query = strategy.makeQuery(new SpatialArgs(SpatialOperation.Contains,
-        ctx.makeRectangle(0, 128, -16, 128)));
+    Query query =
+        strategy.makeQuery(
+            new SpatialArgs(SpatialOperation.Contains, ctx.makeRectangle(0, 128, -16, 128)));
     SearchResults searchResults = executeQuery(query, 1);
     assertEquals(1, searchResults.numFound);
   }
@@ -192,38 +190,53 @@
   @Test
   public void testWithinDisjointParts() throws IOException {
     setupQuadGrid(7, randomBoolean());
-    //one shape comprised of two parts, quite separated apart
-    adoc("0", new ShapePair(ctx.makeRectangle(0, 10, -120, -100), ctx.makeRectangle(220, 240, 110, 125), false));
+    // one shape comprised of two parts, quite separated apart
+    adoc(
+        "0",
+        new ShapePair(
+            ctx.makeRectangle(0, 10, -120, -100), ctx.makeRectangle(220, 240, 110, 125), false));
     commit();
-    //query surrounds only the second part of the indexed shape
-    Query query = strategy.makeQuery(new SpatialArgs(SpatialOperation.IsWithin,
-        ctx.makeRectangle(210, 245, 105, 128)));
+    // query surrounds only the second part of the indexed shape
+    Query query =
+        strategy.makeQuery(
+            new SpatialArgs(SpatialOperation.IsWithin, ctx.makeRectangle(210, 245, 105, 128)));
     SearchResults searchResults = executeQuery(query, 1);
-    //we shouldn't find it because it's not completely within
+    // we shouldn't find it because it's not completely within
     assertTrue(searchResults.numFound == 0);
   }
 
-  @Test /** LUCENE-4916 */
+  @Test
+  /** LUCENE-4916 */
   public void testWithinLeafApproxRule() throws IOException {
-    setupQuadGrid(2, randomBoolean());//4x4 grid
-    //indexed shape will simplify to entire right half (2 top cells)
+    setupQuadGrid(2, randomBoolean()); // 4x4 grid
+    // indexed shape will simplify to entire right half (2 top cells)
     adoc("0", ctx.makeRectangle(192, 204, -128, 128));
     commit();
 
     ((RecursivePrefixTreeStrategy) strategy).setPrefixGridScanLevel(randomInt(2));
 
-    //query does NOT contain it; both indexed cells are leaves to the query, and
+    // query does NOT contain it; both indexed cells are leaves to the query, and
     // when expanded to the full grid cells, the top one's top row is disjoint
     // from the query and thus not a match.
-    assertTrue(executeQuery(strategy.makeQuery(
-        new SpatialArgs(SpatialOperation.IsWithin, ctx.makeRectangle(38, 192, -72, 56))
-    ), 1).numFound==0);//no-match
+    assertTrue(
+        executeQuery(
+                    strategy.makeQuery(
+                        new SpatialArgs(
+                            SpatialOperation.IsWithin, ctx.makeRectangle(38, 192, -72, 56))),
+                    1)
+                .numFound
+            == 0); // no-match
 
-    //this time the rect is a little bigger and is considered a match. It's
+    // this time the rect is a little bigger and is considered a match. It's
     // an acceptable false-positive because of the grid approximation.
-    assertTrue(executeQuery(strategy.makeQuery(
-        new SpatialArgs(SpatialOperation.IsWithin, ctx.makeRectangle(38, 192, -72, 80))
-    ), 1).numFound==1);//match
+    assertTrue(
+        executeQuery(
+                    strategy.makeQuery(
+                        new SpatialArgs(
+                            SpatialOperation.IsWithin, ctx.makeRectangle(38, 192, -72, 80))),
+                    1)
+                .numFound
+            == 1); // match
   }
 
   @Test
@@ -231,12 +244,14 @@ public class TestRandomSpatialOpFuzzyPrefixTree extends StrategyTestCase {
     ctx = SpatialContext.GEO;
     setupCtx2D(ctx);
 
-    Shape leftShape = new ShapePair(ctx.makeRectangle(-74, -56, -8, 1), ctx.makeRectangle(-180, 134, -90, 90), true);
+    Shape leftShape =
+        new ShapePair(
+            ctx.makeRectangle(-74, -56, -8, 1), ctx.makeRectangle(-180, 134, -90, 90), true);
     Shape queryShape = ctx.makeRectangle(-180, 180, -90, 90);
     assertEquals(SpatialRelation.WITHIN, leftShape.relate(queryShape));
   }
 
-  //Override so we can index parts of a pair separately, resulting in the detailLevel
+  // Override so we can index parts of a pair separately, resulting in the detailLevel
   // being independent for each shape vs the whole thing
   @Override
   protected Document newDoc(String id, Shape shape) {
@@ -246,8 +261,8 @@ public class TestRandomSpatialOpFuzzyPrefixTree extends StrategyTestCase {
     Collection<Shape> shapes;
     if (shape instanceof ShapePair) {
       shapes = new ArrayList<>(2);
-      shapes.add(((ShapePair)shape).shape1);
-      shapes.add(((ShapePair)shape).shape2);
+      shapes.add(((ShapePair) shape).shape1);
+      shapes.add(((ShapePair) shape).shape2);
     } else {
       shapes = Collections.singleton(shape);
     }
@@ -256,15 +271,15 @@ public class TestRandomSpatialOpFuzzyPrefixTree extends StrategyTestCase {
         doc.add(f);
       }
     }
-    if (storeShape)//just for diagnostics
-      doc.add(new StoredField(strategy.getFieldName(), shape.toString()));
+    if (storeShape) // just for diagnostics
+      doc.add(new StoredField(strategy.getFieldName(), shape.toString()));
     }
     return doc;
   }
 
   @SuppressWarnings("fallthrough")
   private void doTest(final SpatialOperation operation) throws IOException {
-    //first show that when there's no data, a query will result in no results
+    // first show that when there's no data, a query will result in no results
     {
       Query query = strategy.makeQuery(new SpatialArgs(operation, randomRectangle()));
       SearchResults searchResults = executeQuery(query, 1);
@@ -273,9 +288,9 @@ public class TestRandomSpatialOpFuzzyPrefixTree extends StrategyTestCase {
     final boolean biasContains = (operation == SpatialOperation.Contains);
 
-    //Main index loop:
+    // Main index loop:
     Map<String, Shape> indexedShapes = new LinkedHashMap<>();
-    Map<String, Shape> indexedShapesGS = new LinkedHashMap<>();//grid snapped
+    Map<String, Shape> indexedShapesGS = new LinkedHashMap<>(); // grid snapped
     final int numIndexedShapes = randomIntBetween(1, 6);
     boolean indexedAtLeastOneShapePair = false;
     final boolean pointsOnly = ((PrefixTreeStrategy) strategy).isPointsOnly();
@@ -283,16 +298,16 @@ public class TestRandomSpatialOpFuzzyPrefixTree extends StrategyTestCase {
       String id = "" + i;
       Shape indexedShape;
       int R = random().nextInt(12);
-      if (R == 0) {//1 in 12
+      if (R == 0) { // 1 in 12
         indexedShape = null;
-      } else if (R == 1 || pointsOnly) {//1 in 12
-        indexedShape = randomPoint();//just one point
-      } else if (R <= 4) {//3 in 12
-        //comprised of more than one shape
+      } else if (R == 1 || pointsOnly) { // 1 in 12
+        indexedShape = randomPoint(); // just one point
+      } else if (R <= 4) { // 3 in 12
+        // comprised of more than one shape
         indexedShape = randomShapePairRect(biasContains);
         indexedAtLeastOneShapePair = true;
       } else {
-        indexedShape = randomRectangle();//just one rect
+        indexedShape = randomRectangle(); // just one rect
       }
 
       indexedShapes.put(id, indexedShape);
@@ -300,11 +315,11 @@ public class TestRandomSpatialOpFuzzyPrefixTree extends StrategyTestCase {
 
       adoc(id, indexedShape);
 
-      if (random().nextInt(10) == 0)
-        commit();//intermediate commit, produces extra segments
-
+      if (random().nextInt(10) == 0) {
+        commit(); // intermediate commit, produces extra segments
+      }
     }
-    //delete some documents randomly
+    // delete some documents randomly
     Iterator<String> idIter = indexedShapes.keySet().iterator();
     while (idIter.hasNext()) {
       String id = idIter.next();
@@ -317,7 +332,7 @@ public class TestRandomSpatialOpFuzzyPrefixTree extends StrategyTestCase {
 
     commit();
 
-    //Main query loop:
+    // Main query loop:
     final int numQueryShapes = atLeast(20);
     for (int i = 0; i < numQueryShapes; i++) {
       int scanLevel = randomInt(grid.getMaxLevels());
@@ -325,59 +340,64 @@ public class TestRandomSpatialOpFuzzyPrefixTree extends StrategyTestCase {
       final Shape queryShape;
       switch (randomInt(10)) {
-        case 0: queryShape = randomPoint(); break;
-// LUCENE-5549
-//TODO debug: -Dtests.method=testWithin -Dtests.multiplier=3 -Dtests.seed=5F5294CE2E075A3E:AAD2F0F79288CA64
-//        case 1:case 2:case 3:
-//          if (!indexedAtLeastOneShapePair) { // avoids ShapePair.relate(ShapePair), which isn't reliable
-//            queryShape = randomShapePairRect(!biasContains);//invert biasContains for query side
-//            break;
-//          }
+        case 0:
+          queryShape = randomPoint();
+          break;
+          // LUCENE-5549
+          // TODO debug: -Dtests.method=testWithin -Dtests.multiplier=3
+          // -Dtests.seed=5F5294CE2E075A3E:AAD2F0F79288CA64
+          //        case 1:case 2:case 3:
+          //          if (!indexedAtLeastOneShapePair) {
+          //            // avoids ShapePair.relate(ShapePair), which isn't reliable
+          //            queryShape = randomShapePairRect(!biasContains);
+          //            // invert biasContains for query side
+          //            break;
+          //          }
         case 4:
-          //choose an existing indexed shape
+          // choose an existing indexed shape
           if (!indexedShapes.isEmpty()) {
             Shape tmp = indexedShapes.values().iterator().next();
-            if (tmp instanceof Point || tmp instanceof Rectangle) {//avoids null and shapePair
+            if (tmp instanceof Point || tmp instanceof Rectangle) { // avoids null and shapePair
               queryShape = tmp;
               break;
             }
-          }//else fall-through
+          }
+          // fall-through
 
-        default: queryShape = randomRectangle();
+        default:
+          queryShape = randomRectangle();
       }
       final Shape queryShapeGS = gridSnap(queryShape);
 
       final boolean opIsDisjoint = operation == SpatialOperation.IsDisjointTo;
 
-      //Generate truth via brute force:
+      // Generate truth via brute force:
       // We ensure true-positive matches (if the predicate on the raw shapes match
       // then the search should find those same matches).
       // approximations, false-positive matches
-      Set<String> expectedIds = new LinkedHashSet<>();//true-positives
-      Set<String> secondaryIds = new LinkedHashSet<>();//false-positives (unless disjoint)
+      Set<String> expectedIds = new LinkedHashSet<>(); // true-positives
+      Set<String> secondaryIds = new LinkedHashSet<>(); // false-positives (unless disjoint)
       for (Map.Entry<String, Shape> entry : indexedShapes.entrySet()) {
         String id = entry.getKey();
         Shape indexedShapeCompare = entry.getValue();
-        if (indexedShapeCompare == null)
-          continue;
+        if (indexedShapeCompare == null) continue;
         Shape queryShapeCompare = queryShape;
 
         if (operation.evaluate(indexedShapeCompare, queryShapeCompare)) {
           expectedIds.add(id);
           if (opIsDisjoint) {
-            //if no longer intersect after buffering them, for disjoint, remember this
+            // if no longer intersect after buffering them, for disjoint, remember this
             indexedShapeCompare = indexedShapesGS.get(id);
             queryShapeCompare = queryShapeGS;
-            if (!operation.evaluate(indexedShapeCompare, queryShapeCompare))
-              secondaryIds.add(id);
+            if (!operation.evaluate(indexedShapeCompare, queryShapeCompare)) secondaryIds.add(id);
           }
         } else if (!opIsDisjoint) {
-          //buffer either the indexed or query shape (via gridSnap) and try again
+          // buffer either the indexed or query shape (via gridSnap) and try again
           if (operation == SpatialOperation.Intersects) {
             indexedShapeCompare = indexedShapesGS.get(id);
             queryShapeCompare = queryShapeGS;
-            //TODO Unfortunately, grid-snapping both can result in intersections that otherwise
+            // TODO Unfortunately, grid-snapping both can result in intersections that otherwise
             // wouldn't happen when the grids are adjacent. Not a big deal but our test is just a
             // bit more lenient.
           } else if (operation == SpatialOperation.Contains) {
@@ -385,15 +405,14 @@ public class TestRandomSpatialOpFuzzyPrefixTree extends StrategyTestCase {
           } else if (operation == SpatialOperation.IsWithin) {
             queryShapeCompare = queryShapeGS;
           }
-          if (operation.evaluate(indexedShapeCompare, queryShapeCompare))
-            secondaryIds.add(id);
+          if (operation.evaluate(indexedShapeCompare, queryShapeCompare)) secondaryIds.add(id);
         }
       }
 
-      //Search and verify results
+      // Search and verify results
       SpatialArgs args = new SpatialArgs(operation, queryShape);
       if (queryShape instanceof ShapePair)
-        args.setDistErrPct(0.0);//a hack; we want to be more detailed than gridSnap(queryShape)
+        args.setDistErrPct(0.0); // a hack; we want to be more detailed than gridSnap(queryShape)
       Query query = strategy.makeQuery(args);
       SearchResults got = executeQuery(query, 100);
       Set<String> remainingExpectedIds = new LinkedHashSet<>(expectedIds);
@@ -404,8 +423,7 @@ public class TestRandomSpatialOpFuzzyPrefixTree extends StrategyTestCase {
           fail("Shouldn't match", id, indexedShapes, indexedShapesGS, queryShape);
         }
       }
-      if (opIsDisjoint)
-        remainingExpectedIds.removeAll(secondaryIds);
+      if (opIsDisjoint) remainingExpectedIds.removeAll(secondaryIds);
       if (!remainingExpectedIds.isEmpty()) {
         String id = remainingExpectedIds.iterator().next();
         fail("Should have matched", id, indexedShapes, indexedShapesGS, queryShape);
@@ -419,20 +437,25 @@ public class TestRandomSpatialOpFuzzyPrefixTree extends StrategyTestCase {
     return new ShapePair(shape1, shape2, biasContains);
   }
 
-  private void fail(String label, String id, Map<String, Shape> indexedShapes, Map<String, Shape> indexedShapesGS, Shape queryShape) {
+  private void fail(
+      String label,
+      String id,
+      Map<String, Shape> indexedShapes,
+      Map<String, Shape> indexedShapesGS,
+      Shape queryShape) {
     System.err.println("Ig:" + indexedShapesGS.get(id) + " Qg:" + gridSnap(queryShape));
     fail(label + " I#" + id + ":" + indexedShapes.get(id) + " Q:" + queryShape);
   }
 
-//  private Rectangle inset(Rectangle r) {
-//    //typically inset by 1 (whole numbers are easy to read)
-//    double d = Math.min(1.0, grid.getDistanceForLevel(grid.getMaxLevels()) / 4);
-//    return ctx.makeRectangle(r.getMinX() + d, r.getMaxX() - d, r.getMinY() + d, r.getMaxY() - d);
-//  }
+  //  private Rectangle inset(Rectangle r) {
+  //    //typically inset by 1 (whole numbers are easy to read)
+  //    double d = Math.min(1.0, grid.getDistanceForLevel(grid.getMaxLevels()) / 4);
+  //    return ctx.makeRectangle(r.getMinX() + d, r.getMaxX() - d, r.getMinY() + d, r.getMaxY() -
+  //    d);
+  //  }
 
   protected Shape gridSnap(Shape snapMe) {
-    if (snapMe == null)
-      return null;
+    if (snapMe == null) return null;
     if (snapMe instanceof ShapePair) {
       ShapePair me = (ShapePair) snapMe;
       return new ShapePair(gridSnap(me.shape1), gridSnap(me.shape2), me.biasContainsThenWithin);
@@ -440,34 +463,32 @@ public class TestRandomSpatialOpFuzzyPrefixTree extends StrategyTestCase {
     if (snapMe instanceof Point) {
       snapMe = snapMe.getBoundingBox();
     }
-    //The next 4 lines mimic PrefixTreeStrategy.createIndexableFields()
+    // The next 4 lines mimic PrefixTreeStrategy.createIndexableFields()
     double distErrPct = ((PrefixTreeStrategy) strategy).getDistErrPct();
     double distErr = SpatialArgs.calcDistanceFromErrPct(snapMe, distErrPct, ctx);
     int detailLevel = grid.getLevelForDistance(distErr);
     CellIterator cells = grid.getTreeCellIterator(snapMe, detailLevel);
 
-    //calc bounding box of cells.
+    // calc bounding box of cells.
     List<Shape> cellShapes = new ArrayList<>(1024);
     while (cells.hasNext()) {
       Cell cell = cells.next();
-      if (!cell.isLeaf())
-        continue;
+      if (!cell.isLeaf()) continue;
       cellShapes.add(cell.getShape());
     }
     return new ShapeCollection<>(cellShapes, ctx).getBoundingBox();
   }
 
   /**
-   * An aggregate of 2 shapes. Unfortunately we can't simply use a ShapeCollection because:
-   * (a) ambiguity between CONTAINS and WITHIN for equal shapes, and
-   * (b) adjacent pairs could as a whole contain the input shape.
-   * The tests here are sensitive to these matters, although in practice ShapeCollection
-   * is fine.
+   * An aggregate of 2 shapes. Unfortunately we can't simply use a ShapeCollection because: (a)
+   * ambiguity between CONTAINS and WITHIN for equal shapes, and (b) adjacent pairs could as a whole
+   * contain the input shape. The tests here are sensitive to these matters, although in practice
+   * ShapeCollection is fine.
    */
   private class ShapePair extends ShapeCollection<Shape> {
 
     final Shape shape1, shape2;
-    final Shape shape1_2D, shape2_2D;//not geo (bit of a hack)
+    final Shape shape1_2D, shape2_2D; // not geo (bit of a hack)
     final boolean biasContainsThenWithin;
 
     public ShapePair(Shape shape1, Shape shape2, boolean containsThenWithin) {
@@ -480,8 +501,7 @@ public class TestRandomSpatialOpFuzzyPrefixTree extends StrategyTestCase {
     }
 
     private Shape toNonGeo(Shape shape) {
-      if (!ctx.isGeo())
-        return shape;//already non-geo
+      if (!ctx.isGeo()) return shape; // already non-geo
       if (shape instanceof Rectangle) {
         Rectangle rect = (Rectangle) shape;
         if (rect.getCrossesDateLine()) {
@@ -490,38 +510,37 @@ public class TestRandomSpatialOpFuzzyPrefixTree extends StrategyTestCase {
               ctx2D.makeRectangle(-180, rect.getMaxX(), rect.getMinY(), rect.getMaxY()),
               biasContainsThenWithin);
         } else {
-          return ctx2D.makeRectangle(rect.getMinX(), rect.getMaxX(), rect.getMinY(), rect.getMaxY());
+          return ctx2D.makeRectangle(
+              rect.getMinX(), rect.getMaxX(), rect.getMinY(), rect.getMaxY());
         }
       }
-      //no need to do others; this addresses the -180/+180 ambiguity corner test problem
+      // no need to do others; this addresses the -180/+180 ambiguity corner test problem
       return shape;
     }
 
     @Override
     public SpatialRelation relate(Shape other) {
       SpatialRelation r = relateApprox(other);
-      if (r == DISJOINT)
-        return r;
-      if (r == CONTAINS)
-        return r;
-      if (r == WITHIN && !biasContainsThenWithin)
-        return r;
+      if (r == DISJOINT) return r;
+      if (r == CONTAINS) return r;
+      if (r == WITHIN && !biasContainsThenWithin) return r;
 
-      //See if the correct answer is actually Contains, when the indexed shapes are adjacent,
+      // See if the correct answer is actually Contains, when the indexed shapes are adjacent,
       // creating a larger shape that contains the input shape.
       boolean pairTouches = shape1.relate(shape2).intersects();
-      if (!pairTouches)
-        return r;
-      //test all 4 corners
-      // Note: awkwardly, we use a non-geo context for this because in geo, -180 & +180 are the same place, which means
-      // that "other" might wrap the world horizontally and yet all its corners could be in shape1 (or shape2) even
-      // though shape1 is only adjacent to the dateline. I couldn't think of a better way to handle this.
-      Rectangle oRect = (Rectangle)other;
+      if (!pairTouches) return r;
+      // test all 4 corners
+      // Note: awkwardly, we use a non-geo context for this because in geo, -180 & +180 are the same
+      // place, which means
+      // that "other" might wrap the world horizontally and yet all its corners could be in shape1
+      // (or shape2) even
+      // though shape1 is only adjacent to the dateline. I couldn't think of a better way to handle
+      // this.
+      Rectangle oRect = (Rectangle) other;
       if (cornerContainsNonGeo(oRect.getMinX(), oRect.getMinY())
           && cornerContainsNonGeo(oRect.getMinX(), oRect.getMaxY())
          && cornerContainsNonGeo(oRect.getMaxX(), oRect.getMinY())
-          && cornerContainsNonGeo(oRect.getMaxX(), oRect.getMaxY()) )
-        return CONTAINS;
+          && cornerContainsNonGeo(oRect.getMaxX(), oRect.getMaxY())) return CONTAINS;
       return r;
     }
 
@@ -532,8 +551,10 @@
     private SpatialRelation relateApprox(Shape other) {
       if (biasContainsThenWithin) {
-        if (shape1.relate(other) == CONTAINS || shape1.equals(other)
-            || shape2.relate(other) == CONTAINS || shape2.equals(other)) return CONTAINS;
+        if (shape1.relate(other) == CONTAINS
+            || shape1.equals(other)
+            || shape2.relate(other) == CONTAINS
+            || shape2.equals(other)) return CONTAINS;
 
         if (shape1.relate(other) == WITHIN && shape2.relate(other) == WITHIN) return WITHIN;
 
@@ -544,8 +565,11 @@
         if (shape1.relate(other) == CONTAINS || shape2.relate(other) == CONTAINS) return CONTAINS;
       }
 
-      if (shape1.relate(other).intersects() || shape2.relate(other).intersects())
-        return INTERSECTS;//might actually be 'CONTAINS' if the pair are adjacent but we handle that later
+      if (shape1.relate(other).intersects() || shape2.relate(other).intersects()) {
+        // might actually be 'CONTAINS' if the pair are adjacent but we handle
+        // that later
+        return INTERSECTS;
+      }
       return DISJOINT;
     }
 
@@ -554,5 +578,4 @@
       return "ShapePair(" + shape1 + " , " + shape2 + ")";
     }
   }
-
 }
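gridSnap(...) above approximates a shape the same way indexing does (it mimics PrefixTreeStrategy.createIndexableFields, per its own comment). A standalone sketch of that path for simple shapes, without the ShapePair recursion, using only the calls shown in this diff; the class name is hypothetical:

import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.spatial.prefix.tree.Cell;
import org.apache.lucene.spatial.prefix.tree.CellIterator;
import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree;
import org.apache.lucene.spatial.query.SpatialArgs;
import org.locationtech.spatial4j.context.SpatialContext;
import org.locationtech.spatial4j.shape.Shape;
import org.locationtech.spatial4j.shape.ShapeCollection;

// Approximate a shape by the bounding box of its leaf cells at the detail
// level implied by distErrPct, i.e. the "buffered" form the index sees.
final class GridSnapSketch {
  static Shape gridSnap(
      SpatialContext ctx, SpatialPrefixTree grid, Shape snapMe, double distErrPct) {
    double distErr = SpatialArgs.calcDistanceFromErrPct(snapMe, distErrPct, ctx);
    int detailLevel = grid.getLevelForDistance(distErr);
    CellIterator cells = grid.getTreeCellIterator(snapMe, detailLevel);
    List<Shape> cellShapes = new ArrayList<>(1024);
    while (cells.hasNext()) {
      Cell cell = cells.next();
      if (!cell.isLeaf()) continue; // only leaves cover the shape at full detail
      cellShapes.add(cell.getShape());
    }
    return new ShapeCollection<>(cellShapes, ctx).getBoundingBox();
  }
}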
+ */ public class TestRandomSpatialOpFuzzyPrefixTree50 extends TestRandomSpatialOpFuzzyPrefixTree { protected RecursivePrefixTreeStrategy newRPT() { @@ -27,5 +29,4 @@ public class TestRandomSpatialOpFuzzyPrefixTree50 extends TestRandomSpatialOpFuz } }; } - } diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestRecursivePrefixTreeStrategy.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestRecursivePrefixTreeStrategy.java index 14e2140e352..91f21fce0bc 100644 --- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestRecursivePrefixTreeStrategy.java +++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestRecursivePrefixTreeStrategy.java @@ -16,10 +16,9 @@ */ package org.apache.lucene.spatial.prefix; -import org.locationtech.spatial4j.context.SpatialContext; -import org.locationtech.spatial4j.distance.DistanceUtils; -import org.locationtech.spatial4j.shape.Point; -import org.locationtech.spatial4j.shape.Shape; +import java.io.IOException; +import java.util.HashSet; +import java.util.Set; import org.apache.lucene.spatial.SpatialMatchConcern; import org.apache.lucene.spatial.StrategyTestCase; import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree; @@ -27,17 +26,17 @@ import org.apache.lucene.spatial.query.SpatialArgs; import org.apache.lucene.spatial.query.SpatialOperation; import org.apache.lucene.util.LuceneTestCase; import org.junit.Test; - -import java.io.IOException; -import java.util.HashSet; -import java.util.Set; +import org.locationtech.spatial4j.context.SpatialContext; +import org.locationtech.spatial4j.distance.DistanceUtils; +import org.locationtech.spatial4j.shape.Point; +import org.locationtech.spatial4j.shape.Shape; @LuceneTestCase.SuppressCodecs("SimpleText") public class TestRecursivePrefixTreeStrategy extends StrategyTestCase { private int maxLength; - //Tests should call this first. + // Tests should call this first. private void init(int maxLength) { this.maxLength = maxLength; this.ctx = SpatialContext.GEO; @@ -50,9 +49,9 @@ public class TestRecursivePrefixTreeStrategy extends StrategyTestCase { init(GeohashPrefixTree.getMaxLevelsPossible()); getAddAndVerifyIndexedDocuments(DATA_WORLD_CITIES_POINTS); - //execute queries for each prefix grid scan level - for(int i = 0; i <= maxLength; i++) { - ((RecursivePrefixTreeStrategy)strategy).setPrefixGridScanLevel(i); + // execute queries for each prefix grid scan level + for (int i = 0; i <= maxLength; i++) { + ((RecursivePrefixTreeStrategy) strategy).setPrefixGridScanLevel(i); executeQueries(SpatialMatchConcern.FILTER, QTEST_Cities_Intersects_BBox); } } @@ -61,16 +60,16 @@ public class TestRecursivePrefixTreeStrategy extends StrategyTestCase { public void testOneMeterPrecision() { init(GeohashPrefixTree.getMaxLevelsPossible()); GeohashPrefixTree grid = (GeohashPrefixTree) ((RecursivePrefixTreeStrategy) strategy).getGrid(); - //DWS: I know this to be true. 11 is needed for one meter + // DWS: I know this to be true. 
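The one-meter figure in this comment can be reproduced with the same utilities the test uses: convert one meter (0.001 km) to degrees of arc on the mean-radius sphere, then ask the grid for the smallest level whose cells are at least that fine. A self-contained sketch (the class name is made up for illustration):

  import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree;
  import org.locationtech.spatial4j.context.SpatialContext;
  import org.locationtech.spatial4j.distance.DistanceUtils;

  public class GeohashLevelCheck {
    public static void main(String[] args) {
      // 1 meter = 0.001 km, converted to degrees of arc on the mean-radius sphere.
      double degrees = DistanceUtils.dist2Degrees(0.001, DistanceUtils.EARTH_MEAN_RADIUS_KM);
      GeohashPrefixTree grid =
          new GeohashPrefixTree(SpatialContext.GEO, GeohashPrefixTree.getMaxLevelsPossible());
      System.out.println(grid.getLevelForDistance(degrees)); // 11, per the assertion below
    }
  }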
11 is needed for one meter double degrees = DistanceUtils.dist2Degrees(0.001, DistanceUtils.EARTH_MEAN_RADIUS_KM); assertEquals(11, grid.getLevelForDistance(degrees)); } @Test - public void testPrecision() throws IOException{ + public void testPrecision() throws IOException { init(GeohashPrefixTree.getMaxLevelsPossible()); - Point iPt = ctx.getShapeFactory().pointXY(2.8028712999999925, 48.3708044);//lon, lat + Point iPt = ctx.getShapeFactory().pointXY(2.8028712999999925, 48.3708044); // lon, lat addDocument(newDoc("iPt", iPt)); commit(); @@ -79,30 +78,30 @@ public class TestRecursivePrefixTreeStrategy extends StrategyTestCase { final double KM2DEG = DistanceUtils.dist2Degrees(1, DistanceUtils.EARTH_MEAN_RADIUS_KM); final double DEG2KM = 1 / KM2DEG; - final double DIST = 35.75;//35.7499... + final double DIST = 35.75; // 35.7499... assertEquals(DIST, ctx.getDistCalc().distance(iPt, qPt) * DEG2KM, 0.001); - //distErrPct will affect the query shape precision. The indexed precision + // distErrPct will affect the query shape precision. The indexed precision // was set to nearly zilch via init(GeohashPrefixTree.getMaxLevelsPossible()); - final double distErrPct = 0.025; //the suggested default, by the way - final double distMult = 1+distErrPct; + final double distErrPct = 0.025; // the suggested default, by the way + final double distMult = 1 + distErrPct; - assertTrue(35.74*distMult >= DIST); + assertTrue(35.74 * distMult >= DIST); checkHits(q(qPt, 35.74 * KM2DEG, distErrPct), 1, null); - assertTrue(30*distMult < DIST); + assertTrue(30 * distMult < DIST); checkHits(q(qPt, 30 * KM2DEG, distErrPct), 0, null); - assertTrue(33*distMult < DIST); + assertTrue(33 * distMult < DIST); checkHits(q(qPt, 33 * KM2DEG, distErrPct), 0, null); - assertTrue(34*distMult < DIST); + assertTrue(34 * distMult < DIST); checkHits(q(qPt, 34 * KM2DEG, distErrPct), 0, null); } private SpatialArgs q(Point pt, double distDEG, double distErrPct) { Shape shape = ctx.getShapeFactory().circle(pt, distDEG); - SpatialArgs args = new SpatialArgs(SpatialOperation.Intersects,shape); + SpatialArgs args = new SpatialArgs(SpatialOperation.Intersects, shape); args.setDistErrPct(distErrPct); return args; } @@ -116,9 +115,8 @@ public class TestRecursivePrefixTreeStrategy extends StrategyTestCase { gotIds.add(Integer.valueOf(result.document.get("id"))); } for (int assertId : assertIds) { - assertTrue("has "+assertId,gotIds.contains(assertId)); + assertTrue("has " + assertId, gotIds.contains(assertId)); } } } - } diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestTermQueryPrefixGridStrategy.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestTermQueryPrefixGridStrategy.java index fc131c5d788..306c9f8f19f 100644 --- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestTermQueryPrefixGridStrategy.java +++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestTermQueryPrefixGridStrategy.java @@ -16,8 +16,8 @@ */ package org.apache.lucene.spatial.prefix; -import org.locationtech.spatial4j.context.SpatialContext; -import org.locationtech.spatial4j.shape.Shape; +import java.io.IOException; +import java.util.Arrays; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.StoredField; @@ -26,17 +26,16 @@ import org.apache.lucene.spatial.SpatialTestCase; import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree; import org.apache.lucene.spatial.query.SpatialArgsParser; import org.junit.Test; - 
-import java.io.IOException; -import java.util.Arrays; - +import org.locationtech.spatial4j.context.SpatialContext; +import org.locationtech.spatial4j.shape.Shape; public class TestTermQueryPrefixGridStrategy extends SpatialTestCase { @Test public void testNGramPrefixGridLosAngeles() throws IOException { SpatialContext ctx = SpatialContext.GEO; - TermQueryPrefixTreeStrategy prefixGridStrategy = new TermQueryPrefixTreeStrategy(new QuadPrefixTree(ctx), "geo"); + TermQueryPrefixTreeStrategy prefixGridStrategy = + new TermQueryPrefixTreeStrategy(new QuadPrefixTree(ctx), "geo"); Shape point = ctx.makePoint(-118.243680, 34.052230); @@ -45,19 +44,22 @@ public class TestTermQueryPrefixGridStrategy extends SpatialTestCase { for (Field field : prefixGridStrategy.createIndexableFields(point)) { losAngeles.add(field); } - losAngeles.add(new StoredField(prefixGridStrategy.getFieldName(), point.toString()));//just for diagnostics + losAngeles.add( + new StoredField( + prefixGridStrategy.getFieldName(), point.toString())); // just for diagnostics addDocumentsAndCommit(Arrays.asList(losAngeles)); // This won't work with simple spatial context... SpatialArgsParser spatialArgsParser = new SpatialArgsParser(); // TODO... use a non polygon query -// SpatialArgs spatialArgs = spatialArgsParser.parse( -// "Intersects(POLYGON((-127.00390625 39.8125,-112.765625 39.98828125,-111.53515625 31.375,-125.94921875 30.14453125,-127.00390625 39.8125)))", -// new SimpleSpatialContext()); + // SpatialArgs spatialArgs = spatialArgsParser.parse( + // "Intersects(POLYGON((-127.00390625 39.8125,-112.765625 39.98828125,-111.53515625 + // 31.375,-125.94921875 30.14453125,-127.00390625 39.8125)))", + // new SimpleSpatialContext()); -// Query query = prefixGridStrategy.makeQuery(spatialArgs, fieldInfo); -// SearchResults searchResults = executeQuery(query, 1); -// assertEquals(1, searchResults.numFound); + // Query query = prefixGridStrategy.makeQuery(spatialArgs, fieldInfo); + // SearchResults searchResults = executeQuery(query, 1); + // assertEquals(1, searchResults.numFound); } } diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/tree/TestDateRangePrefixTree.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/tree/TestDateRangePrefixTree.java index 95cd4b8344e..44bc6b04ed4 100644 --- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/tree/TestDateRangePrefixTree.java +++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/tree/TestDateRangePrefixTree.java @@ -16,6 +16,9 @@ */ package org.apache.lucene.spatial.prefix.tree; +import static java.time.format.DateTimeFormatter.ISO_DATE_TIME; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import java.text.ParseException; import java.time.Instant; import java.time.OffsetDateTime; @@ -25,24 +28,21 @@ import java.time.temporal.ChronoField; import java.util.Arrays; import java.util.Calendar; import java.util.GregorianCalendar; - -import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree.UnitNRShape; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; import org.locationtech.spatial4j.shape.Shape; import org.locationtech.spatial4j.shape.SpatialRelation; -import static java.time.format.DateTimeFormatter.ISO_DATE_TIME; - public class TestDateRangePrefixTree extends LuceneTestCase { @ParametersFactory(argumentFormatting = "calendar=%s") public static Iterable parameters() { - 
return Arrays.asList(new Object[][]{ - {"default", DateRangePrefixTree.DEFAULT_CAL}, - {"compat", DateRangePrefixTree.JAVA_UTIL_TIME_COMPAT_CAL} - }); + return Arrays.asList( + new Object[][] { + {"default", DateRangePrefixTree.DEFAULT_CAL}, + {"compat", DateRangePrefixTree.JAVA_UTIL_TIME_COMPAT_CAL} + }); } private final DateRangePrefixTree tree; @@ -56,45 +56,45 @@ public class TestDateRangePrefixTree extends LuceneTestCase { assertEquals("*", tree.toString(cal)); - //test no underflow - assertTrue(tree.toShape(new int[]{0}, 1).toString().startsWith("-")); + // test no underflow + assertTrue(tree.toShape(new int[] {0}, 1).toString().startsWith("-")); - //Some arbitrary date + // Some arbitrary date cal.set(2014, Calendar.MAY, 9); roundTrip(cal); - assertEquals("2014-05-09",tree.toString(cal)); + assertEquals("2014-05-09", tree.toString(cal)); - //Earliest date + // Earliest date cal.setTimeInMillis(Long.MIN_VALUE); roundTrip(cal); - //Farthest date + // Farthest date cal.setTimeInMillis(Long.MAX_VALUE); roundTrip(cal); - //1BC is "0000". + // 1BC is "0000". cal.clear(); cal.set(Calendar.ERA, GregorianCalendar.BC); cal.set(Calendar.YEAR, 1); roundTrip(cal); assertEquals("0000", tree.toString(cal)); - //adding a "+" parses to the same; and a trailing 'Z' is fine too + // adding a "+" parses to the same; and a trailing 'Z' is fine too assertEquals(cal, tree.parseCalendar("+0000Z")); - //2BC is "-0001" + // 2BC is "-0001" cal.clear(); cal.set(Calendar.ERA, GregorianCalendar.BC); cal.set(Calendar.YEAR, 2); roundTrip(cal); assertEquals("-0001", tree.toString(cal)); - //1AD is "0001" + // 1AD is "0001" cal.clear(); cal.set(Calendar.YEAR, 1); roundTrip(cal); assertEquals("0001", tree.toString(cal)); - //test random + // test random cal.setTimeInMillis(random().nextLong()); roundTrip(cal); } @@ -102,49 +102,85 @@ public class TestDateRangePrefixTree extends LuceneTestCase { public void testToStringISO8601() throws ParseException { Calendar cal = tree.newCal(); cal.setTimeInMillis(random().nextLong()); - // create ZonedDateTime from the calendar, then get toInstant.toString which is the ISO8601 we emulate - // note: we don't simply init off of millisEpoch because of possible GregorianChangeDate discrepancy. + // create ZonedDateTime from the calendar, then get toInstant.toString which is the ISO8601 we + // emulate + // note: we don't simply init off of millisEpoch because of possible GregorianChangeDate + // discrepancy. 
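The year adjustment at the start of this method exists because ISO-8601 uses astronomical year numbering (year 0 is 1 BC, year -1 is 2 BC), while GregorianCalendar stores a positive YEAR plus a BC era; that is also why the earlier assertions expect "0000" for 1 BC and "-0001" for 2 BC. The mapping in isolation, as a pure-JDK sketch (hypothetical helper name):

  // GregorianCalendar (ERA=BC, YEAR=n) corresponds to ISO-8601 year 1 - n:
  // 1 BC -> 0, 2 BC -> -1, and so on.
  static int isoYear(Calendar cal) {
    int year = cal.get(Calendar.YEAR);
    if (cal.get(Calendar.ERA) == GregorianCalendar.BC) { // ERA == 0, as in the code below
      year = -year + 1;
    }
    return year;
  }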
int year = cal.get(Calendar.YEAR); if (cal.get(Calendar.ERA) == 0) { // BC year = -year + 1; } String expectedISO8601 = - ZonedDateTime.of(year, cal.get(Calendar.MONTH) + 1, cal.get(Calendar.DAY_OF_MONTH), - cal.get(Calendar.HOUR_OF_DAY), cal.get(Calendar.MINUTE), cal.get(Calendar.SECOND), - cal.get(Calendar.MILLISECOND) * 1_000_000, ZoneOffset.UTC) - .toInstant().toString(); + ZonedDateTime.of( + year, + cal.get(Calendar.MONTH) + 1, + cal.get(Calendar.DAY_OF_MONTH), + cal.get(Calendar.HOUR_OF_DAY), + cal.get(Calendar.MINUTE), + cal.get(Calendar.SECOND), + cal.get(Calendar.MILLISECOND) * 1_000_000, + ZoneOffset.UTC) + .toInstant() + .toString(); String resultToString = tree.toString(cal) + 'Z'; assertEquals(expectedISO8601, resultToString); assertEquals(cal, tree.parseCalendar(expectedISO8601)); } public void testParseCalendar() throws ParseException { - Instant expected = OffsetDateTime.of(1984, 12, 18, 12, 34, 56, 100000000, ZoneOffset.UTC).toInstant(); + Instant expected = + OffsetDateTime.of(1984, 12, 18, 12, 34, 56, 100000000, ZoneOffset.UTC).toInstant(); assertEquals(expected, tree.parseCalendar("1984-12-18T12:34:56.1Z").toInstant()); - assertEquals(expected.with(ChronoField.MILLI_OF_SECOND, 10), tree.parseCalendar("1984-12-18T12:34:56.01Z").toInstant()); - assertEquals(expected.with(ChronoField.MILLI_OF_SECOND, 1), tree.parseCalendar("1984-12-18T12:34:56.001Z").toInstant()); + assertEquals( + expected.with(ChronoField.MILLI_OF_SECOND, 10), + tree.parseCalendar("1984-12-18T12:34:56.01Z").toInstant()); + assertEquals( + expected.with(ChronoField.MILLI_OF_SECOND, 1), + tree.parseCalendar("1984-12-18T12:34:56.001Z").toInstant()); assertEquals(expected, tree.parseCalendar("1984-12-18T12:34:56.1000Z").toInstant()); assertEquals(expected, tree.parseCalendar("1984-12-18T12:34:56.100000000Z").toInstant()); - assertEquals(expected.with(ChronoField.NANO_OF_SECOND, 0), tree.parseCalendar("1984-12-18T12:34:56Z").toInstant()); - // decimal places are simply cut off as rounding may affect the "seconds" part of the calender which was set before - assertEquals(expected.with(ChronoField.MILLI_OF_SECOND, 999), tree.parseCalendar("1984-12-18T12:34:56.9999Z").toInstant()); + assertEquals( + expected.with(ChronoField.NANO_OF_SECOND, 0), + tree.parseCalendar("1984-12-18T12:34:56Z").toInstant()); + // decimal places are simply cut off as rounding may affect the "seconds" part of the calendar + // which was set before + assertEquals( + expected.with(ChronoField.MILLI_OF_SECOND, 999), + tree.parseCalendar("1984-12-18T12:34:56.9999Z").toInstant()); assertEquals(expected, tree.parseCalendar("1984-12-18T12:34:56.1").toInstant()); - assertEquals(expected.with(ChronoField.MILLI_OF_SECOND, 10), tree.parseCalendar("1984-12-18T12:34:56.01").toInstant()); - assertEquals(expected.with(ChronoField.MILLI_OF_SECOND, 1), tree.parseCalendar("1984-12-18T12:34:56.001").toInstant()); + assertEquals( + expected.with(ChronoField.MILLI_OF_SECOND, 10), + tree.parseCalendar("1984-12-18T12:34:56.01").toInstant()); + assertEquals( + expected.with(ChronoField.MILLI_OF_SECOND, 1), + tree.parseCalendar("1984-12-18T12:34:56.001").toInstant()); assertEquals(expected, tree.parseCalendar("1984-12-18T12:34:56.1000").toInstant()); assertEquals(expected, tree.parseCalendar("1984-12-18T12:34:56.100000000").toInstant()); - assertEquals(expected.with(ChronoField.NANO_OF_SECOND, 0), tree.parseCalendar("1984-12-18T12:34:56").toInstant()); - assertEquals(expected.with(ChronoField.MILLI_OF_SECOND, 999),
tree.parseCalendar("1984-12-18T12:34:56.9999").toInstant()); - - assertEquals(OffsetDateTime.parse("1984-12-18T12:34:56.01Z", ISO_DATE_TIME).get(ChronoField.MILLI_OF_SECOND), 10); + assertEquals( + expected.with(ChronoField.NANO_OF_SECOND, 0), + tree.parseCalendar("1984-12-18T12:34:56").toInstant()); + assertEquals( + expected.with(ChronoField.MILLI_OF_SECOND, 999), + tree.parseCalendar("1984-12-18T12:34:56.9999").toInstant()); + + assertEquals( + OffsetDateTime.parse("1984-12-18T12:34:56.01Z", ISO_DATE_TIME) + .get(ChronoField.MILLI_OF_SECOND), + 10); } - //copies from DateRangePrefixTree + // copies from DateRangePrefixTree private static final int[] CAL_FIELDS = { - Calendar.YEAR, Calendar.MONTH, Calendar.DAY_OF_MONTH, - Calendar.HOUR_OF_DAY, Calendar.MINUTE, Calendar.SECOND, Calendar.MILLISECOND}; + Calendar.YEAR, + Calendar.MONTH, + Calendar.DAY_OF_MONTH, + Calendar.HOUR_OF_DAY, + Calendar.MINUTE, + Calendar.SECOND, + Calendar.MILLISECOND + }; private void roundTrip(Calendar calOrig) throws ParseException { Calendar cal = (Calendar) calOrig.clone(); @@ -154,19 +190,19 @@ public class TestDateRangePrefixTree extends LuceneTestCase { { Calendar preToStringCalClone = (Calendar) cal.clone(); calString = tree.toString(cal); - assertEquals(preToStringCalClone, cal);//ensure toString doesn't modify cal state + assertEquals(preToStringCalClone, cal); // ensure toString doesn't modify cal state } - //test parseCalendar + // test parseCalendar assertEquals(cal, tree.parseCalendar(calString)); - //to Shape and back to Cal + // to Shape and back to Cal UnitNRShape shape = tree.toShape(cal); Calendar cal2 = tree.toCalendar(shape); assertEquals(calString, tree.toString(cal2)); - if (!calString.equals("*")) {//not world cell - //to Term and back to Cell + if (!calString.equals("*")) { // not world cell + // to Term and back to Cell Cell cell = (Cell) shape; BytesRef term = cell.getTokenBytesNoLeaf(null); Cell cell2 = tree.readCell(BytesRef.deepCopyOf(term), null); @@ -183,18 +219,16 @@ public class TestDateRangePrefixTree extends LuceneTestCase { assertTrue(cell.isPrefixOf(cell2)); } - //end of loop; decide if should loop again with lower precision + // end of loop; decide if should loop again with lower precision final int calPrecField = tree.getCalPrecisionField(cal); - if (calPrecField == -1) - break; + if (calPrecField == -1) break; int fieldIdx = Arrays.binarySearch(CAL_FIELDS, calPrecField); assert fieldIdx >= 0; int prevPrecField = (fieldIdx == 0 ? 
-1 : CAL_FIELDS[--fieldIdx]); try { tree.clearFieldsAfter(cal, prevPrecField); } catch (AssertionError e) { - if (e.getMessage().equals("Calendar underflow")) - return; + if (e.getMessage().equals("Calendar underflow")) return; throw e; } lastString = calString; @@ -202,8 +236,9 @@ public class TestDateRangePrefixTree extends LuceneTestCase { } public void testShapeRelations() throws ParseException { - //note: left range is 264000 at the thousand year level whereas right value is exact year - assertEquals(SpatialRelation.WITHIN, + // note: left range is 264000 at the thousand year level whereas right value is exact year + assertEquals( + SpatialRelation.WITHIN, tree.parseShape("[-264000 TO -264000-11-20]").relate(tree.parseShape("-264000"))); Shape shapeA = tree.parseShape("[3122-01-23 TO 3122-11-27]"); @@ -224,7 +259,7 @@ public class TestDateRangePrefixTree extends LuceneTestCase { assertEquals("2014", tree.parseShape("[2014-01-01 TO 2014-12-31]").toString()); - assertEquals("2014", tree.parseShape("[2014-01 TO 2014]").toString()); + assertEquals("2014", tree.parseShape("[2014-01 TO 2014]").toString()); assertEquals("2014-01", tree.parseShape("[2014 TO 2014-01]").toString()); assertEquals("2014-12", tree.parseShape("[2014-12 TO 2014]").toString()); @@ -245,45 +280,70 @@ assertEquals(187183960, jurasic.get(Calendar.YEAR)); assertEquals(0, jurasic.get(Calendar.ERA)); } - expectThrows(ParseException.class, () -> { - tree.parseCalendar("2000-11T13"); - }); - expectThrows(ParseException.class, () -> { - tree.parseCalendar("2000-11-10T13-1"); - }); + expectThrows( + ParseException.class, + () -> { + tree.parseCalendar("2000-11T13"); + }); + expectThrows( + ParseException.class, + () -> { + tree.parseCalendar("2000-11-10T13-1"); + }); { - String causeMessage = expectThrows(ParseException.class, () -> { - tree.parseCalendar("2000-11-10T13Z1"); - }).getCause().getMessage(); - assertTrue(causeMessage +" has actual delimeter", causeMessage.contains("Z")); - assertTrue(causeMessage +" has expected delimeter",causeMessage.contains(":")); - assertFalse(causeMessage +" has no input",causeMessage.contains("2000-11-10")); + String causeMessage = + expectThrows( + ParseException.class, + () -> { + tree.parseCalendar("2000-11-10T13Z1"); + }) + .getCause() + .getMessage(); + assertTrue(causeMessage + " has actual delimiter", causeMessage.contains("Z")); + assertTrue(causeMessage + " has expected delimiter", causeMessage.contains(":")); + assertFalse(causeMessage + " has no input", causeMessage.contains("2000-11-10")); } - expectThrows(ParseException.class, () -> { - tree.parseCalendar("2000T13Z"); - }); - expectThrows(ParseException.class, () -> { - tree.parseCalendar("2000-11T13Z"); - }); + expectThrows( + ParseException.class, + () -> { + tree.parseCalendar("2000T13Z"); + }); + expectThrows( + ParseException.class, + () -> { + tree.parseCalendar("2000-11T13Z"); + }); { - String causeMessage = expectThrows(ParseException.class, () -> { - tree.parseCalendar("2000-13-12"); - }).getCause().getMessage(); - assertTrue(causeMessage +" has actual value",causeMessage.contains("13")); - assertFalse(causeMessage +" has no input",causeMessage.contains("2000-13-12")); + String causeMessage = + expectThrows( + ParseException.class, + () -> { + tree.parseCalendar("2000-13-12"); + }) + .getCause() + .getMessage(); + assertTrue(causeMessage + " has actual value", causeMessage.contains("13")); + assertFalse(causeMessage + " has no input",
causeMessage.contains("2000-13-12")); } - expectThrows(ParseException.class, () -> { - tree.parseCalendar("2000-13-41T13Z"); - }); - expectThrows(ParseException.class, () -> { - tree.parseCalendar("2000-11-12T25Z"); - }); - expectThrows(ParseException.class, () -> { - tree.parseCalendar("2000-11-12T25:61Z"); - }); - expectThrows(ParseException.class, () -> { - tree.parseCalendar("2000-11-12T25:14:61Z"); - }); + expectThrows( + ParseException.class, + () -> { + tree.parseCalendar("2000-13-41T13Z"); + }); + expectThrows( + ParseException.class, + () -> { + tree.parseCalendar("2000-11-12T25Z"); + }); + expectThrows( + ParseException.class, + () -> { + tree.parseCalendar("2000-11-12T25:61Z"); + }); + expectThrows( + ParseException.class, + () -> { + tree.parseCalendar("2000-11-12T25:14:61Z"); + }); } - -} \ No newline at end of file +} diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/tree/TestS2PrefixTree.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/tree/TestS2PrefixTree.java index 9593e3cb10f..96aa41b0d1d 100644 --- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/tree/TestS2PrefixTree.java +++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/tree/TestS2PrefixTree.java @@ -27,10 +27,8 @@ import org.junit.Test; import org.locationtech.spatial4j.context.SpatialContext; import org.locationtech.spatial4j.shape.Point; -/** - * Test for S2 Spatial prefix tree. - */ -public class TestS2PrefixTree extends LuceneTestCase{ +/** Test for S2 Spatial prefix tree. */ +public class TestS2PrefixTree extends LuceneTestCase { @Test @Repeat(iterations = 10) @@ -50,12 +48,16 @@ public class TestS2PrefixTree extends LuceneTestCase{ if (pos == 2) continue; id = id.next(); } - S2PrefixTree tree = new S2PrefixTree(new Geo3dSpatialContextFactory().newSpatialContext(), S2PrefixTree.getMaxLevels(arity), arity); + S2PrefixTree tree = + new S2PrefixTree( + new Geo3dSpatialContextFactory().newSpatialContext(), + S2PrefixTree.getMaxLevels(arity), + arity); S2PrefixTreeCell cell = new S2PrefixTreeCell(tree, id); BytesRef ref = cell.getTokenBytesWithLeaf(null); if (random().nextBoolean()) { int newOffset = random().nextInt(10) + 1; - byte[] newBytes = new byte[ref.bytes.length + newOffset]; + byte[] newBytes = new byte[ref.bytes.length + newOffset]; for (int i = 0; i < ref.bytes.length; i++) { newBytes[i + newOffset] = ref.bytes[i]; } @@ -70,32 +72,36 @@ public class TestS2PrefixTree extends LuceneTestCase{ @Test @Repeat(iterations = 10) public void testDistanceAndLevels() { - S2PrefixTree tree = new S2PrefixTree(new Geo3dSpatialContextFactory().newSpatialContext(), S2PrefixTree.getMaxLevels(1), 1); + S2PrefixTree tree = + new S2PrefixTree( + new Geo3dSpatialContextFactory().newSpatialContext(), S2PrefixTree.getMaxLevels(1), 1); double randomDist = random().nextDouble() * 5; int levelDistance = tree.getLevelForDistance(randomDist); double distanceLevel = tree.getDistanceForLevel(levelDistance); assertTrue(randomDist > distanceLevel); - - tree = new S2PrefixTree(new Geo3dSpatialContextFactory().newSpatialContext(), S2PrefixTree.getMaxLevels(2), 2); + tree = + new S2PrefixTree( + new Geo3dSpatialContextFactory().newSpatialContext(), S2PrefixTree.getMaxLevels(2), 2); levelDistance = tree.getLevelForDistance(randomDist); distanceLevel = tree.getDistanceForLevel(levelDistance); assertTrue(randomDist > distanceLevel); - tree = new S2PrefixTree(new Geo3dSpatialContextFactory().newSpatialContext(), S2PrefixTree.getMaxLevels(3), 3); + tree = + new 
S2PrefixTree( + new Geo3dSpatialContextFactory().newSpatialContext(), S2PrefixTree.getMaxLevels(3), 3); levelDistance = tree.getLevelForDistance(randomDist); distanceLevel = tree.getDistanceForLevel(levelDistance); assertTrue(randomDist > distanceLevel); - } @Test @Repeat(iterations = 10) public void testPrecision() { - int arity = random().nextInt(3) +1; + int arity = random().nextInt(3) + 1; SpatialContext context = new Geo3dSpatialContextFactory().newSpatialContext(); S2PrefixTree tree = new S2PrefixTree(context, S2PrefixTree.getMaxLevels(arity), arity); double precision = random().nextDouble(); @@ -104,10 +110,10 @@ public class TestS2PrefixTree extends LuceneTestCase{ CellIterator iterator = tree.getTreeCellIterator(point, level); S2PrefixTreeCell cell = null; while (iterator.hasNext()) { - cell = (S2PrefixTreeCell)iterator.next(); + cell = (S2PrefixTreeCell) iterator.next(); } assertTrue(cell.getLevel() == level); double precisionCell = S2Projections.MAX_WIDTH.getValue(cell.cellId.level()); assertTrue(precision > precisionCell); } -} \ No newline at end of file +} diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/tree/TestSpatialPrefixTree.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/tree/TestSpatialPrefixTree.java index 3325e50c03a..ad83d5c253b 100644 --- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/tree/TestSpatialPrefixTree.java +++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/tree/TestSpatialPrefixTree.java @@ -16,10 +16,8 @@ */ package org.apache.lucene.spatial.prefix.tree; -import org.locationtech.spatial4j.context.SpatialContext; -import org.locationtech.spatial4j.shape.Point; -import org.locationtech.spatial4j.shape.Rectangle; -import org.locationtech.spatial4j.shape.Shape; +import java.util.ArrayList; +import java.util.List; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.Field.Store; @@ -33,13 +31,14 @@ import org.apache.lucene.spatial.query.SpatialArgs; import org.apache.lucene.spatial.query.SpatialOperation; import org.junit.Before; import org.junit.Test; - -import java.util.ArrayList; -import java.util.List; +import org.locationtech.spatial4j.context.SpatialContext; +import org.locationtech.spatial4j.shape.Point; +import org.locationtech.spatial4j.shape.Rectangle; +import org.locationtech.spatial4j.shape.Shape; public class TestSpatialPrefixTree extends SpatialTestCase { - //TODO plug in others and test them + // TODO plug in others and test them private SpatialContext ctx; private SpatialPrefixTree trie; @@ -52,7 +51,7 @@ public class TestSpatialPrefixTree extends SpatialTestCase { @Test public void testCellTraverse() { - trie = new GeohashPrefixTree(ctx,4); + trie = new GeohashPrefixTree(ctx, 4); Cell prevC = null; Cell c = trie.getWorldCell(); @@ -65,9 +64,9 @@ public class TestSpatialPrefixTree extends SpatialTestCase { while (subCellsIter.hasNext()) { subCells.add(subCellsIter.next()); } - c = subCells.get(random().nextInt(subCells.size()-1)); + c = subCells.get(random().nextInt(subCells.size() - 1)); - assertEquals(prevC.getLevel()+1,c.getLevel()); + assertEquals(prevC.getLevel() + 1, c.getLevel()); Rectangle prevNShape = (Rectangle) prevC.getShape(); Shape s = c.getShape(); Rectangle sbox = s.getBoundingBox(); @@ -76,8 +75,8 @@ public class TestSpatialPrefixTree extends SpatialTestCase { } } /** - * A PrefixTree pruning optimization gone bad, applicable when optimize=true. - * See LUCENE-4770. 
+ * A PrefixTree pruning optimization gone bad, applicable when optimize=true. See LUCENE-4770. */ @Test public void testBadPrefixTreePrune() throws Exception { @@ -98,7 +97,9 @@ public class TestSpatialPrefixTree extends SpatialTestCase { Point upperleft = ctx.makePoint(-122.88, 48.54); Point lowerright = ctx.makePoint(-122.82, 48.62); - Query query = strategy.makeQuery(new SpatialArgs(SpatialOperation.Intersects, ctx.makeRectangle(upperleft, lowerright))); + Query query = + strategy.makeQuery( + new SpatialArgs(SpatialOperation.Intersects, ctx.makeRectangle(upperleft, lowerright))); commit(); @@ -110,5 +111,4 @@ public class TestSpatialPrefixTree extends SpatialTestCase { assertEquals(1, search.totalHits.value); } - -} \ No newline at end of file +} diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/query/TestSpatialArgsParser.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/query/TestSpatialArgsParser.java index c3b5d160e81..015a16317d9 100644 --- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/query/TestSpatialArgsParser.java +++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/query/TestSpatialArgsParser.java @@ -16,19 +16,18 @@ */ package org.apache.lucene.spatial.query; -import org.locationtech.spatial4j.context.SpatialContext; -import org.locationtech.spatial4j.shape.Rectangle; +import java.text.ParseException; import org.apache.lucene.util.LuceneTestCase; import org.junit.Test; +import org.locationtech.spatial4j.context.SpatialContext; +import org.locationtech.spatial4j.shape.Rectangle; -import java.text.ParseException; - -//Tests SpatialOperation somewhat too +// Tests SpatialOperation somewhat too public class TestSpatialArgsParser extends LuceneTestCase { private SpatialContext ctx = SpatialContext.GEO; - //The args parser is only dependent on the ctx for IO so I don't care to test + // The args parser is only dependent on the ctx for IO so I don't care to test // with other implementations. 
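For orientation before the test body: the parser turns an "Operation(shape)" string into SpatialArgs, with envelopes given in spatial4j's (minX, maxX, maxY, minY) order, as in the strings this test uses. A minimal sketch of the round trip exercised below (the wrapper method is hypothetical, added only to carry the checked ParseException):

  import java.text.ParseException;
  import org.apache.lucene.spatial.query.SpatialArgs;
  import org.apache.lucene.spatial.query.SpatialArgsParser;
  import org.apache.lucene.spatial.query.SpatialOperation;
  import org.locationtech.spatial4j.context.SpatialContext;

  static SpatialArgs parseIntersectsExample() throws ParseException {
    // "Operation(shape)" with the shape in spatial4j text form.
    SpatialArgs out =
        new SpatialArgsParser().parse("Intersects(Envelope(-10, 10, 20, -20))", SpatialContext.GEO);
    assert out.getOperation() == SpatialOperation.Intersects;
    return out;
  }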
@Test @@ -48,14 +47,18 @@ public class TestSpatialArgsParser extends LuceneTestCase { assertEquals(SpatialOperation.IsDisjointTo, out.getOperation()); // spatial operations need args - expectThrows(Exception.class, () -> { - parser.parse(SpatialOperation.IsDisjointTo + "[ ]", ctx); - }); + expectThrows( + Exception.class, + () -> { + parser.parse(SpatialOperation.IsDisjointTo + "[ ]", ctx); + }); // unknown operation - expectThrows(Exception.class, () -> { - parser.parse("XXXX(Envelope(-10, 10, 20, -20))", ctx); - }); + expectThrows( + Exception.class, + () -> { + parser.parse("XXXX(Envelope(-10, 10, 20, -20))", ctx); + }); assertAlias(SpatialOperation.IsWithin, "CoveredBy"); assertAlias(SpatialOperation.IsWithin, "COVEREDBY"); @@ -73,5 +76,4 @@ public class TestSpatialArgsParser extends LuceneTestCase { out = new SpatialArgsParser().parse(arg, ctx); assertEquals(op, out.getOperation()); } - } diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/serialized/TestSerializedStrategy.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/serialized/TestSerializedStrategy.java index 88f712a8af8..b1ea3adbb57 100644 --- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/serialized/TestSerializedStrategy.java +++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/serialized/TestSerializedStrategy.java @@ -17,7 +17,6 @@ package org.apache.lucene.spatial.serialized; import java.io.IOException; - import org.apache.lucene.spatial.SpatialMatchConcern; import org.apache.lucene.spatial.StrategyTestCase; import org.junit.Before; @@ -56,6 +55,6 @@ public class TestSerializedStrategy extends StrategyTestCase { executeQueries(SpatialMatchConcern.FILTER, QTEST_Cities_Intersects_BBox); } - //sorting is tested in DistanceStrategyTest + // sorting is tested in DistanceStrategyTest } diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/RandomizedShapeTestCase.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/RandomizedShapeTestCase.java index eb3563bad1f..7bc582db0e1 100644 --- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/RandomizedShapeTestCase.java +++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/RandomizedShapeTestCase.java @@ -16,32 +16,28 @@ */ package org.apache.lucene.spatial.spatial4j; +import org.apache.lucene.util.LuceneTestCase; import org.locationtech.spatial4j.context.SpatialContext; -import org.apache.lucene.util.LuceneTestCase; - -/** - * A base test class with utility methods to help test shapes. - * Extends from RandomizedTest. - */ +/** A base test class with utility methods to help test shapes. Extends from RandomizedTest. 
*/ public abstract class RandomizedShapeTestCase extends LuceneTestCase { - protected SpatialContext ctx;//needs to be set ASAP + protected SpatialContext ctx; // needs to be set ASAP public RandomizedShapeTestCase(SpatialContext ctx) { this.ctx = ctx; } @SuppressWarnings("unchecked") - public static void checkShapesImplementEquals( Class[] classes ) { - for( Class clazz : classes ) { + public static void checkShapesImplementEquals(Class[] classes) { + for (Class clazz : classes) { try { - clazz.getDeclaredMethod( "equals", Object.class ); + clazz.getDeclaredMethod("equals", Object.class); } catch (Exception e) { fail("Shape needs to define 'equals' : " + clazz.getName()); } try { - clazz.getDeclaredMethod( "hashCode" ); + clazz.getDeclaredMethod("hashCode"); } catch (Exception e) { fail("Shape needs to define 'hashCode' : " + clazz.getName()); } @@ -51,5 +47,4 @@ public abstract class RandomizedShapeTestCase extends LuceneTestCase { public static double divisible(double v, double divisible) { return (int) (Math.round(v / divisible) * divisible); } - } diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/ShapeRectRelationTestCase.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/ShapeRectRelationTestCase.java index 59f7b42a470..63eddb31b24 100644 --- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/ShapeRectRelationTestCase.java +++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/ShapeRectRelationTestCase.java @@ -16,6 +16,8 @@ */ package org.apache.lucene.spatial.spatial4j; +import static org.locationtech.spatial4j.distance.DistanceUtils.DEGREES_TO_RADIANS; + import org.junit.Rule; import org.junit.Test; import org.locationtech.spatial4j.TestLog; @@ -26,13 +28,10 @@ import org.locationtech.spatial4j.shape.RectIntersectionTestHelper; import org.locationtech.spatial4j.shape.Shape; import org.locationtech.spatial4j.shape.ShapeFactory; -import static org.locationtech.spatial4j.distance.DistanceUtils.DEGREES_TO_RADIANS; - public abstract class ShapeRectRelationTestCase extends RandomizedShapeTestCase { - protected final static double RADIANS_PER_DEGREE = Math.PI/180.0; + protected static final double RADIANS_PER_DEGREE = Math.PI / 180.0; - @Rule - public final TestLog testLog = TestLog.instance; + @Rule public final TestLog testLog = TestLog.instance; protected int maxRadius = 180; @@ -40,13 +39,14 @@ public abstract class ShapeRectRelationTestCase extends RandomizedShapeTestCase super(SpatialContext.GEO); } - public abstract class AbstractRectIntersectionTestHelper extends RectIntersectionTestHelper { + public abstract class AbstractRectIntersectionTestHelper + extends RectIntersectionTestHelper { public AbstractRectIntersectionTestHelper(SpatialContext ctx) { super(ctx); } - //2 times each -- should be plenty + // 2 times each -- should be plenty protected int getContainsMinimum(int laps) { return 2; @@ -56,7 +56,8 @@ public abstract class ShapeRectRelationTestCase extends RandomizedShapeTestCase return 2; } - // producing "within" cases in Geo3D based on our random shapes doesn't happen often. It'd be nice to increase this. + // producing "within" cases in Geo3D based on our random shapes doesn't happen often. It'd be + // nice to increase this. 
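A note on the reflection idiom in checkShapesImplementEquals above: Class.getDeclaredMethod only finds methods declared by the class itself, so an equals/hashCode inherited from Object does not pass, which is exactly what the check wants. The idiom in isolation (hypothetical helper name):

  static boolean declaresEqualsAndHashCode(Class<?> clazz) {
    try {
      clazz.getDeclaredMethod("equals", Object.class); // inherited Object.equals is not found here
      clazz.getDeclaredMethod("hashCode");
      return true;
    } catch (NoSuchMethodException e) {
      return false;
    }
  }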
protected int getWithinMinimum(int laps) { return 2; } @@ -76,7 +77,7 @@ public abstract class ShapeRectRelationTestCase extends RandomizedShapeTestCase @Override protected Shape generateRandomShape(Point nearP) { - final int circleRadius = maxRadius - random().nextInt(maxRadius);//no 0-radius + final int circleRadius = maxRadius - random().nextInt(maxRadius); // no 0-radius return ctx.getShapeFactory().circle(nearP, circleRadius); } @@ -84,7 +85,6 @@ public abstract class ShapeRectRelationTestCase extends RandomizedShapeTestCase protected Point randomPointInEmptyShape(Shape shape) { return shape.getCenter(); } - }.testRelateWithRectangle(); } @@ -102,7 +102,7 @@ public abstract class ShapeRectRelationTestCase extends RandomizedShapeTestCase Point upperRight = randomPoint(); Point lowerLeft = randomPoint(); if (upperRight.getY() < lowerLeft.getY()) { - //swap + // swap Point temp = upperRight; upperRight = lowerLeft; lowerLeft = temp; @@ -125,7 +125,7 @@ public abstract class ShapeRectRelationTestCase extends RandomizedShapeTestCase @Override protected Shape generateRandomShape(Point nearP) { final Point centerPoint = randomPoint(); - final int maxDistance = random().nextInt(maxRadius -20) + 20; + final int maxDistance = random().nextInt(maxRadius - 20) + 20; final Circle pointZone = ctx.getShapeFactory().circle(centerPoint, maxDistance); final int vertexCount = random().nextInt(3) + 3; while (true) { @@ -137,8 +137,10 @@ public abstract class ShapeRectRelationTestCase extends RandomizedShapeTestCase try { return builder.build(); } catch (IllegalArgumentException e) { - // This is what happens when we create a shape that is invalid. Although it is conceivable that there are cases where - // the exception is thrown incorrectly, we aren't going to be able to do that in this random test. + // This is what happens when we create a shape that is invalid. Although it is + // conceivable that there are cases where + // the exception is thrown incorrectly, we aren't going to be able to do that in this + // random test. continue; } } @@ -154,7 +156,6 @@ public abstract class ShapeRectRelationTestCase extends RandomizedShapeTestCase // Long/thin so lets just find 1. return 1; } - }.testRelateWithRectangle(); } @@ -165,10 +166,10 @@ public abstract class ShapeRectRelationTestCase extends RandomizedShapeTestCase @Override protected Shape generateRandomShape(Point nearP) { final Point centerPoint = randomPoint(); - final int maxDistance = random().nextInt(maxRadius -20) + 20; + final int maxDistance = random().nextInt(maxRadius - 20) + 20; final Circle pointZone = ctx.getShapeFactory().circle(centerPoint, maxDistance); final int pointCount = random().nextInt(5) + 1; - final double width = (random().nextInt(89)+1) * DEGREES_TO_RADIANS; + final double width = (random().nextInt(89) + 1) * DEGREES_TO_RADIANS; final ShapeFactory.LineStringBuilder builder = ctx.getShapeFactory().lineString(); while (true) { for (int i = 0; i < pointCount; i++) { @@ -179,8 +180,10 @@ public abstract class ShapeRectRelationTestCase extends RandomizedShapeTestCase try { return builder.build(); } catch (IllegalArgumentException e) { - // This is what happens when we create a shape that is invalid. Although it is conceivable that there are cases where - // the exception is thrown incorrectly, we aren't going to be able to do that in this random test. + // This is what happens when we create a shape that is invalid. 
Although it is + // conceivable that there are cases where + // the exception is thrown incorrectly, we aren't going to be able to do that in this + // random test. continue; } } @@ -196,7 +199,6 @@ public abstract class ShapeRectRelationTestCase extends RandomizedShapeTestCase // Long/thin so lets just find 1. return 1; } - }.testRelateWithRectangle(); } } diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/TestGeo3d.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/TestGeo3d.java index d146861a217..1ce896ae256 100644 --- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/TestGeo3d.java +++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/TestGeo3d.java @@ -31,37 +31,38 @@ public class TestGeo3d extends LuceneTestCase { SpatialContext ctx = factory.newSpatialContext(); String wkt = "POLYGON ((20.0 -60.4, 20.1 -60.4, 20.1 -60.3, 20.0 -60.3,20.0 -60.4))"; Shape s = ctx.getFormats().getWktReader().read(wkt); - assertTrue(s instanceof Geo3dShape); + assertTrue(s instanceof Geo3dShape); wkt = "POINT (30 10)"; s = ctx.getFormats().getWktReader().read(wkt); - assertTrue(s instanceof Geo3dShape); + assertTrue(s instanceof Geo3dShape); wkt = "LINESTRING (30 10, 10 30, 40 40)"; s = ctx.getFormats().getWktReader().read(wkt); - assertTrue(s instanceof Geo3dShape); + assertTrue(s instanceof Geo3dShape); wkt = "POLYGON ((35 10, 45 45, 15 40, 10 20, 35 10), (20 30, 35 35, 30 20, 20 30))"; s = ctx.getFormats().getWktReader().read(wkt); - assertTrue(s instanceof Geo3dShape); + assertTrue(s instanceof Geo3dShape); wkt = "MULTIPOINT ((10 40), (40 30), (20 20), (30 10))"; s = ctx.getFormats().getWktReader().read(wkt); - assertTrue(s instanceof Geo3dShape); + assertTrue(s instanceof Geo3dShape); wkt = "MULTILINESTRING ((10 10, 20 20, 10 40),(40 40, 30 30, 40 20, 30 10))"; s = ctx.getFormats().getWktReader().read(wkt); - assertTrue(s instanceof Geo3dShape); - wkt = "MULTIPOLYGON (((40 40, 20 45, 45 30, 40 40)), ((20 35, 10 30, 10 10, 30 5, 45 20, 20 35),(30 20, 20 15, 20 25, 30 20)))"; + assertTrue(s instanceof Geo3dShape); + wkt = + "MULTIPOLYGON (((40 40, 20 45, 45 30, 40 40)), ((20 35, 10 30, 10 10, 30 5, 45 20, 20 35),(30 20, 20 15, 20 25, 30 20)))"; s = ctx.getFormats().getWktReader().read(wkt); - assertTrue(s instanceof Geo3dShape); + assertTrue(s instanceof Geo3dShape); wkt = "GEOMETRYCOLLECTION(POINT(4 6),LINESTRING(4 6,7 10))"; s = ctx.getFormats().getWktReader().read(wkt); - assertTrue(s instanceof Geo3dShape); + assertTrue(s instanceof Geo3dShape); wkt = "ENVELOPE(1, 2, 4, 3)"; s = ctx.getFormats().getWktReader().read(wkt); - assertTrue(s instanceof Geo3dShape); + assertTrue(s instanceof Geo3dShape); wkt = "BUFFER(POINT(-10 30), 5.2)"; s = ctx.getFormats().getWktReader().read(wkt); - assertTrue(s instanceof Geo3dShape); - //wkt = "BUFFER(LINESTRING(1 2, 3 4), 0.5)"; - //s = ctx.getFormats().getWktReader().read(wkt); - //assertTrue(s instanceof Geo3dShape); + assertTrue(s instanceof Geo3dShape); + // wkt = "BUFFER(LINESTRING(1 2, 3 4), 0.5)"; + // s = ctx.getFormats().getWktReader().read(wkt); + // assertTrue(s instanceof Geo3dShape); } @Test @@ -72,14 +73,20 @@ public class TestGeo3d extends LuceneTestCase { final String polygon = "POLYGON ((-180 90, -180 -90, 180 -90, 180 90,-180 -90))"; expectThrows(InvalidShapeException.class, () -> ctx.getFormats().getWktReader().read(polygon)); - final String polygonWithHole = "POLYGON ((35 10, 45 45, 15 40, 10 20, 35 10), (20 30, 20 30, 20 30))"; - 
expectThrows(InvalidShapeException.class, () -> ctx.getFormats().getWktReader().read(polygonWithHole)); + final String polygonWithHole = + "POLYGON ((35 10, 45 45, 15 40, 10 20, 35 10), (20 30, 20 30, 20 30))"; + expectThrows( + InvalidShapeException.class, () -> ctx.getFormats().getWktReader().read(polygonWithHole)); - final String geometryCollection = "GEOMETRYCOLLECTION(POINT(4 6), LINESTRING(4 6,7 10), POLYGON ((-180 90, -180 -90, 180 -90, 180 90,-180 -90)))"; - expectThrows(InvalidShapeException.class, () -> ctx.getFormats().getWktReader().read(geometryCollection)); - - final String multiPolygon = "MULTIPOLYGON (((40 40, 20 45, 45 30, 40 40)), ((20 35, 10 30, 10 10, 30 5, 45 20, 20 35),(30 20, 20 15, 20 25, 30 20)), ((180 90, 90 90, 180 90)))"; - expectThrows(InvalidShapeException.class, () -> ctx.getFormats().getWktReader().read(multiPolygon)); + final String geometryCollection = + "GEOMETRYCOLLECTION(POINT(4 6), LINESTRING(4 6,7 10), POLYGON ((-180 90, -180 -90, 180 -90, 180 90,-180 -90)))"; + expectThrows( + InvalidShapeException.class, + () -> ctx.getFormats().getWktReader().read(geometryCollection)); + final String multiPolygon = + "MULTIPOLYGON (((40 40, 20 45, 45 30, 40 40)), ((20 35, 10 30, 10 10, 30 5, 45 20, 20 35),(30 20, 20 15, 20 25, 30 20)), ((180 90, 90 90, 180 90)))"; + expectThrows( + InvalidShapeException.class, () -> ctx.getFormats().getWktReader().read(multiPolygon)); } } diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/TestGeo3dRpt.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/TestGeo3dRpt.java index 22313514045..20ed2173cb8 100644 --- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/TestGeo3dRpt.java +++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/TestGeo3dRpt.java @@ -16,11 +16,12 @@ */ package org.apache.lucene.spatial.spatial4j; +import static org.locationtech.spatial4j.distance.DistanceUtils.DEGREES_TO_RADIANS; + import java.io.IOException; import java.util.ArrayList; import java.util.Iterator; import java.util.List; - import org.apache.lucene.spatial.SpatialTestData; import org.apache.lucene.spatial.composite.CompositeSpatialStrategy; import org.apache.lucene.spatial.prefix.RandomSpatialOpStrategyTestCase; @@ -43,8 +44,6 @@ import org.junit.Test; import org.locationtech.spatial4j.shape.Rectangle; import org.locationtech.spatial4j.shape.Shape; -import static org.locationtech.spatial4j.distance.DistanceUtils.DEGREES_TO_RADIANS; - public class TestGeo3dRpt extends RandomSpatialOpStrategyTestCase { private PlanetModel planetModel; @@ -67,9 +66,9 @@ public class TestGeo3dRpt extends RandomSpatialOpStrategyTestCase { } protected RecursivePrefixTreeStrategy newRPT() { - final RecursivePrefixTreeStrategy rpt = new RecursivePrefixTreeStrategy(this.grid, - getClass().getSimpleName() + "_rpt"); - rpt.setDistErrPct(0.10);//not too many cells + final RecursivePrefixTreeStrategy rpt = + new RecursivePrefixTreeStrategy(this.grid, getClass().getSimpleName() + "_rpt"); + rpt.setDistErrPct(0.10); // not too many cells return rpt; } @@ -82,9 +81,11 @@ public class TestGeo3dRpt extends RandomSpatialOpStrategyTestCase { setupGrid(); - SerializedDVStrategy serializedDVStrategy = new SerializedDVStrategy(ctx, getClass().getSimpleName() + "_sdv"); - this.strategy = new CompositeSpatialStrategy("composite_" + getClass().getSimpleName(), - rptStrategy, serializedDVStrategy); + SerializedDVStrategy serializedDVStrategy = + new SerializedDVStrategy(ctx, getClass().getSimpleName() + 
"_sdv"); + this.strategy = + new CompositeSpatialStrategy( + "composite_" + getClass().getSimpleName(), rptStrategy, serializedDVStrategy); } @Test @@ -96,9 +97,10 @@ public class TestGeo3dRpt extends RandomSpatialOpStrategyTestCase { points.add(new GeoPoint(planetModel, 14 * DEGREES_TO_RADIANS, -180 * DEGREES_TO_RADIANS)); points.add(new GeoPoint(planetModel, -15 * DEGREES_TO_RADIANS, 153 * DEGREES_TO_RADIANS)); - final Shape triangle = new Geo3dShape<>(GeoPolygonFactory.makeGeoPolygon(planetModel, points),ctx); + final Shape triangle = + new Geo3dShape<>(GeoPolygonFactory.makeGeoPolygon(planetModel, points), ctx); final Rectangle rect = ctx.makeRectangle(-49, -45, 73, 86); - testOperation(rect,SpatialOperation.Intersects,triangle, false); + testOperation(rect, SpatialOperation.Intersects, triangle, false); } @Test @@ -110,15 +112,18 @@ public class TestGeo3dRpt extends RandomSpatialOpStrategyTestCase { points.add(new GeoPoint(planetModel, -57 * DEGREES_TO_RADIANS, 146 * DEGREES_TO_RADIANS)); points.add(new GeoPoint(planetModel, 14 * DEGREES_TO_RADIANS, -180 * DEGREES_TO_RADIANS)); points.add(new GeoPoint(planetModel, -15 * DEGREES_TO_RADIANS, 153 * DEGREES_TO_RADIANS)); - final GeoPoint[] pathPoints = new GeoPoint[] { - new GeoPoint(planetModel, 55.0 * DEGREES_TO_RADIANS, -26.0 * DEGREES_TO_RADIANS), - new GeoPoint(planetModel, -90.0 * DEGREES_TO_RADIANS, 0.0), - new GeoPoint(planetModel, 54.0 * DEGREES_TO_RADIANS, 165.0 * DEGREES_TO_RADIANS), - new GeoPoint(planetModel, -90.0 * DEGREES_TO_RADIANS, 0.0)}; - final GeoPath path = GeoPathFactory.makeGeoPath(planetModel, 29 * DEGREES_TO_RADIANS, pathPoints); - final Shape shape = new Geo3dShape<>(path,ctx); + final GeoPoint[] pathPoints = + new GeoPoint[] { + new GeoPoint(planetModel, 55.0 * DEGREES_TO_RADIANS, -26.0 * DEGREES_TO_RADIANS), + new GeoPoint(planetModel, -90.0 * DEGREES_TO_RADIANS, 0.0), + new GeoPoint(planetModel, 54.0 * DEGREES_TO_RADIANS, 165.0 * DEGREES_TO_RADIANS), + new GeoPoint(planetModel, -90.0 * DEGREES_TO_RADIANS, 0.0) + }; + final GeoPath path = + GeoPathFactory.makeGeoPath(planetModel, 29 * DEGREES_TO_RADIANS, pathPoints); + final Shape shape = new Geo3dShape<>(path, ctx); final Rectangle rect = ctx.makeRectangle(131, 143, 39, 54); - testOperation(rect,SpatialOperation.Intersects,shape,true); + testOperation(rect, SpatialOperation.Intersects, shape, true); } @Test @@ -148,14 +153,14 @@ public class TestGeo3dRpt extends RandomSpatialOpStrategyTestCase { @Test public void testOperationsFromFile() throws IOException { setupStrategy(); - final Iterator indexedSpatialData = getSampleData( "states-poly.txt"); + final Iterator indexedSpatialData = getSampleData("states-poly.txt"); final List indexedShapes = new ArrayList<>(); - while(indexedSpatialData.hasNext()) { + while (indexedSpatialData.hasNext()) { indexedShapes.add(indexedSpatialData.next().shape); } - final Iterator querySpatialData = getSampleData( "states-bbox.txt"); + final Iterator querySpatialData = getSampleData("states-bbox.txt"); final List queryShapes = new ArrayList<>(); - while(querySpatialData.hasNext()) { + while (querySpatialData.hasNext()) { queryShapes.add(querySpatialData.next().shape); if (TEST_NIGHTLY) { queryShapes.add(randomQueryShape()); diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/TestGeo3dShapeSphereModelRectRelation.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/TestGeo3dShapeSphereModelRectRelation.java index afc81ded754..82d47117bb8 100644 --- 
a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/TestGeo3dShapeSphereModelRectRelation.java +++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/TestGeo3dShapeSphereModelRectRelation.java @@ -18,7 +18,6 @@ package org.apache.lucene.spatial.spatial4j; import java.util.ArrayList; import java.util.List; - import org.apache.lucene.spatial3d.geom.GeoArea; import org.apache.lucene.spatial3d.geom.GeoBBox; import org.apache.lucene.spatial3d.geom.GeoBBoxFactory; @@ -46,16 +45,33 @@ public class TestGeo3dShapeSphereModelRectRelation extends ShapeRectRelationTest @Test public void testFailure1() { - final GeoBBox rect = GeoBBoxFactory.makeGeoBBox(planetModel, 88 * RADIANS_PER_DEGREE, 30 * RADIANS_PER_DEGREE, -30 * RADIANS_PER_DEGREE, 62 * RADIANS_PER_DEGREE); + final GeoBBox rect = + GeoBBoxFactory.makeGeoBBox( + planetModel, + 88 * RADIANS_PER_DEGREE, + 30 * RADIANS_PER_DEGREE, + -30 * RADIANS_PER_DEGREE, + 62 * RADIANS_PER_DEGREE); final List points = new ArrayList<>(); - points.add(new GeoPoint(planetModel, 30.4579218227 * RADIANS_PER_DEGREE, 14.5238410082 * RADIANS_PER_DEGREE)); - points.add(new GeoPoint(planetModel, 43.684447915 * RADIANS_PER_DEGREE, 46.2210986329 * RADIANS_PER_DEGREE)); - points.add(new GeoPoint(planetModel, 66.2465299717 * RADIANS_PER_DEGREE, -29.1786158537 * RADIANS_PER_DEGREE)); + points.add( + new GeoPoint( + planetModel, 30.4579218227 * RADIANS_PER_DEGREE, 14.5238410082 * RADIANS_PER_DEGREE)); + points.add( + new GeoPoint( + planetModel, 43.684447915 * RADIANS_PER_DEGREE, 46.2210986329 * RADIANS_PER_DEGREE)); + points.add( + new GeoPoint( + planetModel, 66.2465299717 * RADIANS_PER_DEGREE, -29.1786158537 * RADIANS_PER_DEGREE)); final GeoShape path = GeoPolygonFactory.makeGeoPolygon(planetModel, points); - final GeoPoint point = new GeoPoint(planetModel, 34.2730264413182 * RADIANS_PER_DEGREE, 82.75500168892472 * RADIANS_PER_DEGREE); + final GeoPoint point = + new GeoPoint( + planetModel, + 34.2730264413182 * RADIANS_PER_DEGREE, + 82.75500168892472 * RADIANS_PER_DEGREE); - // Apparently the rectangle thinks the polygon is completely within it... "shape inside rectangle" + // Apparently the rectangle thinks the polygon is completely within it... "shape inside + // rectangle" assertTrue(GeoArea.WITHIN == rect.getRelationship(path)); // Point is within path? Apparently not... 
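The geo3d factories used in these tests take radians, latitude before longitude, which is why every literal is multiplied by RADIANS_PER_DEGREE. A minimal sketch of the pattern, assuming PlanetModel.SPHERE and made-up coordinates (makeGeoBBox takes top latitude, bottom latitude, left longitude, right longitude, matching the calls above):

  import org.apache.lucene.spatial3d.geom.GeoBBox;
  import org.apache.lucene.spatial3d.geom.GeoBBoxFactory;
  import org.apache.lucene.spatial3d.geom.GeoPoint;
  import org.apache.lucene.spatial3d.geom.PlanetModel;

  double d2r = Math.PI / 180.0; // RADIANS_PER_DEGREE
  GeoBBox rect =
      GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, 88 * d2r, 30 * d2r, -30 * d2r, 62 * d2r);
  GeoPoint pt = new GeoPoint(PlanetModel.SPHERE, 45 * d2r, 10 * d2r); // (lat, lon) in radians
  boolean inside = rect.isWithin(pt); // the membership test used by the assertions here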
@@ -63,23 +79,25 @@ public class TestGeo3dShapeSphereModelRectRelation extends ShapeRectRelationTest // If it is within the path, it must be within the rectangle, and similarly visa versa assertFalse(rect.isWithin(point)); - } @Test public void testFailure2_LUCENE6475() { - GeoCircle geo3dCircle = GeoCircleFactory.makeGeoCircle(planetModel, 1.6282053147165243E-4 * RADIANS_PER_DEGREE, - -70.1600629789353 * RADIANS_PER_DEGREE, 86 * RADIANS_PER_DEGREE); + GeoCircle geo3dCircle = + GeoCircleFactory.makeGeoCircle( + planetModel, + 1.6282053147165243E-4 * RADIANS_PER_DEGREE, + -70.1600629789353 * RADIANS_PER_DEGREE, + 86 * RADIANS_PER_DEGREE); Geo3dShape geo3dShape = new Geo3dShape<>(geo3dCircle, ctx); Rectangle rect = ctx.getShapeFactory().rect(-118, -114, -2.0, 32.0); assertTrue(geo3dShape.relate(rect).intersects()); // thus the bounding box must intersect too assertTrue(geo3dShape.getBoundingBox().relate(rect).intersects()); - } @Test - public void pointBearingTest(){ + public void pointBearingTest() { double radius = 136; double distance = 135.97; double bearing = 188; @@ -88,7 +106,7 @@ public class TestGeo3dShapeSphereModelRectRelation extends ShapeRectRelationTest Point bPoint = ctx.getDistCalc().pointOnBearing(p, distance, bearing, ctx, (Point) null); double d = ctx.getDistCalc().distance(p, bPoint); - assertEquals(d, distance, 10-8); + assertEquals(d, distance, 10 - 8); assertEquals(circle.relate(bPoint), SpatialRelation.CONTAINS); } diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/TestGeo3dShapeWGS84ModelRectRelation.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/TestGeo3dShapeWGS84ModelRectRelation.java index 8b98aad6366..6376fc0b7f7 100644 --- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/TestGeo3dShapeWGS84ModelRectRelation.java +++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/TestGeo3dShapeWGS84ModelRectRelation.java @@ -20,10 +20,10 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.lucene.spatial3d.geom.GeoArea; import org.apache.lucene.spatial3d.geom.GeoBBox; import org.apache.lucene.spatial3d.geom.GeoBBoxFactory; -import org.apache.lucene.spatial3d.geom.GeoCircleFactory; import org.apache.lucene.spatial3d.geom.GeoCircle; -import org.apache.lucene.spatial3d.geom.GeoPathFactory; +import org.apache.lucene.spatial3d.geom.GeoCircleFactory; import org.apache.lucene.spatial3d.geom.GeoPath; +import org.apache.lucene.spatial3d.geom.GeoPathFactory; import org.apache.lucene.spatial3d.geom.GeoPoint; import org.apache.lucene.spatial3d.geom.PlanetModel; import org.junit.Test; @@ -33,77 +33,111 @@ import org.locationtech.spatial4j.shape.SpatialRelation; public class TestGeo3dShapeWGS84ModelRectRelation extends ShapeRectRelationTestCase { - PlanetModel planetModel = RandomPicks.randomFrom(random(), new PlanetModel[] {PlanetModel.WGS84, PlanetModel.CLARKE_1866}); + PlanetModel planetModel = + RandomPicks.randomFrom( + random(), new PlanetModel[] {PlanetModel.WGS84, PlanetModel.CLARKE_1866}); public TestGeo3dShapeWGS84ModelRectRelation() { Geo3dSpatialContextFactory factory = new Geo3dSpatialContextFactory(); factory.planetModel = planetModel; this.ctx = factory.newSpatialContext(); this.maxRadius = 175; - ((Geo3dShapeFactory)ctx.getShapeFactory()).setCircleAccuracy(1e-12); + ((Geo3dShapeFactory) ctx.getShapeFactory()).setCircleAccuracy(1e-12); } @Test public void testFailure1() { - final GeoBBox rect = GeoBBoxFactory.makeGeoBBox(planetModel, 90 * 
RADIANS_PER_DEGREE, 74 * RADIANS_PER_DEGREE, - 40 * RADIANS_PER_DEGREE, 60 * RADIANS_PER_DEGREE); - final GeoPoint[] pathPoints = new GeoPoint[] { - new GeoPoint(planetModel, 84.4987594274 * RADIANS_PER_DEGREE, -22.8345484402 * RADIANS_PER_DEGREE)}; - final GeoPath path = GeoPathFactory.makeGeoPath(planetModel, 4 * RADIANS_PER_DEGREE, pathPoints); + final GeoBBox rect = + GeoBBoxFactory.makeGeoBBox( + planetModel, + 90 * RADIANS_PER_DEGREE, + 74 * RADIANS_PER_DEGREE, + 40 * RADIANS_PER_DEGREE, + 60 * RADIANS_PER_DEGREE); + final GeoPoint[] pathPoints = + new GeoPoint[] { + new GeoPoint( + planetModel, 84.4987594274 * RADIANS_PER_DEGREE, -22.8345484402 * RADIANS_PER_DEGREE) + }; + final GeoPath path = + GeoPathFactory.makeGeoPath(planetModel, 4 * RADIANS_PER_DEGREE, pathPoints); assertTrue(GeoArea.DISJOINT == rect.getRelationship(path)); // This is what the test failure claimed... - //assertTrue(GeoArea.CONTAINS == rect.getRelationship(path)); - //final GeoBBox bbox = getBoundingBox(path); - //assertFalse(GeoArea.DISJOINT == rect.getRelationship(bbox)); + // assertTrue(GeoArea.CONTAINS == rect.getRelationship(path)); + // final GeoBBox bbox = getBoundingBox(path); + // assertFalse(GeoArea.DISJOINT == rect.getRelationship(bbox)); } @Test public void testFailure2() { - final GeoBBox rect = GeoBBoxFactory.makeGeoBBox(planetModel, -74 * RADIANS_PER_DEGREE, -90 * RADIANS_PER_DEGREE, - 0 * RADIANS_PER_DEGREE, 26 * RADIANS_PER_DEGREE); - final GeoCircle circle = GeoCircleFactory.makeGeoCircle(planetModel, -87.3647352103 * RADIANS_PER_DEGREE, 52.3769709972 * RADIANS_PER_DEGREE, 1 * RADIANS_PER_DEGREE); + final GeoBBox rect = + GeoBBoxFactory.makeGeoBBox( + planetModel, + -74 * RADIANS_PER_DEGREE, + -90 * RADIANS_PER_DEGREE, + 0 * RADIANS_PER_DEGREE, + 26 * RADIANS_PER_DEGREE); + final GeoCircle circle = + GeoCircleFactory.makeGeoCircle( + planetModel, + -87.3647352103 * RADIANS_PER_DEGREE, + 52.3769709972 * RADIANS_PER_DEGREE, + 1 * RADIANS_PER_DEGREE); assertTrue(GeoArea.DISJOINT == rect.getRelationship(circle)); // This is what the test failure claimed... 
- //assertTrue(GeoArea.CONTAINS == rect.getRelationship(circle)); - //final GeoBBox bbox = getBoundingBox(circle); - //assertFalse(GeoArea.DISJOINT == rect.getRelationship(bbox)); + // assertTrue(GeoArea.CONTAINS == rect.getRelationship(circle)); + // final GeoBBox bbox = getBoundingBox(circle); + // assertFalse(GeoArea.DISJOINT == rect.getRelationship(bbox)); } @Test public void testFailure3() { /* - [junit4] 1> S-R Rel: {}, Shape {}, Rectangle {} lap# {} [CONTAINS, Geo3dShape{planetmodel=PlanetModel: {xyScaling=1.0011188180710464, zScaling=0.9977622539852008}, shape=GeoPath: {planetmodel=PlanetModel: {xyScaling=1.0011188180710464, zScaling=0.9977622539852008}, width=1.53588974175501(87.99999999999999), - points={[[X=0.12097657665150223, Y=-0.6754177666095532, Z=0.7265376136709238], [X=-0.3837892785614207, Y=0.4258049113530899, Z=0.8180007850434892]]}}}, - Rect(minX=4.0,maxX=36.0,minY=16.0,maxY=16.0), 6981](no slf4j subst; sorry) - [junit4] FAILURE 0.59s | Geo3dWGS84ShapeRectRelationTest.testGeoPathRect <<< - [junit4] > Throwable #1: java.lang.AssertionError: Geo3dShape{planetmodel=PlanetModel: {xyScaling=1.0011188180710464, zScaling=0.9977622539852008}, shape=GeoPath: {planetmodel=PlanetModel: {xyScaling=1.0011188180710464, zScaling=0.9977622539852008}, width=1.53588974175501(87.99999999999999), - points={[[X=0.12097657665150223, Y=-0.6754177666095532, Z=0.7265376136709238], [X=-0.3837892785614207, Y=0.4258049113530899, Z=0.8180007850434892]]}}} intersect Pt(x=23.81626064835212,y=16.0) - [junit4] > at __randomizedtesting.SeedInfo.seed([2595268DA3F13FEA:6CC30D8C83453E5D]:0) - [junit4] > at org.apache.lucene.spatial.spatial4j.RandomizedShapeTestCase._assertIntersect(RandomizedShapeTestCase.java:168) - [junit4] > at org.apache.lucene.spatial.spatial4j.RandomizedShapeTestCase.assertRelation(RandomizedShapeTestCase.java:153) - [junit4] > at org.apache.lucene.spatial.spatial4j.RectIntersectionTestHelper.testRelateWithRectangle(RectIntersectionTestHelper.java:128) - [junit4] > at org.apache.lucene.spatial.spatial4j.Geo3dWGS84ShapeRectRelationTest.testGeoPathRect(Geo3dWGS84ShapeRectRelationTest.java:265) - */ - final GeoBBox rect = GeoBBoxFactory.makeGeoBBox(planetModel, 16 * RADIANS_PER_DEGREE, 16 * RADIANS_PER_DEGREE, 4 * RADIANS_PER_DEGREE, 36 * RADIANS_PER_DEGREE); - final GeoPoint pt = new GeoPoint(planetModel, 16 * RADIANS_PER_DEGREE, 23.81626064835212 * RADIANS_PER_DEGREE); - final GeoPoint[] pathPoints = new GeoPoint[]{ - new GeoPoint(planetModel, 46.6369060853 * RADIANS_PER_DEGREE, -79.8452213228 * RADIANS_PER_DEGREE), - new GeoPoint(planetModel, 54.9779334519 * RADIANS_PER_DEGREE, 132.029177424 * RADIANS_PER_DEGREE)}; - final GeoPath path = GeoPathFactory.makeGeoPath(planetModel, 88 * RADIANS_PER_DEGREE, pathPoints); + [junit4] 1> S-R Rel: {}, Shape {}, Rectangle {} lap# {} [CONTAINS, Geo3dShape{planetmodel=PlanetModel: {xyScaling=1.0011188180710464, zScaling=0.9977622539852008}, shape=GeoPath: {planetmodel=PlanetModel: {xyScaling=1.0011188180710464, zScaling=0.9977622539852008}, width=1.53588974175501(87.99999999999999), + points={[[X=0.12097657665150223, Y=-0.6754177666095532, Z=0.7265376136709238], [X=-0.3837892785614207, Y=0.4258049113530899, Z=0.8180007850434892]]}}}, + Rect(minX=4.0,maxX=36.0,minY=16.0,maxY=16.0), 6981](no slf4j subst; sorry) + [junit4] FAILURE 0.59s | Geo3dWGS84ShapeRectRelationTest.testGeoPathRect <<< + [junit4] > Throwable #1: java.lang.AssertionError: Geo3dShape{planetmodel=PlanetModel: {xyScaling=1.0011188180710464, zScaling=0.9977622539852008}, 
shape=GeoPath: {planetmodel=PlanetModel: {xyScaling=1.0011188180710464, zScaling=0.9977622539852008}, width=1.53588974175501(87.99999999999999), + points={[[X=0.12097657665150223, Y=-0.6754177666095532, Z=0.7265376136709238], [X=-0.3837892785614207, Y=0.4258049113530899, Z=0.8180007850434892]]}}} intersect Pt(x=23.81626064835212,y=16.0) + [junit4] > at __randomizedtesting.SeedInfo.seed([2595268DA3F13FEA:6CC30D8C83453E5D]:0) + [junit4] > at org.apache.lucene.spatial.spatial4j.RandomizedShapeTestCase._assertIntersect(RandomizedShapeTestCase.java:168) + [junit4] > at org.apache.lucene.spatial.spatial4j.RandomizedShapeTestCase.assertRelation(RandomizedShapeTestCase.java:153) + [junit4] > at org.apache.lucene.spatial.spatial4j.RectIntersectionTestHelper.testRelateWithRectangle(RectIntersectionTestHelper.java:128) + [junit4] > at org.apache.lucene.spatial.spatial4j.Geo3dWGS84ShapeRectRelationTest.testGeoPathRect(Geo3dWGS84ShapeRectRelationTest.java:265) + */ + final GeoBBox rect = + GeoBBoxFactory.makeGeoBBox( + planetModel, + 16 * RADIANS_PER_DEGREE, + 16 * RADIANS_PER_DEGREE, + 4 * RADIANS_PER_DEGREE, + 36 * RADIANS_PER_DEGREE); + final GeoPoint pt = + new GeoPoint(planetModel, 16 * RADIANS_PER_DEGREE, 23.81626064835212 * RADIANS_PER_DEGREE); + final GeoPoint[] pathPoints = + new GeoPoint[] { + new GeoPoint( + planetModel, 46.6369060853 * RADIANS_PER_DEGREE, -79.8452213228 * RADIANS_PER_DEGREE), + new GeoPoint( + planetModel, 54.9779334519 * RADIANS_PER_DEGREE, 132.029177424 * RADIANS_PER_DEGREE) + }; + final GeoPath path = + GeoPathFactory.makeGeoPath(planetModel, 88 * RADIANS_PER_DEGREE, pathPoints); System.out.println("rect=" + rect); // Rectangle is within path (this is wrong; it's on the other side. Should be OVERLAPS) assertTrue(GeoArea.OVERLAPS == rect.getRelationship(path)); // Rectangle contains point - //assertTrue(rect.isWithin(pt)); + // assertTrue(rect.isWithin(pt)); // Path contains point (THIS FAILS) - //assertTrue(path.isWithin(pt)); - // What happens: (1) The center point of the horizontal line is within the path, in fact within a radius of one of the endpoints. + // assertTrue(path.isWithin(pt)); + // What happens: (1) The center point of the horizontal line is within the path, in fact within + // a radius of one of the endpoints. // (2) The point mentioned is NOT inside either SegmentEndpoint. // (3) The point mentioned is NOT inside the path segment, either. (I think it should be...) 
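A reading aid for the coordinates above: this GeoPoint constructor takes latitude before longitude, both in radians, so the probe point sits at 16 degrees north, roughly 23.816 degrees east, on the rectangle's degenerate horizontal edge (minY == maxY == 16 in the logged failure). A minimal sketch with hypothetical named locals, assuming the test's planetModel and RADIANS_PER_DEGREE:

    // Sketch only: latitude first, then longitude, for the probe point discussed above.
    final double latRad = 16 * RADIANS_PER_DEGREE;
    final double lonRad = 23.81626064835212 * RADIANS_PER_DEGREE;
    final GeoPoint probe = new GeoPoint(planetModel, latRad, lonRad);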
} @Test - public void pointBearingTest(){ + public void pointBearingTest() { double radius = 136; double distance = 135.97; double bearing = 188; @@ -112,17 +146,17 @@ public class TestGeo3dShapeWGS84ModelRectRelation extends ShapeRectRelationTestC Point bPoint = ctx.getDistCalc().pointOnBearing(p, distance, bearing, ctx, (Point) null); double d = ctx.getDistCalc().distance(p, bPoint); - assertEquals(d, distance, 10-8); + assertEquals(d, distance, 10 - 8); assertEquals(circle.relate(bPoint), SpatialRelation.CONTAINS); } - + // very slow, test sources are not all here, no clue how to fix it @Nightly public void testGeoCircleRect() { super.testGeoCircleRect(); } - + // very slow, test sources are not all here, no clue how to fix it @Nightly public void testGeoPolygonRect() { diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/vector/TestPointVectorStrategy.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/vector/TestPointVectorStrategy.java index 901594ef749..f84f6007f3f 100644 --- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/vector/TestPointVectorStrategy.java +++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/vector/TestPointVectorStrategy.java @@ -18,7 +18,6 @@ package org.apache.lucene.spatial.vector; import java.io.IOException; import java.text.ParseException; - import org.apache.lucene.document.Document; import org.apache.lucene.document.FieldType; import org.apache.lucene.search.MatchAllDocsQuery; @@ -76,26 +75,38 @@ public class TestPointVectorStrategy extends StrategyTestCase { commit(); SearchResults results = executeQuery(new MatchAllDocsQuery(), 1); Document document = results.results.get(0).document; - assertNull("not stored", document.getField(strategy.getFieldName() + PointVectorStrategy.SUFFIX_X)); - assertNull("not stored", document.getField(strategy.getFieldName() + PointVectorStrategy.SUFFIX_Y)); + assertNull( + "not stored", document.getField(strategy.getFieldName() + PointVectorStrategy.SUFFIX_X)); + assertNull( + "not stored", document.getField(strategy.getFieldName() + PointVectorStrategy.SUFFIX_Y)); deleteAll(); // Now we mark it stored. We also disable pointvalues... 
FieldType fieldType = new FieldType(PointVectorStrategy.DEFAULT_FIELDTYPE); fieldType.setStored(true); - fieldType.setDimensions(0, 0);//disable point values + fieldType.setDimensions(0, 0); // disable point values this.strategy = new PointVectorStrategy(ctx, getClass().getSimpleName(), fieldType); adoc("99", "POINT(-5.0 8.2)"); commit(); results = executeQuery(new MatchAllDocsQuery(), 1); document = results.results.get(0).document; - assertEquals("stored", -5.0, document.getField(strategy.getFieldName() + PointVectorStrategy.SUFFIX_X).numericValue()); - assertEquals("stored", 8.2, document.getField(strategy.getFieldName() + PointVectorStrategy.SUFFIX_Y).numericValue()); + assertEquals( + "stored", + -5.0, + document.getField(strategy.getFieldName() + PointVectorStrategy.SUFFIX_X).numericValue()); + assertEquals( + "stored", + 8.2, + document.getField(strategy.getFieldName() + PointVectorStrategy.SUFFIX_Y).numericValue()); // Test a query fails without point values - expectThrows(UnsupportedOperationException.class, () -> { - SpatialArgs args = new SpatialArgs(SpatialOperation.Intersects, ctx.makeRectangle(-10.0, 10.0, -5.0, 5.0)); - this.strategy.makeQuery(args); - }); + expectThrows( + UnsupportedOperationException.class, + () -> { + SpatialArgs args = + new SpatialArgs( + SpatialOperation.Intersects, ctx.makeRectangle(-10.0, 10.0, -5.0, 5.0)); + this.strategy.makeQuery(args); + }); } } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DDocValuesField.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DDocValuesField.java index e2d27330ebc..fcf2235c7ac 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DDocValuesField.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DDocValuesField.java @@ -16,84 +16,95 @@ */ package org.apache.lucene.spatial3d; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.geo.Polygon; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.SortField; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.FieldType; -import org.apache.lucene.geo.Polygon; - -import org.apache.lucene.spatial3d.geom.PlanetModel; -import org.apache.lucene.spatial3d.geom.GeoPoint; import org.apache.lucene.spatial3d.geom.GeoDistanceShape; import org.apache.lucene.spatial3d.geom.GeoOutsideDistance; +import org.apache.lucene.spatial3d.geom.GeoPoint; +import org.apache.lucene.spatial3d.geom.PlanetModel; -/** +/** * An per-document 3D location field. - *
<p> - * Sorting by distance is efficient. Multiple values for the same field in one document - * is allowed. - *
<p> - * This field defines static factory methods for common operations: + * + *
<p>Sorting by distance is efficient. Multiple values for the same field in one document is + * allowed. + * + *
<p>This field defines static factory methods for common operations: + * *
<ul> *   <li>TBD * </ul>
    - *
<p> - * If you also need query operations, you should add a separate {@link Geo3DPoint} instance. - *
<p> - * <b>WARNING</b>: Values are indexed with some loss of precision from the - * original {@code double} values (4.190951585769653E-8 for the latitude component - * and 8.381903171539307E-8 for longitude). + * + *
<p>If you also need query operations, you should add a separate {@link Geo3DPoint} instance. + * + *
<p><b>WARNING</b>: Values are indexed with some loss of precision from the original {@code + * double} values (4.190951585769653E-8 for the latitude component and 8.381903171539307E-8 for + * longitude). + * * @see Geo3DPoint */ public class Geo3DDocValuesField extends Field { private final PlanetModel planetModel; - + /** * Type for a Geo3DDocValuesField - *
<p> - * Each value stores a 64-bit long where the three values (x, y, and z) are given - * 21 bits each. Each 21-bit value represents the maximum extent in that dimension - * for the defined planet model. + * + *
<p>
    Each value stores a 64-bit long where the three values (x, y, and z) are given 21 bits each. + * Each 21-bit value represents the maximum extent in that dimension for the defined planet model. */ public static final FieldType TYPE = new FieldType(); + static { TYPE.setDocValuesType(DocValuesType.SORTED_NUMERIC); TYPE.freeze(); } - - /** + + /** * Creates a new Geo3DDocValuesField with the specified x, y, and z + * * @param name field name * @param point is the point. * @throws IllegalArgumentException if the field name is null or the point is out of bounds */ - public Geo3DDocValuesField(final String name, final GeoPoint point, final PlanetModel planetModel) { + public Geo3DDocValuesField( + final String name, final GeoPoint point, final PlanetModel planetModel) { this(name, TYPE, planetModel); setLocationValue(point); } - /** + /** * Creates a new Geo3DDocValuesField with the specified x, y, and z + * * @param name field name * @param x is the x value for the point. * @param y is the y value for the point. * @param z is the z value for the point. * @throws IllegalArgumentException if the field name is null or x, y, or z are out of bounds */ - public Geo3DDocValuesField(final String name, final double x, final double y, final double z, final PlanetModel planetModel) { + public Geo3DDocValuesField( + final String name, + final double x, + final double y, + final double z, + final PlanetModel planetModel) { this(name, TYPE, planetModel); setLocationValue(x, y, z); } - private Geo3DDocValuesField(final String name, final FieldType type, final PlanetModel planetModel) { + private Geo3DDocValuesField( + final String name, final FieldType type, final PlanetModel planetModel) { super(name, TYPE); this.planetModel = planetModel; } /** * Change the values of this field + * * @param point is the point. * @throws IllegalArgumentException if the point is out of bounds */ @@ -103,6 +114,7 @@ public class Geo3DDocValuesField extends Field { /** * Change the values of this field + * * @param x is the x value for the point. * @param y is the y value for the point. * @param z is the z value for the point. @@ -114,14 +126,21 @@ public class Geo3DDocValuesField extends Field { /** helper: checks a fieldinfo and throws exception if its definitely not a Geo3DDocValuesField */ static void checkCompatible(FieldInfo fieldInfo) { - // dv properties could be "unset", if you e.g. used only StoredField with this same name in the segment. - if (fieldInfo.getDocValuesType() != DocValuesType.NONE && fieldInfo.getDocValuesType() != TYPE.docValuesType()) { - throw new IllegalArgumentException("field=\"" + fieldInfo.name + "\" was indexed with docValuesType=" + fieldInfo.getDocValuesType() + - " but this type has docValuesType=" + TYPE.docValuesType() + - ", is the field really a Geo3DDocValuesField?"); + // dv properties could be "unset", if you e.g. used only StoredField with this same name in the + // segment. 
+ if (fieldInfo.getDocValuesType() != DocValuesType.NONE + && fieldInfo.getDocValuesType() != TYPE.docValuesType()) { + throw new IllegalArgumentException( + "field=\"" + + fieldInfo.name + + "\" was indexed with docValuesType=" + + fieldInfo.getDocValuesType() + + " but this type has docValuesType=" + + TYPE.docValuesType() + + ", is the field really a Geo3DDocValuesField?"); } } - + @Override public String toString() { StringBuilder result = new StringBuilder(); @@ -130,8 +149,8 @@ public class Geo3DDocValuesField extends Field { result.append(name); result.append(':'); - long currentValue = (Long)fieldsData; - + long currentValue = (Long) fieldsData; + result.append(planetModel.getDocValueEncoder().decodeXValue(currentValue)); result.append(','); result.append(planetModel.getDocValueEncoder().decodeYValue(currentValue)); @@ -144,15 +163,16 @@ public class Geo3DDocValuesField extends Field { /** * Creates a SortField for sorting by distance within a circle. - *
<p> - * This sort orders documents by ascending distance from the location. The value returned in {@link FieldDoc} for - * the hits contains a Double instance with the distance in meters. - *
<p> - * If a document is missing the field, then by default it is treated as having {@link Double#POSITIVE_INFINITY} distance - * (missing values sort last). - *
<p> - * If a document contains multiple values for the field, the <b>closest</b> distance from the circle center is used. - * + * + *
<p>This sort orders documents by ascending distance from the location. The value returned in + * {@link FieldDoc} for the hits contains a Double instance with the distance in meters. + * + *
<p>If a document is missing the field, then by default it is treated as having {@link + * Double#POSITIVE_INFINITY} distance (missing values sort last). + * + *
<p>
    If a document contains multiple values for the field, the closest distance from the + * circle center is used. + * * @param field field name. must not be null. * @param latitude latitude at the center: must be within standard +/-90 coordinate bounds. * @param longitude longitude at the center: must be within standard +/-180 coordinate bounds. @@ -160,48 +180,65 @@ public class Geo3DDocValuesField extends Field { * @return SortField ordering documents by distance * @throws IllegalArgumentException if {@code field} is null or circle has invalid coordinates. */ - public static SortField newDistanceSort(final String field, final double latitude, final double longitude, final double maxRadiusMeters, final PlanetModel planetModel) { - final GeoDistanceShape shape = Geo3DUtil.fromDistance(planetModel, latitude, longitude, maxRadiusMeters); + public static SortField newDistanceSort( + final String field, + final double latitude, + final double longitude, + final double maxRadiusMeters, + final PlanetModel planetModel) { + final GeoDistanceShape shape = + Geo3DUtil.fromDistance(planetModel, latitude, longitude, maxRadiusMeters); return new Geo3DPointSortField(field, planetModel, shape); } /** * Creates a SortField for sorting by distance along a path. - *
<p> - * This sort orders documents by ascending distance along the described path. The value returned in {@link FieldDoc} for - * the hits contains a Double instance with the distance in meters. - *
<p> - * If a document is missing the field, then by default it is treated as having {@link Double#POSITIVE_INFINITY} distance - * (missing values sort last). - *
<p> - * If a document contains multiple values for the field, the <b>closest</b> distance along the path is used. - * + * + *
<p>This sort orders documents by ascending distance along the described path. The value + * returned in {@link FieldDoc} for the hits contains a Double instance with the distance in + * meters. + * + *
<p>If a document is missing the field, then by default it is treated as having {@link + * Double#POSITIVE_INFINITY} distance (missing values sort last). + * + *
<p>
    If a document contains multiple values for the field, the closest distance along the + * path is used. + * * @param field field name. must not be null. - * @param pathLatitudes latitude values for points of the path: must be within standard +/-90 coordinate bounds. - * @param pathLongitudes longitude values for points of the path: must be within standard +/-180 coordinate bounds. + * @param pathLatitudes latitude values for points of the path: must be within standard +/-90 + * coordinate bounds. + * @param pathLongitudes longitude values for points of the path: must be within standard +/-180 + * coordinate bounds. * @param pathWidthMeters width of the path in meters. * @return SortField ordering documents by distance * @throws IllegalArgumentException if {@code field} is null or path has invalid coordinates. */ - public static SortField newPathSort(final String field, final double[] pathLatitudes, final double[] pathLongitudes, final double pathWidthMeters, final PlanetModel planetModel) { - final GeoDistanceShape shape = Geo3DUtil.fromPath(planetModel, pathLatitudes, pathLongitudes, pathWidthMeters); + public static SortField newPathSort( + final String field, + final double[] pathLatitudes, + final double[] pathLongitudes, + final double pathWidthMeters, + final PlanetModel planetModel) { + final GeoDistanceShape shape = + Geo3DUtil.fromPath(planetModel, pathLatitudes, pathLongitudes, pathWidthMeters); return new Geo3DPointSortField(field, planetModel, shape); } // Outside distances - + /** * Creates a SortField for sorting by outside distance from a circle. - *
<p> - * This sort orders documents by ascending outside distance from the circle. Points within the circle have distance 0.0. - * The value returned in {@link FieldDoc} for - * the hits contains a Double instance with the distance in meters. - *
<p> - * If a document is missing the field, then by default it is treated as having {@link Double#POSITIVE_INFINITY} distance - * (missing values sort last). - *
<p> - * If a document contains multiple values for the field, the <b>closest</b> distance to the circle is used. - * + * + *
<p>This sort orders documents by ascending outside distance from the circle. Points within the + * circle have distance 0.0. The value returned in {@link FieldDoc} for the hits contains a Double + * instance with the distance in meters. + * + *
<p>If a document is missing the field, then by default it is treated as having {@link + * Double#POSITIVE_INFINITY} distance (missing values sort last). + * + *
<p>
    If a document contains multiple values for the field, the closest distance to the + * circle is used. + * * @param field field name. must not be null. * @param latitude latitude at the center: must be within standard +/-90 coordinate bounds. * @param longitude longitude at the center: must be within standard +/-180 coordinate bounds. @@ -209,23 +246,30 @@ public class Geo3DDocValuesField extends Field { * @return SortField ordering documents by distance * @throws IllegalArgumentException if {@code field} is null or location has invalid coordinates. */ - public static SortField newOutsideDistanceSort(final String field, final double latitude, final double longitude, final double maxRadiusMeters, final PlanetModel planetModel) { - final GeoOutsideDistance shape = Geo3DUtil.fromDistance(planetModel, latitude, longitude, maxRadiusMeters); + public static SortField newOutsideDistanceSort( + final String field, + final double latitude, + final double longitude, + final double maxRadiusMeters, + final PlanetModel planetModel) { + final GeoOutsideDistance shape = + Geo3DUtil.fromDistance(planetModel, latitude, longitude, maxRadiusMeters); return new Geo3DPointOutsideSortField(field, planetModel, shape); } /** * Creates a SortField for sorting by outside distance from a box. - *
<p> - * This sort orders documents by ascending outside distance from the box. Points within the box have distance 0.0. - * The value returned in {@link FieldDoc} for - * the hits contains a Double instance with the distance in meters. - *
<p> - * If a document is missing the field, then by default it is treated as having {@link Double#POSITIVE_INFINITY} distance - * (missing values sort last). - *
<p> - * If a document contains multiple values for the field, the <b>closest</b> distance to the box is used. - * + * + *
<p>This sort orders documents by ascending outside distance from the box. Points within the box + * have distance 0.0. The value returned in {@link FieldDoc} for the hits contains a Double + * instance with the distance in meters. + * + *
<p>If a document is missing the field, then by default it is treated as having {@link + * Double#POSITIVE_INFINITY} distance (missing values sort last). + * + *
<p>
    If a document contains multiple values for the field, the closest distance to the box + * is used. + * * @param field field name. must not be null. * @param minLatitude latitude lower bound: must be within standard +/-90 coordinate bounds. * @param maxLatitude latitude upper bound: must be within standard +/-90 coordinate bounds. @@ -234,77 +278,98 @@ public class Geo3DDocValuesField extends Field { * @return SortField ordering documents by distance * @throws IllegalArgumentException if {@code field} is null or box has invalid coordinates. */ - public static SortField newOutsideBoxSort(final String field, final double minLatitude, final double maxLatitude, final double minLongitude, final double maxLongitude, final PlanetModel planetModel) { - final GeoOutsideDistance shape = Geo3DUtil.fromBox(planetModel, minLatitude, maxLatitude, minLongitude, maxLongitude); + public static SortField newOutsideBoxSort( + final String field, + final double minLatitude, + final double maxLatitude, + final double minLongitude, + final double maxLongitude, + final PlanetModel planetModel) { + final GeoOutsideDistance shape = + Geo3DUtil.fromBox(planetModel, minLatitude, maxLatitude, minLongitude, maxLongitude); return new Geo3DPointOutsideSortField(field, planetModel, shape); } /** * Creates a SortField for sorting by outside distance from a polygon. - *
<p> - * This sort orders documents by ascending outside distance from the polygon. Points within the polygon have distance 0.0. - * The value returned in {@link FieldDoc} for - * the hits contains a Double instance with the distance in meters. - *
<p> - * If a document is missing the field, then by default it is treated as having {@link Double#POSITIVE_INFINITY} distance - * (missing values sort last). - *
<p> - * If a document contains multiple values for the field, the <b>closest</b> distance to the polygon is used. - * + * + *
<p>This sort orders documents by ascending outside distance from the polygon. Points within the + * polygon have distance 0.0. The value returned in {@link FieldDoc} for the hits contains a + * Double instance with the distance in meters. + * + *
<p>If a document is missing the field, then by default it is treated as having {@link + * Double#POSITIVE_INFINITY} distance (missing values sort last). + * + *
<p>
    If a document contains multiple values for the field, the closest distance to the + * polygon is used. + * * @param field field name. must not be null. * @param polygons is the list of polygons to use to construct the query; must be at least one. * @return SortField ordering documents by distance * @throws IllegalArgumentException if {@code field} is null or polygon has invalid coordinates. */ - public static SortField newOutsidePolygonSort(final String field, final PlanetModel planetModel, final Polygon... polygons) { + public static SortField newOutsidePolygonSort( + final String field, final PlanetModel planetModel, final Polygon... polygons) { final GeoOutsideDistance shape = Geo3DUtil.fromPolygon(planetModel, polygons); return new Geo3DPointOutsideSortField(field, planetModel, shape); } /** - * Creates a SortField for sorting by outside distance from a large polygon. This differs from the related newOutsideLargePolygonSort in that it - * does little or no legality checking and is optimized for very large numbers of polygon edges. - *
<p> - * This sort orders documents by ascending outside distance from the polygon. Points within the polygon have distance 0.0. - * The value returned in {@link FieldDoc} for - * the hits contains a Double instance with the distance in meters. - *
<p> - * If a document is missing the field, then by default it is treated as having {@link Double#POSITIVE_INFINITY} distance - * (missing values sort last). - *
<p> - * If a document contains multiple values for the field, the <b>closest</b> distance to the polygon is used. - * + * Creates a SortField for sorting by outside distance from a large polygon. This differs from the + * related newOutsideLargePolygonSort in that it does little or no legality checking and is + * optimized for very large numbers of polygon edges. + * + *
<p>This sort orders documents by ascending outside distance from the polygon. Points within the + * polygon have distance 0.0. The value returned in {@link FieldDoc} for the hits contains a + * Double instance with the distance in meters. + * + *
<p>If a document is missing the field, then by default it is treated as having {@link + * Double#POSITIVE_INFINITY} distance (missing values sort last). + * + *
<p>
    If a document contains multiple values for the field, the closest distance to the + * polygon is used. + * * @param field field name. must not be null. * @param polygons is the list of polygons to use to construct the query; must be at least one. * @return SortField ordering documents by distance * @throws IllegalArgumentException if {@code field} is null or polygon has invalid coordinates. */ - public static SortField newOutsideLargePolygonSort(final String field, final PlanetModel planetModel, final Polygon... polygons) { + public static SortField newOutsideLargePolygonSort( + final String field, final PlanetModel planetModel, final Polygon... polygons) { final GeoOutsideDistance shape = Geo3DUtil.fromLargePolygon(planetModel, polygons); return new Geo3DPointOutsideSortField(field, planetModel, shape); } /** * Creates a SortField for sorting by outside distance from a path. - *
<p> - * This sort orders documents by ascending outside distance from the described path. Points within the path - * are given the distance of 0.0. The value returned in {@link FieldDoc} for - * the hits contains a Double instance with the distance in meters. - *
<p> - * If a document is missing the field, then by default it is treated as having {@link Double#POSITIVE_INFINITY} distance - * (missing values sort last). - *
<p> - * If a document contains multiple values for the field, the <b>closest</b> distance from the path is used. - * + * + *
<p>This sort orders documents by ascending outside distance from the described path. Points + * within the path are given the distance of 0.0. The value returned in {@link FieldDoc} for the + * hits contains a Double instance with the distance in meters. + * + *
<p>If a document is missing the field, then by default it is treated as having {@link + * Double#POSITIVE_INFINITY} distance (missing values sort last). + * + *
<p>
    If a document contains multiple values for the field, the closest distance from the + * path is used. + * * @param field field name. must not be null. - * @param pathLatitudes latitude values for points of the path: must be within standard +/-90 coordinate bounds. - * @param pathLongitudes longitude values for points of the path: must be within standard +/-180 coordinate bounds. + * @param pathLatitudes latitude values for points of the path: must be within standard +/-90 + * coordinate bounds. + * @param pathLongitudes longitude values for points of the path: must be within standard +/-180 + * coordinate bounds. * @param pathWidthMeters width of the path in meters. * @return SortField ordering documents by distance * @throws IllegalArgumentException if {@code field} is null or path has invalid coordinates. */ - public static SortField newOutsidePathSort(final String field, final double[] pathLatitudes, final double[] pathLongitudes, final double pathWidthMeters, final PlanetModel planetModel) { - final GeoOutsideDistance shape = Geo3DUtil.fromPath(planetModel, pathLatitudes, pathLongitudes, pathWidthMeters); + public static SortField newOutsidePathSort( + final String field, + final double[] pathLatitudes, + final double[] pathLongitudes, + final double pathWidthMeters, + final PlanetModel planetModel) { + final GeoOutsideDistance shape = + Geo3DUtil.fromPath(planetModel, pathLatitudes, pathLongitudes, pathWidthMeters); return new Geo3DPointOutsideSortField(field, planetModel, shape); } } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DPoint.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DPoint.java index eba72d5a4d8..b35126689c2 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DPoint.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DPoint.java @@ -18,27 +18,29 @@ package org.apache.lucene.spatial3d; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; -import org.apache.lucene.index.PointValues; -import org.apache.lucene.geo.Polygon; import org.apache.lucene.geo.GeoUtils; +import org.apache.lucene.geo.Polygon; +import org.apache.lucene.index.PointValues; +import org.apache.lucene.search.Query; import org.apache.lucene.spatial3d.geom.GeoPoint; import org.apache.lucene.spatial3d.geom.GeoShape; import org.apache.lucene.spatial3d.geom.PlanetModel; -import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; /** - * Add this to a document to index lat/lon or x/y/z point, indexed as a 3D point. - * Multiple values are allowed: just add multiple Geo3DPoint to the document with the - * same field name. - *
<p> - * This field defines static factory methods for creating a shape query: + * + *
<p>This field defines static factory methods for creating a shape query: + * *
<ul> *   <li>{@link #newShapeQuery newShapeQuery()} for matching all points inside a specified shape * </ul>
    + * * @see PointValues - * @lucene.experimental */ + * @lucene.experimental + */ public final class Geo3DPoint extends Field { /** Planet Model for this Geo3DPoint */ @@ -46,15 +48,18 @@ public final class Geo3DPoint extends Field { /** Indexing {@link FieldType}. */ public static final FieldType TYPE = new FieldType(); + static { TYPE.setDimensions(3, Integer.BYTES); TYPE.freeze(); } /** - * Creates a new Geo3DPoint field with the specified latitude, longitude (in degrees), with default WGS84 PlanetModel. + * Creates a new Geo3DPoint field with the specified latitude, longitude (in degrees), with + * default WGS84 PlanetModel. * - * @throws IllegalArgumentException if the field name is null or latitude or longitude are out of bounds + * @throws IllegalArgumentException if the field name is null or latitude or longitude are out of + * bounds */ public Geo3DPoint(String name, double lat, double lon) { this(name, PlanetModel.WGS84, lat, lon); @@ -63,7 +68,8 @@ public final class Geo3DPoint extends Field { /** * Creates a new Geo3DPoint field with the specified x,y,z, using default WGS84 planet model. * - * @throws IllegalArgumentException if the field name is null or latitude or longitude are out of bounds + * @throws IllegalArgumentException if the field name is null or latitude or longitude are out of + * bounds */ public Geo3DPoint(String name, double x, double y, double z) { this(name, PlanetModel.WGS84, x, y, z); @@ -72,7 +78,8 @@ public final class Geo3DPoint extends Field { /** * Creates a new Geo3DPoint field with the specified x,y,z, and given planet model. * - * @throws IllegalArgumentException if the field name is null or latitude or longitude are out of bounds + * @throws IllegalArgumentException if the field name is null or latitude or longitude are out of + * bounds */ public Geo3DPoint(String name, PlanetModel planetModel, double x, double y, double z) { super(name, TYPE); @@ -81,9 +88,11 @@ public final class Geo3DPoint extends Field { } /** - * Creates a new Geo3DPoint field with the specified latitude, longitude (in degrees), given a planet model. + * Creates a new Geo3DPoint field with the specified latitude, longitude (in degrees), given a + * planet model. * - * @throws IllegalArgumentException if the field name is null or latitude or longitude are out of bounds + * @throws IllegalArgumentException if the field name is null or latitude or longitude are out of + * bounds */ public Geo3DPoint(String name, PlanetModel planetModel, double latitude, double longitude) { super(name, TYPE); @@ -91,31 +100,40 @@ public final class Geo3DPoint extends Field { GeoUtils.checkLongitude(longitude); this.planetModel = planetModel; // Translate latitude/longitude to x,y,z: - final GeoPoint point = new GeoPoint(planetModel, Geo3DUtil.fromDegrees(latitude), Geo3DUtil.fromDegrees(longitude)); + final GeoPoint point = + new GeoPoint( + planetModel, Geo3DUtil.fromDegrees(latitude), Geo3DUtil.fromDegrees(longitude)); fillFieldsData(planetModel, point.x, point.y, point.z); } /** * Create a query for matching points within the specified distance of the supplied location. - * @param field field name. must not be null. Note that if - * {@link PlanetModel#WGS84} is used, the query is approximate and may have up - * to 0.5% error. * + * @param field field name. must not be null. Note that if {@link PlanetModel#WGS84} is used, the + * query is approximate and may have up to 0.5% error. * @param latitude latitude at the center: must be within standard +/-90 coordinate bounds. 
* @param longitude longitude at the center: must be within standard +/-180 coordinate bounds. - * @param radiusMeters maximum distance from the center in meters: must be non-negative and finite. + * @param radiusMeters maximum distance from the center in meters: must be non-negative and + * finite. * @return query matching points within this distance - * @throws IllegalArgumentException if {@code field} is null, location has invalid coordinates, or radius is invalid. + * @throws IllegalArgumentException if {@code field} is null, location has invalid coordinates, or + * radius is invalid. */ - public static Query newDistanceQuery(final String field, final PlanetModel planetModel, final double latitude, final double longitude, final double radiusMeters) { + public static Query newDistanceQuery( + final String field, + final PlanetModel planetModel, + final double latitude, + final double longitude, + final double radiusMeters) { final GeoShape shape = Geo3DUtil.fromDistance(planetModel, latitude, longitude, radiusMeters); return newShapeQuery(field, shape); } /** * Create a query for matching a box. - *
<p> - * The box may cross over the dateline. + * + *
<p>
    The box may cross over the dateline. + * * @param field field name. must not be null. * @param minLatitude latitude lower bound: must be within standard +/-90 coordinate bounds. * @param maxLatitude latitude upper bound: must be within standard +/-90 coordinate bounds. @@ -124,50 +142,74 @@ public final class Geo3DPoint extends Field { * @return query matching points within this box * @throws IllegalArgumentException if {@code field} is null, or the box has invalid coordinates. */ - public static Query newBoxQuery(final String field, final PlanetModel planetModel, final double minLatitude, final double maxLatitude, final double minLongitude, final double maxLongitude) { - final GeoShape shape = Geo3DUtil.fromBox(planetModel, minLatitude, maxLatitude, minLongitude, maxLongitude); + public static Query newBoxQuery( + final String field, + final PlanetModel planetModel, + final double minLatitude, + final double maxLatitude, + final double minLongitude, + final double maxLongitude) { + final GeoShape shape = + Geo3DUtil.fromBox(planetModel, minLatitude, maxLatitude, minLongitude, maxLongitude); return newShapeQuery(field, shape); } - /** - * Create a query for matching a polygon. The polygon should have a limited number of edges (less than 100) and be well-defined, - * with well-separated vertices. - *
<p> - * The supplied {@code polygons} must be clockwise on the outside level, counterclockwise on the next level in, etc. + /** + * Create a query for matching a polygon. The polygon should have a limited number of edges (less + * than 100) and be well-defined, with well-separated vertices. + * + *
<p>
    The supplied {@code polygons} must be clockwise on the outside level, counterclockwise on + * the next level in, etc. + * * @param field field name. must not be null. * @param polygons is the list of polygons to use to construct the query; must be at least one. * @return query matching points within this polygon */ - public static Query newPolygonQuery(final String field, final PlanetModel planetModel, final Polygon... polygons) { + public static Query newPolygonQuery( + final String field, final PlanetModel planetModel, final Polygon... polygons) { final GeoShape shape = Geo3DUtil.fromPolygon(planetModel, polygons); return newShapeQuery(field, shape); } - /** - * Create a query for matching a large polygon. This differs from the related newPolygonQuery in that it - * does little or no legality checking and is optimized for very large numbers of polygon edges. - *
<p> - * The supplied {@code polygons} must be clockwise on the outside level, counterclockwise on the next level in, etc. + /** + * Create a query for matching a large polygon. This differs from the related newPolygonQuery in + * that it does little or no legality checking and is optimized for very large numbers of polygon + * edges. + * + *
<p>
    The supplied {@code polygons} must be clockwise on the outside level, counterclockwise on + * the next level in, etc. + * * @param field field name. must not be null. * @param polygons is the list of polygons to use to construct the query; must be at least one. * @return query matching points within this polygon */ - public static Query newLargePolygonQuery(final String field, PlanetModel planetModel, final Polygon... polygons) { + public static Query newLargePolygonQuery( + final String field, PlanetModel planetModel, final Polygon... polygons) { final GeoShape shape = Geo3DUtil.fromLargePolygon(planetModel, polygons); return newShapeQuery(field, shape); } - /** + /** * Create a query for matching a path. + * *
<p>
    + * * @param field field name. must not be null. - * @param pathLatitudes latitude values for points of the path: must be within standard +/-90 coordinate bounds. - * @param pathLongitudes longitude values for points of the path: must be within standard +/-180 coordinate bounds. + * @param pathLatitudes latitude values for points of the path: must be within standard +/-90 + * coordinate bounds. + * @param pathLongitudes longitude values for points of the path: must be within standard +/-180 + * coordinate bounds. * @param pathWidthMeters width of the path in meters. * @return query matching points within this polygon */ - public static Query newPathQuery(final String field, final double[] pathLatitudes, final double[] pathLongitudes, final double pathWidthMeters, PlanetModel planetModel) { - final GeoShape shape = Geo3DUtil.fromPath(planetModel, pathLatitudes, pathLongitudes, pathWidthMeters); + public static Query newPathQuery( + final String field, + final double[] pathLatitudes, + final double[] pathLongitudes, + final double pathWidthMeters, + PlanetModel planetModel) { + final GeoShape shape = + Geo3DUtil.fromPath(planetModel, pathLatitudes, pathLongitudes, pathWidthMeters); return newShapeQuery(field, shape); } @@ -175,24 +217,26 @@ public final class Geo3DPoint extends Field { byte[] bytes = new byte[12]; encodeDimension(x, bytes, 0, planetModel); encodeDimension(y, bytes, Integer.BYTES, planetModel); - encodeDimension(z, bytes, 2*Integer.BYTES, planetModel); + encodeDimension(z, bytes, 2 * Integer.BYTES, planetModel); fieldsData = new BytesRef(bytes); } // public helper methods (e.g. for queries) - + /** Encode single dimension */ - public static void encodeDimension(double value, byte bytes[], int offset, PlanetModel planetModel) { + public static void encodeDimension( + double value, byte bytes[], int offset, PlanetModel planetModel) { NumericUtils.intToSortableBytes(planetModel.encodeValue(value), bytes, offset); } - + /** Decode single dimension */ public static double decodeDimension(byte value[], int offset, PlanetModel planetModel) { return planetModel.decodeValue(NumericUtils.sortableBytesToInt(value, offset)); } - /** Returns a query matching all points inside the provided shape. - * + /** + * Returns a query matching all points inside the provided shape. + * * @param field field name. must not be {@code null}. 
* @param shape Which {@link GeoShape} to match */ @@ -210,10 +254,13 @@ public final class Geo3DPoint extends Field { BytesRef bytes = (BytesRef) fieldsData; result.append(" x=").append(decodeDimension(bytes.bytes, bytes.offset, this.planetModel)); - result.append(" y=").append(decodeDimension(bytes.bytes, bytes.offset + Integer.BYTES, this.planetModel)); - result.append(" z=").append(decodeDimension(bytes.bytes, bytes.offset + 2 * Integer.BYTES, this.planetModel)); + result + .append(" y=") + .append(decodeDimension(bytes.bytes, bytes.offset + Integer.BYTES, this.planetModel)); + result + .append(" z=") + .append(decodeDimension(bytes.bytes, bytes.offset + 2 * Integer.BYTES, this.planetModel)); result.append('>'); return result.toString(); } - } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DPointDistanceComparator.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DPointDistanceComparator.java index 654c4cc14c4..007209d23b6 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DPointDistanceComparator.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DPointDistanceComparator.java @@ -17,7 +17,6 @@ package org.apache.lucene.spatial3d; import java.io.IOException; - import org.apache.lucene.index.DocValues; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.LeafReader; @@ -32,35 +31,40 @@ import org.apache.lucene.spatial3d.geom.PlanetModel; import org.apache.lucene.spatial3d.geom.XYZBounds; /** - * Compares documents by distance from an origin point, using a GeoDistanceShape to compute the distance - *
<p> - * When the least competitive item on the priority queue changes (setBottom), we recompute - * a bounding box representing competitive distance to the top-N. Then in compareBottom, we can + * + *
<p>
    When the least competitive item on the priority queue changes (setBottom), we recompute a + * bounding box representing competitive distance to the top-N. Then in compareBottom, we can * quickly reject hits based on bounding box alone without computing distance for every element. */ class Geo3DPointDistanceComparator extends FieldComparator implements LeafFieldComparator { final String field; - + final GeoDistanceShape distanceShape; - final private PlanetModel planetModel; + private final PlanetModel planetModel; final double[] values; double bottomDistance; double topValue; SortedNumericDocValues currentDocs; - + XYZBounds priorityQueueBounds; - + // the number of times setBottom has been called (adversary protection) int setBottomCounter = 0; - public Geo3DPointDistanceComparator(String field, final PlanetModel planetModel, final GeoDistanceShape distanceShape, int numHits) { + public Geo3DPointDistanceComparator( + String field, + final PlanetModel planetModel, + final GeoDistanceShape distanceShape, + int numHits) { this.field = field; this.distanceShape = distanceShape; this.planetModel = planetModel; this.values = new double[numHits]; } - + @Override public void setScorer(Scorable scorer) {} @@ -68,7 +72,7 @@ class Geo3DPointDistanceComparator extends FieldComparator implements Le public int compare(int slot1, int slot2) { return Double.compare(values[slot1], values[slot2]); } - + @Override public void setBottom(int slot) { bottomDistance = values[slot]; @@ -83,12 +87,12 @@ class Geo3DPointDistanceComparator extends FieldComparator implements Le } setBottomCounter++; } - + @Override public void setTopValue(Double value) { topValue = value.doubleValue(); } - + @Override public int compareBottom(int doc) throws IOException { if (doc > currentDocs.docID()) { @@ -97,7 +101,7 @@ class Geo3DPointDistanceComparator extends FieldComparator implements Le if (doc < currentDocs.docID()) { return Double.compare(bottomDistance, Double.POSITIVE_INFINITY); } - + int numValues = currentDocs.docValueCount(); assert numValues > 0; @@ -110,26 +114,30 @@ class Geo3DPointDistanceComparator extends FieldComparator implements Le final double x = planetModel.getDocValueEncoder().decodeXValue(encoded); final double y = planetModel.getDocValueEncoder().decodeYValue(encoded); final double z = planetModel.getDocValueEncoder().decodeZValue(encoded); - - if (x > priorityQueueBounds.getMaximumX() || - x < priorityQueueBounds.getMinimumX() || - y > priorityQueueBounds.getMaximumY() || - y < priorityQueueBounds.getMinimumY() || - z > priorityQueueBounds.getMaximumZ() || - z < priorityQueueBounds.getMinimumZ()) { + + if (x > priorityQueueBounds.getMaximumX() + || x < priorityQueueBounds.getMinimumX() + || y > priorityQueueBounds.getMaximumY() + || y < priorityQueueBounds.getMinimumY() + || z > priorityQueueBounds.getMaximumZ() + || z < priorityQueueBounds.getMinimumZ()) { continue; } - cmp = Math.max(cmp, Double.compare(bottomDistance, distanceShape.computeDistance(DistanceStyle.ARC, x, y, z))); + cmp = + Math.max( + cmp, + Double.compare( + bottomDistance, distanceShape.computeDistance(DistanceStyle.ARC, x, y, z))); } return cmp; } - + @Override public void copy(int slot, int doc) throws IOException { values[slot] = computeMinimumDistance(doc); } - + @Override public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { LeafReader reader = context.reader(); @@ -140,13 +148,13 @@ class Geo3DPointDistanceComparator extends FieldComparator implements Le currentDocs = 
DocValues.getSortedNumeric(reader, field); return this; } - + @Override public Double value(int slot) { // Return the arc distance return Double.valueOf(values[slot] * planetModel.getMeanRadius()); } - + @Override public int compareTop(int doc) throws IOException { return Double.compare(topValue, computeMinimumDistance(doc)); @@ -161,10 +169,12 @@ class Geo3DPointDistanceComparator extends FieldComparator implements Le final int numValues = currentDocs.docValueCount(); for (int i = 0; i < numValues; i++) { final long encoded = currentDocs.nextValue(); - final double distance = distanceShape.computeDistance(DistanceStyle.ARC, - planetModel.getDocValueEncoder().decodeXValue(encoded), - planetModel.getDocValueEncoder().decodeYValue(encoded), - planetModel.getDocValueEncoder().decodeZValue(encoded)); + final double distance = + distanceShape.computeDistance( + DistanceStyle.ARC, + planetModel.getDocValueEncoder().decodeXValue(encoded), + planetModel.getDocValueEncoder().decodeYValue(encoded), + planetModel.getDocValueEncoder().decodeZValue(encoded)); minValue = Math.min(minValue, distance); } } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DPointOutsideDistanceComparator.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DPointOutsideDistanceComparator.java index 56145a00a22..3696a6f2ba8 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DPointOutsideDistanceComparator.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DPointOutsideDistanceComparator.java @@ -17,7 +17,6 @@ package org.apache.lucene.spatial3d; import java.io.IOException; - import org.apache.lucene.index.DocValues; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.LeafReader; @@ -30,27 +29,30 @@ import org.apache.lucene.spatial3d.geom.DistanceStyle; import org.apache.lucene.spatial3d.geom.GeoOutsideDistance; import org.apache.lucene.spatial3d.geom.PlanetModel; -/** - * Compares documents by outside distance, using a GeoOutsideDistance to compute the distance - */ -class Geo3DPointOutsideDistanceComparator extends FieldComparator implements LeafFieldComparator { +/** Compares documents by outside distance, using a GeoOutsideDistance to compute the distance */ +class Geo3DPointOutsideDistanceComparator extends FieldComparator + implements LeafFieldComparator { final String field; - + final GeoOutsideDistance distanceShape; - final private PlanetModel planetModel; + private final PlanetModel planetModel; final double[] values; double bottomDistance; double topValue; SortedNumericDocValues currentDocs; - - public Geo3DPointOutsideDistanceComparator(String field, final PlanetModel planetModel, final GeoOutsideDistance distanceShape, int numHits) { + + public Geo3DPointOutsideDistanceComparator( + String field, + final PlanetModel planetModel, + final GeoOutsideDistance distanceShape, + int numHits) { this.field = field; this.planetModel = planetModel; this.distanceShape = distanceShape; this.values = new double[numHits]; } - + @Override public void setScorer(Scorable scorer) {} @@ -58,17 +60,17 @@ class Geo3DPointOutsideDistanceComparator extends FieldComparator implem public int compare(int slot1, int slot2) { return Double.compare(values[slot1], values[slot2]); } - + @Override public void setBottom(int slot) { bottomDistance = values[slot]; } - + @Override public void setTopValue(Double value) { topValue = value.doubleValue(); } - + @Override public int compareBottom(int doc) throws IOException { if (doc > currentDocs.docID()) { @@ 
-90,17 +92,22 @@ class Geo3DPointOutsideDistanceComparator extends FieldComparator implem final double x = planetModel.getDocValueEncoder().decodeXValue(encoded); final double y = planetModel.getDocValueEncoder().decodeYValue(encoded); final double z = planetModel.getDocValueEncoder().decodeZValue(encoded); - - cmp = Math.max(cmp, Double.compare(bottomDistance, distanceShape.computeOutsideDistance(DistanceStyle.ARC, x, y, z))); + + cmp = + Math.max( + cmp, + Double.compare( + bottomDistance, + distanceShape.computeOutsideDistance(DistanceStyle.ARC, x, y, z))); } return cmp; } - + @Override public void copy(int slot, int doc) throws IOException { values[slot] = computeMinimumDistance(doc); } - + @Override public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { LeafReader reader = context.reader(); @@ -111,13 +118,13 @@ class Geo3DPointOutsideDistanceComparator extends FieldComparator implem currentDocs = DocValues.getSortedNumeric(reader, field); return this; } - + @Override public Double value(int slot) { // Return the arc distance return Double.valueOf(values[slot] * planetModel.getMeanRadius()); } - + @Override public int compareTop(int doc) throws IOException { return Double.compare(topValue, computeMinimumDistance(doc)); @@ -132,10 +139,12 @@ class Geo3DPointOutsideDistanceComparator extends FieldComparator implem final int numValues = currentDocs.docValueCount(); for (int i = 0; i < numValues; i++) { final long encoded = currentDocs.nextValue(); - final double distance = distanceShape.computeOutsideDistance(DistanceStyle.ARC, - planetModel.getDocValueEncoder().decodeXValue(encoded), - planetModel.getDocValueEncoder().decodeYValue(encoded), - planetModel.getDocValueEncoder().decodeZValue(encoded)); + final double distance = + distanceShape.computeOutsideDistance( + DistanceStyle.ARC, + planetModel.getDocValueEncoder().decodeXValue(encoded), + planetModel.getDocValueEncoder().decodeYValue(encoded), + planetModel.getDocValueEncoder().decodeZValue(encoded)); minValue = Math.min(minValue, distance); } } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DPointOutsideSortField.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DPointOutsideSortField.java index e7251ff3174..c7605e34187 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DPointOutsideSortField.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DPointOutsideSortField.java @@ -21,14 +21,13 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.spatial3d.geom.GeoOutsideDistance; import org.apache.lucene.spatial3d.geom.PlanetModel; -/** - * Sorts by outside distance from an origin location. - */ +/** Sorts by outside distance from an origin location. 
*/ final class Geo3DPointOutsideSortField extends SortField { final GeoOutsideDistance distanceShape; - final private PlanetModel planetModel; + private final PlanetModel planetModel; - Geo3DPointOutsideSortField(final String field, final PlanetModel planetModel, final GeoOutsideDistance distanceShape) { + Geo3DPointOutsideSortField( + final String field, final PlanetModel planetModel, final GeoOutsideDistance distanceShape) { super(field, SortField.Type.CUSTOM); if (field == null) { throw new IllegalArgumentException("field must not be null"); @@ -40,7 +39,7 @@ final class Geo3DPointOutsideSortField extends SortField { this.distanceShape = distanceShape; setMissingValue(Double.POSITIVE_INFINITY); } - + @Override public FieldComparator<?> getComparator(int numHits, int sortPos) { return new Geo3DPointOutsideDistanceComparator(getField(), planetModel, distanceShape, numHits); @@ -54,11 +53,13 @@ final class Geo3DPointOutsideSortField extends SortField { @Override public void setMissingValue(Object missingValue) { if (Double.valueOf(Double.POSITIVE_INFINITY).equals(missingValue) == false) { - throw new IllegalArgumentException("Missing value can only be Double.POSITIVE_INFINITY (missing values last), but got " + missingValue); + throw new IllegalArgumentException( + "Missing value can only be Double.POSITIVE_INFINITY (missing values last), but got " + + missingValue); } this.missingValue = missingValue; } - + @Override public int hashCode() { final int prime = 31; diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DPointSortField.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DPointSortField.java index d15eb2579dd..0e5dba512ae 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DPointSortField.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DPointSortField.java @@ -21,14 +21,13 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.spatial3d.geom.GeoDistanceShape; import org.apache.lucene.spatial3d.geom.PlanetModel; -/** - * Sorts by distance from an origin location. - */ +/** Sorts by distance from an origin location.
*/ final class Geo3DPointSortField extends SortField { final GeoDistanceShape distanceShape; final PlanetModel planetModel; - Geo3DPointSortField(final String field, final PlanetModel planetModel, final GeoDistanceShape distanceShape) { + Geo3DPointSortField( + final String field, final PlanetModel planetModel, final GeoDistanceShape distanceShape) { super(field, SortField.Type.CUSTOM); if (field == null) { throw new IllegalArgumentException("field must not be null"); @@ -40,7 +39,7 @@ final class Geo3DPointSortField extends SortField { this.planetModel = planetModel; setMissingValue(Double.POSITIVE_INFINITY); } - + @Override public FieldComparator<?> getComparator(int numHits, int sortPos) { return new Geo3DPointDistanceComparator(getField(), planetModel, distanceShape, numHits); @@ -54,11 +53,13 @@ final class Geo3DPointSortField extends SortField { @Override public void setMissingValue(Object missingValue) { if (Double.valueOf(Double.POSITIVE_INFINITY).equals(missingValue) == false) { - throw new IllegalArgumentException("Missing value can only be Double.POSITIVE_INFINITY (missing values last), but got " + missingValue); + throw new IllegalArgumentException( + "Missing value can only be Double.POSITIVE_INFINITY (missing values last), but got " + + missingValue); } this.missingValue = missingValue; } - + @Override public int hashCode() { final int prime = 31; diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DUtil.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DUtil.java index 21e69b194d7..2bfc75a2526 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DUtil.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DUtil.java @@ -16,28 +16,26 @@ */ package org.apache.lucene.spatial3d; -import org.apache.lucene.spatial3d.geom.PlanetModel; -import org.apache.lucene.spatial3d.geom.GeoPolygonFactory; -import org.apache.lucene.spatial3d.geom.GeoPathFactory; -import org.apache.lucene.spatial3d.geom.GeoCircleFactory; -import org.apache.lucene.spatial3d.geom.GeoBBoxFactory; -import org.apache.lucene.spatial3d.geom.GeoPath; -import org.apache.lucene.spatial3d.geom.GeoPolygon; -import org.apache.lucene.spatial3d.geom.GeoCircle; -import org.apache.lucene.spatial3d.geom.GeoBBox; -import org.apache.lucene.spatial3d.geom.GeoCompositePolygon; -import org.apache.lucene.spatial3d.geom.GeoPoint; - -import org.apache.lucene.geo.Polygon; -import org.apache.lucene.geo.GeoUtils; - -import java.util.List; import java.util.ArrayList; +import java.util.List; +import org.apache.lucene.geo.GeoUtils; +import org.apache.lucene.geo.Polygon; +import org.apache.lucene.spatial3d.geom.GeoBBox; +import org.apache.lucene.spatial3d.geom.GeoBBoxFactory; +import org.apache.lucene.spatial3d.geom.GeoCircle; +import org.apache.lucene.spatial3d.geom.GeoCircleFactory; +import org.apache.lucene.spatial3d.geom.GeoCompositePolygon; +import org.apache.lucene.spatial3d.geom.GeoPath; +import org.apache.lucene.spatial3d.geom.GeoPathFactory; +import org.apache.lucene.spatial3d.geom.GeoPoint; +import org.apache.lucene.spatial3d.geom.GeoPolygon; +import org.apache.lucene.spatial3d.geom.GeoPolygonFactory; +import org.apache.lucene.spatial3d.geom.PlanetModel; class Geo3DUtil { /** How many radians are in one degree */ - final static double RADIANS_PER_DEGREE = Math.PI / 180.0; + static final double RADIANS_PER_DEGREE = Math.PI / 180.0; /** Returns smallest double that would encode to int x. */ // NOTE: remains in this class to keep method package private!!
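Both sort fields above are package-private; user code reaches them through factory methods on Geo3DPoint. The following is a minimal sketch of distance sorting under stated assumptions: the field name "location" and the index contents are illustrative, and Geo3DPoint.newDistanceSort(field, latitude, longitude) is assumed as the public entry point that constructs a Geo3DPointSortField.

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.spatial3d.Geo3DPoint;
import org.apache.lucene.store.Directory;

public class Geo3DDistanceSortSketch {
  // Returns the ten documents nearest to (lat, lon); docs without the field sort last.
  static TopDocs nearest(Directory dir, double lat, double lon) throws Exception {
    SortField distanceSort = Geo3DPoint.newDistanceSort("location", lat, lon);
    // Redundant (the constructor already sets it), but shown because the
    // setMissingValue override above accepts only POSITIVE_INFINITY.
    distanceSort.setMissingValue(Double.POSITIVE_INFINITY);
    try (DirectoryReader reader = DirectoryReader.open(dir)) {
      IndexSearcher searcher = new IndexSearcher(reader);
      return searcher.search(new MatchAllDocsQuery(), 10, new Sort(distanceSort));
    }
  }
}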
@@ -56,21 +54,22 @@ class Geo3DUtil { if (x == planetModel.MAX_ENCODED_VALUE) { return planetModel.MAX_VALUE; } - return Math.nextDown((x+1) * planetModel.DECODE); + return Math.nextDown((x + 1) * planetModel.DECODE); } - + /** Converts degrees to radians */ static double fromDegrees(final double degrees) { return degrees * RADIANS_PER_DEGREE; } /** - * Convert a set of Polygon objects into a GeoPolygon. - * @param polygons are the Polygon objects. - * @return the GeoPolygon. - */ + * Convert a set of Polygon objects into a GeoPolygon. + * + * @param polygons are the Polygon objects. + * @return the GeoPolygon. + */ static GeoPolygon fromPolygon(final PlanetModel planetModel, final Polygon... polygons) { - //System.err.println("Creating polygon..."); + // System.err.println("Creating polygon..."); if (polygons.length < 1) { throw new IllegalArgumentException("need at least one polygon"); } @@ -94,12 +93,12 @@ class Geo3DUtil { shape = poly; } return shape; - //System.err.println("...done"); + // System.err.println("...done"); } - - + /** * Convert a Polygon object to a large GeoPolygon. + * * @param polygons is the list of polygons to convert. * @return the large GeoPolygon. */ @@ -107,17 +106,25 @@ class Geo3DUtil { if (polygons.length < 1) { throw new IllegalArgumentException("need at least one polygon"); } - return GeoPolygonFactory.makeLargeGeoPolygon(planetModel, convertToDescription(planetModel, polygons)); + return GeoPolygonFactory.makeLargeGeoPolygon( + planetModel, convertToDescription(planetModel, polygons)); } - + /** * Convert input parameters to a path. - * @param pathLatitudes latitude values for points of the path: must be within standard +/-90 coordinate bounds. - * @param pathLongitudes longitude values for points of the path: must be within standard +/-180 coordinate bounds. + * + * @param pathLatitudes latitude values for points of the path: must be within standard +/-90 + * coordinate bounds. + * @param pathLongitudes longitude values for points of the path: must be within standard +/-180 + * coordinate bounds. * @param pathWidthMeters width of the path in meters. * @return the path. */ - static GeoPath fromPath(final PlanetModel planetModel, final double[] pathLatitudes, final double[] pathLongitudes, final double pathWidthMeters) { + static GeoPath fromPath( + final PlanetModel planetModel, + final double[] pathLatitudes, + final double[] pathLongitudes, + final double pathWidthMeters) { if (pathLatitudes.length != pathLongitudes.length) { throw new IllegalArgumentException("same number of latitudes and longitudes required"); } @@ -132,101 +139,124 @@ class Geo3DUtil { double radiusRadians = pathWidthMeters / (planetModel.getMeanRadius() * planetModel.xyScaling); return GeoPathFactory.makeGeoPath(planetModel, radiusRadians, points); } - + /** * Convert input parameters to a circle. + * * @param latitude latitude at the center: must be within standard +/-90 coordinate bounds. * @param longitude longitude at the center: must be within standard +/-180 coordinate bounds. - * @param radiusMeters maximum distance from the center in meters: must be non-negative and finite. + * @param radiusMeters maximum distance from the center in meters: must be non-negative and + * finite. * @return the circle.
*/ - static GeoCircle fromDistance(final PlanetModel planetModel, final double latitude, final double longitude, final double radiusMeters) { + static GeoCircle fromDistance( + final PlanetModel planetModel, + final double latitude, + final double longitude, + final double radiusMeters) { GeoUtils.checkLatitude(latitude); GeoUtils.checkLongitude(longitude); double radiusRadians = radiusMeters / (planetModel.getMeanRadius()); - return GeoCircleFactory.makeGeoCircle(planetModel, fromDegrees(latitude), fromDegrees(longitude), radiusRadians); + return GeoCircleFactory.makeGeoCircle( + planetModel, fromDegrees(latitude), fromDegrees(longitude), radiusRadians); } - + /** * Convert input parameters to a box. + * * @param minLatitude latitude lower bound: must be within standard +/-90 coordinate bounds. * @param maxLatitude latitude upper bound: must be within standard +/-90 coordinate bounds. * @param minLongitude longitude lower bound: must be within standard +/-180 coordinate bounds. * @param maxLongitude longitude upper bound: must be within standard +/-180 coordinate bounds. * @return the box. */ - static GeoBBox fromBox(final PlanetModel planetModel, final double minLatitude, final double maxLatitude, final double minLongitude, final double maxLongitude) { + static GeoBBox fromBox( + final PlanetModel planetModel, + final double minLatitude, + final double maxLatitude, + final double minLongitude, + final double maxLongitude) { GeoUtils.checkLatitude(minLatitude); GeoUtils.checkLongitude(minLongitude); GeoUtils.checkLatitude(maxLatitude); GeoUtils.checkLongitude(maxLongitude); - return GeoBBoxFactory.makeGeoBBox(planetModel, - Geo3DUtil.fromDegrees(maxLatitude), Geo3DUtil.fromDegrees(minLatitude), Geo3DUtil.fromDegrees(minLongitude), Geo3DUtil.fromDegrees(maxLongitude)); + return GeoBBoxFactory.makeGeoBBox( + planetModel, + Geo3DUtil.fromDegrees(maxLatitude), + Geo3DUtil.fromDegrees(minLatitude), + Geo3DUtil.fromDegrees(minLongitude), + Geo3DUtil.fromDegrees(maxLongitude)); } /** - * Convert a Polygon object into a GeoPolygon. - * This method uses GeoPolygonFactory.makeGeoPolygon. - * @param polygon is the Polygon object. - * @return the GeoPolygon. - */ + * Convert a Polygon object into a GeoPolygon. This method uses GeoPolygonFactory.makeGeoPolygon. + * + * @param polygon is the Polygon object. + * @return the GeoPolygon. + */ private static GeoPolygon fromPolygon(final PlanetModel planetModel, final Polygon polygon) { - // First, assemble the "holes". The geo3d convention is to use the same polygon sense on the inner ring as the - // outer ring, so we process these recursively with reverseMe flipped. + // First, assemble the "holes". The geo3d convention is to use the same polygon sense on the + // inner ring as the outer ring, so we process these recursively with reverseMe flipped. final Polygon[] theHoles = polygon.getHoles(); final List<GeoPolygon> holeList = new ArrayList<>(theHoles.length); for (final Polygon hole : theHoles) { - //System.out.println("Hole: "+hole); + // System.out.println("Hole: " + hole); final GeoPolygon component = fromPolygon(planetModel, hole); if (component != null) { holeList.add(component); } } - + // Now do the polygon itself final double[] polyLats = polygon.getPolyLats(); final double[] polyLons = polygon.getPolyLons(); - + // I presume the arguments have already been checked - final List<GeoPoint> points = new ArrayList<>(polyLats.length-1); - // We skip the last point anyway because the API requires it to be repeated, and geo3d doesn't repeat it.
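These helpers all funnel degrees and meters into radians on the unit sphere. Geo3DUtil itself is package-private, so the sketch below shows the same conversion fromDistance performs, but through the public GeoCircleFactory; the choice of PlanetModel.WGS84 and the argument values are illustrative assumptions, not code from this patch.

import org.apache.lucene.spatial3d.geom.GeoCircle;
import org.apache.lucene.spatial3d.geom.GeoCircleFactory;
import org.apache.lucene.spatial3d.geom.PlanetModel;

public class CircleFromMetersSketch {
  // Mirrors fromDistance above: degrees -> radians for the center,
  // meters -> radians of arc for the radius.
  static GeoCircle circleAround(double latDegrees, double lonDegrees, double radiusMeters) {
    PlanetModel planetModel = PlanetModel.WGS84;
    double radiusRadians = radiusMeters / planetModel.getMeanRadius();
    return GeoCircleFactory.makeGeoCircle(
        planetModel, Math.toRadians(latDegrees), Math.toRadians(lonDegrees), radiusRadians);
  }
}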
+ final List<GeoPoint> points = new ArrayList<>(polyLats.length - 1); + // We skip the last point anyway because the API requires it to be repeated, and geo3d doesn't + // repeat it. for (int i = 0; i < polyLats.length - 1; i++) { final int index = polyLats.length - 2 - i; - points.add(new GeoPoint(planetModel, fromDegrees(polyLats[index]), fromDegrees(polyLons[index]))); + points.add( + new GeoPoint(planetModel, fromDegrees(polyLats[index]), fromDegrees(polyLons[index]))); } - //System.err.println(" building polygon with "+points.size()+" points..."); + // System.err.println(" building polygon with "+points.size()+" points..."); final GeoPolygon rval = GeoPolygonFactory.makeGeoPolygon(planetModel, points, holeList); - //System.err.println(" ...done"); + // System.err.println(" ...done"); return rval; } /** * Convert a list of polygons to a list of polygon descriptions. + * * @param polygons is the list of polygons to convert. * @return the list of polygon descriptions. */ - private static List<GeoPolygonFactory.PolygonDescription> convertToDescription(final PlanetModel planetModel, final Polygon... polygons) { - final List<GeoPolygonFactory.PolygonDescription> descriptions = new ArrayList<>(polygons.length); + private static List<GeoPolygonFactory.PolygonDescription> convertToDescription( + final PlanetModel planetModel, final Polygon... polygons) { + final List<GeoPolygonFactory.PolygonDescription> descriptions = + new ArrayList<>(polygons.length); for (final Polygon polygon : polygons) { final Polygon[] theHoles = polygon.getHoles(); - final List<GeoPolygonFactory.PolygonDescription> holes = convertToDescription(planetModel, theHoles); - + final List<GeoPolygonFactory.PolygonDescription> holes = + convertToDescription(planetModel, theHoles); + // Now do the polygon itself final double[] polyLats = polygon.getPolyLats(); final double[] polyLons = polygon.getPolyLons(); - + // I presume the arguments have already been checked - final List<GeoPoint> points = new ArrayList<>(polyLats.length-1); - // We skip the last point anyway because the API requires it to be repeated, and geo3d doesn't repeat it. + final List<GeoPoint> points = new ArrayList<>(polyLats.length - 1); + // We skip the last point anyway because the API requires it to be repeated, and geo3d doesn't + // repeat it. for (int i = 0; i < polyLats.length - 1; i++) { final int index = polyLats.length - 2 - i; - points.add(new GeoPoint(planetModel, fromDegrees(polyLats[index]), fromDegrees(polyLons[index]))); + points.add( + new GeoPoint(planetModel, fromDegrees(polyLats[index]), fromDegrees(polyLons[index]))); } - + descriptions.add(new GeoPolygonFactory.PolygonDescription(points, holes)); } return descriptions; } - - } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/PointInGeo3DShapeQuery.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/PointInGeo3DShapeQuery.java index 09450fcf589..283cb514410 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/PointInGeo3DShapeQuery.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/PointInGeo3DShapeQuery.java @@ -17,7 +17,6 @@ package org.apache.lucene.spatial3d; import java.io.IOException; - import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.PointValues; @@ -35,14 +34,16 @@ import org.apache.lucene.util.Accountable; import org.apache.lucene.util.DocIdSetBuilder; import org.apache.lucene.util.RamUsageEstimator; -/** Finds all previously indexed points that fall within the specified polygon. +/** + * Finds all previously indexed points that fall within the specified polygon. * *
<p>
    The field must be indexed using {@link Geo3DPoint}. * - * @lucene.experimental */ - + * @lucene.experimental + */ final class PointInGeo3DShapeQuery extends Query implements Accountable { - private static final long BASE_RAM_BYTES = RamUsageEstimator.shallowSizeOfInstance(PointInGeo3DShapeQuery.class); + private static final long BASE_RAM_BYTES = + RamUsageEstimator.shallowSizeOfInstance(PointInGeo3DShapeQuery.class); final String field; final GeoShape shape; @@ -64,10 +65,11 @@ final class PointInGeo3DShapeQuery extends Query implements Accountable { } @Override - public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) + throws IOException { - // I don't use RandomAccessWeight here: it's no good to approximate with "match all docs"; this is an inverted structure and should be - // used in the first pass: + // I don't use RandomAccessWeight here: it's no good to approximate with "match all docs"; this + // is an inverted structure and should be used in the first pass: return new ConstantScoreWeight(this, boost) { @@ -112,7 +114,6 @@ final class PointInGeo3DShapeQuery extends Query implements Accountable { public boolean isCacheable(LeafReaderContext ctx) { return true; } - }; } @@ -126,13 +127,11 @@ final class PointInGeo3DShapeQuery extends Query implements Accountable { @Override public boolean equals(Object other) { - return sameClassAs(other) && - equalsTo(getClass().cast(other)); + return sameClassAs(other) && equalsTo(getClass().cast(other)); } - + private boolean equalsTo(PointInGeo3DShapeQuery other) { - return field.equals(other.field) && - shape.equals(other.shape); + return field.equals(other.field) && shape.equals(other.shape); } @Override @@ -160,9 +159,9 @@ final class PointInGeo3DShapeQuery extends Query implements Accountable { @Override public long ramBytesUsed() { - return BASE_RAM_BYTES + - RamUsageEstimator.sizeOfObject(field) + - RamUsageEstimator.sizeOfObject(shape) + - RamUsageEstimator.sizeOfObject(shapeBounds); + return BASE_RAM_BYTES + + RamUsageEstimator.sizeOfObject(field) + + RamUsageEstimator.sizeOfObject(shape) + + RamUsageEstimator.sizeOfObject(shapeBounds); } } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/PointInShapeIntersectVisitor.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/PointInShapeIntersectVisitor.java index 1aa0ec2ccc4..f33ee4fabed 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/PointInShapeIntersectVisitor.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/PointInShapeIntersectVisitor.java @@ -37,10 +37,8 @@ class PointInShapeIntersectVisitor implements IntersectVisitor { private final double minimumZ; private final double maximumZ; private DocIdSetBuilder.BulkAdder adder; - - public PointInShapeIntersectVisitor(DocIdSetBuilder hits, - GeoShape shape, - XYZBounds bounds) { + + public PointInShapeIntersectVisitor(DocIdSetBuilder hits, GeoShape shape, XYZBounds bounds) { this.hits = hits; this.shape = shape; DocValueEncoder docValueEncoder = shape.getPlanetModel().getDocValueEncoder(); @@ -68,64 +66,88 @@ class PointInShapeIntersectVisitor implements IntersectVisitor { double x = Geo3DPoint.decodeDimension(packedValue, 0, shape.getPlanetModel()); double y = Geo3DPoint.decodeDimension(packedValue, Integer.BYTES, shape.getPlanetModel()); double z = Geo3DPoint.decodeDimension(packedValue, 2 * Integer.BYTES, shape.getPlanetModel()); - if (x >= 
minimumX && x <= maximumX && - y >= minimumY && y <= maximumY && - z >= minimumZ && z <= maximumZ) { + if (x >= minimumX + && x <= maximumX + && y >= minimumY + && y <= maximumY + && z >= minimumZ + && z <= maximumZ) { if (shape.isWithin(x, y, z)) { adder.add(docID); } } } - + @Override public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) { - // Because the dimensional format operates in quantized (64 bit -> 32 bit) space, and the cell bounds - // here are inclusive, we need to extend the bounds to the largest un-quantized values that - // could quantize into these bounds. The encoding (Geo3DUtil.encodeValue) does + // Because the dimensional format operates in quantized (64 bit -> 32 bit) space, and the cell + // bounds here are inclusive, we need to extend the bounds to the largest un-quantized values + // that could quantize into these bounds. The encoding (Geo3DUtil.encodeValue) does // a Math.round from double to long, so e.g. 1.4 -> 1, and -1.4 -> -1: - double xMin = Geo3DUtil.decodeValueFloor(NumericUtils.sortableBytesToInt(minPackedValue, 0), shape.getPlanetModel()); - double xMax = Geo3DUtil.decodeValueCeil(NumericUtils.sortableBytesToInt(maxPackedValue, 0), shape.getPlanetModel()); - double yMin = Geo3DUtil.decodeValueFloor(NumericUtils.sortableBytesToInt(minPackedValue, 1 * Integer.BYTES), shape.getPlanetModel()); - double yMax = Geo3DUtil.decodeValueCeil(NumericUtils.sortableBytesToInt(maxPackedValue, 1 * Integer.BYTES), shape.getPlanetModel()); - double zMin = Geo3DUtil.decodeValueFloor(NumericUtils.sortableBytesToInt(minPackedValue, 2 * Integer.BYTES), shape.getPlanetModel()); - double zMax = Geo3DUtil.decodeValueCeil(NumericUtils.sortableBytesToInt(maxPackedValue, 2 * Integer.BYTES), shape.getPlanetModel()); + double xMin = + Geo3DUtil.decodeValueFloor( + NumericUtils.sortableBytesToInt(minPackedValue, 0), shape.getPlanetModel()); + double xMax = + Geo3DUtil.decodeValueCeil( + NumericUtils.sortableBytesToInt(maxPackedValue, 0), shape.getPlanetModel()); + double yMin = + Geo3DUtil.decodeValueFloor( + NumericUtils.sortableBytesToInt(minPackedValue, 1 * Integer.BYTES), + shape.getPlanetModel()); + double yMax = + Geo3DUtil.decodeValueCeil( + NumericUtils.sortableBytesToInt(maxPackedValue, 1 * Integer.BYTES), + shape.getPlanetModel()); + double zMin = + Geo3DUtil.decodeValueFloor( + NumericUtils.sortableBytesToInt(minPackedValue, 2 * Integer.BYTES), + shape.getPlanetModel()); + double zMax = + Geo3DUtil.decodeValueCeil( + NumericUtils.sortableBytesToInt(maxPackedValue, 2 * Integer.BYTES), + shape.getPlanetModel()); - //System.out.println(" compare: x=" + cellXMin + "-" + cellXMax + " y=" + cellYMin + "-" + cellYMax + " z=" + cellZMin + "-" + cellZMax); + // System.out.println(" compare: x=" + cellXMin + "-" + cellXMax + " y=" + cellYMin + "-" + + // cellYMax + " z=" + cellZMin + "-" + cellZMax); assert xMin <= xMax; assert yMin <= yMax; assert zMin <= zMax; // First, check bounds. If the shape is entirely contained, return CELL_CROSSES_QUERY. - if (minimumX >= xMin && maximumX <= xMax && - minimumY >= yMin && maximumY <= yMax && - minimumZ >= zMin && maximumZ <= zMax) { + if (minimumX >= xMin + && maximumX <= xMax + && minimumY >= yMin + && maximumY <= yMax + && minimumZ >= zMin + && maximumZ <= zMax) { return Relation.CELL_CROSSES_QUERY; } // Quick test failed so do slower one... 
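The bound-widening in compare() exists because encoding rounds each coordinate to the nearest quantized step, exactly as the comment above describes. Below is a toy model of that rounding, using a hypothetical step size of 1.0 rather than Lucene's actual encoding constants; it is a sketch of the idea, not the library's implementation.

public class QuantizationSketch {
  static final double STEP = 1.0; // hypothetical quantization step

  static long encode(double raw) {
    return Math.round(raw / STEP);
  }

  // Smallest and largest raw values that round to the given encoded value.
  static double floorOf(long encoded) {
    return (encoded - 0.5) * STEP;
  }

  static double ceilOf(long encoded) {
    return Math.nextDown((encoded + 0.5) * STEP);
  }

  public static void main(String[] args) {
    System.out.println(encode(1.4));  // 1
    System.out.println(encode(-1.4)); // -1
    // A cell whose encoded bounds are [1, 2] really covers raw values
    // [0.5, 2.5), so bounds checks must use the widened range:
    System.out.println(floorOf(1) + " .. " + ceilOf(2));
  }
}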
- GeoArea xyzSolid = GeoAreaFactory.makeGeoArea(shape.getPlanetModel(), xMin, xMax, yMin, yMax, zMin, zMax); + GeoArea xyzSolid = + GeoAreaFactory.makeGeoArea(shape.getPlanetModel(), xMin, xMax, yMin, yMax, zMin, zMax); - switch(xyzSolid.getRelationship(shape)) { - case GeoArea.CONTAINS: - // Shape fully contains the cell - //System.out.println(" inside"); - return Relation.CELL_INSIDE_QUERY; - case GeoArea.OVERLAPS: - // They do overlap but neither contains the other: - //System.out.println(" crosses1"); - return Relation.CELL_CROSSES_QUERY; - case GeoArea.WITHIN: - // Cell fully contains the shape: - //System.out.println(" crosses2"); - // return Relation.SHAPE_INSIDE_CELL; - return Relation.CELL_CROSSES_QUERY; - case GeoArea.DISJOINT: - // They do not overlap at all - //System.out.println(" outside"); - return Relation.CELL_OUTSIDE_QUERY; - default: - assert false; - return Relation.CELL_CROSSES_QUERY; + switch (xyzSolid.getRelationship(shape)) { + case GeoArea.CONTAINS: + // Shape fully contains the cell + // System.out.println(" inside"); + return Relation.CELL_INSIDE_QUERY; + case GeoArea.OVERLAPS: + // They do overlap but neither contains the other: + // System.out.println(" crosses1"); + return Relation.CELL_CROSSES_QUERY; + case GeoArea.WITHIN: + // Cell fully contains the shape: + // System.out.println(" crosses2"); + // return Relation.SHAPE_INSIDE_CELL; + return Relation.CELL_CROSSES_QUERY; + case GeoArea.DISJOINT: + // They do not overlap at all + // System.out.println(" outside"); + return Relation.CELL_OUTSIDE_QUERY; + default: + assert false; + return Relation.CELL_CROSSES_QUERY; } } } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/ArcDistance.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/ArcDistance.java index 25b9c7d1376..2e6648dea3f 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/ArcDistance.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/ArcDistance.java @@ -22,50 +22,61 @@ package org.apache.lucene.spatial3d.geom; * @lucene.experimental */ public class ArcDistance implements DistanceStyle { - + /** An instance of the ArcDistance DistanceStyle. */ - public final static ArcDistance INSTANCE = new ArcDistance(); - - /** Constructor. - */ - public ArcDistance() { - } - + public static final ArcDistance INSTANCE = new ArcDistance(); + + /** Constructor. */ + public ArcDistance() {} + @Override public double computeDistance(final GeoPoint point1, final GeoPoint point2) { return point1.arcDistance(point2); } - + @Override - public double computeDistance(final GeoPoint point1, final double x2, final double y2, final double z2) { - return point1.arcDistance(x2,y2,z2); + public double computeDistance( + final GeoPoint point1, final double x2, final double y2, final double z2) { + return point1.arcDistance(x2, y2, z2); } @Override - public double computeDistance(final PlanetModel planetModel, final Plane plane, final GeoPoint point, final Membership... bounds) { + public double computeDistance( + final PlanetModel planetModel, + final Plane plane, + final GeoPoint point, + final Membership... bounds) { return plane.arcDistance(planetModel, point, bounds); } - + @Override - public double computeDistance(final PlanetModel planetModel, final Plane plane, final double x, final double y, final double z, final Membership... 
bounds) { - return plane.arcDistance(planetModel, x,y,z, bounds); + public double computeDistance( + final PlanetModel planetModel, + final Plane plane, + final double x, + final double y, + final double z, + final Membership... bounds) { + return plane.arcDistance(planetModel, x, y, z, bounds); } @Override - public GeoPoint[] findDistancePoints(final PlanetModel planetModel, final double distanceValue, final GeoPoint startPoint, final Plane plane, final Membership... bounds) { + public GeoPoint[] findDistancePoints( + final PlanetModel planetModel, + final double distanceValue, + final GeoPoint startPoint, + final Plane plane, + final Membership... bounds) { return plane.findArcDistancePoints(planetModel, distanceValue, startPoint, bounds); } - + @Override public double findMinimumArcDistance(final PlanetModel planetModel, final double distanceValue) { return distanceValue; } - + @Override public double findMaximumArcDistance(final PlanetModel planetModel, final double distanceValue) { return distanceValue; } - } - - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/BasePlanetObject.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/BasePlanetObject.java index bb2cc31b607..8e51b68d6ec 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/BasePlanetObject.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/BasePlanetObject.java @@ -16,22 +16,22 @@ */ package org.apache.lucene.spatial3d.geom; -import java.io.OutputStream; import java.io.IOException; +import java.io.OutputStream; /** - * All Geo3D shapes can derive from this base class, which furnishes - * some common code + * All Geo3D shapes can derive from this base class, which furnishes some common code * * @lucene.internal */ public abstract class BasePlanetObject implements PlanetObject { - /** This is the planet model embedded in all objects derived from this - * class. */ + /** This is the planet model embedded in all objects derived from this class. */ protected final PlanetModel planetModel; - - /** Constructor creating class instance given a planet model. + + /** + * Constructor creating class instance given a planet model. + * * @param planetModel is the planet model. 
*/ public BasePlanetObject(final PlanetModel planetModel) { @@ -42,26 +42,22 @@ public abstract class BasePlanetObject implements PlanetObject { public PlanetModel getPlanetModel() { return planetModel; } - + @Override public void write(final OutputStream outputStream) throws IOException { throw new UnsupportedOperationException(); } - + @Override public int hashCode() { return planetModel.hashCode(); } - + @Override public boolean equals(final Object o) { - if (!(o instanceof BasePlanetObject)) + if (!(o instanceof BasePlanetObject)) { return false; - return planetModel.equals(((BasePlanetObject)o).planetModel); + } + return planetModel.equals(((BasePlanetObject) o).planetModel); } - - } - - - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/BaseXYZSolid.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/BaseXYZSolid.java index 25aa130b5d9..ba92e358114 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/BaseXYZSolid.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/BaseXYZSolid.java @@ -29,7 +29,7 @@ abstract class BaseXYZSolid extends BasePlanetObject implements XYZSolid { protected static final Vector yUnitVector = new Vector(0.0, 1.0, 0.0); /** Unit vector in z */ protected static final Vector zUnitVector = new Vector(0.0, 0.0, 1.0); - + /** Vertical plane normal to x unit vector passing through origin */ protected static final Plane xVerticalPlane = new Plane(0.0, 1.0, 0.0, 0.0); /** Vertical plane normal to y unit vector passing through origin */ @@ -37,16 +37,19 @@ abstract class BaseXYZSolid extends BasePlanetObject implements XYZSolid { /** Empty point vector */ protected static final GeoPoint[] EMPTY_POINTS = new GeoPoint[0]; - + /** * Base solid constructor. - *@param planetModel is the planet model. + * + * @param planetModel is the planet model. */ public BaseXYZSolid(final PlanetModel planetModel) { super(planetModel); } - - /** Construct a single array from a number of individual arrays. + + /** + * Construct a single array from a number of individual arrays. + * * @param pointArrays is the array of point arrays. * @return the single unified array. */ @@ -64,35 +67,37 @@ abstract class BaseXYZSolid extends BasePlanetObject implements XYZSolid { } return rval; } - + @Override public boolean isWithin(final Vector point) { return isWithin(point.x, point.y, point.z); } - + @Override public abstract boolean isWithin(final double x, final double y, final double z); - - // Signals for relationship of edge points to shape - - /** All edgepoints inside shape */ - protected final static int ALL_INSIDE = 0; - /** Some edgepoints inside shape */ - protected final static int SOME_INSIDE = 1; - /** No edgepoints inside shape */ - protected final static int NONE_INSIDE = 2; - /** No edgepoints at all (means a shape that is the whole world) */ - protected final static int NO_EDGEPOINTS = 3; - /** Determine the relationship between this area and the provided - * shape's edgepoints. - *@param path is the shape. - *@return the relationship. 
+ // Signals for relationship of edge points to shape + + /** All edgepoints inside shape */ + protected static final int ALL_INSIDE = 0; + /** Some edgepoints inside shape */ + protected static final int SOME_INSIDE = 1; + /** No edgepoints inside shape */ + protected static final int NONE_INSIDE = 2; + /** No edgepoints at all (means a shape that is the whole world) */ + protected static final int NO_EDGEPOINTS = 3; + + /** + * Determine the relationship between this area and the provided shape's edgepoints. + * + * @param path is the shape. + * @return the relationship. */ protected int isShapeInsideArea(final GeoShape path) { final GeoPoint[] pathPoints = path.getEdgePoints(); - if (pathPoints.length == 0) + if (pathPoints.length == 0) { return NO_EDGEPOINTS; + } boolean foundOutside = false; boolean foundInside = false; for (final GeoPoint p : pathPoints) { @@ -105,19 +110,23 @@ abstract class BaseXYZSolid extends BasePlanetObject implements XYZSolid { return SOME_INSIDE; } } - if (!foundInside && !foundOutside) + if (!foundInside && !foundOutside) { return NONE_INSIDE; - if (foundInside && !foundOutside) + } + if (foundInside && !foundOutside) { return ALL_INSIDE; - if (foundOutside && !foundInside) + } + if (foundOutside && !foundInside) { return NONE_INSIDE; + } return SOME_INSIDE; } - /** Determine the relationship between a shape and this area's - * edgepoints. - *@param path is the shape. - *@return the relationship. + /** + * Determine the relationship between a shape and this area's edgepoints. + * + * @param path is the shape. + * @return the relationship. */ protected int isAreaInsideShape(final GeoShape path) { final GeoPoint[] edgePoints = getEdgePoints(); @@ -136,24 +145,30 @@ abstract class BaseXYZSolid extends BasePlanetObject implements XYZSolid { return SOME_INSIDE; } } - if (!foundInside && !foundOutside) + if (!foundInside && !foundOutside) { return NONE_INSIDE; - if (foundInside && !foundOutside) + } + if (foundInside && !foundOutside) { return ALL_INSIDE; - if (foundOutside && !foundInside) + } + if (foundOutside && !foundInside) { return NONE_INSIDE; + } return SOME_INSIDE; } - /** Get the edge points for this shape. - *@return the edge points. + /** + * Get the edge points for this shape. + * + * @return the edge points. */ protected abstract GeoPoint[] getEdgePoints(); - + @Override public boolean equals(Object o) { - if (!(o instanceof BaseXYZSolid)) + if (!(o instanceof BaseXYZSolid)) { return false; + } BaseXYZSolid other = (BaseXYZSolid) o; return super.equals(other); } @@ -162,6 +177,4 @@ abstract class BaseXYZSolid extends BasePlanetObject implements XYZSolid { public int hashCode() { return super.hashCode(); } - } - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/Bounded.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/Bounded.java index 59dcc6a9a0c..f5539a042d6 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/Bounded.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/Bounded.java @@ -17,8 +17,7 @@ package org.apache.lucene.spatial3d.geom; /** - * This interface describes methods that determine what the bounds are - * for a shape. + * This interface describes methods that determine what the bounds are for a shape. * * @lucene.experimental */ @@ -27,9 +26,7 @@ public interface Bounded { /** * Compute bounds for the shape. * - * @param bounds is the input bounds object. - * The input object will be modified. + * @param bounds is the input bounds object. 
The input object will be modified. */ public void getBounds(final Bounds bounds); - } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/Bounds.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/Bounds.java index 97a5d13a232..40aa34f3999 100755 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/Bounds.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/Bounds.java @@ -17,115 +17,144 @@ package org.apache.lucene.spatial3d.geom; /** - * An interface for accumulating bounds information. - * The bounds object is initially empty. Bounding points - * are then applied by supplying (x,y,z) tuples. It is also - * possible to indicate the following edge cases: - * (1) No longitude bound possible - * (2) No upper latitude bound possible - * (3) No lower latitude bound possible - * When any of these have been applied, further application of - * points cannot override that decision. + * An interface for accumulating bounds information. The bounds object is initially empty. Bounding + * points are then applied by supplying (x,y,z) tuples. It is also possible to indicate the + * following edge cases: + * + *
<ol> + *   <li>No longitude bound possible. + *   <li>No upper latitude bound possible. + *   <li>No lower latitude bound possible + * </ol> + * + * When any of these have been applied, further application + * of points cannot override that decision. + *
    * * @lucene.experimental */ public interface Bounds { - /** Add a general plane to the bounds description. - *@param planetModel is the planet model. - *@param plane is the plane. - *@param bounds are the membership bounds for points along the arc. + /** + * Add a general plane to the bounds description. + * + * @param planetModel is the planet model. + * @param plane is the plane. + * @param bounds are the membership bounds for points along the arc. */ - public Bounds addPlane(final PlanetModel planetModel, final Plane plane, final Membership... bounds); - - /** Add a horizontal plane to the bounds description. - * This method should EITHER use the supplied latitude, OR use the supplied - * plane, depending on what is most efficient. - *@param planetModel is the planet model. - *@param latitude is the latitude. - *@param horizontalPlane is the plane. - *@param bounds are the constraints on the plane. - *@return updated Bounds object. - */ - public Bounds addHorizontalPlane(final PlanetModel planetModel, - final double latitude, - final Plane horizontalPlane, - final Membership... bounds); - - /** Add a vertical plane to the bounds description. - * This method should EITHER use the supplied longitude, OR use the supplied - * plane, depending on what is most efficient. - *@param planetModel is the planet model. - *@param longitude is the longitude. - *@param verticalPlane is the plane. - *@param bounds are the constraints on the plane. - *@return updated Bounds object. - */ - public Bounds addVerticalPlane(final PlanetModel planetModel, - final double longitude, - final Plane verticalPlane, - final Membership... bounds); + public Bounds addPlane( + final PlanetModel planetModel, final Plane plane, final Membership... bounds); - /** Add the intersection between two planes to the bounds description. - * Where the shape has intersecting planes, it is better to use this method - * than just adding the point, since this method takes each plane's error envelope into - * account. - *@param planetModel is the planet model. - *@param plane1 is the first plane. - *@param plane2 is the second plane. - *@param bounds are the membership bounds for the intersection. + /** + * Add a horizontal plane to the bounds description. This method should EITHER use the supplied + * latitude, OR use the supplied plane, depending on what is most efficient. + * + * @param planetModel is the planet model. + * @param latitude is the latitude. + * @param horizontalPlane is the plane. + * @param bounds are the constraints on the plane. + * @return updated Bounds object. */ - public Bounds addIntersection(final PlanetModel planetModel, final Plane plane1, final Plane plane2, final Membership... bounds); + public Bounds addHorizontalPlane( + final PlanetModel planetModel, + final double latitude, + final Plane horizontalPlane, + final Membership... bounds); - /** Add a single point. - *@param point is the point. - *@return the updated Bounds object. + /** + * Add a vertical plane to the bounds description. This method should EITHER use the supplied + * longitude, OR use the supplied plane, depending on what is most efficient. + * + * @param planetModel is the planet model. + * @param longitude is the longitude. + * @param verticalPlane is the plane. + * @param bounds are the constraints on the plane. + * @return updated Bounds object. + */ + public Bounds addVerticalPlane( + final PlanetModel planetModel, + final double longitude, + final Plane verticalPlane, + final Membership... 
bounds); + + /** + * Add the intersection between two planes to the bounds description. Where the shape has + * intersecting planes, it is better to use this method than just adding the point, since this + * method takes each plane's error envelope into account. + * + * @param planetModel is the planet model. + * @param plane1 is the first plane. + * @param plane2 is the second plane. + * @param bounds are the membership bounds for the intersection. + */ + public Bounds addIntersection( + final PlanetModel planetModel, + final Plane plane1, + final Plane plane2, + final Membership... bounds); + + /** + * Add a single point. + * + * @param point is the point. + * @return the updated Bounds object. */ public Bounds addPoint(final GeoPoint point); - /** Add an X value. - *@param point is the point to take the x value from. - *@return the updated object. + /** + * Add an X value. + * + * @param point is the point to take the x value from. + * @return the updated object. */ public Bounds addXValue(final GeoPoint point); - /** Add a Y value. - *@param point is the point to take the y value from. - *@return the updated object. + /** + * Add a Y value. + * + * @param point is the point to take the y value from. + * @return the updated object. */ public Bounds addYValue(final GeoPoint point); - /** Add a Z value. - *@param point is the point to take the z value from. - *@return the updated object. + /** + * Add a Z value. + * + * @param point is the point to take the z value from. + * @return the updated object. */ public Bounds addZValue(final GeoPoint point); - - /** Signal that the shape exceeds Math.PI in longitude. - *@return the updated Bounds object. + + /** + * Signal that the shape exceeds Math.PI in longitude. + * + * @return the updated Bounds object. */ public Bounds isWide(); - - /** Signal that there is no longitude bound. - *@return the updated Bounds object. + + /** + * Signal that there is no longitude bound. + * + * @return the updated Bounds object. */ public Bounds noLongitudeBound(); - /** Signal that there is no top latitude bound. - *@return the updated Bounds object. + /** + * Signal that there is no top latitude bound. + * + * @return the updated Bounds object. */ public Bounds noTopLatitudeBound(); - /** Signal that there is no bottom latitude bound. - *@return the updated Bounds object. + /** + * Signal that there is no bottom latitude bound. + * + * @return the updated Bounds object. */ public Bounds noBottomLatitudeBound(); - - /** Signal that there is no bound whatsoever. - * The bound is limited only by the constraints of the + + /** + * Signal that there is no bound whatsoever. The bound is limited only by the constraints of the * planet. - *@return the updated Bounds object., + * + * @return the updated Bounds object., */ public Bounds noBound(final PlanetModel planetModel); - } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/DistanceStyle.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/DistanceStyle.java index 44919e33b2c..b90ff18e46d 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/DistanceStyle.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/DistanceStyle.java @@ -17,8 +17,7 @@ package org.apache.lucene.spatial3d.geom; /** - * Distance computation styles, supporting various ways of computing - * distance to shapes. + * Distance computation styles, supporting various ways of computing distance to shapes. 
* * @lucene.experimental */ @@ -37,7 +36,9 @@ public interface DistanceStyle { /** Normal distance squared calculator */ public static final NormalSquaredDistance NORMAL_SQUARED = NormalSquaredDistance.INSTANCE; - /** Compute the distance from a point to another point. + /** + * Compute the distance from a point to another point. + * * @param point1 Starting point * @param point2 Final point * @return the distance @@ -45,29 +46,39 @@ public interface DistanceStyle { public default double computeDistance(final GeoPoint point1, final GeoPoint point2) { return computeDistance(point1, point2.x, point2.y, point2.z); } - - /** Compute the distance from a point to another point. + + /** + * Compute the distance from a point to another point. + * * @param point1 Starting point * @param x2 Final point x * @param y2 Final point y * @param z2 Final point z * @return the distance */ - public double computeDistance(final GeoPoint point1, final double x2, final double y2, final double z2); + public double computeDistance( + final GeoPoint point1, final double x2, final double y2, final double z2); - /** Compute the distance from a plane to a point. + /** + * Compute the distance from a plane to a point. + * * @param planetModel The planet model * @param plane The plane * @param point The point * @param bounds are the plane bounds * @return the distance */ - public default double computeDistance(final PlanetModel planetModel, final Plane plane, final GeoPoint point, - final Membership... bounds) { + public default double computeDistance( + final PlanetModel planetModel, + final Plane plane, + final GeoPoint point, + final Membership... bounds) { return computeDistance(planetModel, plane, point.x, point.y, point.z, bounds); } - - /** Compute the distance from a plane to a point. + + /** + * Compute the distance from a plane to a point. + * * @param planetModel The planet model * @param plane The plane * @param x The point x @@ -76,23 +87,33 @@ public interface DistanceStyle { * @param bounds are the plane bounds * @return the distance */ - public double computeDistance(final PlanetModel planetModel, final Plane plane, final double x, final double y, final double z, final Membership... bounds); + public double computeDistance( + final PlanetModel planetModel, + final Plane plane, + final double x, + final double y, + final double z, + final Membership... bounds); - /** Convert a distance to a form meant for aggregation. - * This is meant to be used in conjunction with aggregateDistances() and fromAggregationForm(). - * Distances should be converted to aggregation form before aggregation is attempted, - * and they should be converted back from aggregation form to yield a final result. + /** + * Convert a distance to a form meant for aggregation. This is meant to be used in conjunction + * with aggregateDistances() and fromAggregationForm(). Distances should be converted to + * aggregation form before aggregation is attempted, and they should be converted back from + * aggregation form to yield a final result. + * * @param distance is an output of computeDistance(). * @return the distance, converted to aggregation form. */ public default double toAggregationForm(final double distance) { return distance; } - - /** Aggregate two distances together to produce a "sum". - * This is usually just an addition operation, but in the case of squared distances it is more complex. 
- * Distances should be converted to aggregation form before aggregation is attempted, - * and they should be converted back from aggregation form to yield a final result. + + /** + * Aggregate two distances together to produce a "sum". This is usually just an addition + * operation, but in the case of squared distances it is more complex. Distances should be + * converted to aggregation form before aggregation is attempted, and they should be converted + * back from aggregation form to yield a final result. + * * @param distance1 is the first aggregation form distance. * @param distance2 is the second aggregation form distance. * @return the combined aggregation form distance. @@ -100,46 +121,57 @@ public interface DistanceStyle { public default double aggregateDistances(final double distance1, final double distance2) { return distance1 + distance2; } - - /** Convert an aggregation form distance value back to an actual distance. - * This is meant to be used in conjunctiion with toAggregationForm() and aggregateDistances(). - * Distances should be converted to aggregation form before aggregation is attempted, - * and they should be converted back from aggregation form to yield a final result. + + /** + * Convert an aggregation form distance value back to an actual distance. This is meant to be used + * in conjunction with toAggregationForm() and aggregateDistances(). Distances should be converted + * to aggregation form before aggregation is attempted, and they should be converted back from + * aggregation form to yield a final result. + * * @param aggregateDistance is the aggregate form of the distance. * @return the combined distance. */ public default double fromAggregationForm(final double aggregateDistance) { return aggregateDistance; } - + // The following methods are used to go from a distance value back to something // that can be used to construct a constrained shape. - - /** Find a GeoPoint, at a specified distance from a starting point, within the - * specified bounds. The GeoPoint must be in the specified plane. + + /** + * Find a GeoPoint, at a specified distance from a starting point, within the specified bounds. + * The GeoPoint must be in the specified plane. + * * @param planetModel is the planet model. - * @param distanceValue is the distance to set the new point at, measured from point1 and on the way to point2. + * @param distanceValue is the distance to set the new point at, measured from point1 and on the + * way to point2. * @param startPoint is the starting point. * @param plane is the plane that the point must be in. * @param bounds are the constraints on where the point can be found. * @return zero, one, or two points at the proper distance from startPoint. */ - public GeoPoint[] findDistancePoints(final PlanetModel planetModel, final double distanceValue, final GeoPoint startPoint, final Plane plane, final Membership... bounds); - - /** Given a distance metric, find the minimum arc distance represented by that distance metric. + public GeoPoint[] findDistancePoints( + final PlanetModel planetModel, + final double distanceValue, + final GeoPoint startPoint, + final Plane plane, + final Membership... bounds); + + /** + * Given a distance metric, find the minimum arc distance represented by that distance metric. + * * @param planetModel is the planet model. * @param distanceValue is the distance metric. * @return the minimum arc distance that that distance value can represent given the planet model. 
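The aggregation-form contract above is easiest to see with a concrete style. The sketch below uses DistanceStyle.ARC, whose three hooks are the identity defaults shown in this interface; the squared-style remark in the comment is the motivating case described in the javadoc, not code from this patch.

import org.apache.lucene.spatial3d.geom.DistanceStyle;

public class AggregationFormSketch {
  public static void main(String[] args) {
    // For ARC the hooks are identity functions, so this is a plain sum:
    DistanceStyle style = DistanceStyle.ARC;
    double d1 = style.toAggregationForm(0.1);
    double d2 = style.toAggregationForm(0.2);
    double total = style.fromAggregationForm(style.aggregateDistances(d1, d2));
    System.out.println(total); // ~0.3 radians of arc
    // For a squared style the hooks matter: 0.1^2 + 0.2^2 != (0.1 + 0.2)^2,
    // so segments are aggregated in linear form and converted back once at the end.
  }
}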
*/ public double findMinimumArcDistance(final PlanetModel planetModel, final double distanceValue); - - /** Given a distance metric, find the maximum arc distance represented by the distance metric. + + /** + * Given a distance metric, find the maximum arc distance represented by the distance metric. + * * @param planetModel is the planet model. * @param distanceValue is the distance metric. * @return the maximum arc distance that that distance value can represent given the planet model. */ public double findMaximumArcDistance(final PlanetModel planetModel, final double distanceValue); - } - - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoArea.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoArea.java index 5a6db0da676..fe9954e7fb8 100755 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoArea.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoArea.java @@ -17,10 +17,9 @@ package org.apache.lucene.spatial3d.geom; /** - * A GeoArea represents a standard 2-D breakdown of a part of sphere. It can - * be bounded in latitude, or bounded in both latitude and longitude, or not - * bounded at all. The purpose of the interface is to describe bounding shapes used for - * computation of geo hashes. + * A GeoArea represents a standard 2-D breakdown of a part of sphere. It can be bounded in latitude, + * or bounded in both latitude and longitude, or not bounded at all. The purpose of the interface is + * to describe bounding shapes used for computation of geo hashes. * * @lucene.experimental */ @@ -31,7 +30,7 @@ public interface GeoArea extends Membership { // the underlying GeoShape class. // Relationship values for "getRelationship()" - + /** The referenced shape CONTAINS this area */ public static final int CONTAINS = 0; /** The referenced shape IS WITHIN this area */ @@ -42,26 +41,23 @@ public interface GeoArea extends Membership { public static final int DISJOINT = 3; /** - * Find the spatial relationship between a shape and the current geo area. - * Note: return value is how the GeoShape relates to the GeoArea, not the - * other way around. For example, if this GeoArea is entirely within the - * shape, then CONTAINS should be returned. If the shape is entirely enclosed - * by this GeoArea, then WITHIN should be returned. + * Find the spatial relationship between a shape and the current geo area. Note: return value is + * how the GeoShape relates to the GeoArea, not the other way around. For example, if this GeoArea + * is entirely within the shape, then CONTAINS should be returned. If the shape is entirely + * enclosed by this GeoArea, then WITHIN should be returned. * - * It is permissible to return OVERLAPS instead of WITHIN if the shape - * intersects with the area at even a single point. So, a circle inscribed in - * a rectangle could return either OVERLAPS or WITHIN, depending on - * implementation. It is not permissible to return CONTAINS or DISJOINT - * in this circumstance, however. + *
<p>
    It is permissible to return OVERLAPS instead of WITHIN if the shape intersects with the area + * at even a single point. So, a circle inscribed in a rectangle could return either OVERLAPS or + * WITHIN, depending on implementation. It is not permissible to return CONTAINS or DISJOINT in + * this circumstance, however. * - * Similarly, it is permissible to return OVERLAPS instead of CONTAINS - * under conditions where the shape consists of multiple independent overlapping - * subshapes, and the area overlaps one of the subshapes. It is not permissible - * to return WITHIN or DISJOINT in this circumstance, however. + *
<p>
Similarly, it is permissible to return OVERLAPS instead of CONTAINS under conditions where + * the shape consists of multiple independent overlapping subshapes, and the area overlaps one of + * the subshapes. It is not permissible + * to return WITHIN or DISJOINT in this circumstance, however. * * @param shape is the shape to consider. * @return the relationship, from the perspective of the shape. */ public int getRelationship(GeoShape shape); } - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoAreaFactory.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoAreaFactory.java index 0c3caa99439..70f10c6f60e 100755 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoAreaFactory.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoAreaFactory.java @@ -22,24 +22,30 @@ package org.apache.lucene.spatial3d.geom; * @lucene.experimental */ public class GeoAreaFactory { - private GeoAreaFactory() { - } + private GeoAreaFactory() {} /** * Create a GeoArea of the right kind given the specified bounds. + * * @param planetModel is the planet model - * @param topLat is the top latitude + * @param topLat is the top latitude * @param bottomLat is the bottom latitude - * @param leftLon is the left longitude - * @param rightLon is the right longitude + * @param leftLon is the left longitude + * @param rightLon is the right longitude * @return a GeoArea corresponding to what was specified. */ - public static GeoArea makeGeoArea(final PlanetModel planetModel, final double topLat, final double bottomLat, final double leftLon, final double rightLon) { + public static GeoArea makeGeoArea( + final PlanetModel planetModel, + final double topLat, + final double bottomLat, + final double leftLon, + final double rightLon) { return GeoBBoxFactory.makeGeoBBox(planetModel, topLat, bottomLat, leftLon, rightLon); } /** * Create a GeoArea of the right kind given (x,y,z) bounds. + * * @param planetModel is the planet model * @param minX is the min X boundary * @param maxX is the max X boundary @@ -48,8 +54,14 @@ public class GeoAreaFactory { * @param minZ is the min Z boundary * @param maxZ is the max Z boundary */ - public static GeoArea makeGeoArea(final PlanetModel planetModel, final double minX, final double maxX, final double minY, final double maxY, final double minZ, final double maxZ) { + public static GeoArea makeGeoArea( + final PlanetModel planetModel, + final double minX, + final double maxX, + final double minY, + final double maxY, + final double minZ, + final double maxZ) { return XYZSolidFactory.makeXYZSolid(planetModel, minX, maxX, minY, maxY, minZ, maxZ); } - } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoAreaShape.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoAreaShape.java index d977901266b..59321073358 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoAreaShape.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoAreaShape.java @@ -18,22 +18,20 @@ package org.apache.lucene.spatial3d.geom; /** - * Shape that implements GeoArea. This type of shape is able to resolve the - * spatial relationship of other shapes with itself. + * Shape that implements GeoArea. This type of shape is able to resolve the spatial relationship + * of other shapes with itself.
* * @lucene.experimental */ - -public interface GeoAreaShape extends GeoMembershipShape, GeoArea{ +public interface GeoAreaShape extends GeoMembershipShape, GeoArea { /** - * Assess whether a shape intersects with any of the edges of this shape. - * Note well that this method must return false if the shape contains or is disjoint - * with the given shape. It is permissible to return true if the shape is within the - * specified shape, if it is difficult to compute intersection with edges. + * Assess whether a shape intersects with any of the edges of this shape. Note well that this + * method must return false if the shape contains or is disjoint with the given shape. It is + * permissible to return true if the shape is within the specified shape, if it is difficult to + * compute intersection with edges. * * @param geoShape is the shape to assess for intersection with this shape's edges. - * * @return true if there's such an intersection, false if not. */ boolean intersects(GeoShape geoShape); diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBBox.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBBox.java index 683333cbe9d..1386dfef265 100755 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBBox.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBBox.java @@ -17,9 +17,8 @@ package org.apache.lucene.spatial3d.geom; /** - * All bounding box shapes have this interface in common. - * This describes methods that bounding boxes have above and beyond - * GeoMembershipShape's. + * All bounding box shapes have this interface in common. This describes methods that bounding boxes + * have above and beyond GeoMembershipShape's. * * @lucene.experimental */ @@ -32,5 +31,4 @@ public interface GeoBBox extends GeoAreaShape, GeoSizeable { * @return a new GeoBBox. */ public GeoBBox expand(double angle); - } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBBoxFactory.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBBoxFactory.java index 28b3a799753..26b201c3b05 100755 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBBoxFactory.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBBoxFactory.java @@ -22,64 +22,84 @@ package org.apache.lucene.spatial3d.geom; * @lucene.experimental */ public class GeoBBoxFactory { - private GeoBBoxFactory() { - } + private GeoBBoxFactory() {} /** * Create a geobbox of the right kind given the specified bounds. * * @param planetModel is the planet model - * @param topLat is the top latitude + * @param topLat is the top latitude * @param bottomLat is the bottom latitude - * @param leftLon is the left longitude - * @param rightLon is the right longitude + * @param leftLon is the left longitude + * @param rightLon is the right longitude * @return a GeoBBox corresponding to what was specified. 
*/ - public static GeoBBox makeGeoBBox(final PlanetModel planetModel, double topLat, double bottomLat, double leftLon, double rightLon) { - //System.err.println("Making rectangle for topLat="+topLat*180.0/Math.PI+", bottomLat="+bottomLat*180.0/Math.PI+", leftLon="+leftLon*180.0/Math.PI+", rightlon="+rightLon*180.0/Math.PI); - if (topLat > Math.PI * 0.5) + public static GeoBBox makeGeoBBox( + final PlanetModel planetModel, + double topLat, + double bottomLat, + double leftLon, + double rightLon) { + // System.err.println("Making rectangle for topLat="+topLat*180.0/Math.PI+", + // bottomLat="+bottomLat*180.0/Math.PI+", leftLon="+leftLon*180.0/Math.PI+", + // rightlon="+rightLon*180.0/Math.PI); + if (topLat > Math.PI * 0.5) { topLat = Math.PI * 0.5; - if (bottomLat < -Math.PI * 0.5) + } + if (bottomLat < -Math.PI * 0.5) { bottomLat = -Math.PI * 0.5; - if (leftLon < -Math.PI) + } + if (leftLon < -Math.PI) { leftLon = -Math.PI; - if (rightLon > Math.PI) + } + if (rightLon > Math.PI) { rightLon = Math.PI; - if ((Math.abs(leftLon + Math.PI) < Vector.MINIMUM_ANGULAR_RESOLUTION && Math.abs(rightLon - Math.PI) < Vector.MINIMUM_ANGULAR_RESOLUTION) || - (Math.abs(rightLon + Math.PI) < Vector.MINIMUM_ANGULAR_RESOLUTION && Math.abs(leftLon - Math.PI) < Vector.MINIMUM_ANGULAR_RESOLUTION)) { - if (Math.abs(topLat - Math.PI * 0.5) < Vector.MINIMUM_ANGULAR_RESOLUTION && Math.abs(bottomLat + Math.PI * 0.5) < Vector.MINIMUM_ANGULAR_RESOLUTION) + } + if ((Math.abs(leftLon + Math.PI) < Vector.MINIMUM_ANGULAR_RESOLUTION + && Math.abs(rightLon - Math.PI) < Vector.MINIMUM_ANGULAR_RESOLUTION) + || (Math.abs(rightLon + Math.PI) < Vector.MINIMUM_ANGULAR_RESOLUTION + && Math.abs(leftLon - Math.PI) < Vector.MINIMUM_ANGULAR_RESOLUTION)) { + if (Math.abs(topLat - Math.PI * 0.5) < Vector.MINIMUM_ANGULAR_RESOLUTION + && Math.abs(bottomLat + Math.PI * 0.5) < Vector.MINIMUM_ANGULAR_RESOLUTION) { return new GeoWorld(planetModel); + } if (Math.abs(topLat - bottomLat) < Vector.MINIMUM_ANGULAR_RESOLUTION) { - if (Math.abs(topLat - Math.PI * 0.5) < Vector.MINIMUM_ANGULAR_RESOLUTION || Math.abs(topLat + Math.PI * 0.5) < Vector.MINIMUM_ANGULAR_RESOLUTION) + if (Math.abs(topLat - Math.PI * 0.5) < Vector.MINIMUM_ANGULAR_RESOLUTION + || Math.abs(topLat + Math.PI * 0.5) < Vector.MINIMUM_ANGULAR_RESOLUTION) { return new GeoDegeneratePoint(planetModel, topLat, 0.0); + } return new GeoDegenerateLatitudeZone(planetModel, topLat); } - if (Math.abs(topLat - Math.PI * 0.5) < Vector.MINIMUM_ANGULAR_RESOLUTION) + if (Math.abs(topLat - Math.PI * 0.5) < Vector.MINIMUM_ANGULAR_RESOLUTION) { return new GeoNorthLatitudeZone(planetModel, bottomLat); - else if (Math.abs(bottomLat + Math.PI * 0.5) < Vector.MINIMUM_ANGULAR_RESOLUTION) + } else if (Math.abs(bottomLat + Math.PI * 0.5) < Vector.MINIMUM_ANGULAR_RESOLUTION) { return new GeoSouthLatitudeZone(planetModel, topLat); + } return new GeoLatitudeZone(planetModel, topLat, bottomLat); } - //System.err.println(" not latitude zone"); + // System.err.println(" not latitude zone"); double extent = rightLon - leftLon; - if (extent < 0.0) + if (extent < 0.0) { extent += Math.PI * 2.0; + } if (topLat == Math.PI * 0.5 && bottomLat == -Math.PI * 0.5) { if (Math.abs(leftLon - rightLon) < Vector.MINIMUM_ANGULAR_RESOLUTION) return new GeoDegenerateLongitudeSlice(planetModel, leftLon); - if (extent >= Math.PI) + if (extent >= Math.PI) { return new GeoWideLongitudeSlice(planetModel, leftLon, rightLon); + } return new GeoLongitudeSlice(planetModel, leftLon, rightLon); } - //System.err.println(" not longitude 
slice"); + // System.err.println(" not longitude slice"); if (Math.abs(leftLon - rightLon) < Vector.MINIMUM_ANGULAR_RESOLUTION) { - if (Math.abs(topLat - bottomLat) < Vector.MINIMUM_ANGULAR_RESOLUTION) + if (Math.abs(topLat - bottomLat) < Vector.MINIMUM_ANGULAR_RESOLUTION) { return new GeoDegeneratePoint(planetModel, topLat, leftLon); + } return new GeoDegenerateVerticalLine(planetModel, topLat, bottomLat, leftLon); } - //System.err.println(" not vertical line"); + // System.err.println(" not vertical line"); if (extent >= Math.PI) { if (Math.abs(topLat - bottomLat) < Vector.MINIMUM_ANGULAR_RESOLUTION) { if (Math.abs(topLat - Math.PI * 0.5) < Vector.MINIMUM_ANGULAR_RESOLUTION) { @@ -87,7 +107,7 @@ public class GeoBBoxFactory { } else if (Math.abs(bottomLat + Math.PI * 0.5) < Vector.MINIMUM_ANGULAR_RESOLUTION) { return new GeoDegeneratePoint(planetModel, bottomLat, 0.0); } - //System.err.println(" wide degenerate line"); + // System.err.println(" wide degenerate line"); return new GeoWideDegenerateHorizontalLine(planetModel, topLat, leftLon, rightLon); } if (Math.abs(topLat - Math.PI * 0.5) < Vector.MINIMUM_ANGULAR_RESOLUTION) { @@ -95,7 +115,7 @@ public class GeoBBoxFactory { } else if (Math.abs(bottomLat + Math.PI * 0.5) < Vector.MINIMUM_ANGULAR_RESOLUTION) { return new GeoWideSouthRectangle(planetModel, topLat, leftLon, rightLon); } - //System.err.println(" wide rect"); + // System.err.println(" wide rect"); return new GeoWideRectangle(planetModel, topLat, bottomLat, leftLon, rightLon); } if (Math.abs(topLat - bottomLat) < Vector.MINIMUM_ANGULAR_RESOLUTION) { @@ -104,15 +124,15 @@ public class GeoBBoxFactory { } else if (Math.abs(bottomLat + Math.PI * 0.5) < Vector.MINIMUM_ANGULAR_RESOLUTION) { return new GeoDegeneratePoint(planetModel, bottomLat, 0.0); } - //System.err.println(" horizontal line"); + // System.err.println(" horizontal line"); return new GeoDegenerateHorizontalLine(planetModel, topLat, leftLon, rightLon); } if (Math.abs(topLat - Math.PI * 0.5) < Vector.MINIMUM_ANGULAR_RESOLUTION) { return new GeoNorthRectangle(planetModel, bottomLat, leftLon, rightLon); - } else if (Math.abs(bottomLat + Math.PI * 0.5) < Vector.MINIMUM_ANGULAR_RESOLUTION) { + } else if (Math.abs(bottomLat + Math.PI * 0.5) < Vector.MINIMUM_ANGULAR_RESOLUTION) { return new GeoSouthRectangle(planetModel, topLat, leftLon, rightLon); } - //System.err.println(" rectangle"); + // System.err.println(" rectangle"); return new GeoRectangle(planetModel, topLat, bottomLat, leftLon, rightLon); } @@ -120,15 +140,16 @@ public class GeoBBoxFactory { * Create a geobbox of the right kind given the specified {@link LatLonBounds}. * * @param planetModel is the planet model - * @param bounds are the bounds + * @param bounds are the bounds * @return a GeoBBox corresponding to what was specified. */ public static GeoBBox makeGeoBBox(final PlanetModel planetModel, LatLonBounds bounds) { - final double topLat = (bounds.checkNoTopLatitudeBound()) ? Math.PI * 0.5 : bounds.getMaxLatitude(); - final double bottomLat = (bounds.checkNoBottomLatitudeBound()) ? -Math.PI * 0.5 : bounds.getMinLatitude(); + final double topLat = + (bounds.checkNoTopLatitudeBound()) ? Math.PI * 0.5 : bounds.getMaxLatitude(); + final double bottomLat = + (bounds.checkNoBottomLatitudeBound()) ? -Math.PI * 0.5 : bounds.getMinLatitude(); final double leftLon = (bounds.checkNoLongitudeBound()) ? -Math.PI : bounds.getLeftLongitude(); final double rightLon = (bounds.checkNoLongitudeBound()) ? 
Math.PI : bounds.getRightLongitude(); return makeGeoBBox(planetModel, topLat, bottomLat, leftLon, rightLon); } - } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseAreaShape.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseAreaShape.java index 0a1d5799e78..f89a91f98b7 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseAreaShape.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseAreaShape.java @@ -24,26 +24,29 @@ */ abstract class GeoBaseAreaShape extends GeoBaseMembershipShape implements GeoAreaShape { - /** Constructor. - *@param planetModel is the planet model to use. + /** + * Constructor. + * + * @param planetModel is the planet model to use. */ public GeoBaseAreaShape(final PlanetModel planetModel) { super(planetModel); } /** All edgepoints inside shape */ - protected final static int ALL_INSIDE = 0; + protected static final int ALL_INSIDE = 0; /** Some edgepoints inside shape */ - protected final static int SOME_INSIDE = 1; + protected static final int SOME_INSIDE = 1; /** No edgepoints inside shape */ - protected final static int NONE_INSIDE = 2; + protected static final int NONE_INSIDE = 2; - /** Determine the relationship between the GeoAreShape and the - * shape's edgepoints. - *@param geoShape is the shape. - *@return the relationship. + /** + * Determine the relationship between the GeoAreaShape and the shape's edgepoints. + * + * @param geoShape is the shape. + * @return the relationship. */ - protected int isShapeInsideGeoAreaShape(final GeoShape geoShape) { + protected int isShapeInsideGeoAreaShape(final GeoShape geoShape) { boolean foundOutside = false; boolean foundInside = false; for (GeoPoint p : geoShape.getEdgePoints()) { @@ -56,21 +59,19 @@ abstract class GeoBaseAreaShape extends GeoBaseMembershipShape implements GeoAre return SOME_INSIDE; } } - if (!foundInside && !foundOutside) - return NONE_INSIDE; - if (foundInside && !foundOutside) - return ALL_INSIDE; - if (foundOutside && !foundInside) - return NONE_INSIDE; + if (!foundInside && !foundOutside) return NONE_INSIDE; + if (foundInside && !foundOutside) return ALL_INSIDE; + if (foundOutside && !foundInside) return NONE_INSIDE; return SOME_INSIDE; } - /** Determine the relationship between the GeoAreaShape's edgepoints and the - * provided shape. - *@param geoshape is the shape. - *@return the relationship. + /** + * Determine the relationship between the GeoAreaShape's edgepoints and the provided shape. + * + * @param geoshape is the shape. + * @return the relationship.
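Stepping back to the GeoBBoxFactory logic above: the factory clamps the latitude and longitude bounds and then dispatches to a specialized implementation (world, latitude zone, longitude slice, degenerate point or line, or a rectangle). A minimal usage sketch, with illustrative values not drawn from this patch:

import org.apache.lucene.spatial3d.geom.GeoBBox;
import org.apache.lucene.spatial3d.geom.GeoBBoxFactory;
import org.apache.lucene.spatial3d.geom.PlanetModel;

public class GeoBBoxFactorySketch {
  public static void main(String[] args) {
    // A full longitude range with a finite latitude band; internally this
    // selects a latitude-zone implementation, but callers only see GeoBBox.
    GeoBBox band =
        GeoBBoxFactory.makeGeoBBox(
            PlanetModel.WGS84, Math.PI * 0.25, -Math.PI * 0.25, -Math.PI, Math.PI);
    // An ordinary rectangle, then grown by 0.05 radians on every side.
    GeoBBox rectangle = GeoBBoxFactory.makeGeoBBox(PlanetModel.WGS84, 0.5, -0.5, -1.0, 1.0);
    GeoBBox expanded = rectangle.expand(0.05);
    System.out.println(band.getRelationship(expanded));
  }
}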
*/ - protected int isGeoAreaShapeInsideShape(final GeoShape geoshape) { + protected int isGeoAreaShapeInsideShape(final GeoShape geoshape) { boolean foundOutside = false; boolean foundInside = false; for (GeoPoint p : getEdgePoints()) { @@ -83,12 +84,15 @@ abstract class GeoBaseAreaShape extends GeoBaseMembershipShape implements GeoAre return SOME_INSIDE; } } - if (!foundInside && !foundOutside) + if (!foundInside && !foundOutside) { return NONE_INSIDE; - if (foundInside && !foundOutside) + } + if (foundInside && !foundOutside) { return ALL_INSIDE; - if (foundOutside && !foundInside) + } + if (foundOutside && !foundInside) { return NONE_INSIDE; + } return SOME_INSIDE; } @@ -107,19 +111,19 @@ abstract class GeoBaseAreaShape extends GeoBaseMembershipShape implements GeoAre return GeoArea.OVERLAPS; } - if (insideGeoAreaShape == ALL_INSIDE && insideShape==ALL_INSIDE) { + if (insideGeoAreaShape == ALL_INSIDE && insideShape == ALL_INSIDE) { return GeoArea.OVERLAPS; } - - if (intersects(geoShape)){ - return GeoArea.OVERLAPS; + + if (intersects(geoShape)) { + return GeoArea.OVERLAPS; } if (insideGeoAreaShape == ALL_INSIDE) { return GeoArea.WITHIN; } - if (insideShape==ALL_INSIDE) { + if (insideShape == ALL_INSIDE) { return GeoArea.CONTAINS; } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseBBox.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseBBox.java index 59562beca87..9552833979c 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseBBox.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseBBox.java @@ -17,20 +17,18 @@ package org.apache.lucene.spatial3d.geom; /** - * All bounding box shapes can derive from this base class, which furnishes - * some common code + * All bounding box shapes can derive from this base class, which furnishes some common code * * @lucene.internal */ abstract class GeoBaseBBox extends GeoBaseAreaShape implements GeoBBox { - /** Construct, given planet model. - *@param planetModel is the planet model. + /** + * Construct, given planet model. + * + * @param planetModel is the planet model. */ public GeoBaseBBox(final PlanetModel planetModel) { super(planetModel); } - - } - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseCircle.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseCircle.java index 0599c91aa5c..e6f369e808a 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseCircle.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseCircle.java @@ -23,12 +23,12 @@ package org.apache.lucene.spatial3d.geom; */ abstract class GeoBaseCircle extends GeoBaseDistanceShape implements GeoCircle { - /** Constructor. - *@param planetModel is the planet model to use. + /** + * Constructor. + * + * @param planetModel is the planet model to use. 
*/ public GeoBaseCircle(final PlanetModel planetModel) { super(planetModel); } - } - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseCompositeAreaShape.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseCompositeAreaShape.java index 986e42ccedb..d23566e43f8 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseCompositeAreaShape.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseCompositeAreaShape.java @@ -17,8 +17,8 @@ package org.apache.lucene.spatial3d.geom; -import java.io.InputStream; import java.io.IOException; +import java.io.InputStream; /** * Base class to create a composite of GeoAreaShapes * * @param is the type of GeoAreaShapes of the composite. * @lucene.internal */ -abstract class GeoBaseCompositeAreaShape extends GeoBaseCompositeMembershipShape implements GeoAreaShape { +abstract class GeoBaseCompositeAreaShape + extends GeoBaseCompositeMembershipShape implements GeoAreaShape { /** All edgepoints inside shape */ - protected final static int ALL_INSIDE = 0; + protected static final int ALL_INSIDE = 0; /** Some edgepoints inside shape */ - protected final static int SOME_INSIDE = 1; + protected static final int SOME_INSIDE = 1; /** No edgepoints inside shape */ - protected final static int NONE_INSIDE = 2; + protected static final int NONE_INSIDE = 2; - /** - * Constructor. - */ + /** Constructor. */ public GeoBaseCompositeAreaShape(PlanetModel planetModel) { super(planetModel); } /** * Constructor for deserialization. + * * @param planetModel is the planet model. * @param inputStream is the input stream. * @param clazz is the class of the generic. */ - public GeoBaseCompositeAreaShape(final PlanetModel planetModel, final InputStream inputStream, final Class clazz) throws IOException { + public GeoBaseCompositeAreaShape( + final PlanetModel planetModel, final InputStream inputStream, final Class clazz) + throws IOException { super(planetModel, inputStream, clazz); } @Override - public boolean intersects(GeoShape geoShape){ - for(GeoAreaShape geoAreaShape : shapes){ - if (geoAreaShape.intersects(geoShape)){ + public boolean intersects(GeoShape geoShape) { + for (GeoAreaShape geoAreaShape : shapes) { + if (geoAreaShape.intersects(geoShape)) { return true; } } @@ -77,31 +79,32 @@ abstract class GeoBaseCompositeAreaShape extends GeoBase return GeoArea.OVERLAPS; } - if (insideGeoAreaShape == ALL_INSIDE && insideShape==ALL_INSIDE) { + if (insideGeoAreaShape == ALL_INSIDE && insideShape == ALL_INSIDE) { return GeoArea.OVERLAPS; } - if (intersects(geoShape)){ - return GeoArea.OVERLAPS; + if (intersects(geoShape)) { + return GeoArea.OVERLAPS; } if (insideGeoAreaShape == ALL_INSIDE) { return GeoArea.WITHIN; } - if (insideShape==ALL_INSIDE) { + if (insideShape == ALL_INSIDE) { return GeoArea.CONTAINS; } return GeoArea.DISJOINT; } - /** Determine the relationship between the GeoAreShape and the - * shape's edgepoints. - *@param geoShape is the shape. - *@return the relationship. + /** + * Determine the relationship between the GeoAreaShape and the shape's edgepoints. + * + * @param geoShape is the shape. + * @return the relationship.
*/ - protected int isShapeInsideGeoAreaShape(final GeoShape geoShape) { + protected int isShapeInsideGeoAreaShape(final GeoShape geoShape) { boolean foundOutside = false; boolean foundInside = false; for (GeoPoint p : geoShape.getEdgePoints()) { @@ -114,21 +117,25 @@ abstract class GeoBaseCompositeAreaShape extends GeoBase return SOME_INSIDE; } } - if (!foundInside && !foundOutside) + if (!foundInside && !foundOutside) { return NONE_INSIDE; - if (foundInside && !foundOutside) + } + if (foundInside && !foundOutside) { return ALL_INSIDE; - if (foundOutside && !foundInside) + } + if (foundOutside && !foundInside) { return NONE_INSIDE; + } return SOME_INSIDE; } - /** Determine the relationship between the GeoAreShape's edgepoints and the - * provided shape. - *@param geoshape is the shape. - *@return the relationship. + /** + * Determine the relationship between the GeoAreaShape's edgepoints and the provided shape. + * + * @param geoshape is the shape. + * @return the relationship. */ - protected int isGeoAreaShapeInsideShape(final GeoShape geoshape) { + protected int isGeoAreaShapeInsideShape(final GeoShape geoshape) { boolean foundOutside = false; boolean foundInside = false; for (GeoPoint p : getEdgePoints()) { @@ -141,12 +148,15 @@ abstract class GeoBaseCompositeAreaShape extends GeoBase return SOME_INSIDE; } } - if (!foundInside && !foundOutside) + if (!foundInside && !foundOutside) { return NONE_INSIDE; - if (foundInside && !foundOutside) + } + if (foundInside && !foundOutside) { return ALL_INSIDE; - if (foundOutside && !foundInside) + } + if (foundOutside && !foundInside) { return NONE_INSIDE; + } return SOME_INSIDE; } } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseCompositeMembershipShape.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseCompositeMembershipShape.java index d4eab4e37ba..44e57bbea60 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseCompositeMembershipShape.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseCompositeMembershipShape.java @@ -17,8 +17,8 @@ package org.apache.lucene.spatial3d.geom; -import java.io.InputStream; import java.io.IOException; +import java.io.InputStream; /** * Base class to create a composite of GeoMembershipShapes * * @lucene.internal */ abstract class GeoBaseCompositeMembershipShape - extends GeoBaseCompositeShape implements GeoMembershipShape{ + extends GeoBaseCompositeShape implements GeoMembershipShape { - /** - * Constructor. - */ + /** Constructor. */ GeoBaseCompositeMembershipShape(PlanetModel planetModel) { super(planetModel); } /** * Constructor for deserialization. + * * @param planetModel is the planet model. * @param inputStream is the input stream. * @param clazz is the class of the generic.
*/ - GeoBaseCompositeMembershipShape(final PlanetModel planetModel, final InputStream inputStream, final Class clazz) throws IOException { + GeoBaseCompositeMembershipShape( + final PlanetModel planetModel, final InputStream inputStream, final Class clazz) + throws IOException { super(planetModel, inputStream, clazz); } - + @Override public double computeOutsideDistance(final DistanceStyle distanceStyle, final GeoPoint point) { return computeOutsideDistance(distanceStyle, point.x, point.y, point.z); } @Override - public double computeOutsideDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { - if (isWithin(x,y,z)) + public double computeOutsideDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { + if (isWithin(x, y, z)) { return 0.0; + } double distance = Double.POSITIVE_INFINITY; for (GeoMembershipShape shape : shapes) { final double normalDistance = shape.computeOutsideDistance(distanceStyle, x, y, z); diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseCompositeShape.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseCompositeShape.java index 0d841c81e8c..8c877068423 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseCompositeShape.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseCompositeShape.java @@ -17,12 +17,12 @@ package org.apache.lucene.spatial3d.geom; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; import java.util.ArrayList; import java.util.Arrays; import java.util.List; -import java.io.InputStream; -import java.io.OutputStream; -import java.io.IOException; /** * Base class to create a composite of GeoShapes. @@ -30,16 +30,13 @@ import java.io.IOException; * @param is the type of GeoShapes of the composite. * @lucene.experimental */ -public abstract class GeoBaseCompositeShape extends BasePlanetObject implements GeoShape { +public abstract class GeoBaseCompositeShape extends BasePlanetObject + implements GeoShape { - /** - * Shape's container - */ + /** Shape's container */ protected final List shapes = new ArrayList<>(); - /** - * Constructor. - */ + /** Constructor. */ public GeoBaseCompositeShape(PlanetModel planetModel) { super(planetModel); } @@ -51,7 +48,8 @@ public abstract class GeoBaseCompositeShape extends BasePlan */ public void addShape(final T shape) { if (!shape.getPlanetModel().equals(planetModel)) { - throw new IllegalArgumentException("Cannot add a shape into a composite with different planet models."); + throw new IllegalArgumentException( + "Cannot add a shape into a composite with different planet models."); } shapes.add(shape); } @@ -76,11 +74,14 @@ public abstract class GeoBaseCompositeShape extends BasePlan /** * Constructor for deserialization. + * * @param planetModel is the planet model. * @param inputStream is the input stream. * @param clazz is the class of the generic. 
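The composite base classes in this hunk delegate membership and intersection tests to their child shapes (the addShape and isWithin logic appears just below). A sketch of typical use through the public GeoCompositePolygon subclass; that class name and the polygon factory call are assumed from the wider spatial3d API, not from this hunk:

import java.util.Arrays;
import java.util.List;
import org.apache.lucene.spatial3d.geom.GeoCompositePolygon;
import org.apache.lucene.spatial3d.geom.GeoPoint;
import org.apache.lucene.spatial3d.geom.GeoPolygonFactory;
import org.apache.lucene.spatial3d.geom.PlanetModel;

public class CompositeShapeSketch {
  public static void main(String[] args) {
    PlanetModel pm = PlanetModel.SPHERE;
    // A small triangle (lat/lon in radians, counter-clockwise order).
    List<GeoPoint> triangle =
        Arrays.asList(
            new GeoPoint(pm, 0.0, 0.0), new GeoPoint(pm, 0.0, 0.5), new GeoPoint(pm, 0.5, 0.25));
    GeoCompositePolygon composite = new GeoCompositePolygon(pm);
    // addShape() rejects children built on a different planet model.
    composite.addShape(GeoPolygonFactory.makeGeoPolygon(pm, triangle));
    // Membership is the union of the children: true if any child claims the point.
    System.out.println(composite.isWithin(new GeoPoint(pm, 0.1, 0.25)));
  }
}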
*/ - public GeoBaseCompositeShape(final PlanetModel planetModel, final InputStream inputStream, final Class clazz) throws IOException { + public GeoBaseCompositeShape( + final PlanetModel planetModel, final InputStream inputStream, final Class clazz) + throws IOException { this(planetModel); final T[] array = SerializableObject.readHeterogeneousArray(planetModel, inputStream, clazz); for (final SerializableObject member : array) { @@ -101,8 +102,9 @@ public abstract class GeoBaseCompositeShape extends BasePlan @Override public boolean isWithin(final double x, final double y, final double z) { for (GeoShape shape : shapes) { - if (shape.isWithin(x, y, z)) + if (shape.isWithin(x, y, z)) { return true; + } } return false; } @@ -117,10 +119,12 @@ public abstract class GeoBaseCompositeShape extends BasePlan } @Override - public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) { + public boolean intersects( + final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) { for (GeoShape shape : shapes) { - if (shape.intersects(p, notablePoints, bounds)) + if (shape.intersects(p, notablePoints, bounds)) { return true; + } } return false; } @@ -139,8 +143,9 @@ public abstract class GeoBaseCompositeShape extends BasePlan @Override public boolean equals(Object o) { - if (!(o instanceof GeoBaseCompositeShape)) + if (!(o instanceof GeoBaseCompositeShape)) { return false; + } GeoBaseCompositeShape other = (GeoBaseCompositeShape) o; return super.equals(other) && shapes.equals(other.shapes); } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseDistanceShape.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseDistanceShape.java index 2bdd54d4462..920389b9212 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseDistanceShape.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseDistanceShape.java @@ -17,15 +17,17 @@ package org.apache.lucene.spatial3d.geom; /** - * Distance shapes have capabilities of both geohashing and distance - * computation (which also includes point membership determination). + * Distance shapes have capabilities of both geohashing and distance computation (which also + * includes point membership determination). * * @lucene.experimental */ public abstract class GeoBaseDistanceShape extends GeoBaseAreaShape implements GeoDistanceShape { - /** Constructor. - *@param planetModel is the planet model to use. + /** + * Constructor. + * + * @param planetModel is the planet model to use. */ public GeoBaseDistanceShape(final PlanetModel planetModel) { super(planetModel); @@ -42,15 +44,17 @@ public abstract class GeoBaseDistanceShape extends GeoBaseAreaShape implements G } @Override - public double computeDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { - if (!isWithin(x,y,z)) { + public double computeDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { + if (!isWithin(x, y, z)) { return Double.POSITIVE_INFINITY; } return distance(distanceStyle, x, y, z); } /** Called by a {@code computeDistance} method if X/Y/Z is within this shape. 
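The distance contract spelled out above is easiest to see from the caller's side: computeDistance answers a finite internal distance for points inside the shape and Double.POSITIVE_INFINITY otherwise, while computeOutsideDistance (shown further below) is the mirror image. A sketch with assumed, illustrative values:

import org.apache.lucene.spatial3d.geom.DistanceStyle;
import org.apache.lucene.spatial3d.geom.GeoCircle;
import org.apache.lucene.spatial3d.geom.GeoCircleFactory;
import org.apache.lucene.spatial3d.geom.GeoPoint;
import org.apache.lucene.spatial3d.geom.PlanetModel;

public class DistanceShapeSketch {
  public static void main(String[] args) {
    GeoCircle circle = GeoCircleFactory.makeGeoCircle(PlanetModel.SPHERE, 0.0, 0.0, 0.2);
    GeoPoint inside = new GeoPoint(PlanetModel.SPHERE, 0.0, 0.05);
    GeoPoint outside = new GeoPoint(PlanetModel.SPHERE, 0.0, 1.0);
    // Finite for the inside point ...
    System.out.println(circle.computeDistance(DistanceStyle.ARC, inside));
    // ... and POSITIVE_INFINITY for the outside point.
    System.out.println(circle.computeDistance(DistanceStyle.ARC, outside));
    // computeOutsideDistance is the reverse: 0.0 for points within the shape.
    System.out.println(circle.computeOutsideDistance(DistanceStyle.ARC, inside));
  }
}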
*/ - protected abstract double distance(final DistanceStyle distanceStyle, final double x, final double y, final double z); + protected abstract double distance( + final DistanceStyle distanceStyle, final double x, final double y, final double z); @Override public double computeDeltaDistance(final DistanceStyle distanceStyle, final GeoPoint point) { @@ -58,29 +62,33 @@ public abstract class GeoBaseDistanceShape extends GeoBaseAreaShape implements G } @Override - public double computeDeltaDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { - if (!isWithin(x,y,z)) { + public double computeDeltaDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { + if (!isWithin(x, y, z)) { return Double.POSITIVE_INFINITY; } return deltaDistance(distanceStyle, x, y, z); } /** Called by a {@code computeDeltaDistance} method if X/Y/Z is within this shape. */ - protected double deltaDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { + protected double deltaDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { return distance(distanceStyle, x, y, z) * 2.0; } @Override - public void getDistanceBounds(final Bounds bounds, final DistanceStyle distanceStyle, final double distanceValue) { + public void getDistanceBounds( + final Bounds bounds, final DistanceStyle distanceStyle, final double distanceValue) { if (distanceValue == Double.POSITIVE_INFINITY) { getBounds(bounds); return; } distanceBounds(bounds, distanceStyle, distanceValue); } - - /** Called by a {@code getDistanceBounds} method if distanceValue is not Double.POSITIVE_INFINITY. */ - protected abstract void distanceBounds(final Bounds bounds, final DistanceStyle distanceStyle, final double distanceValue); + /** + * Called by a {@code getDistanceBounds} method if distanceValue is not Double.POSITIVE_INFINITY. + */ + protected abstract void distanceBounds( + final Bounds bounds, final DistanceStyle distanceStyle, final double distanceValue); } - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseMembershipShape.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseMembershipShape.java index 831a7c6af9e..9e3dc724a6c 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseMembershipShape.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseMembershipShape.java @@ -17,15 +17,17 @@ package org.apache.lucene.spatial3d.geom; /** - * Membership shapes have capabilities of both geohashing and membership - * determination. This is a useful baseclass for them. + * Membership shapes have capabilities of both geohashing and membership determination. This is a + * useful baseclass for them. * * @lucene.experimental */ public abstract class GeoBaseMembershipShape extends GeoBaseShape implements GeoMembershipShape { - /** Constructor. - *@param planetModel is the planet model to use. + /** + * Constructor. + * + * @param planetModel is the planet model to use. 
*/ public GeoBaseMembershipShape(final PlanetModel planetModel) { super(planetModel); @@ -42,15 +44,15 @@ public abstract class GeoBaseMembershipShape extends GeoBaseShape implements Geo } @Override - public double computeOutsideDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { - if (isWithin(x,y,z)) { + public double computeOutsideDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { + if (isWithin(x, y, z)) { return 0.0; } - return outsideDistance(distanceStyle, x,y,z); + return outsideDistance(distanceStyle, x, y, z); } /** Called by a {@code computeOutsideDistance} method if X/Y/Z is not within this shape. */ - protected abstract double outsideDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z); - + protected abstract double outsideDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z); } - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBasePath.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBasePath.java index c726b3af2ba..69a0f671f9f 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBasePath.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBasePath.java @@ -23,12 +23,12 @@ package org.apache.lucene.spatial3d.geom; */ abstract class GeoBasePath extends GeoBaseDistanceShape implements GeoPath { - /** Constructor. - *@param planetModel is the planet model to use. + /** + * Constructor. + * + * @param planetModel is the planet model to use. */ public GeoBasePath(final PlanetModel planetModel) { super(planetModel); } - } - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBasePolygon.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBasePolygon.java index 7fe8c98d049..bf453840016 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBasePolygon.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBasePolygon.java @@ -23,12 +23,12 @@ package org.apache.lucene.spatial3d.geom; */ abstract class GeoBasePolygon extends GeoBaseAreaShape implements GeoPolygon { - /** Constructor. - *@param planetModel is the planet model to use. + /** + * Constructor. + * + * @param planetModel is the planet model to use. */ public GeoBasePolygon(final PlanetModel planetModel) { super(planetModel); } - } - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseShape.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseShape.java index 54896fc67f3..a5992392563 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseShape.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoBaseShape.java @@ -23,8 +23,10 @@ package org.apache.lucene.spatial3d.geom; */ public abstract class GeoBaseShape extends BasePlanetObject implements GeoShape { - /** Constructor. - *@param planetModel is the planet model to use. + /** + * Constructor. + * + * @param planetModel is the planet model to use. 
*/ public GeoBaseShape(final PlanetModel planetModel) { super(planetModel); @@ -33,12 +35,10 @@ public abstract class GeoBaseShape extends BasePlanetObject implements GeoShape @Override public void getBounds(Bounds bounds) { if (isWithin(planetModel.NORTH_POLE)) { - bounds.noTopLatitudeBound().noLongitudeBound() - .addPoint(planetModel.NORTH_POLE); + bounds.noTopLatitudeBound().noLongitudeBound().addPoint(planetModel.NORTH_POLE); } if (isWithin(planetModel.SOUTH_POLE)) { - bounds.noBottomLatitudeBound().noLongitudeBound() - .addPoint(planetModel.SOUTH_POLE); + bounds.noBottomLatitudeBound().noLongitudeBound().addPoint(planetModel.SOUTH_POLE); } if (isWithin(planetModel.MIN_X_POLE)) { bounds.addPoint(planetModel.MIN_X_POLE); @@ -53,7 +53,4 @@ public abstract class GeoBaseShape extends BasePlanetObject implements GeoShape bounds.addPoint(planetModel.MAX_Y_POLE); } } - } - - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoCircle.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoCircle.java index b05dff6af6c..07510d026e4 100755 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoCircle.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoCircle.java @@ -21,5 +21,4 @@ package org.apache.lucene.spatial3d.geom; * * @lucene.experimental */ -public interface GeoCircle extends GeoDistanceShape, GeoSizeable { -} +public interface GeoCircle extends GeoDistanceShape, GeoSizeable {} diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoCircleFactory.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoCircleFactory.java index f32f366e62e..0ecd428b4d7 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoCircleFactory.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoCircleFactory.java @@ -22,12 +22,12 @@ package org.apache.lucene.spatial3d.geom; * @lucene.experimental */ public class GeoCircleFactory { - private GeoCircleFactory() { - } + private GeoCircleFactory() {} /** - * Create a GeoCircle from a center and a cutoff angle. The resulting shape is a circle in spherical - * planets, otherwise is an ellipse. It is the most efficient shape to represent a circle on a sphere. + * Create a GeoCircle from a center and a cutoff angle. The resulting shape is a circle in + * spherical planets, otherwise it is an ellipse. It is the most efficient shape to represent a + * circle on a sphere. * * @param planetModel is the planet model. * @param latitude is the center latitude. @@ -35,7 +35,11 @@ public class GeoCircleFactory { * @param cutoffAngle is the cutoff angle. * @return a GeoCircle corresponding to what was specified. */ - public static GeoCircle makeGeoCircle(final PlanetModel planetModel, final double latitude, final double longitude, final double cutoffAngle) { + public static GeoCircle makeGeoCircle( + final PlanetModel planetModel, + final double latitude, + final double longitude, + final double cutoffAngle) { if (cutoffAngle < Vector.MINIMUM_ANGULAR_RESOLUTION) { return new GeoDegeneratePoint(planetModel, latitude, longitude); } @@ -43,30 +47,35 @@ public class GeoCircleFactory { } /** - * Create an GeoCircle from a center, a radius and a desired accuracy. It is the most accurate shape to represent - * a circle in non-spherical planets. - *

    - * The accuracy of the circle is defined as the maximum linear distance between any point on the - * surface circle and planes that describe the circle. Therefore, with planet model WSG84, since the - * radius of earth is 6,371,000 meters, an accuracy of 1e-6 corresponds to 6.3 meters. - * For an accuracy of 1.0 meters, the accuracy value would be 1.6e-7. The maximum accuracy possible is 1e-12. - *

- * Note that this method may thrown an IllegalArgumentException if the circle being specified cannot be - * represented by plane approximation given the planet model provided. + * Create a GeoCircle from a center, a radius and a desired accuracy. It is the most accurate + * shape to represent a circle in non-spherical planets. * + *

The accuracy of the circle is defined as the maximum linear distance between any point on + * the surface circle and planes that describe the circle. Therefore, with planet model WGS84, + * since the radius of the earth is 6,371,000 meters, an accuracy of 1e-6 corresponds to 6.3 meters. + * For an accuracy of 1.0 meters, the accuracy value would be 1.6e-7. The maximum accuracy + * possible is 1e-12. + *
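To make the accuracy arithmetic above concrete (an illustrative back-of-envelope, not patch code): planet units are normalized to the mean earth radius, so a linear accuracy value maps to meters by multiplying with roughly 6,371,000.

import org.apache.lucene.spatial3d.geom.GeoCircle;
import org.apache.lucene.spatial3d.geom.GeoCircleFactory;
import org.apache.lucene.spatial3d.geom.PlanetModel;

public class ExactCircleAccuracySketch {
  public static void main(String[] args) {
    double earthRadiusMeters = 6_371_000.0;
    double metersFor1e6 = 1e-6 * earthRadiusMeters; // ~6.4 m, the "6.3 meters" above
    double accuracyForOneMeter = 1.0 / earthRadiusMeters; // ~1.57e-7, the "1.6e-7" above
    System.out.println(metersFor1e6 + " " + accuracyForOneMeter);
    // Center, radius and accuracy below are illustrative values in radians/planet units.
    GeoCircle exact =
        GeoCircleFactory.makeExactGeoCircle(
            PlanetModel.WGS84, 0.7, -1.2, 0.01, accuracyForOneMeter);
    System.out.println(exact);
  }
}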

Note that this method may throw an IllegalArgumentException if the circle being specified + * cannot be represented by plane approximation given the planet model provided. * * @param planetModel is the planet model. * @param latitude is the center latitude. * @param longitude is the center longitude. * @param radius is the radius surface distance. - * @param accuracy is the maximum linear distance between the circle approximation and the real circle, - * as computed using the Vincenty formula. + * @param accuracy is the maximum linear distance between the circle approximation and the real + * circle, as computed using the Vincenty formula. * @return a GeoCircle corresponding to what was specified. */ - public static GeoCircle makeExactGeoCircle(final PlanetModel planetModel, final double latitude, final double longitude, final double radius, final double accuracy) { + public static GeoCircle makeExactGeoCircle( + final PlanetModel planetModel, + final double latitude, + final double longitude, + final double radius, + final double accuracy) { if (radius < Vector.MINIMUM_ANGULAR_RESOLUTION) { return new GeoDegeneratePoint(planetModel, latitude, longitude); } return new GeoExactCircle(planetModel, latitude, longitude, radius, accuracy); } - } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoComplexPolygon.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoComplexPolygon.java index af6e7854d1f..cf46234b03a 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoComplexPolygon.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoComplexPolygon.java @@ -16,35 +16,35 @@ */ package org.apache.lucene.spatial3d.geom; -import java.util.Arrays; -import java.util.List; -import java.util.ArrayList; -import java.util.Set; -import java.util.HashSet; -import java.util.Collections; +import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; /** - * GeoComplexPolygon objects are structures designed to handle very large numbers of edges. - * They perform very well in this case compared to the alternatives, which all have O(N) evaluation - * and O(N^2) setup times. Complex polygons have O(N) setup times and best case O(log(N)) - * evaluation times. + * GeoComplexPolygon objects are structures designed to handle very large numbers of edges. They + * perform very well in this case compared to the alternatives, which all have O(N) evaluation and + * O(N^2) setup times. Complex polygons have O(N) setup times and best case O(log(N)) evaluation + * times. * - * The tradeoff is that these objects perform object creation when evaluating intersects() and + *

    The tradeoff is that these objects perform object creation when evaluating intersects() and * isWithin(). * * @lucene.internal */ class GeoComplexPolygon extends GeoBasePolygon { - + private final Tree xTree; private final Tree yTree; private final Tree zTree; - + private final List> pointsList; - + private final boolean testPoint1InSet; private final GeoPoint testPoint1; @@ -60,26 +60,33 @@ class GeoComplexPolygon extends GeoBasePolygon { private final GeoPoint[] edgePoints; private final Edge[] shapeStartEdges; - - private final static double NEAR_EDGE_CUTOFF = -Vector.MINIMUM_RESOLUTION * 10000.0; - + + private static final double NEAR_EDGE_CUTOFF = -Vector.MINIMUM_RESOLUTION * 10000.0; + /** - * Create a complex polygon from multiple lists of points, and a single point which is known to be in or out of - * set. - *@param planetModel is the planet model. - *@param pointsList is the list of lists of edge points. The edge points describe edges, and have an implied - * return boundary, so that N edges require N points. These points have furthermore been filtered so that - * no adjacent points are identical (within the bounds of the definition used by this package). It is assumed - * that no edges intersect, but the structure can contain both outer rings as well as holes. - *@param testPoint is the point whose in/out of setness is known. - *@param testPointInSet is true if the test point is considered "within" the polygon. + * Create a complex polygon from multiple lists of points, and a single point which is known to be + * in or out of set. + * + * @param planetModel is the planet model. + * @param pointsList is the list of lists of edge points. The edge points describe edges, and have + * an implied return boundary, so that N edges require N points. These points have furthermore + * been filtered so that no adjacent points are identical (within the bounds of the definition + * used by this package). It is assumed that no edges intersect, but the structure can contain + * both outer rings as well as holes. + * @param testPoint is the point whose in/out of setness is known. + * @param testPointInSet is true if the test point is considered "within" the polygon. 
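GeoComplexPolygon itself is package-private, so in practice large polygons reach this code through the public polygon factory. A sketch under that assumption (GeoPolygonFactory.makeLargeGeoPolygon and its PolygonDescription helper come from the wider spatial3d API, not this hunk; coordinates are illustrative radians):

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.apache.lucene.spatial3d.geom.GeoPoint;
import org.apache.lucene.spatial3d.geom.GeoPolygon;
import org.apache.lucene.spatial3d.geom.GeoPolygonFactory;
import org.apache.lucene.spatial3d.geom.GeoPolygonFactory.PolygonDescription;
import org.apache.lucene.spatial3d.geom.PlanetModel;

public class LargePolygonSketch {
  public static void main(String[] args) {
    PlanetModel pm = PlanetModel.WGS84;
    List<GeoPoint> ring =
        Arrays.asList(
            new GeoPoint(pm, -0.1, -0.1),
            new GeoPoint(pm, -0.1, 0.1),
            new GeoPoint(pm, 0.1, 0.1),
            new GeoPoint(pm, 0.1, -0.1));
    // makeLargeGeoPolygon selects the complex-polygon implementation, trading
    // per-call object creation for O(log(N)) point-in-set checks on huge edge counts.
    GeoPolygon polygon =
        GeoPolygonFactory.makeLargeGeoPolygon(
            pm, Collections.singletonList(new PolygonDescription(ring)));
    System.out.println(polygon.isWithin(new GeoPoint(pm, 0.0, 0.0)));
  }
}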
*/ - public GeoComplexPolygon(final PlanetModel planetModel, final List> pointsList, final GeoPoint testPoint, final boolean testPointInSet) { + public GeoComplexPolygon( + final PlanetModel planetModel, + final List> pointsList, + final GeoPoint testPoint, + final boolean testPointInSet) { super(planetModel); - - assert planetModel.pointOnSurface(testPoint.x, testPoint.y, testPoint.z) : "Test point is not on the ellipsoid surface"; - - this.pointsList = pointsList; // For serialization + + assert planetModel.pointOnSurface(testPoint.x, testPoint.y, testPoint.z) + : "Test point is not on the ellipsoid surface"; + + this.pointsList = pointsList; // For serialization // Construct and index edges this.edgePoints = new GeoPoint[pointsList.size()]; @@ -88,12 +95,13 @@ class GeoComplexPolygon extends GeoBasePolygon { int edgePointIndex = 0; for (final List shapePoints : pointsList) { allEdges.ensureCapacity(allEdges.size() + shapePoints.size()); - GeoPoint lastGeoPoint = shapePoints.get(shapePoints.size()-1); + GeoPoint lastGeoPoint = shapePoints.get(shapePoints.size() - 1); edgePoints[edgePointIndex] = lastGeoPoint; Edge lastEdge = null; Edge firstEdge = null; for (final GeoPoint thisGeoPoint : shapePoints) { - assert planetModel.pointOnSurface(thisGeoPoint) : "Polygon edge point must be on surface; "+thisGeoPoint+" is not"; + assert planetModel.pointOnSurface(thisGeoPoint) + : "Polygon edge point must be on surface; " + thisGeoPoint + " is not"; final Edge edge = new Edge(planetModel, lastGeoPoint, thisGeoPoint); if (edge.isWithin(testPoint.x, testPoint.y, testPoint.z)) { throw new IllegalArgumentException("Test point is on polygon edge: not allowed"); @@ -127,43 +135,50 @@ class GeoComplexPolygon extends GeoBasePolygon { this.testPoint1FixedYPlane = new Plane(0.0, 1.0, 0.0, -testPoint1.y); this.testPoint1FixedXPlane = new Plane(1.0, 0.0, 0.0, -testPoint1.x); this.testPoint1FixedZPlane = new Plane(0.0, 0.0, 1.0, -testPoint1.z); - + Plane testPoint1FixedYAbovePlane = new Plane(testPoint1FixedYPlane, true); - - // We compare the plane's Y value (etc), which is -D, with the planet's maximum and minimum Y poles. - - if (-testPoint1FixedYAbovePlane.D - planetModel.getMaximumYValue() > NEAR_EDGE_CUTOFF || planetModel.getMinimumYValue() + testPoint1FixedYAbovePlane.D > NEAR_EDGE_CUTOFF) { - testPoint1FixedYAbovePlane = null; + + // We compare the plane's Y value (etc), which is -D, with the planet's maximum and minimum Y + // poles. 
+ + if (-testPoint1FixedYAbovePlane.D - planetModel.getMaximumYValue() > NEAR_EDGE_CUTOFF + || planetModel.getMinimumYValue() + testPoint1FixedYAbovePlane.D > NEAR_EDGE_CUTOFF) { + testPoint1FixedYAbovePlane = null; } this.testPoint1FixedYAbovePlane = testPoint1FixedYAbovePlane; - + Plane testPoint1FixedYBelowPlane = new Plane(testPoint1FixedYPlane, false); - if (-testPoint1FixedYBelowPlane.D - planetModel.getMaximumYValue() > NEAR_EDGE_CUTOFF || planetModel.getMinimumYValue() + testPoint1FixedYBelowPlane.D > NEAR_EDGE_CUTOFF) { - testPoint1FixedYBelowPlane = null; + if (-testPoint1FixedYBelowPlane.D - planetModel.getMaximumYValue() > NEAR_EDGE_CUTOFF + || planetModel.getMinimumYValue() + testPoint1FixedYBelowPlane.D > NEAR_EDGE_CUTOFF) { + testPoint1FixedYBelowPlane = null; } this.testPoint1FixedYBelowPlane = testPoint1FixedYBelowPlane; - + Plane testPoint1FixedXAbovePlane = new Plane(testPoint1FixedXPlane, true); - if (-testPoint1FixedXAbovePlane.D - planetModel.getMaximumXValue() > NEAR_EDGE_CUTOFF || planetModel.getMinimumXValue() + testPoint1FixedXAbovePlane.D > NEAR_EDGE_CUTOFF) { - testPoint1FixedXAbovePlane = null; + if (-testPoint1FixedXAbovePlane.D - planetModel.getMaximumXValue() > NEAR_EDGE_CUTOFF + || planetModel.getMinimumXValue() + testPoint1FixedXAbovePlane.D > NEAR_EDGE_CUTOFF) { + testPoint1FixedXAbovePlane = null; } this.testPoint1FixedXAbovePlane = testPoint1FixedXAbovePlane; - + Plane testPoint1FixedXBelowPlane = new Plane(testPoint1FixedXPlane, false); - if (-testPoint1FixedXBelowPlane.D - planetModel.getMaximumXValue() > NEAR_EDGE_CUTOFF || planetModel.getMinimumXValue() + testPoint1FixedXBelowPlane.D > NEAR_EDGE_CUTOFF) { - testPoint1FixedXBelowPlane = null; + if (-testPoint1FixedXBelowPlane.D - planetModel.getMaximumXValue() > NEAR_EDGE_CUTOFF + || planetModel.getMinimumXValue() + testPoint1FixedXBelowPlane.D > NEAR_EDGE_CUTOFF) { + testPoint1FixedXBelowPlane = null; } this.testPoint1FixedXBelowPlane = testPoint1FixedXBelowPlane; - + Plane testPoint1FixedZAbovePlane = new Plane(testPoint1FixedZPlane, true); - if (-testPoint1FixedZAbovePlane.D - planetModel.getMaximumZValue() > NEAR_EDGE_CUTOFF ||planetModel.getMinimumZValue() + testPoint1FixedZAbovePlane.D > NEAR_EDGE_CUTOFF) { - testPoint1FixedZAbovePlane = null; + if (-testPoint1FixedZAbovePlane.D - planetModel.getMaximumZValue() > NEAR_EDGE_CUTOFF + || planetModel.getMinimumZValue() + testPoint1FixedZAbovePlane.D > NEAR_EDGE_CUTOFF) { + testPoint1FixedZAbovePlane = null; } this.testPoint1FixedZAbovePlane = testPoint1FixedZAbovePlane; - + Plane testPoint1FixedZBelowPlane = new Plane(testPoint1FixedZPlane, false); - if (-testPoint1FixedZBelowPlane.D - planetModel.getMaximumZValue() > NEAR_EDGE_CUTOFF || planetModel.getMinimumZValue() + testPoint1FixedZBelowPlane.D > NEAR_EDGE_CUTOFF) { - testPoint1FixedZBelowPlane = null; + if (-testPoint1FixedZBelowPlane.D - planetModel.getMaximumZValue() > NEAR_EDGE_CUTOFF + || planetModel.getMinimumZValue() + testPoint1FixedZBelowPlane.D > NEAR_EDGE_CUTOFF) { + testPoint1FixedZBelowPlane = null; } this.testPoint1FixedZBelowPlane = testPoint1FixedZBelowPlane; @@ -173,25 +188,30 @@ class GeoComplexPolygon extends GeoBasePolygon { /** * Constructor for deserialization. + * * @param planetModel is the planet model. * @param inputStream is the input stream. 
*/ - public GeoComplexPolygon(final PlanetModel planetModel, final InputStream inputStream) throws IOException { - this(planetModel, - readPointsList(planetModel, inputStream), - new GeoPoint(planetModel, inputStream), - SerializableObject.readBoolean(inputStream)); + public GeoComplexPolygon(final PlanetModel planetModel, final InputStream inputStream) + throws IOException { + this( + planetModel, + readPointsList(planetModel, inputStream), + new GeoPoint(planetModel, inputStream), + SerializableObject.readBoolean(inputStream)); } - private static List> readPointsList(final PlanetModel planetModel, final InputStream inputStream) throws IOException { + private static List> readPointsList( + final PlanetModel planetModel, final InputStream inputStream) throws IOException { final int count = SerializableObject.readInt(inputStream); final List> array = new ArrayList<>(count); for (int i = 0; i < count; i++) { - array.add(java.util.Arrays.asList(SerializableObject.readPointArray(planetModel, inputStream))); + array.add( + java.util.Arrays.asList(SerializableObject.readPointArray(planetModel, inputStream))); } return array; } - + @Override public void write(final OutputStream outputStream) throws IOException { writePointsList(outputStream, pointsList); @@ -199,253 +219,529 @@ class GeoComplexPolygon extends GeoBasePolygon { SerializableObject.writeBoolean(outputStream, testPoint1InSet); } - private static void writePointsList(final OutputStream outputStream, final List> pointsList) throws IOException { + private static void writePointsList( + final OutputStream outputStream, final List> pointsList) throws IOException { SerializableObject.writeInt(outputStream, pointsList.size()); for (final List points : pointsList) { SerializableObject.writePointArray(outputStream, points); } } - + @Override public boolean isWithin(final double x, final double y, final double z) { - //System.out.println("IsWithin() for ["+x+","+y+","+z+"]"); - return isInSet(x, y, z, - testPoint1, - testPoint1InSet, - testPoint1FixedXPlane, testPoint1FixedXAbovePlane, testPoint1FixedXBelowPlane, - testPoint1FixedYPlane, testPoint1FixedYAbovePlane, testPoint1FixedYBelowPlane, - testPoint1FixedZPlane, testPoint1FixedZAbovePlane, testPoint1FixedZBelowPlane); + // System.out.println("IsWithin() for [" + x + "," + y + "," + z + "]"); + return isInSet( + x, + y, + z, + testPoint1, + testPoint1InSet, + testPoint1FixedXPlane, + testPoint1FixedXAbovePlane, + testPoint1FixedXBelowPlane, + testPoint1FixedYPlane, + testPoint1FixedYAbovePlane, + testPoint1FixedYBelowPlane, + testPoint1FixedZPlane, + testPoint1FixedZAbovePlane, + testPoint1FixedZBelowPlane); } - - /** Given a test point, whether it is in set, and the associated planes, figure out if another point - * is in set or not. - */ - private boolean isInSet(final double x, final double y, final double z, - final GeoPoint testPoint, - final boolean testPointInSet, - final Plane testPointFixedXPlane, final Plane testPointFixedXAbovePlane, final Plane testPointFixedXBelowPlane, - final Plane testPointFixedYPlane, final Plane testPointFixedYAbovePlane, final Plane testPointFixedYBelowPlane, - final Plane testPointFixedZPlane, final Plane testPointFixedZAbovePlane, final Plane testPointFixedZBelowPlane) { - //System.out.println("\nIsInSet called for ["+x+","+y+","+z+"], testPoint="+testPoint+"; is in set? "+testPointInSet); + /** + * Given a test point, whether it is in set, and the associated planes, figure out if another + * point is in set or not. 
+ */ + private boolean isInSet( + final double x, + final double y, + final double z, + final GeoPoint testPoint, + final boolean testPointInSet, + final Plane testPointFixedXPlane, + final Plane testPointFixedXAbovePlane, + final Plane testPointFixedXBelowPlane, + final Plane testPointFixedYPlane, + final Plane testPointFixedYAbovePlane, + final Plane testPointFixedYBelowPlane, + final Plane testPointFixedZPlane, + final Plane testPointFixedZAbovePlane, + final Plane testPointFixedZBelowPlane) { + + // System.out.println("\nIsInSet called for [" + x + "," + y + "," + z + "], testPoint=" + // + testPoint + "; is in set? " + testPointInSet); // If we're right on top of the point, we know the answer. if (testPoint.isNumericallyIdentical(x, y, z)) { return testPointInSet; } - + // If we're right on top of any of the test planes, we navigate solely on that plane. - if (testPointFixedYAbovePlane != null && testPointFixedYBelowPlane != null && testPointFixedYPlane.evaluateIsZero(x, y, z)) { + if (testPointFixedYAbovePlane != null + && testPointFixedYBelowPlane != null + && testPointFixedYPlane.evaluateIsZero(x, y, z)) { // Use the XZ plane exclusively. - //System.out.println(" Using XZ plane alone"); - final CountingEdgeIterator crossingEdgeIterator = createLinearCrossingEdgeIterator(testPoint, testPointFixedYPlane, testPointFixedYAbovePlane, testPointFixedYBelowPlane, x, y, z); - // Traverse our way from the test point to the check point. Use the y tree because that's fixed. + // System.out.println(" Using XZ plane alone"); + final CountingEdgeIterator crossingEdgeIterator = + createLinearCrossingEdgeIterator( + testPoint, + testPointFixedYPlane, + testPointFixedYAbovePlane, + testPointFixedYBelowPlane, + x, + y, + z); + // Traverse our way from the test point to the check point. Use the y tree because that's + // fixed. yTree.traverse(crossingEdgeIterator, testPoint.y); - return crossingEdgeIterator.isOnEdge() || (((crossingEdgeIterator.getCrossingCount() & 1) == 0)?testPointInSet:!testPointInSet); - } else if (testPointFixedXAbovePlane != null && testPointFixedXBelowPlane != null && testPointFixedXPlane.evaluateIsZero(x, y, z)) { + return crossingEdgeIterator.isOnEdge() + || (((crossingEdgeIterator.getCrossingCount() & 1) == 0) + ? testPointInSet + : !testPointInSet); + } else if (testPointFixedXAbovePlane != null + && testPointFixedXBelowPlane != null + && testPointFixedXPlane.evaluateIsZero(x, y, z)) { // Use the YZ plane exclusively. - //System.out.println(" Using YZ plane alone"); - final CountingEdgeIterator crossingEdgeIterator = createLinearCrossingEdgeIterator(testPoint, testPointFixedXPlane, testPointFixedXAbovePlane, testPointFixedXBelowPlane, x, y, z); - // Traverse our way from the test point to the check point. Use the x tree because that's fixed. + // System.out.println(" Using YZ plane alone"); + final CountingEdgeIterator crossingEdgeIterator = + createLinearCrossingEdgeIterator( + testPoint, + testPointFixedXPlane, + testPointFixedXAbovePlane, + testPointFixedXBelowPlane, + x, + y, + z); + // Traverse our way from the test point to the check point. Use the x tree because that's + // fixed. 
xTree.traverse(crossingEdgeIterator, testPoint.x); - return crossingEdgeIterator.isOnEdge() || (((crossingEdgeIterator.getCrossingCount() & 1) == 0)?testPointInSet:!testPointInSet); - } else if (testPointFixedZAbovePlane != null && testPointFixedZBelowPlane != null && testPointFixedZPlane.evaluateIsZero(x, y, z)) { - //System.out.println(" Using XY plane alone"); - final CountingEdgeIterator crossingEdgeIterator = createLinearCrossingEdgeIterator(testPoint, testPointFixedZPlane, testPointFixedZAbovePlane, testPointFixedZBelowPlane, x, y, z); - // Traverse our way from the test point to the check point. Use the z tree because that's fixed. + return crossingEdgeIterator.isOnEdge() + || (((crossingEdgeIterator.getCrossingCount() & 1) == 0) + ? testPointInSet + : !testPointInSet); + } else if (testPointFixedZAbovePlane != null + && testPointFixedZBelowPlane != null + && testPointFixedZPlane.evaluateIsZero(x, y, z)) { + // System.out.println(" Using XY plane alone"); + final CountingEdgeIterator crossingEdgeIterator = + createLinearCrossingEdgeIterator( + testPoint, + testPointFixedZPlane, + testPointFixedZAbovePlane, + testPointFixedZBelowPlane, + x, + y, + z); + // Traverse our way from the test point to the check point. Use the z tree because that's + // fixed. zTree.traverse(crossingEdgeIterator, testPoint.z); - return crossingEdgeIterator.isOnEdge() || (((crossingEdgeIterator.getCrossingCount() & 1) == 0)?testPointInSet:!testPointInSet); + return crossingEdgeIterator.isOnEdge() + || (((crossingEdgeIterator.getCrossingCount() & 1) == 0) + ? testPointInSet + : !testPointInSet); } else { - //System.out.println(" Using two planes"); + // System.out.println(" Using two planes"); // This is the expensive part!! - // Changing the code below has an enormous impact on the queries per second we see with the benchmark. - - // We need to use two planes to get there. We don't know which two planes will do it but we can figure it out. + // Changing the code below has an enormous impact on the queries per second we see with the + // benchmark. + + // We need to use two planes to get there. We don't know which two planes will do it but we + // can figure it out. 
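The crossing-count checks above implement the Jordan-curve even-odd rule: starting from a reference point whose membership is known, each edge crossing on the way to the target point flips membership. Restated in isolation (illustrative only, not patch code):

public class CrossingParitySketch {
  // Mirrors the parity expression used above: points sitting on an edge count as
  // inside; otherwise an even crossing count preserves the reference membership
  // and an odd count flips it.
  static boolean inSet(boolean referenceInSet, int crossings, boolean onEdge) {
    if (onEdge) {
      return true;
    }
    return ((crossings & 1) == 0) ? referenceInSet : !referenceInSet;
  }

  public static void main(String[] args) {
    System.out.println(inSet(true, 2, false)); // true: even crossings
    System.out.println(inSet(true, 3, false)); // false: odd crossings
  }
}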
final Plane travelPlaneFixedX = new Plane(1.0, 0.0, 0.0, -x); final Plane travelPlaneFixedY = new Plane(0.0, 1.0, 0.0, -y); final Plane travelPlaneFixedZ = new Plane(0.0, 0.0, 1.0, -z); Plane fixedYAbovePlane = new Plane(travelPlaneFixedY, true); - if (-fixedYAbovePlane.D - planetModel.getMaximumYValue() > NEAR_EDGE_CUTOFF || planetModel.getMinimumYValue() + fixedYAbovePlane.D > NEAR_EDGE_CUTOFF) { - fixedYAbovePlane = null; + if (-fixedYAbovePlane.D - planetModel.getMaximumYValue() > NEAR_EDGE_CUTOFF + || planetModel.getMinimumYValue() + fixedYAbovePlane.D > NEAR_EDGE_CUTOFF) { + fixedYAbovePlane = null; } - + Plane fixedYBelowPlane = new Plane(travelPlaneFixedY, false); - if (-fixedYBelowPlane.D - planetModel.getMaximumYValue() > NEAR_EDGE_CUTOFF || planetModel.getMinimumYValue() + fixedYBelowPlane.D > NEAR_EDGE_CUTOFF) { - fixedYBelowPlane = null; + if (-fixedYBelowPlane.D - planetModel.getMaximumYValue() > NEAR_EDGE_CUTOFF + || planetModel.getMinimumYValue() + fixedYBelowPlane.D > NEAR_EDGE_CUTOFF) { + fixedYBelowPlane = null; } - + Plane fixedXAbovePlane = new Plane(travelPlaneFixedX, true); - if (-fixedXAbovePlane.D - planetModel.getMaximumXValue() > NEAR_EDGE_CUTOFF || planetModel.getMinimumXValue() + fixedXAbovePlane.D > NEAR_EDGE_CUTOFF) { - fixedXAbovePlane = null; + if (-fixedXAbovePlane.D - planetModel.getMaximumXValue() > NEAR_EDGE_CUTOFF + || planetModel.getMinimumXValue() + fixedXAbovePlane.D > NEAR_EDGE_CUTOFF) { + fixedXAbovePlane = null; } - + Plane fixedXBelowPlane = new Plane(travelPlaneFixedX, false); - if (-fixedXBelowPlane.D - planetModel.getMaximumXValue() > NEAR_EDGE_CUTOFF || planetModel.getMinimumXValue() + fixedXBelowPlane.D > NEAR_EDGE_CUTOFF) { - fixedXBelowPlane = null; + if (-fixedXBelowPlane.D - planetModel.getMaximumXValue() > NEAR_EDGE_CUTOFF + || planetModel.getMinimumXValue() + fixedXBelowPlane.D > NEAR_EDGE_CUTOFF) { + fixedXBelowPlane = null; } - + Plane fixedZAbovePlane = new Plane(travelPlaneFixedZ, true); - if (-fixedZAbovePlane.D - planetModel.getMaximumZValue() > NEAR_EDGE_CUTOFF || planetModel.getMinimumZValue() + fixedZAbovePlane.D > NEAR_EDGE_CUTOFF) { - fixedZAbovePlane = null; + if (-fixedZAbovePlane.D - planetModel.getMaximumZValue() > NEAR_EDGE_CUTOFF + || planetModel.getMinimumZValue() + fixedZAbovePlane.D > NEAR_EDGE_CUTOFF) { + fixedZAbovePlane = null; } - + Plane fixedZBelowPlane = new Plane(travelPlaneFixedZ, false); - if (-fixedZBelowPlane.D - planetModel.getMaximumZValue() > NEAR_EDGE_CUTOFF || planetModel.getMinimumZValue() + fixedZBelowPlane.D > NEAR_EDGE_CUTOFF) { - fixedZBelowPlane = null; + if (-fixedZBelowPlane.D - planetModel.getMaximumZValue() > NEAR_EDGE_CUTOFF + || planetModel.getMinimumZValue() + fixedZBelowPlane.D > NEAR_EDGE_CUTOFF) { + fixedZBelowPlane = null; } // Find the intersection points for each one of these and the complementary test point planes. 
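Each of the six candidate blocks that follow first asks whether its pair of envelope planes can meet inside the world at all. A sketch of that discriminant under a simplified unit-sphere reading (hypothetical helper): a plane x = a and a plane y = b share the line (a, b, .), and that line touches the sphere only when a*a + b*b <= 1; the checkAbove/checkBelow expressions apply the same "D squared plus D squared minus one" form, scaled by the planet model and compared against a resolution epsilon.

  static boolean planesMeetInsideWorld(
      final double d1, final double inverseScaleSq1,
      final double d2, final double inverseScaleSq2,
      final double minimumResolutionSquared) {
    // Mirrors the checkAbove/checkBelow pattern below: a (numerically)
    // negative discriminant means the two planes still intersect inside
    // the world, so the corresponding traversal strategy is viable.
    final double check = 4.0 * (d1 * d1 * inverseScaleSq1 + d2 * d2 * inverseScaleSq2 - 1.0);
    return check < minimumResolutionSquared;
  }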
final List traversalStrategies = new ArrayList<>(12); - - if (testPointFixedYAbovePlane != null && testPointFixedYBelowPlane != null && fixedXAbovePlane != null && fixedXBelowPlane != null) { - //check if planes intersects inside world - final double checkAbove = 4.0 * (fixedXAbovePlane.D * fixedXAbovePlane.D * planetModel.inverseXYScalingSquared + testPointFixedYAbovePlane.D * testPointFixedYAbovePlane.D * planetModel.inverseXYScalingSquared - 1.0); - final double checkBelow = 4.0 * (fixedXBelowPlane.D * fixedXBelowPlane.D * planetModel.inverseXYScalingSquared + testPointFixedYBelowPlane.D * testPointFixedYBelowPlane.D * planetModel.inverseXYScalingSquared - 1.0); - if (checkAbove < Vector.MINIMUM_RESOLUTION_SQUARED && checkBelow < Vector.MINIMUM_RESOLUTION_SQUARED) { - //System.out.println(" Looking for intersections between travel and test point planes..."); - final GeoPoint[] XIntersectionsY = travelPlaneFixedX.findIntersections(planetModel, testPointFixedYPlane); + + if (testPointFixedYAbovePlane != null + && testPointFixedYBelowPlane != null + && fixedXAbovePlane != null + && fixedXBelowPlane != null) { + // check if planes intersects inside world + final double checkAbove = + 4.0 + * (fixedXAbovePlane.D * fixedXAbovePlane.D * planetModel.inverseXYScalingSquared + + testPointFixedYAbovePlane.D + * testPointFixedYAbovePlane.D + * planetModel.inverseXYScalingSquared + - 1.0); + final double checkBelow = + 4.0 + * (fixedXBelowPlane.D * fixedXBelowPlane.D * planetModel.inverseXYScalingSquared + + testPointFixedYBelowPlane.D + * testPointFixedYBelowPlane.D + * planetModel.inverseXYScalingSquared + - 1.0); + if (checkAbove < Vector.MINIMUM_RESOLUTION_SQUARED + && checkBelow < Vector.MINIMUM_RESOLUTION_SQUARED) { + // System.out.println(" Looking for intersections between travel and test point + // planes..."); + final GeoPoint[] XIntersectionsY = + travelPlaneFixedX.findIntersections(planetModel, testPointFixedYPlane); for (final GeoPoint p : XIntersectionsY) { // Travel would be in YZ plane (fixed x) then in XZ (fixed y) - // We compute distance we need to travel as a placeholder for the number of intersections we might encounter. - //final double newDistance = p.arcDistance(testPoint) + p.arcDistance(thePoint); + // We compute distance we need to travel as a placeholder for the number of + // intersections we might encounter. 
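The commented-out arcDistance lines nearby show the exact metric this replaces; what ships is a cheap proxy, and since the candidate strategies are only sorted against each other, any reasonably monotone stand-in suffices. A minimal sketch of that proxy (hypothetical name):

  static double traversalCostProxy(
      final double tpDelta1, final double tpDelta2,
      final double cpDelta1, final double cpDelta2) {
    // Sum of squared coordinate deltas along both legs: far cheaper than
    // true arc distance, yet good enough to rank traversal strategies.
    return tpDelta1 * tpDelta1 + tpDelta2 * tpDelta2 + cpDelta1 * cpDelta1 + cpDelta2 * cpDelta2;
  }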
+ // final double newDistance = p.arcDistance(testPoint) + p.arcDistance(thePoint); final double tpDelta1 = testPoint.x - p.x; final double tpDelta2 = testPoint.z - p.z; final double cpDelta1 = y - p.y; final double cpDelta2 = z - p.z; - final double newDistance = tpDelta1 * tpDelta1 + tpDelta2 * tpDelta2 + cpDelta1 * cpDelta1 + cpDelta2 * cpDelta2; - //final double newDistance = (testPoint.x - p.x) * (testPoint.x - p.x) + (testPoint.z - p.z) * (testPoint.z - p.z) + (thePoint.y - p.y) * (thePoint.y - p.y) + (thePoint.z - p.z) * (thePoint.z - p.z); - //final double newDistance = Math.abs(testPoint.x - p.x) + Math.abs(thePoint.y - p.y); - traversalStrategies.add(new TraversalStrategy(newDistance, testPoint.y, x, - testPointFixedYPlane, testPointFixedYAbovePlane, testPointFixedYBelowPlane, - travelPlaneFixedX, fixedXAbovePlane, fixedXBelowPlane, - yTree, xTree, p)); + final double newDistance = + tpDelta1 * tpDelta1 + + tpDelta2 * tpDelta2 + + cpDelta1 * cpDelta1 + + cpDelta2 * cpDelta2; + // final double newDistance = (testPoint.x - p.x) * (testPoint.x - p.x) + (testPoint.z - + // p.z) * (testPoint.z - p.z) + (thePoint.y - p.y) * (thePoint.y - p.y) + (thePoint.z - + // p.z) * (thePoint.z - p.z); + // final double newDistance = Math.abs(testPoint.x - p.x) + Math.abs(thePoint.y - p.y); + traversalStrategies.add( + new TraversalStrategy( + newDistance, + testPoint.y, + x, + testPointFixedYPlane, + testPointFixedYAbovePlane, + testPointFixedYBelowPlane, + travelPlaneFixedX, + fixedXAbovePlane, + fixedXBelowPlane, + yTree, + xTree, + p)); } } } - if (testPointFixedZAbovePlane != null && testPointFixedZBelowPlane != null && fixedXAbovePlane != null && fixedXBelowPlane != null) { - //check if planes intersects inside world - final double checkAbove = 4.0 * (fixedXAbovePlane.D * fixedXAbovePlane.D * planetModel.inverseXYScalingSquared + testPointFixedZAbovePlane.D * testPointFixedZAbovePlane.D * planetModel.inverseZScalingSquared - 1.0); - final double checkBelow = 4.0 * (fixedXBelowPlane.D * fixedXBelowPlane.D * planetModel.inverseXYScalingSquared + testPointFixedZBelowPlane.D * testPointFixedZBelowPlane.D * planetModel.inverseZScalingSquared - 1.0); - if (checkAbove < Vector.MINIMUM_RESOLUTION_SQUARED && checkBelow < Vector.MINIMUM_RESOLUTION_SQUARED) { - //System.out.println(" Looking for intersections between travel and test point planes..."); - final GeoPoint[] XIntersectionsZ = travelPlaneFixedX.findIntersections(planetModel, testPointFixedZPlane); + if (testPointFixedZAbovePlane != null + && testPointFixedZBelowPlane != null + && fixedXAbovePlane != null + && fixedXBelowPlane != null) { + // check if planes intersects inside world + final double checkAbove = + 4.0 + * (fixedXAbovePlane.D * fixedXAbovePlane.D * planetModel.inverseXYScalingSquared + + testPointFixedZAbovePlane.D + * testPointFixedZAbovePlane.D + * planetModel.inverseZScalingSquared + - 1.0); + final double checkBelow = + 4.0 + * (fixedXBelowPlane.D * fixedXBelowPlane.D * planetModel.inverseXYScalingSquared + + testPointFixedZBelowPlane.D + * testPointFixedZBelowPlane.D + * planetModel.inverseZScalingSquared + - 1.0); + if (checkAbove < Vector.MINIMUM_RESOLUTION_SQUARED + && checkBelow < Vector.MINIMUM_RESOLUTION_SQUARED) { + // System.out.println(" Looking for intersections between travel and test point + // planes..."); + final GeoPoint[] XIntersectionsZ = + travelPlaneFixedX.findIntersections(planetModel, testPointFixedZPlane); for (final GeoPoint p : XIntersectionsZ) { // Travel would be in YZ plane (fixed x) then in XY 
(fixed z) - //final double newDistance = p.arcDistance(testPoint) + p.arcDistance(thePoint); + // final double newDistance = p.arcDistance(testPoint) + p.arcDistance(thePoint); final double tpDelta1 = testPoint.x - p.x; final double tpDelta2 = testPoint.y - p.y; final double cpDelta1 = y - p.y; final double cpDelta2 = z - p.z; - final double newDistance = tpDelta1 * tpDelta1 + tpDelta2 * tpDelta2 + cpDelta1 * cpDelta1 + cpDelta2 * cpDelta2; - //final double newDistance = (testPoint.x - p.x) * (testPoint.x - p.x) + (testPoint.y - p.y) * (testPoint.y - p.y) + (thePoint.y - p.y) * (thePoint.y - p.y) + (thePoint.z - p.z) * (thePoint.z - p.z); - //final double newDistance = Math.abs(testPoint.x - p.x) + Math.abs(thePoint.z - p.z); - traversalStrategies.add(new TraversalStrategy(newDistance, testPoint.z, x, - testPointFixedZPlane, testPointFixedZAbovePlane, testPointFixedZBelowPlane, - travelPlaneFixedX, fixedXAbovePlane, fixedXBelowPlane, - zTree, xTree, p)); + final double newDistance = + tpDelta1 * tpDelta1 + + tpDelta2 * tpDelta2 + + cpDelta1 * cpDelta1 + + cpDelta2 * cpDelta2; + // final double newDistance = (testPoint.x - p.x) * (testPoint.x - p.x) + (testPoint.y - + // p.y) * (testPoint.y - p.y) + (thePoint.y - p.y) * (thePoint.y - p.y) + (thePoint.z - + // p.z) * (thePoint.z - p.z); + // final double newDistance = Math.abs(testPoint.x - p.x) + Math.abs(thePoint.z - p.z); + traversalStrategies.add( + new TraversalStrategy( + newDistance, + testPoint.z, + x, + testPointFixedZPlane, + testPointFixedZAbovePlane, + testPointFixedZBelowPlane, + travelPlaneFixedX, + fixedXAbovePlane, + fixedXBelowPlane, + zTree, + xTree, + p)); } } } - if (testPointFixedXAbovePlane != null && testPointFixedXBelowPlane != null && fixedYAbovePlane != null && fixedYBelowPlane != null) { - //check if planes intersects inside world - final double checkAbove = 4.0 * (testPointFixedXAbovePlane.D * testPointFixedXAbovePlane.D * planetModel.inverseXYScalingSquared + fixedYAbovePlane.D * fixedYAbovePlane.D * planetModel.inverseXYScalingSquared - 1.0); - final double checkBelow = 4.0 * (testPointFixedXBelowPlane.D * testPointFixedXBelowPlane.D * planetModel.inverseXYScalingSquared + fixedYBelowPlane.D * fixedYBelowPlane.D * planetModel.inverseXYScalingSquared - 1.0); - if (checkAbove < Vector.MINIMUM_RESOLUTION_SQUARED && checkBelow < Vector.MINIMUM_RESOLUTION_SQUARED) { - //System.out.println(" Looking for intersections between travel and test point planes..."); - final GeoPoint[] YIntersectionsX = travelPlaneFixedY.findIntersections(planetModel, testPointFixedXPlane); + if (testPointFixedXAbovePlane != null + && testPointFixedXBelowPlane != null + && fixedYAbovePlane != null + && fixedYBelowPlane != null) { + // check if planes intersects inside world + final double checkAbove = + 4.0 + * (testPointFixedXAbovePlane.D + * testPointFixedXAbovePlane.D + * planetModel.inverseXYScalingSquared + + fixedYAbovePlane.D * fixedYAbovePlane.D * planetModel.inverseXYScalingSquared + - 1.0); + final double checkBelow = + 4.0 + * (testPointFixedXBelowPlane.D + * testPointFixedXBelowPlane.D + * planetModel.inverseXYScalingSquared + + fixedYBelowPlane.D * fixedYBelowPlane.D * planetModel.inverseXYScalingSquared + - 1.0); + if (checkAbove < Vector.MINIMUM_RESOLUTION_SQUARED + && checkBelow < Vector.MINIMUM_RESOLUTION_SQUARED) { + // System.out.println(" Looking for intersections between travel and test point + // planes..."); + final GeoPoint[] YIntersectionsX = + travelPlaneFixedY.findIntersections(planetModel, testPointFixedXPlane); 
for (final GeoPoint p : YIntersectionsX) { // Travel would be in XZ plane (fixed y) then in YZ (fixed x) - //final double newDistance = p.arcDistance(testPoint) + p.arcDistance(thePoint); + // final double newDistance = p.arcDistance(testPoint) + p.arcDistance(thePoint); final double tpDelta1 = testPoint.y - p.y; final double tpDelta2 = testPoint.z - p.z; final double cpDelta1 = x - p.x; final double cpDelta2 = z - p.z; - final double newDistance = tpDelta1 * tpDelta1 + tpDelta2 * tpDelta2 + cpDelta1 * cpDelta1 + cpDelta2 * cpDelta2; - //final double newDistance = (testPoint.y - p.y) * (testPoint.y - p.y) + (testPoint.z - p.z) * (testPoint.z - p.z) + (thePoint.x - p.x) * (thePoint.x - p.x) + (thePoint.z - p.z) * (thePoint.z - p.z); - //final double newDistance = Math.abs(testPoint.y - p.y) + Math.abs(thePoint.x - p.x); - traversalStrategies.add(new TraversalStrategy(newDistance, testPoint.x, y, - testPointFixedXPlane, testPointFixedXAbovePlane, testPointFixedXBelowPlane, - travelPlaneFixedY, fixedYAbovePlane, fixedYBelowPlane, - xTree, yTree, p)); + final double newDistance = + tpDelta1 * tpDelta1 + + tpDelta2 * tpDelta2 + + cpDelta1 * cpDelta1 + + cpDelta2 * cpDelta2; + // final double newDistance = (testPoint.y - p.y) * (testPoint.y - p.y) + (testPoint.z - + // p.z) * (testPoint.z - p.z) + (thePoint.x - p.x) * (thePoint.x - p.x) + (thePoint.z - + // p.z) * (thePoint.z - p.z); + // final double newDistance = Math.abs(testPoint.y - p.y) + Math.abs(thePoint.x - p.x); + traversalStrategies.add( + new TraversalStrategy( + newDistance, + testPoint.x, + y, + testPointFixedXPlane, + testPointFixedXAbovePlane, + testPointFixedXBelowPlane, + travelPlaneFixedY, + fixedYAbovePlane, + fixedYBelowPlane, + xTree, + yTree, + p)); } } } - if (testPointFixedZAbovePlane != null && testPointFixedZBelowPlane != null && fixedYAbovePlane != null && fixedYBelowPlane != null) { - //check if planes intersects inside world - final double checkAbove = 4.0 * (testPointFixedZAbovePlane.D * testPointFixedZAbovePlane.D * planetModel.inverseZScalingSquared + fixedYAbovePlane.D * fixedYAbovePlane.D * planetModel.inverseXYScalingSquared - 1.0); - final double checkBelow = 4.0 * (testPointFixedZBelowPlane.D * testPointFixedZBelowPlane.D * planetModel.inverseZScalingSquared + fixedYBelowPlane.D * fixedYBelowPlane.D * planetModel.inverseXYScalingSquared - 1.0); - if (checkAbove < Vector.MINIMUM_RESOLUTION_SQUARED && checkBelow < Vector.MINIMUM_RESOLUTION_SQUARED) { - //System.out.println(" Looking for intersections between travel and test point planes..."); - final GeoPoint[] YIntersectionsZ = travelPlaneFixedY.findIntersections(planetModel, testPointFixedZPlane); + if (testPointFixedZAbovePlane != null + && testPointFixedZBelowPlane != null + && fixedYAbovePlane != null + && fixedYBelowPlane != null) { + // check if planes intersects inside world + final double checkAbove = + 4.0 + * (testPointFixedZAbovePlane.D + * testPointFixedZAbovePlane.D + * planetModel.inverseZScalingSquared + + fixedYAbovePlane.D * fixedYAbovePlane.D * planetModel.inverseXYScalingSquared + - 1.0); + final double checkBelow = + 4.0 + * (testPointFixedZBelowPlane.D + * testPointFixedZBelowPlane.D + * planetModel.inverseZScalingSquared + + fixedYBelowPlane.D * fixedYBelowPlane.D * planetModel.inverseXYScalingSquared + - 1.0); + if (checkAbove < Vector.MINIMUM_RESOLUTION_SQUARED + && checkBelow < Vector.MINIMUM_RESOLUTION_SQUARED) { + // System.out.println(" Looking for intersections between travel and test point + // planes..."); + final GeoPoint[] 
YIntersectionsZ = + travelPlaneFixedY.findIntersections(planetModel, testPointFixedZPlane); for (final GeoPoint p : YIntersectionsZ) { // Travel would be in XZ plane (fixed y) then in XY (fixed z) - //final double newDistance = p.arcDistance(testPoint) + p.arcDistance(thePoint); + // final double newDistance = p.arcDistance(testPoint) + p.arcDistance(thePoint); final double tpDelta1 = testPoint.x - p.x; final double tpDelta2 = testPoint.y - p.y; final double cpDelta1 = x - p.x; final double cpDelta2 = z - p.z; - final double newDistance = tpDelta1 * tpDelta1 + tpDelta2 * tpDelta2 + cpDelta1 * cpDelta1 + cpDelta2 * cpDelta2; - //final double newDistance = (testPoint.x - p.x) * (testPoint.x - p.x) + (testPoint.y - p.y) * (testPoint.y - p.y) + (thePoint.x - p.x) * (thePoint.x - p.x) + (thePoint.z - p.z) * (thePoint.z - p.z); - //final double newDistance = Math.abs(testPoint.y - p.y) + Math.abs(thePoint.z - p.z); - traversalStrategies.add(new TraversalStrategy(newDistance, testPoint.z, y, - testPointFixedZPlane, testPointFixedZAbovePlane, testPointFixedZBelowPlane, - travelPlaneFixedY, fixedYAbovePlane, fixedYBelowPlane, - zTree, yTree, p)); + final double newDistance = + tpDelta1 * tpDelta1 + + tpDelta2 * tpDelta2 + + cpDelta1 * cpDelta1 + + cpDelta2 * cpDelta2; + // final double newDistance = (testPoint.x - p.x) * (testPoint.x - p.x) + (testPoint.y - + // p.y) * (testPoint.y - p.y) + (thePoint.x - p.x) * (thePoint.x - p.x) + (thePoint.z - + // p.z) * (thePoint.z - p.z); + // final double newDistance = Math.abs(testPoint.y - p.y) + Math.abs(thePoint.z - p.z); + traversalStrategies.add( + new TraversalStrategy( + newDistance, + testPoint.z, + y, + testPointFixedZPlane, + testPointFixedZAbovePlane, + testPointFixedZBelowPlane, + travelPlaneFixedY, + fixedYAbovePlane, + fixedYBelowPlane, + zTree, + yTree, + p)); } } } - if (testPointFixedXAbovePlane != null && testPointFixedXBelowPlane != null && fixedZAbovePlane != null && fixedZBelowPlane != null) { - //check if planes intersects inside world - final double checkAbove = 4.0 * (testPointFixedXAbovePlane.D * testPointFixedXAbovePlane.D * planetModel.inverseXYScalingSquared + fixedZAbovePlane.D * fixedZAbovePlane.D * planetModel.inverseZScalingSquared - 1.0); - final double checkBelow = 4.0 * (testPointFixedXBelowPlane.D * testPointFixedXBelowPlane.D * planetModel.inverseXYScalingSquared + fixedZBelowPlane.D * fixedZBelowPlane.D * planetModel.inverseZScalingSquared - 1.0); - if (checkAbove < Vector.MINIMUM_RESOLUTION_SQUARED && checkBelow < Vector.MINIMUM_RESOLUTION_SQUARED) { - //System.out.println(" Looking for intersections between travel and test point planes..."); - final GeoPoint[] ZIntersectionsX = travelPlaneFixedZ.findIntersections(planetModel, testPointFixedXPlane); + if (testPointFixedXAbovePlane != null + && testPointFixedXBelowPlane != null + && fixedZAbovePlane != null + && fixedZBelowPlane != null) { + // check if planes intersects inside world + final double checkAbove = + 4.0 + * (testPointFixedXAbovePlane.D + * testPointFixedXAbovePlane.D + * planetModel.inverseXYScalingSquared + + fixedZAbovePlane.D * fixedZAbovePlane.D * planetModel.inverseZScalingSquared + - 1.0); + final double checkBelow = + 4.0 + * (testPointFixedXBelowPlane.D + * testPointFixedXBelowPlane.D + * planetModel.inverseXYScalingSquared + + fixedZBelowPlane.D * fixedZBelowPlane.D * planetModel.inverseZScalingSquared + - 1.0); + if (checkAbove < Vector.MINIMUM_RESOLUTION_SQUARED + && checkBelow < Vector.MINIMUM_RESOLUTION_SQUARED) { + // System.out.println(" 
Looking for intersections between travel and test point + // planes..."); + final GeoPoint[] ZIntersectionsX = + travelPlaneFixedZ.findIntersections(planetModel, testPointFixedXPlane); for (final GeoPoint p : ZIntersectionsX) { // Travel would be in XY plane (fixed z) then in YZ (fixed x) - //final double newDistance = p.arcDistance(testPoint) + p.arcDistance(thePoint); + // final double newDistance = p.arcDistance(testPoint) + p.arcDistance(thePoint); final double tpDelta1 = testPoint.y - p.y; final double tpDelta2 = testPoint.z - p.z; final double cpDelta1 = y - p.y; final double cpDelta2 = x - p.x; - final double newDistance = tpDelta1 * tpDelta1 + tpDelta2 * tpDelta2 + cpDelta1 * cpDelta1 + cpDelta2 * cpDelta2; - //final double newDistance = (testPoint.y - p.y) * (testPoint.y - p.y) + (testPoint.z - p.z) * (testPoint.z - p.z) + (thePoint.y - p.y) * (thePoint.y - p.y) + (thePoint.x - p.x) * (thePoint.x - p.x); - //final double newDistance = Math.abs(testPoint.z - p.z) + Math.abs(thePoint.x - p.x); - traversalStrategies.add(new TraversalStrategy(newDistance, testPoint.x, z, - testPointFixedXPlane, testPointFixedXAbovePlane, testPointFixedXBelowPlane, - travelPlaneFixedZ, fixedZAbovePlane, fixedZBelowPlane, - xTree, zTree, p)); + final double newDistance = + tpDelta1 * tpDelta1 + + tpDelta2 * tpDelta2 + + cpDelta1 * cpDelta1 + + cpDelta2 * cpDelta2; + // final double newDistance = (testPoint.y - p.y) * (testPoint.y - p.y) + (testPoint.z - + // p.z) * (testPoint.z - p.z) + (thePoint.y - p.y) * (thePoint.y - p.y) + (thePoint.x - + // p.x) * (thePoint.x - p.x); + // final double newDistance = Math.abs(testPoint.z - p.z) + Math.abs(thePoint.x - p.x); + traversalStrategies.add( + new TraversalStrategy( + newDistance, + testPoint.x, + z, + testPointFixedXPlane, + testPointFixedXAbovePlane, + testPointFixedXBelowPlane, + travelPlaneFixedZ, + fixedZAbovePlane, + fixedZBelowPlane, + xTree, + zTree, + p)); } } } - if (testPointFixedYAbovePlane != null && testPointFixedYBelowPlane != null && fixedZAbovePlane != null && fixedZBelowPlane != null) { - //check if planes intersects inside world - final double checkAbove = 4.0 * (testPointFixedYAbovePlane.D * testPointFixedYAbovePlane.D * planetModel.inverseXYScalingSquared + fixedZAbovePlane.D * fixedZAbovePlane.D * planetModel.inverseZScalingSquared - 1.0); - final double checkBelow = 4.0 * (testPointFixedYBelowPlane.D * testPointFixedYBelowPlane.D * planetModel.inverseXYScalingSquared + fixedZBelowPlane.D * fixedZBelowPlane.D * planetModel.inverseZScalingSquared - 1.0); - if (checkAbove < Vector.MINIMUM_RESOLUTION_SQUARED && checkBelow < Vector.MINIMUM_RESOLUTION_SQUARED) { - //System.out.println(" Looking for intersections between travel and test point planes..."); - final GeoPoint[] ZIntersectionsY = travelPlaneFixedZ.findIntersections(planetModel, testPointFixedYPlane); + if (testPointFixedYAbovePlane != null + && testPointFixedYBelowPlane != null + && fixedZAbovePlane != null + && fixedZBelowPlane != null) { + // check if planes intersects inside world + final double checkAbove = + 4.0 + * (testPointFixedYAbovePlane.D + * testPointFixedYAbovePlane.D + * planetModel.inverseXYScalingSquared + + fixedZAbovePlane.D * fixedZAbovePlane.D * planetModel.inverseZScalingSquared + - 1.0); + final double checkBelow = + 4.0 + * (testPointFixedYBelowPlane.D + * testPointFixedYBelowPlane.D + * planetModel.inverseXYScalingSquared + + fixedZBelowPlane.D * fixedZBelowPlane.D * planetModel.inverseZScalingSquared + - 1.0); + if (checkAbove < 
Vector.MINIMUM_RESOLUTION_SQUARED + && checkBelow < Vector.MINIMUM_RESOLUTION_SQUARED) { + // System.out.println(" Looking for intersections between travel and test point + // planes..."); + final GeoPoint[] ZIntersectionsY = + travelPlaneFixedZ.findIntersections(planetModel, testPointFixedYPlane); for (final GeoPoint p : ZIntersectionsY) { // Travel would be in XY plane (fixed z) then in XZ (fixed y) - //final double newDistance = p.arcDistance(testPoint) + p.arcDistance(thePoint); + // final double newDistance = p.arcDistance(testPoint) + p.arcDistance(thePoint); final double tpDelta1 = testPoint.x - p.x; final double tpDelta2 = testPoint.z - p.z; final double cpDelta1 = y - p.y; final double cpDelta2 = x - p.x; - final double newDistance = tpDelta1 * tpDelta1 + tpDelta2 * tpDelta2 + cpDelta1 * cpDelta1 + cpDelta2 * cpDelta2; - //final double newDistance = (testPoint.x - p.x) * (testPoint.x - p.x) + (testPoint.z - p.z) * (testPoint.z - p.z) + (thePoint.y - p.y) * (thePoint.y - p.y) + (thePoint.x - p.x) * (thePoint.x - p.x); - //final double newDistance = Math.abs(testPoint.z - p.z) + Math.abs(thePoint.y - p.y); - traversalStrategies.add(new TraversalStrategy(newDistance, testPoint.y, z, - testPointFixedYPlane, testPointFixedYAbovePlane, testPointFixedYBelowPlane, - travelPlaneFixedZ, fixedZAbovePlane, fixedZBelowPlane, - yTree, zTree, p)); + final double newDistance = + tpDelta1 * tpDelta1 + + tpDelta2 * tpDelta2 + + cpDelta1 * cpDelta1 + + cpDelta2 * cpDelta2; + // final double newDistance = (testPoint.x - p.x) * (testPoint.x - p.x) + (testPoint.z - + // p.z) * (testPoint.z - p.z) + (thePoint.y - p.y) * (thePoint.y - p.y) + (thePoint.x - + // p.x) * (thePoint.x - p.x); + // final double newDistance = Math.abs(testPoint.z - p.z) + Math.abs(thePoint.y - p.y); + traversalStrategies.add( + new TraversalStrategy( + newDistance, + testPoint.y, + z, + testPointFixedYPlane, + testPointFixedYAbovePlane, + testPointFixedYBelowPlane, + travelPlaneFixedZ, + fixedZAbovePlane, + fixedZBelowPlane, + yTree, + zTree, + p)); } } } Collections.sort(traversalStrategies); - + if (traversalStrategies.size() == 0) { throw new IllegalArgumentException("No dual-plane travel strategies were found"); } @@ -458,18 +754,19 @@ class GeoComplexPolygon extends GeoBasePolygon { // Continue } } - + throw new IllegalArgumentException("Exhausted all traversal strategies"); } } - + @Override public GeoPoint[] getEdgePoints() { return edgePoints; } @Override - public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) { + public boolean intersects( + final Plane p, final GeoPoint[] notablePoints, final Membership... 
bounds) { // Create the intersector final EdgeIterator intersector = new IntersectorEdgeIterator(p, notablePoints, bounds); // First, compute the bounds for the the plane @@ -479,9 +776,12 @@ class GeoComplexPolygon extends GeoBasePolygon { xyzBounds.addPoint(point); } // If we have no bounds at all then the answer is "false" - if (xyzBounds.getMaximumX() == null || xyzBounds.getMinimumX() == null || - xyzBounds.getMaximumY() == null || xyzBounds.getMinimumY() == null || - xyzBounds.getMaximumZ() == null || xyzBounds.getMinimumZ() == null) { + if (xyzBounds.getMaximumX() == null + || xyzBounds.getMinimumX() == null + || xyzBounds.getMaximumY() == null + || xyzBounds.getMinimumY() == null + || xyzBounds.getMaximumZ() == null + || xyzBounds.getMinimumZ() == null) { return false; } // Figure out which tree likely works best @@ -529,7 +829,6 @@ class GeoComplexPolygon extends GeoBasePolygon { return true; } - @Override public void getBounds(Bounds bounds) { super.getBounds(bounds); @@ -537,7 +836,8 @@ class GeoComplexPolygon extends GeoBasePolygon { Edge currentEdge = startEdge; while (true) { bounds.addPoint(currentEdge.startPoint); - bounds.addPlane(this.planetModel, currentEdge.plane, currentEdge.startPlane, currentEdge.endPlane); + bounds.addPlane( + this.planetModel, currentEdge.plane, currentEdge.startPlane, currentEdge.endPlane); currentEdge = currentEdge.next; if (currentEdge == startEdge) { break; @@ -547,7 +847,8 @@ class GeoComplexPolygon extends GeoBasePolygon { } @Override - protected double outsideDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { + protected double outsideDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { double minimumDistance = Double.POSITIVE_INFINITY; for (final Edge shapeStartEdge : shapeStartEdges) { Edge shapeEdge = shapeStartEdge; @@ -556,7 +857,9 @@ class GeoComplexPolygon extends GeoBasePolygon { if (newDist < minimumDistance) { minimumDistance = newDist; } - final double newPlaneDist = distanceStyle.computeDistance(planetModel, shapeEdge.plane, x, y, z, shapeEdge.startPlane, shapeEdge.endPlane); + final double newPlaneDist = + distanceStyle.computeDistance( + planetModel, shapeEdge.plane, x, y, z, shapeEdge.startPlane, shapeEdge.endPlane); if (newPlaneDist < minimumDistance) { minimumDistance = newPlaneDist; } @@ -569,33 +872,44 @@ class GeoComplexPolygon extends GeoBasePolygon { return minimumDistance; } - /** Create a linear crossing edge iterator with the appropriate cutoff planes given the geometry. + /** + * Create a linear crossing edge iterator with the appropriate cutoff planes given the geometry. */ - private CountingEdgeIterator createLinearCrossingEdgeIterator(final GeoPoint testPoint, - final Plane plane, final Plane abovePlane, final Plane belowPlane, - final double thePointX, final double thePointY, final double thePointZ) { - // If thePoint and testPoint are parallel, we won't be able to determine sidedness of the bounding planes. So detect that case, and build the iterator differently if we find it. + private CountingEdgeIterator createLinearCrossingEdgeIterator( + final GeoPoint testPoint, + final Plane plane, + final Plane abovePlane, + final Plane belowPlane, + final double thePointX, + final double thePointY, + final double thePointZ) { + // If thePoint and testPoint are parallel, we won't be able to determine sidedness of the + // bounding planes. So detect that case, and build the iterator differently if we find it. 
// This didn't work; not sure why not: - //if (testPoint.isParallel(thePointX, thePointY, thePointZ)) { - // return new FullLinearCrossingEdgeIterator(plane, abovePlane, belowPlane, thePointX, thePointY, thePointZ); - //} - //return new SectorLinearCrossingEdgeIterator(plane, abovePlane, belowPlane, thePointX, thePointY, thePointZ); + // if (testPoint.isParallel(thePointX, thePointY, thePointZ)) { + // return new FullLinearCrossingEdgeIterator(plane, abovePlane, belowPlane, thePointX, + // thePointY, thePointZ); + // } + // return new SectorLinearCrossingEdgeIterator(plane, abovePlane, belowPlane, thePointX, + // thePointY, thePointZ); // try { - //System.out.println(" creating sector linear crossing edge iterator"); - return new SectorLinearCrossingEdgeIterator(testPoint, plane, abovePlane, belowPlane, thePointX, thePointY, thePointZ); + // System.out.println(" creating sector linear crossing edge iterator"); + return new SectorLinearCrossingEdgeIterator( + testPoint, plane, abovePlane, belowPlane, thePointX, thePointY, thePointZ); } catch (IllegalArgumentException e) { // Assume we failed because we could not construct bounding planes, so do it another way. - //System.out.println(" create full linear crossing edge iterator"); - return new FullLinearCrossingEdgeIterator(testPoint, plane, abovePlane, belowPlane, thePointX, thePointY, thePointZ); + // System.out.println(" create full linear crossing edge iterator"); + return new FullLinearCrossingEdgeIterator( + testPoint, plane, abovePlane, belowPlane, thePointX, thePointY, thePointZ); } } - private final static double[] halfProportions = new double[]{0.5}; - + private static final double[] halfProportions = new double[] {0.5}; + /** - * An instance of this class describes a single edge, and includes what is necessary to reliably determine intersection - * in the context of the even/odd algorithm used. + * An instance of this class describes a single edge, and includes what is necessary to reliably + * determine intersection in the context of the even/odd algorithm used. 
*/ private static class Edge { public final GeoPoint startPoint; @@ -608,33 +922,40 @@ class GeoComplexPolygon extends GeoBasePolygon { public final XYZBounds planeBounds; public Edge previous = null; public Edge next = null; - + public Edge(final PlanetModel pm, final GeoPoint startPoint, final GeoPoint endPoint) { this.startPoint = startPoint; this.endPoint = endPoint; - this.notablePoints = new GeoPoint[]{startPoint, endPoint}; + this.notablePoints = new GeoPoint[] {startPoint, endPoint}; this.plane = new Plane(startPoint, endPoint); - this.startPlane = new SidedPlane(endPoint, plane, startPoint); + this.startPlane = new SidedPlane(endPoint, plane, startPoint); this.endPlane = new SidedPlane(startPoint, plane, endPoint); - final GeoPoint interpolationPoint = plane.interpolate(pm, startPoint, endPoint, halfProportions)[0]; + final GeoPoint interpolationPoint = + plane.interpolate(pm, startPoint, endPoint, halfProportions)[0]; this.backingPlane = new SidedPlane(interpolationPoint, interpolationPoint, 0.0); this.planeBounds = new XYZBounds(); this.planeBounds.addPoint(startPoint); this.planeBounds.addPoint(endPoint); this.planeBounds.addPlane(pm, this.plane, this.startPlane, this.endPlane, this.backingPlane); - //System.out.println("Recording edge ["+startPoint+" --> "+endPoint+"]; bounds = "+planeBounds); + // System.out.println("Recording edge [" + startPoint + " --> " + endPoint + "];" + // + " bounds =" + planeBounds); } - public boolean isWithin(final double thePointX, final double thePointY, final double thePointZ) { - return plane.evaluateIsZero(thePointX, thePointY, thePointZ) && startPlane.isWithin(thePointX, thePointY, thePointZ) && endPlane.isWithin(thePointX, thePointY, thePointZ) && backingPlane.isWithin(thePointX, thePointY, thePointZ); + public boolean isWithin( + final double thePointX, final double thePointY, final double thePointZ) { + return plane.evaluateIsZero(thePointX, thePointY, thePointZ) + && startPlane.isWithin(thePointX, thePointY, thePointZ) + && endPlane.isWithin(thePointX, thePointY, thePointZ) + && backingPlane.isWithin(thePointX, thePointY, thePointZ); } // Hashcode and equals are system default!! } - - /** Strategy class for describing traversals. - * Implements Comparable so that these can be ordered by Collections.sort(). - */ + + /** + * Strategy class for describing traversals. Implements Comparable so that these can be ordered by + * Collections.sort(). 
+ */ private class TraversalStrategy implements Comparable { private final double traversalDistance; private final double firstLegValue; @@ -648,12 +969,20 @@ class GeoComplexPolygon extends GeoBasePolygon { private final Tree firstLegTree; private final Tree secondLegTree; private final GeoPoint intersectionPoint; - - public TraversalStrategy(final double traversalDistance, final double firstLegValue, final double secondLegValue, - final Plane firstLegPlane, final Plane firstLegAbovePlane, final Plane firstLegBelowPlane, - final Plane secondLegPlane, final Plane secondLegAbovePlane, final Plane secondLegBelowPlane, - final Tree firstLegTree, final Tree secondLegTree, - final GeoPoint intersectionPoint) { + + public TraversalStrategy( + final double traversalDistance, + final double firstLegValue, + final double secondLegValue, + final Plane firstLegPlane, + final Plane firstLegAbovePlane, + final Plane firstLegBelowPlane, + final Plane secondLegPlane, + final Plane secondLegAbovePlane, + final Plane secondLegBelowPlane, + final Tree firstLegTree, + final Tree secondLegTree, + final GeoPoint intersectionPoint) { this.traversalDistance = traversalDistance; this.firstLegValue = firstLegValue; this.secondLegValue = secondLegValue; @@ -668,57 +997,108 @@ class GeoComplexPolygon extends GeoBasePolygon { this.intersectionPoint = intersectionPoint; } - public boolean apply(final GeoPoint testPoint, final boolean testPointInSet, - final double x, final double y, final double z) { + public boolean apply( + final GeoPoint testPoint, + final boolean testPointInSet, + final double x, + final double y, + final double z) { // First, try with two individual legs. If that doesn't work, try the DualCrossingIterator. try { // First, we'll determine if the intersection point is in set or not - //System.out.println(" Finding whether "+intersectionPoint+" is in-set, based on travel from "+testPoint+" along "+firstLegPlane+" (value="+firstLegValue+")"); - final CountingEdgeIterator testPointEdgeIterator = createLinearCrossingEdgeIterator(testPoint, - firstLegPlane, firstLegAbovePlane, firstLegBelowPlane, - intersectionPoint.x, intersectionPoint.y, intersectionPoint.z); - // Traverse our way from the test point to the check point. Use the z tree because that's fixed. + // System.out.println(" Finding whether "+intersectionPoint+" is in-set, based on travel + // from " + testPoint + " along " + firstLegPlane + " (value=" + firstLegValue + ")"); + final CountingEdgeIterator testPointEdgeIterator = + createLinearCrossingEdgeIterator( + testPoint, + firstLegPlane, + firstLegAbovePlane, + firstLegBelowPlane, + intersectionPoint.x, + intersectionPoint.y, + intersectionPoint.z); + // Traverse our way from the test point to the check point. Use the z tree because that's + // fixed. firstLegTree.traverse(testPointEdgeIterator, firstLegValue); final boolean intersectionPointOnEdge = testPointEdgeIterator.isOnEdge(); - // If the intersection point is on the edge, we cannot use this combination of legs, since it's not logically possible to compute in-set or out-of-set + // If the intersection point is on the edge, we cannot use this combination of legs, since + // it's not logically possible to compute in-set or out-of-set // with such a starting point. 
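A standalone sketch (hypothetical helper) of the two-leg composition that apply() implements: membership is carried from the test point to the intersection point by the first leg's crossing parity, then from the intersection point to the check point by the second leg's, and an intersection point that sits exactly on an edge is rejected so the caller can try the next strategy.

  static boolean twoLegMembership(
      final boolean testPointInSet,
      final int firstLegCrossings, final boolean intersectionOnEdge,
      final int secondLegCrossings, final boolean checkPointOnEdge) {
    if (intersectionOnEdge) {
      // No parity can be assigned to a boundary point; abandon this strategy.
      throw new IllegalArgumentException("Intersection point landed on an edge -- illegal path");
    }
    final boolean intersectionInSet =
        ((firstLegCrossings & 1) == 0) ? testPointInSet : !testPointInSet;
    return checkPointOnEdge
        || (((secondLegCrossings & 1) == 0) ? intersectionInSet : !intersectionInSet);
  }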
if (intersectionPointOnEdge) { - throw new IllegalArgumentException("Intersection point landed on an edge -- illegal path"); + throw new IllegalArgumentException( + "Intersection point landed on an edge -- illegal path"); } - final boolean intersectionPointInSet = intersectionPointOnEdge || (((testPointEdgeIterator.getCrossingCount() & 1) == 0)?testPointInSet:!testPointInSet); - - //System.out.println(" Intersection point in-set? "+intersectionPointInSet+" On edge? "+intersectionPointOnEdge); + final boolean intersectionPointInSet = + intersectionPointOnEdge + || (((testPointEdgeIterator.getCrossingCount() & 1) == 0) + ? testPointInSet + : !testPointInSet); + + // System.out.println(" Intersection point in-set? " + intersectionPointInSet + // + " On edge? " + intersectionPointOnEdge); // Now do the final leg - //System.out.println(" Finding whether ["+x+","+y+","+z+"] is in-set, based on travel from "+intersectionPoint+" along "+secondLegPlane+" (value="+secondLegValue+")"); - final CountingEdgeIterator travelEdgeIterator = createLinearCrossingEdgeIterator(intersectionPoint, - secondLegPlane, secondLegAbovePlane, secondLegBelowPlane, - x, y, z); + // System.out.println(" Finding whether [" + x + "," + y + "," + z + "] is in-set" + // + ", based on travel from " + intersectionPoint + " along " + secondLegPlane + // + " (value=" + secondLegValue + ")"); + final CountingEdgeIterator travelEdgeIterator = + createLinearCrossingEdgeIterator( + intersectionPoint, + secondLegPlane, + secondLegAbovePlane, + secondLegBelowPlane, + x, + y, + z); // Traverse our way from the test point to the check point. secondLegTree.traverse(travelEdgeIterator, secondLegValue); - final boolean rval = travelEdgeIterator.isOnEdge() || (((travelEdgeIterator.getCrossingCount() & 1) == 0)?intersectionPointInSet:!intersectionPointInSet); - - //System.out.println(" Check point in set? "+rval); + final boolean rval = + travelEdgeIterator.isOnEdge() + || (((travelEdgeIterator.getCrossingCount() & 1) == 0) + ? intersectionPointInSet + : !intersectionPointInSet); + + // System.out.println(" Check point in set? " + rval); return rval; } catch (IllegalArgumentException e) { // Intersection point apparently was on edge, so try another strategy - //System.out.println(" Trying dual crossing edge iterator"); - final CountingEdgeIterator edgeIterator = new DualCrossingEdgeIterator(testPoint, - firstLegPlane, firstLegAbovePlane, firstLegBelowPlane, - secondLegPlane, secondLegAbovePlane, secondLegBelowPlane, - x, y, z, intersectionPoint); + // System.out.println(" Trying dual crossing edge iterator"); + final CountingEdgeIterator edgeIterator = + new DualCrossingEdgeIterator( + testPoint, + firstLegPlane, + firstLegAbovePlane, + firstLegBelowPlane, + secondLegPlane, + secondLegAbovePlane, + secondLegBelowPlane, + x, + y, + z, + intersectionPoint); firstLegTree.traverse(edgeIterator, firstLegValue); if (edgeIterator.isOnEdge()) { return true; } secondLegTree.traverse(edgeIterator, secondLegValue); - return edgeIterator.isOnEdge() || (((edgeIterator.getCrossingCount() & 1) == 0)?testPointInSet:!testPointInSet); + return edgeIterator.isOnEdge() + || (((edgeIterator.getCrossingCount() & 1) == 0) ? 
testPointInSet : !testPointInSet); } } @Override public String toString() { - return "{firstLegValue="+firstLegValue+"; secondLegValue="+secondLegValue+"; firstLegPlane="+firstLegPlane+"; secondLegPlane="+secondLegPlane+"; intersectionPoint="+intersectionPoint+"}"; + return "{firstLegValue=" + + firstLegValue + + "; secondLegValue=" + + secondLegValue + + "; firstLegPlane=" + + firstLegPlane + + "; secondLegPlane=" + + secondLegPlane + + "; intersectionPoint=" + + intersectionPoint + + "}"; } @Override @@ -730,11 +1110,10 @@ class GeoComplexPolygon extends GeoBasePolygon { } return 0; } - } - + /** - * Iterator execution interface, for tree traversal. Pass an object implementing this interface + * Iterator execution interface, for tree traversal. Pass an object implementing this interface * into the traversal method of a tree, and each edge that matches will cause this object to be * called. */ @@ -747,33 +1126,26 @@ class GeoComplexPolygon extends GeoBasePolygon { } /** - * Iterator execution interface, for tree traversal, plus count retrieval. Pass an object implementing this interface - * into the traversal method of a tree, and each edge that matches will cause this object to be - * called. + * Iterator execution interface, for tree traversal, plus count retrieval. Pass an object + * implementing this interface into the traversal method of a tree, and each edge that matches + * will cause this object to be called. */ private static interface CountingEdgeIterator extends EdgeIterator { - /** - * @return the number of edges that were crossed. - */ + /** @return the number of edges that were crossed. */ public int getCrossingCount(); - - /** - * @return true if the endpoint was on an edge. - */ - public boolean isOnEdge(); + /** @return true if the endpoint was on an edge. */ + public boolean isOnEdge(); } - + /** - * An instance of this class represents a node in a tree. The tree is designed to be given - * a value and from that to iterate over a list of edges. - * In order to do this efficiently, each new edge is dropped into the tree using its minimum and - * maximum value. If the new edge's value does not overlap the range, then it gets added - * either to the lesser side or the greater side, accordingly. If it does overlap, then the - * "overlapping" chain is instead traversed. - * - * This class is generic and can be used for any definition of "value". + * An instance of this class represents a node in a tree. The tree is designed to be given a value + * and from that to iterate over a list of edges. In order to do this efficiently, each new edge + * is dropped into the tree using its minimum and maximum value. If the new edge's value does not + * overlap the range, then it gets added either to the lesser side or the greater side, + * accordingly. If it does overlap, then the "overlapping" chain is instead traversed. * + *
<p>
    This class is generic and can be used for any definition of "value". */ private static class Node { public final Edge edge; @@ -783,7 +1155,6 @@ class GeoComplexPolygon extends GeoBasePolygon { public Node right = null; public double max; - public Node(final Edge edge, final double minimumValue, final double maximumValue) { this.edge = edge; this.low = minimumValue; @@ -791,36 +1162,39 @@ class GeoComplexPolygon extends GeoBasePolygon { this.max = maximumValue; } - public boolean traverse(final EdgeIterator edgeIterator, final double minValue, final double maxValue) { + public boolean traverse( + final EdgeIterator edgeIterator, final double minValue, final double maxValue) { if (minValue <= max) { - + // Does this node overlap? if (minValue <= high && maxValue >= low) { if (edgeIterator.matches(edge) == false) { return false; } } - + if (left != null && left.traverse(edgeIterator, minValue, maxValue) == false) { return false; } - if (right != null && maxValue >= low && right.traverse(edgeIterator, minValue, maxValue) == false) { + if (right != null + && maxValue >= low + && right.traverse(edgeIterator, minValue, maxValue) == false) { return false; } } return true; } - } - - /** An interface describing a tree. - */ - private static abstract class Tree { + + /** An interface describing a tree. */ + private abstract static class Tree { private final Node rootNode; - + protected static final Edge[] EMPTY_ARRAY = new Edge[0]; - - /** Constructor. + + /** + * Constructor. + * * @param allEdges is the list of all edges for the tree. */ public Tree(final List allEdges) { @@ -830,16 +1204,18 @@ class GeoComplexPolygon extends GeoBasePolygon { for (final Edge edge : allEdges) { edges[i++] = new Node(edge, getMinimum(edge), getMaximum(edge)); } - Arrays.sort(edges, (left, right) -> { - int ret = Double.compare(left.low, right.low); - if (ret == 0) { - ret = Double.compare(left.max, right.max); - } - return ret; - }); + Arrays.sort( + edges, + (left, right) -> { + int ret = Double.compare(left.low, right.low); + if (ret == 0) { + ret = Double.compare(left.max, right.max); + } + return ret; + }); rootNode = createTree(edges, 0, edges.length - 1); } - + private static Node createTree(final Node[] edges, final int low, final int high) { if (low > high) { return null; @@ -860,19 +1236,25 @@ class GeoComplexPolygon extends GeoBasePolygon { return newNode; } - /** Get the minimum value from the edge. + /** + * Get the minimum value from the edge. + * * @param edge is the edge. * @return the minimum value. */ protected abstract double getMinimum(final Edge edge); - - /** Get the maximum value from the edge. + + /** + * Get the maximum value from the edge. + * * @param edge is the edge. * @return the maximum value. */ protected abstract double getMaximum(final Edge edge); - - /** Traverse the tree, finding all edges that intersect the provided value. + + /** + * Traverse the tree, finding all edges that intersect the provided value. + * * @param edgeIterator provides the method to call for any encountered matching edge. * @param value is the value to match. * @return false if the traversal was aborted before completion. @@ -880,33 +1262,33 @@ class GeoComplexPolygon extends GeoBasePolygon { public boolean traverse(final EdgeIterator edgeIterator, final double value) { return traverse(edgeIterator, value, value); } - - /** Traverse the tree, finding all edges that intersect the provided value range. - * @param edgeIterator provides the method to call for any encountered matching edge. 
- * Edges will not be invoked more than once. + + /** + * Traverse the tree, finding all edges that intersect the provided value range. + * + * @param edgeIterator provides the method to call for any encountered matching edge. Edges will + * not be invoked more than once. * @param minValue is the minimum value. * @param maxValue is the maximum value. * @return false if the traversal was aborted before completion. */ - public boolean traverse(final EdgeIterator edgeIterator, final double minValue, final double maxValue) { + public boolean traverse( + final EdgeIterator edgeIterator, final double minValue, final double maxValue) { if (rootNode == null) { return true; } return rootNode.traverse(edgeIterator, minValue, maxValue); } - - } - - /** This is the z-tree. - */ + + /** This is the z-tree. */ private static class ZTree extends Tree { public Node rootNode = null; - + public ZTree(final List allEdges) { super(allEdges); } - + /* @Override public boolean traverse(final EdgeIterator edgeIterator, final double value) { @@ -914,23 +1296,21 @@ class GeoComplexPolygon extends GeoBasePolygon { return super.traverse(edgeIterator, value); } */ - + @Override protected double getMinimum(final Edge edge) { return edge.planeBounds.getMinimumZ(); } - + @Override protected double getMaximum(final Edge edge) { return edge.planeBounds.getMaximumZ(); } - } - - /** This is the y-tree. - */ + + /** This is the y-tree. */ private static class YTree extends Tree { - + public YTree(final List allEdges) { super(allEdges); } @@ -942,27 +1322,25 @@ class GeoComplexPolygon extends GeoBasePolygon { return super.traverse(edgeIterator, value); } */ - + @Override protected double getMinimum(final Edge edge) { return edge.planeBounds.getMinimumY(); } - + @Override protected double getMaximum(final Edge edge) { return edge.planeBounds.getMaximumY(); } - } - /** This is the x-tree. - */ + /** This is the x-tree. */ private static class XTree extends Tree { - + public XTree(final List allEdges) { super(allEdges); } - + /* @Override public boolean traverse(final EdgeIterator edgeIterator, final double value) { @@ -970,43 +1348,46 @@ class GeoComplexPolygon extends GeoBasePolygon { return super.traverse(edgeIterator, value); } */ - + @Override protected double getMinimum(final Edge edge) { return edge.planeBounds.getMinimumX(); } - + @Override protected double getMaximum(final Edge edge) { return edge.planeBounds.getMaximumX(); } - } - /** Assess whether edge intersects the provided plane plus bounds. - */ + /** Assess whether edge intersects the provided plane plus bounds. */ private class IntersectorEdgeIterator implements EdgeIterator { - + private final Plane plane; private final GeoPoint[] notablePoints; private final Membership[] bounds; - - public IntersectorEdgeIterator(final Plane plane, final GeoPoint[] notablePoints, final Membership... bounds) { + + public IntersectorEdgeIterator( + final Plane plane, final GeoPoint[] notablePoints, final Membership... bounds) { this.plane = plane; this.notablePoints = notablePoints; this.bounds = bounds; } - + @Override public boolean matches(final Edge edge) { - return !plane.intersects(planetModel, edge.plane, notablePoints, edge.notablePoints, bounds, edge.startPlane, edge.endPlane); + return !plane.intersects( + planetModel, + edge.plane, + notablePoints, + edge.notablePoints, + bounds, + edge.startPlane, + edge.endPlane); } - } - - /** Assess whether edge intersects the provided shape. - */ + /** Assess whether edge intersects the provided shape. 
*/ private class IntersectorShapeIterator implements EdgeIterator { private final GeoShape shape; @@ -1059,11 +1440,10 @@ class GeoComplexPolygon extends GeoBasePolygon { System.out.println(" ...done\n"); } */ - - /** Count the number of verifiable edge crossings for a full 1/2 a world. - */ + + /** Count the number of verifiable edge crossings for a full 1/2 a world. */ private class FullLinearCrossingEdgeIterator implements CountingEdgeIterator { - + private final GeoPoint testPoint; private final Plane plane; private final Plane abovePlane; @@ -1072,15 +1452,21 @@ class GeoComplexPolygon extends GeoBasePolygon { private final double thePointX; private final double thePointY; private final double thePointZ; - + private boolean onEdge = false; private int aboveCrossingCount = 0; private int belowCrossingCount = 0; - - public FullLinearCrossingEdgeIterator(final GeoPoint testPoint, - final Plane plane, final Plane abovePlane, final Plane belowPlane, - final double thePointX, final double thePointY, final double thePointZ) { - assert plane.evaluateIsZero(thePointX, thePointY, thePointZ) : "Check point is not on travel plane"; + + public FullLinearCrossingEdgeIterator( + final GeoPoint testPoint, + final Plane plane, + final Plane abovePlane, + final Plane belowPlane, + final double thePointX, + final double thePointY, + final double thePointZ) { + assert plane.evaluateIsZero(thePointX, thePointY, thePointZ) + : "Check point is not on travel plane"; assert plane.evaluateIsZero(testPoint) : "Test point is not on travel plane"; this.testPoint = testPoint; this.plane = plane; @@ -1094,10 +1480,10 @@ class GeoComplexPolygon extends GeoBasePolygon { this.thePointX = thePointX; this.thePointY = thePointY; this.thePointZ = thePointZ; - //System.out.println(" Constructing full linear crossing edge iterator"); - //debugIntersectAllEdges(plane, bound); + // System.out.println(" Constructing full linear crossing edge iterator"); + // debugIntersectAllEdges(plane, bound); } - + @Override public int getCrossingCount() { return Math.min(aboveCrossingCount, belowCrossingCount); @@ -1110,17 +1496,21 @@ class GeoComplexPolygon extends GeoBasePolygon { @Override public boolean matches(final Edge edge) { - //System.out.println(" Edge ["+edge.startPoint+" --> "+edge.endPoint+"] potentially crosses travel plane "+plane); + // System.out.println(" Edge [" + edge.startPoint + " --> " + edge.endPoint + // + "] potentially crosses travel plane " + plane); // Early exit if the point is on the edge. if (edge.isWithin(thePointX, thePointY, thePointZ)) { - //System.out.println(" Point is on the edge; in-set"); + // System.out.println(" Point is on the edge; in-set"); onEdge = true; return false; } - - // This should precisely mirror what is in DualCrossingIterator, but without the dual crossings. - // Some edges are going to be given to us even when there's no real intersection, so do that as a sanity check, first. - final GeoPoint[] planeCrossings = plane.findIntersections(planetModel, edge.plane, bound, edge.startPlane, edge.endPlane); + + // This should precisely mirror what is in DualCrossingIterator, but without the dual + // crossings. + // Some edges are going to be given to us even when there's no real intersection, so do that + // as a sanity check, first. + final GeoPoint[] planeCrossings = + plane.findIntersections(planetModel, edge.plane, bound, edge.startPlane, edge.endPlane); if (planeCrossings != null && planeCrossings.length == 0) { // Sometimes on the hairy edge an intersection will be missed. 
This check finds those. if (!plane.evaluateIsZero(edge.startPoint) && !plane.evaluateIsZero(edge.endPoint)) { @@ -1128,38 +1518,45 @@ class GeoComplexPolygon extends GeoBasePolygon { } } - //System.out.println(" Edge intersects travel plane "+plane); - - // Determine crossings of this edge against all inside/outside planes. There's no further need to look at the actual travel plane itself. + // System.out.println(" Edge intersects travel plane " + plane); + + // Determine crossings of this edge against all inside/outside planes. There's no further + // need to look at the actual travel plane itself. final int aboveCrossings = countCrossings(edge, abovePlane, bound); aboveCrossingCount += aboveCrossings; final int belowCrossings = countCrossings(edge, belowPlane, bound); belowCrossingCount += belowCrossings; - //System.out.println(" Above crossings = "+aboveCrossings+"; below crossings = "+belowCrossings); + // System.out.println(" Above crossings = " + aboveCrossings + "; below crossings = " + // + belowCrossings); return true; } - /** Find the intersections with an envelope plane, and assess those intersections for - * whether they truly describe crossings. - */ - private int countCrossings(final Edge edge, - final Plane envelopePlane, final Membership envelopeBound) { - final GeoPoint[] intersections = edge.plane.findIntersections(planetModel, envelopePlane, envelopeBound); + /** + * Find the intersections with an envelope plane, and assess those intersections for whether + * they truly describe crossings. + */ + private int countCrossings( + final Edge edge, final Plane envelopePlane, final Membership envelopeBound) { + final GeoPoint[] intersections = + edge.plane.findIntersections(planetModel, envelopePlane, envelopeBound); int crossings = 0; if (intersections != null) { for (final GeoPoint intersection : intersections) { - if (edge.startPlane.strictlyWithin(intersection) && edge.endPlane.strictlyWithin(intersection)) { + if (edge.startPlane.strictlyWithin(intersection) + && edge.endPlane.strictlyWithin(intersection)) { // It's unique, so assess it - crossings += edgeCrossesEnvelope(edge.plane, intersection, envelopePlane)?1:0; + crossings += edgeCrossesEnvelope(edge.plane, intersection, envelopePlane) ? 1 : 0; } } } return crossings; } - private boolean edgeCrossesEnvelope(final Plane edgePlane, final GeoPoint intersectionPoint, final Plane envelopePlane) { - final GeoPoint[] adjoiningPoints = findAdjoiningPoints(edgePlane, intersectionPoint, envelopePlane); + private boolean edgeCrossesEnvelope( + final Plane edgePlane, final GeoPoint intersectionPoint, final Plane envelopePlane) { + final GeoPoint[] adjoiningPoints = + findAdjoiningPoints(edgePlane, intersectionPoint, envelopePlane); if (adjoiningPoints == null) { return true; } @@ -1171,14 +1568,11 @@ class GeoComplexPolygon extends GeoBasePolygon { } return (withinCount & 1) != 0; } - - } - /** Count the number of verifiable edge crossings for less than 1/2 a world. - */ + /** Count the number of verifiable edge crossings for less than 1/2 a world. 
*/ private class SectorLinearCrossingEdgeIterator implements CountingEdgeIterator { - + private final GeoPoint testPoint; private final Plane plane; private final Plane abovePlane; @@ -1188,15 +1582,21 @@ class GeoComplexPolygon extends GeoBasePolygon { private final double thePointX; private final double thePointY; private final double thePointZ; - + private boolean onEdge = false; private int aboveCrossingCount = 0; private int belowCrossingCount = 0; - - public SectorLinearCrossingEdgeIterator(final GeoPoint testPoint, - final Plane plane, final Plane abovePlane, final Plane belowPlane, - final double thePointX, final double thePointY, final double thePointZ) { - assert plane.evaluateIsZero(thePointX, thePointY, thePointZ) : "Check point is not on travel plane"; + + public SectorLinearCrossingEdgeIterator( + final GeoPoint testPoint, + final Plane plane, + final Plane abovePlane, + final Plane belowPlane, + final double thePointX, + final double thePointY, + final double thePointZ) { + assert plane.evaluateIsZero(thePointX, thePointY, thePointZ) + : "Check point is not on travel plane"; assert plane.evaluateIsZero(testPoint) : "Test point is not on travel plane"; this.testPoint = testPoint; this.plane = plane; @@ -1204,50 +1604,58 @@ class GeoComplexPolygon extends GeoBasePolygon { this.belowPlane = belowPlane; // We have to be sure we don't accidently create two bounds that would exclude all points. // Not sure this can happen but... - final SidedPlane bound1Plane = new SidedPlane(thePointX, thePointY, thePointZ, plane, testPoint); - final SidedPlane bound2Plane = new SidedPlane(testPoint, plane, thePointX, thePointY, thePointZ); + final SidedPlane bound1Plane = + new SidedPlane(thePointX, thePointY, thePointZ, plane, testPoint); + final SidedPlane bound2Plane = + new SidedPlane(testPoint, plane, thePointX, thePointY, thePointZ); if (bound1Plane.isNumericallyIdentical(bound2Plane)) { - throw new IllegalArgumentException("Sector iterator unreliable when bounds planes are numerically identical"); + throw new IllegalArgumentException( + "Sector iterator unreliable when bounds planes are numerically identical"); } this.bound1 = bound1Plane; this.bound2 = bound2Plane; this.thePointX = thePointX; this.thePointY = thePointY; this.thePointZ = thePointZ; - //System.out.println(" Constructing sector linear crossing edge iterator"); - //debugIntersectAllEdges(plane, bound1, bound2); + // System.out.println(" Constructing sector linear crossing edge iterator"); + // debugIntersectAllEdges(plane, bound1, bound2); } - + @Override public int getCrossingCount() { return Math.min(aboveCrossingCount, belowCrossingCount); } - + @Override public boolean isOnEdge() { return onEdge; } - + @Override public boolean matches(final Edge edge) { - //System.out.println(" Edge ["+edge.startPoint+" --> "+edge.endPoint+"] potentially crosses travel plane "+plane); + // System.out.println(" Edge ["+edge.startPoint + " --> " + edge.endPoint + "]" + // + " potentially crosses travel plane " + plane); // Early exit if the point is on the edge. if (edge.isWithin(thePointX, thePointY, thePointZ)) { // The point is on the edge. This means it's "in-set" by definition, so abort. - //System.out.println(" Point is on the edge; in-set"); + // System.out.println(" Point is on the edge; in-set"); onEdge = true; return false; } - - //System.out.println(" Finding intersections between edge plane and travel plane..."); - // This should precisely mirror what is in DualCrossingIterator, but without the dual crossings. 
- // Some edges are going to be given to us even when there's no real intersection, so do that as a sanity check, first. - final GeoPoint[] planeCrossings = plane.findIntersections(planetModel, edge.plane, bound1, bound2, edge.startPlane, edge.endPlane); + // System.out.println(" Finding intersections between edge plane and travel plane..."); + + // This should precisely mirror what is in DualCrossingIterator, but without the dual + // crossings. + // Some edges are going to be given to us even when there's no real intersection, so do that + // as a sanity check, first. + final GeoPoint[] planeCrossings = + plane.findIntersections( + planetModel, edge.plane, bound1, bound2, edge.startPlane, edge.endPlane); if (planeCrossings == null) { - //System.out.println(" Planes were identical"); + // System.out.println(" Planes were identical"); } else if (planeCrossings.length == 0) { - //System.out.println(" There are no intersection points within bounds."); + // System.out.println(" There are no intersection points within bounds."); /* // For debugging purposes, let's repeat the intersection check without bounds, and figure out which bound(s) rejected it final GeoPoint[] unboundedCrossings = plane.findIntersections(planetModel, edge.plane); @@ -1268,83 +1676,96 @@ class GeoComplexPolygon extends GeoBasePolygon { */ // Sometimes on the hairy edge an intersection will be missed. This check finds those. if (!plane.evaluateIsZero(edge.startPoint) && !plane.evaluateIsZero(edge.endPoint)) { - //System.out.println(" Endpoint(s) of edge are not on travel plane; distances: "+plane.evaluate(edge.startPoint)+" and "+plane.evaluate(edge.endPoint)); + // System.out.println(" Endpoint(s) of edge are not on travel plane; distances:" + // + plane.evaluate(edge.startPoint) + " and " + plane.evaluate(edge.endPoint)); // Edge doesn't actually intersect the travel plane. return true; } else { - //System.out.println(" Endpoint(s) of edge are on travel plane!"); + // System.out.println(" Endpoint(s) of edge are on travel plane!"); } } else { - //System.out.println(" There were intersection points!"); + // System.out.println(" There were intersection points!"); } - - //System.out.println(" Edge intersects travel plane"); - // Determine crossings of this edge against all inside/outside planes. There's no further need to look at the actual travel plane itself. - //System.out.println(" Getting above crossings..."); + // System.out.println(" Edge intersects travel plane"); + + // Determine crossings of this edge against all inside/outside planes. There's no further + // need to look at the actual travel plane itself. + // System.out.println(" Getting above crossings..."); final int aboveCrossings = countCrossings(edge, abovePlane, bound1, bound2); aboveCrossingCount += aboveCrossings; - //System.out.println(" Getting below crossings..."); + // System.out.println(" Getting below crossings..."); final int belowCrossings = countCrossings(edge, belowPlane, bound1, bound2); belowCrossingCount += belowCrossings; - //System.out.println(" Above crossings = "+aboveCrossings+"; below crossings = "+belowCrossings); + // System.out.println(" Above crossings = "+aboveCrossings+"; below crossings = " + // + belowCrossings); return true; } - /** Find the intersections with an envelope plane, and assess those intersections for - * whether they truly describe crossings. 
- */ - private int countCrossings(final Edge edge, - final Plane envelopePlane, final Membership envelopeBound1, final Membership envelopeBound2) { - final GeoPoint[] intersections = edge.plane.findIntersections(planetModel, envelopePlane, envelopeBound1, envelopeBound2); + /** + * Find the intersections with an envelope plane, and assess those intersections for whether + * they truly describe crossings. + */ + private int countCrossings( + final Edge edge, + final Plane envelopePlane, + final Membership envelopeBound1, + final Membership envelopeBound2) { + final GeoPoint[] intersections = + edge.plane.findIntersections(planetModel, envelopePlane, envelopeBound1, envelopeBound2); int crossings = 0; if (intersections != null) { for (final GeoPoint intersection : intersections) { - if (edge.startPlane.strictlyWithin(intersection) && edge.endPlane.strictlyWithin(intersection)) { - //System.out.println(" Envelope intersection point = "+intersection); + if (edge.startPlane.strictlyWithin(intersection) + && edge.endPlane.strictlyWithin(intersection)) { + // System.out.println(" Envelope intersection point = "+intersection); // It's unique, so assess it - final int counter = edgeCrossesEnvelope(edge.plane, intersection, envelopePlane)?1:0; - //System.out.println(" Edge crosses envelope "+counter+" times"); + final int counter = + edgeCrossesEnvelope(edge.plane, intersection, envelopePlane) ? 1 : 0; + // System.out.println(" Edge crosses envelope "+counter+" times"); crossings += counter; } } } else { - //System.out.println(" Intersections = null"); + // System.out.println(" Intersections = null"); } return crossings; } - private boolean edgeCrossesEnvelope(final Plane edgePlane, final GeoPoint intersectionPoint, final Plane envelopePlane) { - final GeoPoint[] adjoiningPoints = findAdjoiningPoints(edgePlane, intersectionPoint, envelopePlane); + private boolean edgeCrossesEnvelope( + final Plane edgePlane, final GeoPoint intersectionPoint, final Plane envelopePlane) { + final GeoPoint[] adjoiningPoints = + findAdjoiningPoints(edgePlane, intersectionPoint, envelopePlane); if (adjoiningPoints == null) { - //System.out.println(" No adjoining points"); + // System.out.println(" No adjoining points"); return true; } int withinCount = 0; for (final GeoPoint adjoining : adjoiningPoints) { - //System.out.println(" Adjoining point "+adjoining); - if (plane.evaluateIsZero(adjoining) && bound1.isWithin(adjoining) && bound2.isWithin(adjoining)) { - //System.out.println(" within!!"); + // System.out.println(" Adjoining point " + adjoining); + if (plane.evaluateIsZero(adjoining) + && bound1.isWithin(adjoining) + && bound2.isWithin(adjoining)) { + // System.out.println(" within!!"); withinCount++; } else { - //System.out.println(" evaluateIsZero? "+plane.evaluateIsZero(adjoining)+" bound1.isWithin? "+bound1.isWithin(adjoining)+" bound2.isWithin? "+bound2.isWithin(adjoining)); + // System.out.println(" evaluateIsZero? " + plane.evaluateIsZero(adjoining) + // + " bound1.isWithin? " + bound1.isWithin(adjoining) + // + " bound2.isWithin? " + bound2.isWithin(adjoining)); } } return (withinCount & 1) != 0; } - } - - /** Count the number of verifiable edge crossings for a dual-leg journey. - */ + /** Count the number of verifiable edge crossings for a dual-leg journey. */ private class DualCrossingEdgeIterator implements CountingEdgeIterator { - - // This is a hash of which edges we've already looked at and tallied, so we don't repeat ourselves. 
- // It is lazily initialized since most transitions cross no edges at all. + + // This is a hash of which edges we've already looked at and tallied, so we don't repeat + // ourselves. It is lazily initialized since most transitions cross no edges at all. private Set seenEdges = null; - + private final GeoPoint testPoint; private final Plane testPointPlane; private final Plane testPointAbovePlane; @@ -1355,16 +1776,16 @@ class GeoComplexPolygon extends GeoBasePolygon { private final double thePointX; private final double thePointY; private final double thePointZ; - + private final GeoPoint intersectionPoint; - + private final SidedPlane testPointCutoffPlane; private final SidedPlane checkPointCutoffPlane; private final SidedPlane testPointOtherCutoffPlane; private final SidedPlane checkPointOtherCutoffPlane; // These are computed on an as-needed basis - + private boolean computedInsideOutside = false; private Plane testPointInsidePlane; private Plane testPointOutsidePlane; @@ -1374,16 +1795,24 @@ class GeoComplexPolygon extends GeoBasePolygon { private SidedPlane insideTravelCutoffPlane; private SidedPlane outsideTestPointCutoffPlane; private SidedPlane outsideTravelCutoffPlane; - + // The counters private boolean onEdge = false; private int innerCrossingCount = 0; private int outerCrossingCount = 0; - public DualCrossingEdgeIterator(final GeoPoint testPoint, - final Plane testPointPlane, final Plane testPointAbovePlane, final Plane testPointBelowPlane, - final Plane travelPlane, final Plane travelAbovePlane, final Plane travelBelowPlane, - final double thePointX, final double thePointY, final double thePointZ, final GeoPoint intersectionPoint) { + public DualCrossingEdgeIterator( + final GeoPoint testPoint, + final Plane testPointPlane, + final Plane testPointAbovePlane, + final Plane testPointBelowPlane, + final Plane travelPlane, + final Plane travelAbovePlane, + final Plane travelBelowPlane, + final double thePointX, + final double thePointY, + final double thePointZ, + final GeoPoint intersectionPoint) { this.testPoint = testPoint; this.testPointPlane = testPointPlane; this.testPointAbovePlane = testPointAbovePlane; @@ -1395,19 +1824,26 @@ class GeoComplexPolygon extends GeoBasePolygon { this.thePointY = thePointY; this.thePointZ = thePointZ; this.intersectionPoint = intersectionPoint; - - //System.out.println("Intersection point = "+intersectionPoint); - //System.out.println("TestPoint plane: "+testPoint+" -> "+intersectionPoint); - //System.out.println("Travel plane: ["+thePointX+","+thePointY+","+thePointZ+"] -> "+intersectionPoint); - - assert travelPlane.evaluateIsZero(intersectionPoint) : "intersection point must be on travel plane"; - assert testPointPlane.evaluateIsZero(intersectionPoint) : "intersection point must be on test point plane"; - - //System.out.println("Test point distance to intersection point: "+intersectionPoint.linearDistance(testPoint)); - //System.out.println("Check point distance to intersection point: "+intersectionPoint.linearDistance(thePointX, thePointY, thePointZ)); - assert !testPoint.isNumericallyIdentical(intersectionPoint) : "test point is the same as intersection point"; - assert !intersectionPoint.isNumericallyIdentical(thePointX, thePointY, thePointZ) : "check point is same as intersection point"; + // System.out.println("Intersection point = " + intersectionPoint); + // System.out.println("TestPoint plane: " + testPoint + " -> " + intersectionPoint); + // System.out.println("Travel plane: [" + thePointX + "," + thePointY + "," + thePointZ + // + 
"] -> " + intersectionPoint); + + assert travelPlane.evaluateIsZero(intersectionPoint) + : "intersection point must be on travel plane"; + assert testPointPlane.evaluateIsZero(intersectionPoint) + : "intersection point must be on test point plane"; + + // System.out.println("Test point distance to intersection point: " + // + intersectionPoint.linearDistance(testPoint)); + // System.out.println("Check point distance to intersection point: " + // + intersectionPoint.linearDistance(thePointX, thePointY, thePointZ)); + + assert !testPoint.isNumericallyIdentical(intersectionPoint) + : "test point is the same as intersection point"; + assert !intersectionPoint.isNumericallyIdentical(thePointX, thePointY, thePointZ) + : "check point is same as intersection point"; /* final SidedPlane bound1Plane = new SidedPlane(thePointX, thePointY, thePointZ, plane, testPoint); @@ -1416,58 +1852,92 @@ class GeoComplexPolygon extends GeoBasePolygon { throw new IllegalArgumentException("Sector iterator unreliable when bounds planes are numerically identical"); } */ - - final SidedPlane testPointBound1 = new SidedPlane(intersectionPoint, testPointPlane, testPoint); - final SidedPlane testPointBound2 = new SidedPlane(testPoint, testPointPlane, intersectionPoint); + + final SidedPlane testPointBound1 = + new SidedPlane(intersectionPoint, testPointPlane, testPoint); + final SidedPlane testPointBound2 = + new SidedPlane(testPoint, testPointPlane, intersectionPoint); if (testPointBound1.isFunctionallyIdentical(testPointBound2)) { - throw new IllegalArgumentException("Dual iterator unreliable when bounds planes are functionally identical"); + throw new IllegalArgumentException( + "Dual iterator unreliable when bounds planes are functionally identical"); } this.testPointCutoffPlane = testPointBound1; this.testPointOtherCutoffPlane = testPointBound2; - final SidedPlane checkPointBound1 = new SidedPlane(intersectionPoint, travelPlane, thePointX, thePointY, thePointZ); - final SidedPlane checkPointBound2 = new SidedPlane(thePointX, thePointY, thePointZ, travelPlane, intersectionPoint); + final SidedPlane checkPointBound1 = + new SidedPlane(intersectionPoint, travelPlane, thePointX, thePointY, thePointZ); + final SidedPlane checkPointBound2 = + new SidedPlane(thePointX, thePointY, thePointZ, travelPlane, intersectionPoint); if (checkPointBound1.isFunctionallyIdentical(checkPointBound2)) { - throw new IllegalArgumentException("Dual iterator unreliable when bounds planes are functionally identical"); + throw new IllegalArgumentException( + "Dual iterator unreliable when bounds planes are functionally identical"); } this.checkPointCutoffPlane = checkPointBound1; this.checkPointOtherCutoffPlane = checkPointBound2; // Sanity check - assert testPointCutoffPlane.isWithin(intersectionPoint) : "intersection must be within testPointCutoffPlane"; - assert testPointOtherCutoffPlane.isWithin(intersectionPoint) : "intersection must be within testPointOtherCutoffPlane"; - assert checkPointCutoffPlane.isWithin(intersectionPoint) : "intersection must be within checkPointCutoffPlane"; - assert checkPointOtherCutoffPlane.isWithin(intersectionPoint) : "intersection must be within checkPointOtherCutoffPlane"; - + assert testPointCutoffPlane.isWithin(intersectionPoint) + : "intersection must be within testPointCutoffPlane"; + assert testPointOtherCutoffPlane.isWithin(intersectionPoint) + : "intersection must be within testPointOtherCutoffPlane"; + assert checkPointCutoffPlane.isWithin(intersectionPoint) + : "intersection must be within 
checkPointCutoffPlane"; + assert checkPointOtherCutoffPlane.isWithin(intersectionPoint) + : "intersection must be within checkPointOtherCutoffPlane"; } - + protected void computeInsideOutside() { if (!computedInsideOutside) { // Convert travel plane to a sided plane final Membership intersectionBound1 = new SidedPlane(testPoint, travelPlane, travelPlane.D); // Convert testPoint plane to a sided plane - final Membership intersectionBound2 = new SidedPlane(thePointX, thePointY, thePointZ, testPointPlane, testPointPlane.D); + final Membership intersectionBound2 = + new SidedPlane(thePointX, thePointY, thePointZ, testPointPlane, testPointPlane.D); - assert intersectionBound1.isWithin(intersectionPoint) : "intersection must be within intersectionBound1"; - assert intersectionBound2.isWithin(intersectionPoint) : "intersection must be within intersectionBound2"; + assert intersectionBound1.isWithin(intersectionPoint) + : "intersection must be within intersectionBound1"; + assert intersectionBound2.isWithin(intersectionPoint) + : "intersection must be within intersectionBound2"; // Figure out which of the above/below planes are inside vs. outside. To do this, - // we look for the point that is within the bounds of the testPointPlane and travelPlane. The two sides that intersected there are the inside - // borders. - // Each of these can generate two solutions. We need to refine them to generate only one somehow -- the one in the same area of the world as intersectionPoint. - // Since the travel/testpoint planes have one fixed coordinate, and that is represented by the plane's D value, it should be possible to choose based on the - // point's coordinates. - final GeoPoint[] aboveAbove = travelAbovePlane.findIntersections(planetModel, testPointAbovePlane, intersectionBound1, intersectionBound2); + // we look for the point that is within the bounds of the testPointPlane and travelPlane. + // The two sides that intersected there are the inside borders. + // Each of these can generate two solutions. We need to refine them to generate only one + // somehow -- the one in the same area of the world as intersectionPoint. + // Since the travel/testpoint planes have one fixed coordinate, and that is represented by + // the plane's D value, it should be possible to choose based on the + // point's coordinates. 
+ final GeoPoint[] aboveAbove = + travelAbovePlane.findIntersections( + planetModel, testPointAbovePlane, intersectionBound1, intersectionBound2); assert aboveAbove != null : "Above + above should not be coplanar"; - final GeoPoint[] aboveBelow = travelAbovePlane.findIntersections(planetModel, testPointBelowPlane, intersectionBound1, intersectionBound2); + final GeoPoint[] aboveBelow = + travelAbovePlane.findIntersections( + planetModel, testPointBelowPlane, intersectionBound1, intersectionBound2); assert aboveBelow != null : "Above + below should not be coplanar"; - final GeoPoint[] belowBelow = travelBelowPlane.findIntersections(planetModel, testPointBelowPlane, intersectionBound1, intersectionBound2); + final GeoPoint[] belowBelow = + travelBelowPlane.findIntersections( + planetModel, testPointBelowPlane, intersectionBound1, intersectionBound2); assert belowBelow != null : "Below + below should not be coplanar"; - final GeoPoint[] belowAbove = travelBelowPlane.findIntersections(planetModel, testPointAbovePlane, intersectionBound1, intersectionBound2); + final GeoPoint[] belowAbove = + travelBelowPlane.findIntersections( + planetModel, testPointAbovePlane, intersectionBound1, intersectionBound2); assert belowAbove != null : "Below + above should not be coplanar"; - assert ((aboveAbove.length > 0)?1:0) + ((aboveBelow.length > 0)?1:0) + ((belowBelow.length > 0)?1:0) + ((belowAbove.length > 0)?1:0) == 1 : "Can be exactly one inside point, instead was: aa="+aboveAbove.length+" ab=" + aboveBelow.length+" bb="+ belowBelow.length+" ba=" + belowAbove.length; - + assert ((aboveAbove.length > 0) ? 1 : 0) + + ((aboveBelow.length > 0) ? 1 : 0) + + ((belowBelow.length > 0) ? 1 : 0) + + ((belowAbove.length > 0) ? 1 : 0) + == 1 + : "Can be exactly one inside point, instead was: aa=" + + aboveAbove.length + + " ab=" + + aboveBelow.length + + " bb=" + + belowBelow.length + + " ba=" + + belowAbove.length; + final GeoPoint[] insideInsidePoints; if (aboveAbove.length > 0) { travelInsidePlane = travelAbovePlane; @@ -1494,38 +1964,57 @@ class GeoComplexPolygon extends GeoBasePolygon { testPointOutsidePlane = testPointBelowPlane; insideInsidePoints = belowAbove; } else { - throw new IllegalStateException("Can't find traversal intersection among: "+travelAbovePlane+", "+testPointAbovePlane+", "+travelBelowPlane+", "+testPointBelowPlane); + throw new IllegalStateException( + "Can't find traversal intersection among: " + + travelAbovePlane + + ", " + + testPointAbovePlane + + ", " + + travelBelowPlane + + ", " + + testPointBelowPlane); } - + // Get the inside-inside intersection point - // Picking which point, out of two, that corresponds to the already-selected intersectionPoint, is tricky, but it must be done. - // We expect the choice to be within a small delta of the intersection point in 2 of the dimensions, but not the third + // Picking which point, out of two, that corresponds to the already-selected + // intersectionPoint, is tricky, but it must be done. + // We expect the choice to be within a small delta of the intersection point in 2 of the + // dimensions, but not the third.
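Each such plane pair can cut the world in two points, so the next step has to pick the solution that lies in the same region as intersectionPoint. A standalone sketch of that proximity choice, mirroring the pickProximate() logic further down but with a plain value type standing in for GeoPoint:

    class ProximatePick {
      // Plain stand-in for a 3D point; the real code uses GeoPoint and the
      // computeSquaredDistance() helper that appears later in this class.
      record Point3(double x, double y, double z) {
        double squaredDistance(Point3 other) {
          final double dx = x - other.x;
          final double dy = y - other.y;
          final double dz = z - other.z;
          return dx * dx + dy * dy + dz * dz;
        }
      }

      // Pick whichever candidate is strictly closer to the reference point; a
      // tie, or no candidates at all, is an error, exactly as in pickProximate().
      static Point3 pickProximate(Point3 reference, Point3[] candidates) {
        if (candidates.length == 0) {
          throw new IllegalArgumentException("No off-plane intersection points were found");
        }
        if (candidates.length == 1) {
          return candidates[0];
        }
        final double dist0 = reference.squaredDistance(candidates[0]);
        final double dist1 = reference.squaredDistance(candidates[1]);
        if (dist0 < dist1) {
          return candidates[0];
        } else if (dist1 < dist0) {
          return candidates[1];
        }
        throw new IllegalArgumentException("Neither candidate matched the reference point");
      }
    }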
final GeoPoint insideInsidePoint = pickProximate(insideInsidePoints); - + // Get the outside-outside intersection point - //System.out.println("Computing outside-outside intersection"); - final GeoPoint[] outsideOutsidePoints = testPointOutsidePlane.findIntersections(planetModel, travelOutsidePlane); //these don't add anything: , checkPointCutoffPlane, testPointCutoffPlane); + // System.out.println("Computing outside-outside intersection"); + final GeoPoint[] outsideOutsidePoints = + testPointOutsidePlane.findIntersections( + planetModel, + travelOutsidePlane); // these don't add anything: , checkPointCutoffPlane, + // testPointCutoffPlane); final GeoPoint outsideOutsidePoint = pickProximate(outsideOutsidePoints); - - insideTravelCutoffPlane = new SidedPlane(thePointX, thePointY, thePointZ, travelInsidePlane, insideInsidePoint); - outsideTravelCutoffPlane = new SidedPlane(thePointX, thePointY, thePointZ, travelInsidePlane, outsideOutsidePoint); - insideTestPointCutoffPlane = new SidedPlane(testPoint, testPointInsidePlane, insideInsidePoint); - outsideTestPointCutoffPlane = new SidedPlane(testPoint, testPointOutsidePlane, outsideOutsidePoint); - + + insideTravelCutoffPlane = + new SidedPlane(thePointX, thePointY, thePointZ, travelInsidePlane, insideInsidePoint); + outsideTravelCutoffPlane = + new SidedPlane(thePointX, thePointY, thePointZ, travelInsidePlane, outsideOutsidePoint); + insideTestPointCutoffPlane = + new SidedPlane(testPoint, testPointInsidePlane, insideInsidePoint); + outsideTestPointCutoffPlane = + new SidedPlane(testPoint, testPointOutsidePlane, outsideOutsidePoint); + /* System.out.println("insideTravelCutoffPlane = "+insideTravelCutoffPlane); System.out.println("outsideTravelCutoffPlane = "+outsideTravelCutoffPlane); System.out.println("insideTestPointCutoffPlane = "+insideTestPointCutoffPlane); System.out.println("outsideTestPointCutoffPlane = "+outsideTestPointCutoffPlane); */ - + computedInsideOutside = true; } } private GeoPoint pickProximate(final GeoPoint[] points) { if (points.length == 0) { - throw new IllegalArgumentException("No off-plane intersection points were found; can't compute traversal"); + throw new IllegalArgumentException( + "No off-plane intersection points were found; can't compute traversal"); } else if (points.length == 1) { return points[0]; } else { @@ -1536,38 +2025,50 @@ class GeoComplexPolygon extends GeoBasePolygon { } else if (p2dist < p1dist) { return points[1]; } else { - throw new IllegalArgumentException("Neither off-plane intersection point matched intersection point; intersection = "+intersectionPoint+"; offplane choice 0: "+points[0]+"; offplane choice 1: "+points[1]); + throw new IllegalArgumentException( + "Neither off-plane intersection point matched intersection point; intersection = " + + intersectionPoint + + "; offplane choice 0: " + + points[0] + + "; offplane choice 1: " + + points[1]); } } } - + @Override public int getCrossingCount() { // Doesn't return the actual crossing count -- just gets the even/odd part right return Math.min(innerCrossingCount, outerCrossingCount); } - + @Override public boolean isOnEdge() { return onEdge; } - + @Override public boolean matches(final Edge edge) { - // Early exit if the point is on the edge, in which case we accidentally discovered the answer. + // Early exit if the point is on the edge, in which case we accidentally discovered the + // answer. 
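The remark in getCrossingCount() above, that it "just gets the even/odd part right", is the heart of the whole membership test: every genuine edge crossing toggles in-set status, so only the parity of the count matters. A one-method illustration of the rule, assuming the crossings themselves have been tallied correctly:

    class CrossingParity {
      // In-set status flips on every crossing, so the answer is the test
      // point's known status XOR the low bit of the crossing count.
      static boolean isInSet(boolean testPointInSet, int crossingCount) {
        return ((crossingCount & 1) == 0) == testPointInSet;
      }

      public static void main(String[] args) {
        System.out.println(isInSet(true, 0));  // true: no crossings, still in-set
        System.out.println(isInSet(true, 3));  // false: odd crossings flip the status
        System.out.println(isInSet(false, 4)); // false: even crossings preserve it
      }
    }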
if (edge.isWithin(thePointX, thePointY, thePointZ)) { onEdge = true; return false; } - - // All edges that touch the travel planes get assessed the same. So, for each intersecting edge on both legs: - // (1) If the edge contains the intersection point, we analyze it on only one leg. For the other leg, we do nothing. + + // All edges that touch the travel planes get assessed the same. So, for each intersecting + // edge on both legs: + // (1) If the edge contains the intersection point, we analyze it on only one leg. For the + // other leg, we do nothing. // (2) We compute the crossings of the edge with ALL FOUR inner and outer bounding planes. - // (3) We add the numbers of each kind of crossing to the total for that class of crossing (innerTotal and outerTotal). - // (4) When done all edges tallied in this way, we take min(innerTotal, outerTotal) and assume that is the number of crossings. + // (3) We add the numbers of each kind of crossing to the total for that class of crossing + // (innerTotal and outerTotal). + // (4) When done all edges tallied in this way, we take min(innerTotal, outerTotal) and assume + // that is the number of crossings. // // Q: What if we see the same edge in both traversals? - // A: We should really evaluate it only in one. Keep a hash of the edges we've looked at already and don't process edges twice. + // A: We should really evaluate it only in one. Keep a hash of the edges we've looked at + // already and don't process edges twice. // Every edge should be looked at only once. if (seenEdges != null && seenEdges.contains(edge)) { @@ -1577,7 +2078,7 @@ class GeoComplexPolygon extends GeoBasePolygon { seenEdges = new HashSet<>(); } seenEdges.add(edge); - + // We've never seen this edge before. Evaluate it in the context of inner and outer planes. computeInsideOutside(); @@ -1599,34 +2100,56 @@ class GeoComplexPolygon extends GeoBasePolygon { } } */ - - //System.out.println(""); - //System.out.println("Considering edge "+(edge.startPoint)+" -> "+(edge.endPoint)); - // Some edges are going to be given to us even when there's no real intersection, so do that as a sanity check, first. - final GeoPoint[] travelCrossings = travelPlane.findIntersections(planetModel, edge.plane, checkPointCutoffPlane, checkPointOtherCutoffPlane, edge.startPlane, edge.endPlane); + // System.out.println(""); + // System.out.println("Considering edge " + (edge.startPoint) + " -> " + (edge.endPoint)); + + // Some edges are going to be given to us even when there's no real intersection, so do that + // as a sanity check, first. + final GeoPoint[] travelCrossings = + travelPlane.findIntersections( + planetModel, + edge.plane, + checkPointCutoffPlane, + checkPointOtherCutoffPlane, + edge.startPlane, + edge.endPlane); if (travelCrossings != null && travelCrossings.length == 0) { - //System.out.println(" No intersections with travel plane..."); - final GeoPoint[] testPointCrossings = testPointPlane.findIntersections(planetModel, edge.plane, testPointCutoffPlane, testPointOtherCutoffPlane, edge.startPlane, edge.endPlane); + // System.out.println(" No intersections with travel plane..."); + final GeoPoint[] testPointCrossings = + testPointPlane.findIntersections( + planetModel, + edge.plane, + testPointCutoffPlane, + testPointOtherCutoffPlane, + edge.startPlane, + edge.endPlane); if (testPointCrossings != null && testPointCrossings.length == 0) { - // As a last resort, see if the edge endpoints are on either plane. 
This is sometimes necessary because the - // intersection computation logic might not detect near-miss edges otherwise. - //System.out.println(" No intersections with testpoint plane..."); - if (!travelPlane.evaluateIsZero(edge.startPoint) && !travelPlane.evaluateIsZero(edge.endPoint) && - !testPointPlane.evaluateIsZero(edge.startPoint) && !testPointPlane.evaluateIsZero(edge.endPoint)) { + // As a last resort, see if the edge endpoints are on either plane. This is sometimes + // necessary because the intersection computation logic might not detect near-miss + // edges otherwise. + // System.out.println(" No intersections with testpoint plane..."); + if (!travelPlane.evaluateIsZero(edge.startPoint) + && !travelPlane.evaluateIsZero(edge.endPoint) + && !testPointPlane.evaluateIsZero(edge.startPoint) + && !testPointPlane.evaluateIsZero(edge.endPoint)) { return true; } else { - //System.out.println(" Startpoint/travelPlane="+travelPlane.evaluate(edge.startPoint)+" Startpoint/testPointPlane="+testPointPlane.evaluate(edge.startPoint)); - //System.out.println(" Endpoint/travelPlane="+travelPlane.evaluate(edge.endPoint)+" Endpoint/testPointPlane="+testPointPlane.evaluate(edge.endPoint)); + // System.out.println(" Startpoint/travelPlane=" + // + travelPlane.evaluate(edge.startPoint) + // + " Startpoint/testPointPlane=" + testPointPlane.evaluate(edge.startPoint)); + // System.out.println(" Endpoint/travelPlane=" + // + travelPlane.evaluate(edge.endPoint) + // + " Endpoint/testPointPlane=" + testPointPlane.evaluate(edge.endPoint)); } } else { - //System.out.println(" Intersection found with testPoint plane..."); + // System.out.println(" Intersection found with testPoint plane..."); } } else { - //System.out.println(" Intersection found with travel plane..."); + // System.out.println(" Intersection found with travel plane..."); } - //System.out.println(" Edge intersects travel or testPoint plane"); + // System.out.println(" Edge intersects travel or testPoint plane"); /* System.out.println( " start point travel dist="+travelPlane.evaluate(edge.startPoint)+"; end point travel dist="+travelPlane.evaluate(edge.endPoint)); @@ -1641,40 +2164,74 @@ class GeoComplexPolygon extends GeoBasePolygon { System.out.println( " start point testpoint below dist="+testPointBelowPlane.evaluate(edge.startPoint)+"; end point testpoint below dist="+testPointBelowPlane.evaluate(edge.endPoint)); */ - - // Determine crossings of this edge against all inside/outside planes. There's no further need to look at the actual travel plane itself. - //System.out.println(" Assessing inner crossings..."); - innerCrossingCount += countCrossings(edge, travelInsidePlane, checkPointCutoffPlane, insideTravelCutoffPlane, testPointInsidePlane, testPointCutoffPlane, insideTestPointCutoffPlane); - //System.out.println(" Assessing outer crossings..."); - outerCrossingCount += countCrossings(edge, travelOutsidePlane, checkPointCutoffPlane, outsideTravelCutoffPlane, testPointOutsidePlane, testPointCutoffPlane, outsideTestPointCutoffPlane); + + // Determine crossings of this edge against all inside/outside planes. There's no further + // need to look at the actual travel plane itself.
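Keeping two independent tallies appears to be a robustness device: a numerically marginal edge may register a spurious crossing against the inner envelope or the outer one, but a real crossing registers against both, so the min(innerCrossingCount, outerCrossingCount) taken in getCrossingCount() preserves the correct parity. A generic sketch of the tallying pattern; the ToIntFunction arguments are stand-ins for the two seven-argument countCrossings() calls that follow, not the real signatures:

    import java.util.function.ToIntFunction;

    class DualTally {
      // Tally each edge against both envelope families and trust the smaller
      // total; spurious grazes inflate at most one of the two counts.
      static <E> int countVerifiedCrossings(
          Iterable<E> edges, ToIntFunction<E> innerCounter, ToIntFunction<E> outerCounter) {
        int innerTotal = 0;
        int outerTotal = 0;
        for (E edge : edges) {
          innerTotal += innerCounter.applyAsInt(edge);
          outerTotal += outerCounter.applyAsInt(edge);
        }
        return Math.min(innerTotal, outerTotal);
      }
    }

Nothing here depends on spatial3d types; the same pattern works for any pair of redundant counters whose minimum is parity-safe.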
+ // System.out.println(" Assessing inner crossings..."); + innerCrossingCount += + countCrossings( + edge, + travelInsidePlane, + checkPointCutoffPlane, + insideTravelCutoffPlane, + testPointInsidePlane, + testPointCutoffPlane, + insideTestPointCutoffPlane); + // System.out.println(" Assessing outer crossings..."); + outerCrossingCount += + countCrossings( + edge, + travelOutsidePlane, + checkPointCutoffPlane, + outsideTravelCutoffPlane, + testPointOutsidePlane, + testPointCutoffPlane, + outsideTestPointCutoffPlane); /* final GeoPoint[] travelInnerCrossings = computeCrossings(travelInsidePlane, edge, checkPointCutoffPlane, insideTravelCutoffPlane); final GeoPoint[] travelOuterCrossings = computeCrossings(travelOutsidePlane, edge, checkPointCutoffPlane, outsideTravelCutoffPlane); final GeoPoint[] testPointInnerCrossings = computeCrossings(testPointInsidePlane, edge, testPointCutoffPlane, insideTestPointCutoffPlane); final GeoPoint[] testPointOuterCrossings = computeCrossings(testPointOutsidePlane, edge, testPointCutoffPlane, outsideTestPointCutoffPlane); */ - + return true; } - /** Find the intersections with a pair of envelope planes, and assess those intersections for duplication and for - * whether they truly describe crossings. - */ - private int countCrossings(final Edge edge, - final Plane travelEnvelopePlane, final Membership travelEnvelopeBound1, final Membership travelEnvelopeBound2, - final Plane testPointEnvelopePlane, final Membership testPointEnvelopeBound1, final Membership testPointEnvelopeBound2) { - final GeoPoint[] travelIntersections = edge.plane.findIntersections(planetModel, travelEnvelopePlane, travelEnvelopeBound1, travelEnvelopeBound2); - final GeoPoint[] testPointIntersections = edge.plane.findIntersections(planetModel, testPointEnvelopePlane, testPointEnvelopeBound1, testPointEnvelopeBound2); + /** + * Find the intersections with a pair of envelope planes, and assess those intersections for + * duplication and for whether they truly describe crossings. 
+ */ + private int countCrossings( + final Edge edge, + final Plane travelEnvelopePlane, + final Membership travelEnvelopeBound1, + final Membership travelEnvelopeBound2, + final Plane testPointEnvelopePlane, + final Membership testPointEnvelopeBound1, + final Membership testPointEnvelopeBound2) { + final GeoPoint[] travelIntersections = + edge.plane.findIntersections( + planetModel, travelEnvelopePlane, travelEnvelopeBound1, travelEnvelopeBound2); + final GeoPoint[] testPointIntersections = + edge.plane.findIntersections( + planetModel, + testPointEnvelopePlane, + testPointEnvelopeBound1, + testPointEnvelopeBound2); int crossings = 0; if (travelIntersections != null) { for (final GeoPoint intersection : travelIntersections) { - if (edge.startPlane.strictlyWithin(intersection) && edge.endPlane.strictlyWithin(intersection)) { + if (edge.startPlane.strictlyWithin(intersection) + && edge.endPlane.strictlyWithin(intersection)) { // Make sure it's not a dup boolean notDup = true; if (testPointIntersections != null) { for (final GeoPoint otherIntersection : testPointIntersections) { - if (edge.startPlane.strictlyWithin(otherIntersection) && edge.endPlane.strictlyWithin(otherIntersection) && intersection.isNumericallyIdentical(otherIntersection)) { - //System.out.println(" Points "+intersection+" and "+otherIntersection+" are duplicates"); + if (edge.startPlane.strictlyWithin(otherIntersection) + && edge.endPlane.strictlyWithin(otherIntersection) + && intersection.isNumericallyIdentical(otherIntersection)) { + // System.out.println(" Points " + intersection + " and " + // + otherIntersection + " are duplicates"); notDup = false; break; } @@ -1684,99 +2241,135 @@ class GeoComplexPolygon extends GeoBasePolygon { continue; } // It's unique, so assess it - //System.out.println(" Assessing travel envelope intersection point "+intersection+", travelPlane distance="+travelPlane.evaluate(intersection)+"..."); - crossings += edgeCrossesEnvelope(edge.plane, intersection, travelEnvelopePlane)?1:0; + // System.out.println(" Assessing travel envelope intersection point " + // + intersection + ", travelPlane distance=" + // + travelPlane.evaluate(intersection) + "..."); + crossings += edgeCrossesEnvelope(edge.plane, intersection, travelEnvelopePlane) ? 1 : 0; } } } if (testPointIntersections != null) { for (final GeoPoint intersection : testPointIntersections) { - if (edge.startPlane.strictlyWithin(intersection) && edge.endPlane.strictlyWithin(intersection)) { + if (edge.startPlane.strictlyWithin(intersection) + && edge.endPlane.strictlyWithin(intersection)) { // It's unique, so assess it - //System.out.println(" Assessing testpoint envelope intersection point "+intersection+", testPointPlane distance="+testPointPlane.evaluate(intersection)+"..."); - crossings += edgeCrossesEnvelope(edge.plane, intersection, testPointEnvelopePlane)?1:0; + // System.out.println(" Assessing testpoint envelope intersection point " + // + intersection + ", testPointPlane distance=" + // + testPointPlane.evaluate(intersection) + "..."); + crossings += + edgeCrossesEnvelope(edge.plane, intersection, testPointEnvelopePlane) ? 1 : 0; } } } return crossings; } - /** Return true if the edge crosses the envelope plane, given the envelope intersection point. 
- */ - private boolean edgeCrossesEnvelope(final Plane edgePlane, final GeoPoint intersectionPoint, final Plane envelopePlane) { - final GeoPoint[] adjoiningPoints = findAdjoiningPoints(edgePlane, intersectionPoint, envelopePlane); + /** + * Return true if the edge crosses the envelope plane, given the envelope intersection point. + */ + private boolean edgeCrossesEnvelope( + final Plane edgePlane, final GeoPoint intersectionPoint, final Plane envelopePlane) { + final GeoPoint[] adjoiningPoints = + findAdjoiningPoints(edgePlane, intersectionPoint, envelopePlane); if (adjoiningPoints == null) { // Couldn't find good adjoining points, so just assume there is a crossing. return true; } int withinCount = 0; for (final GeoPoint adjoining : adjoiningPoints) { - if ((travelPlane.evaluateIsZero(adjoining) && checkPointCutoffPlane.isWithin(adjoining) && checkPointOtherCutoffPlane.isWithin(adjoining)) || - (testPointPlane.evaluateIsZero(adjoining) && testPointCutoffPlane.isWithin(adjoining) && testPointOtherCutoffPlane.isWithin(adjoining))) { - //System.out.println(" Adjoining point "+adjoining+" (intersection dist = "+intersectionPoint.linearDistance(adjoining)+") is within"); + if ((travelPlane.evaluateIsZero(adjoining) + && checkPointCutoffPlane.isWithin(adjoining) + && checkPointOtherCutoffPlane.isWithin(adjoining)) + || (testPointPlane.evaluateIsZero(adjoining) + && testPointCutoffPlane.isWithin(adjoining) + && testPointOtherCutoffPlane.isWithin(adjoining))) { + // System.out.println(" Adjoining point " + adjoining + // + " (intersection dist = " + intersectionPoint.linearDistance(adjoining) + // + ") is within"); withinCount++; } else { - //System.out.println(" Adjoining point "+adjoining+" (intersection dist = "+intersectionPoint.linearDistance(adjoining)+"; travelPlane dist="+travelPlane.evaluate(adjoining)+"; testPointPlane dist="+testPointPlane.evaluate(adjoining)+") is not within"); + // System.out.println(" Adjoining point " + adjoining + // + " (intersection dist = " + intersectionPoint.linearDistance(adjoining) + // + "; travelPlane dist=" + travelPlane.evaluate(adjoining) + // + "; testPointPlane dist=" + testPointPlane.evaluate(adjoining) + // + ") is not within"); } } return (withinCount & 1) != 0; } - } - - /** This is the amount we go, roughly, in both directions, to find adjoining points to test. If we go too far, - * we might miss a transition, but if we go too little, we might not see it either due to numerical issues. - */ - private final static double DELTA_DISTANCE = Vector.MINIMUM_RESOLUTION; - /** This is the maximum number of iterations. If we get this high, effectively the planes are parallel, and we - * treat that as a crossing. - */ - private final static int MAX_ITERATIONS = 100; - /** This is the amount off of the envelope plane that we count as "enough" for a valid crossing assessment. */ - private final static double OFF_PLANE_AMOUNT = Vector.MINIMUM_RESOLUTION * 0.1; - - /** Given a point on the plane and the ellipsoid, this method looks for a pair of adjoining points on either side of the plane, which are - * about MINIMUM_RESOLUTION away from the given point. This only works for planes which go through the center of the world. - * Returns null if the planes are effectively parallel and reasonable adjoining points cannot be determined. + /** + * This is the amount we go, roughly, in both directions, to find adjoining points to test. If we + * go too far, we might miss a transition, but if we go too little, we might not see it either due + * to numerical issues. 
*/ - private GeoPoint[] findAdjoiningPoints(final Plane plane, final GeoPoint pointOnPlane, final Plane envelopePlane) { + private static final double DELTA_DISTANCE = Vector.MINIMUM_RESOLUTION; + /** + * This is the maximum number of iterations. If we get this high, effectively the planes are + * parallel, and we treat that as a crossing. + */ + private static final int MAX_ITERATIONS = 100; + /** + * This is the amount off of the envelope plane that we count as "enough" for a valid crossing + * assessment. + */ + private static final double OFF_PLANE_AMOUNT = Vector.MINIMUM_RESOLUTION * 0.1; + + /** + * Given a point on the plane and the ellipsoid, this method looks for a pair of adjoining points + * on either side of the plane, which are about MINIMUM_RESOLUTION away from the given point. This + * only works for planes which go through the center of the world. Returns null if the planes are + * effectively parallel and reasonable adjoining points cannot be determined. + */ + private GeoPoint[] findAdjoiningPoints( + final Plane plane, final GeoPoint pointOnPlane, final Plane envelopePlane) { // Compute a normalized perpendicular vector final Vector perpendicular = new Vector(plane, pointOnPlane); double distanceFactor = 0.0; for (int i = 0; i < MAX_ITERATIONS; i++) { distanceFactor += DELTA_DISTANCE; // Compute two new points along this vector from the original - final GeoPoint pointA = planetModel.createSurfacePoint(pointOnPlane.x + perpendicular.x * distanceFactor, - pointOnPlane.y + perpendicular.y * distanceFactor, - pointOnPlane.z + perpendicular.z * distanceFactor); - final GeoPoint pointB = planetModel.createSurfacePoint(pointOnPlane.x - perpendicular.x * distanceFactor, - pointOnPlane.y - perpendicular.y * distanceFactor, - pointOnPlane.z - perpendicular.z * distanceFactor); - if (Math.abs(envelopePlane.evaluate(pointA)) > OFF_PLANE_AMOUNT && Math.abs(envelopePlane.evaluate(pointB)) > OFF_PLANE_AMOUNT) { - //System.out.println("Distance: "+computeSquaredDistance(rval[0], pointOnPlane)+" and "+computeSquaredDistance(rval[1], pointOnPlane)); - return new GeoPoint[]{pointA, pointB}; + final GeoPoint pointA = + planetModel.createSurfacePoint( + pointOnPlane.x + perpendicular.x * distanceFactor, + pointOnPlane.y + perpendicular.y * distanceFactor, + pointOnPlane.z + perpendicular.z * distanceFactor); + final GeoPoint pointB = + planetModel.createSurfacePoint( + pointOnPlane.x - perpendicular.x * distanceFactor, + pointOnPlane.y - perpendicular.y * distanceFactor, + pointOnPlane.z - perpendicular.z * distanceFactor); + if (Math.abs(envelopePlane.evaluate(pointA)) > OFF_PLANE_AMOUNT + && Math.abs(envelopePlane.evaluate(pointB)) > OFF_PLANE_AMOUNT) { + // System.out.println("Distance: " + // + computeSquaredDistance(rval[0], pointOnPlane) + // + " and " + computeSquaredDistance(rval[1], pointOnPlane)); + return new GeoPoint[] {pointA, pointB}; } // Loop back around and use a bigger delta } // Had to abort, so return null. - //System.out.println(" Adjoining points not found. Are planes parallel? edge = "+plane+"; envelope = "+envelopePlane+"; perpendicular = "+perpendicular); + // System.out.println(" Adjoining points not found. Are planes parallel?" 
+ // + " edge = " + plane + " envelope = " + envelopePlane + // + "; perpendicular = " + perpendicular); return null; } - private static double computeSquaredDistance(final GeoPoint checkPoint, final GeoPoint intersectionPoint) { + private static double computeSquaredDistance( + final GeoPoint checkPoint, final GeoPoint intersectionPoint) { final double distanceX = checkPoint.x - intersectionPoint.x; final double distanceY = checkPoint.y - intersectionPoint.y; final double distanceZ = checkPoint.z - intersectionPoint.z; return distanceX * distanceX + distanceY * distanceY + distanceZ * distanceZ; } - + @Override public boolean equals(Object o) { - if (!(o instanceof GeoComplexPolygon)) - return false; + if (!(o instanceof GeoComplexPolygon)) return false; final GeoComplexPolygon other = (GeoComplexPolygon) o; - return super.equals(other) && testPoint1InSet == other.testPoint1InSet + return super.equals(other) + && testPoint1InSet == other.testPoint1InSet && testPoint1.equals(other.testPoint1) && pointsList.equals(other.pointsList); } @@ -1796,9 +2389,21 @@ class GeoComplexPolygon extends GeoBasePolygon { for (final Edge shapeStartEdge : shapeStartEdges) { fillInEdgeDescription(edgeDescription, shapeStartEdge); } - return "GeoComplexPolygon: {planetmodel=" + planetModel + ", number of shapes="+shapeStartEdges.length+", address="+ Integer.toHexString(hashCode())+", testPoint="+testPoint1+", testPointInSet="+testPoint1InSet+", shapes={"+edgeDescription+"}}"; + return "GeoComplexPolygon: {planetmodel=" + + planetModel + + ", number of shapes=" + + shapeStartEdges.length + + ", address=" + + Integer.toHexString(hashCode()) + + ", testPoint=" + + testPoint1 + + ", testPointInSet=" + + testPoint1InSet + + ", shapes={" + + edgeDescription + + "}}"; } - + private static void fillInEdgeDescription(final StringBuilder description, final Edge startEdge) { description.append(" {"); Edge currentEdge = startEdge; @@ -1819,6 +2424,4 @@ class GeoComplexPolygon extends GeoBasePolygon { edgeCounter++; } } - } - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoCompositeAreaShape.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoCompositeAreaShape.java index 118144d5c4d..0110b1a26a7 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoCompositeAreaShape.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoCompositeAreaShape.java @@ -17,8 +17,8 @@ package org.apache.lucene.spatial3d.geom; -import java.io.InputStream; import java.io.IOException; +import java.io.InputStream; /** * GeoCompositeAreaShape is a set of GeoAreaShape's, treated as a unit. @@ -27,26 +27,27 @@ import java.io.IOException; */ public class GeoCompositeAreaShape extends GeoBaseCompositeAreaShape { - /** - * Constructor. - */ + /** Constructor. */ public GeoCompositeAreaShape(PlanetModel planetModel) { super(planetModel); } /** * Constructor for deserialization. + * * @param planetModel is the planet model. * @param inputStream is the input stream. 
*/ - public GeoCompositeAreaShape(final PlanetModel planetModel, final InputStream inputStream) throws IOException { + public GeoCompositeAreaShape(final PlanetModel planetModel, final InputStream inputStream) + throws IOException { super(planetModel, inputStream, GeoAreaShape.class); } @Override public boolean equals(Object o) { - if (!(o instanceof GeoCompositeAreaShape)) + if (!(o instanceof GeoCompositeAreaShape)) { return false; + } return super.equals(o); } @@ -54,5 +55,4 @@ public class GeoCompositeAreaShape extends GeoBaseCompositeAreaShape implements GeoMembershipShape { +public class GeoCompositeMembershipShape extends GeoBaseCompositeMembershipShape + implements GeoMembershipShape { - /** - * Constructor. - */ + /** Constructor. */ public GeoCompositeMembershipShape(PlanetModel planetModel) { super(planetModel); } /** * Constructor for deserialization. + * * @param planetModel is the planet model. * @param inputStream is the input stream. */ - public GeoCompositeMembershipShape(final PlanetModel planetModel, final InputStream inputStream) throws IOException { + public GeoCompositeMembershipShape(final PlanetModel planetModel, final InputStream inputStream) + throws IOException { super(planetModel, inputStream, GeoMembershipShape.class); } @Override public boolean equals(Object o) { - if (!(o instanceof GeoCompositeMembershipShape)) + if (!(o instanceof GeoCompositeMembershipShape)) { return false; + } return super.equals(o); } @@ -54,4 +56,3 @@ public class GeoCompositeMembershipShape extends GeoBaseCompositeMembershipShape return "GeoCompositeMembershipShape: {" + shapes + '}'; } } - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoCompositePolygon.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoCompositePolygon.java index f9f3a95a391..8769778c607 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoCompositePolygon.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoCompositePolygon.java @@ -16,35 +16,38 @@ */ package org.apache.lucene.spatial3d.geom; -import java.io.InputStream; import java.io.IOException; +import java.io.InputStream; /** - * GeoCompositePolygon is a specific implementation of GeoCompositeAreaShape, which implements GeoPolygon explicitly. + * GeoCompositePolygon is a specific implementation of GeoCompositeAreaShape, which implements + * GeoPolygon explicitly. * * @lucene.experimental */ -public class GeoCompositePolygon extends GeoBaseCompositeAreaShape implements GeoPolygon { - /** - * Constructor. - */ +public class GeoCompositePolygon extends GeoBaseCompositeAreaShape + implements GeoPolygon { + /** Constructor. */ public GeoCompositePolygon(PlanetModel planetModel) { super(planetModel); } /** * Constructor for deserialization. + * * @param planetModel is the planet model. * @param inputStream is the input stream. 
*/ - public GeoCompositePolygon(final PlanetModel planetModel, final InputStream inputStream) throws IOException { + public GeoCompositePolygon(final PlanetModel planetModel, final InputStream inputStream) + throws IOException { super(planetModel, inputStream, GeoPolygon.class); } @Override public boolean equals(Object o) { - if (!(o instanceof GeoCompositePolygon)) + if (!(o instanceof GeoCompositePolygon)) { return false; + } return super.equals(o); } @@ -53,4 +56,3 @@ public class GeoCompositePolygon extends GeoBaseCompositeAreaShape i return "GeoCompositePolygon: {" + shapes + '}'; } } - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoConcavePolygon.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoConcavePolygon.java index 692d47458b6..203d6bd0fa1 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoConcavePolygon.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoConcavePolygon.java @@ -16,20 +16,19 @@ */ package org.apache.lucene.spatial3d.geom; -import java.util.ArrayList; -import java.util.BitSet; -import java.util.List; -import java.util.HashMap; -import java.util.Map; +import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.io.IOException; +import java.util.ArrayList; +import java.util.BitSet; +import java.util.HashMap; +import java.util.List; +import java.util.Map; /** - * GeoConcavePolygon objects are generic building blocks of more complex structures. - * The only restrictions on these objects are: (1) they must be concave; (2) they must have - * a maximum extent larger than PI. Violating either one of these limits will - * cause the logic to fail. + * GeoConcavePolygon objects are generic building blocks of more complex structures. The only + * restrictions on these objects are: (1) they must be concave; (2) they must have a maximum extent + * larger than PI. Violating either one of these limits will cause the logic to fail. * * @lucene.internal */ @@ -38,7 +37,7 @@ class GeoConcavePolygon extends GeoBasePolygon { protected final List points; /** A bitset describing, for each edge, whether it is internal or not */ protected final BitSet isInternalEdges; - /** The list of holes. If a point is in the hole, it is *not* in the polygon */ + /** The list of holes. If a point is in the hole, it is *not* in the polygon */ protected final List holes; /** A list of edges */ @@ -59,23 +58,25 @@ class GeoConcavePolygon extends GeoBasePolygon { protected Map nextBrotherMap = null; /** - * Create a concave polygon from a list of points. The first point must be on the - * external edge. - *@param planetModel is the planet model. - *@param pointList is the list of points to create the polygon from. + * Create a concave polygon from a list of points. The first point must be on the external edge. + * + * @param planetModel is the planet model. + * @param pointList is the list of points to create the polygon from. */ public GeoConcavePolygon(final PlanetModel planetModel, final List pointList) { this(planetModel, pointList, null); } - + /** - * Create a concave polygon from a list of points. The first point must be on the - * external edge. - *@param planetModel is the planet model. - *@param pointList is the list of points to create the polygon from. - *@param holes is the list of GeoPolygon objects that describe holes in the concave polygon. Null == no holes. + * Create a concave polygon from a list of points. The first point must be on the external edge. 
+ * + * @param planetModel is the planet model. + * @param pointList is the list of points to create the polygon from. + * @param holes is the list of GeoPolygon objects that describe holes in the concave polygon. Null + * == no holes. */ - public GeoConcavePolygon(final PlanetModel planetModel, final List pointList, final List holes) { + public GeoConcavePolygon( + final PlanetModel planetModel, final List pointList, final List holes) { super(planetModel); this.points = pointList; if (holes != null && holes.size() == 0) { @@ -88,34 +89,39 @@ class GeoConcavePolygon extends GeoBasePolygon { } /** - * Create a concave polygon from a list of points, keeping track of which boundaries - * are internal. This is used when creating a polygon as a building block for another shape. - *@param planetModel is the planet model. - *@param pointList is the set of points to create the polygon from. - *@param internalEdgeFlags is a bitset describing whether each edge is internal or not. - *@param returnEdgeInternal is true when the final return edge is an internal one. + * Create a concave polygon from a list of points, keeping track of which boundaries are internal. + * This is used when creating a polygon as a building block for another shape. + * + * @param planetModel is the planet model. + * @param pointList is the set of points to create the polygon from. + * @param internalEdgeFlags is a bitset describing whether each edge is internal or not. + * @param returnEdgeInternal is true when the final return edge is an internal one. */ - public GeoConcavePolygon(final PlanetModel planetModel, - final List pointList, - final BitSet internalEdgeFlags, - final boolean returnEdgeInternal) { + public GeoConcavePolygon( + final PlanetModel planetModel, + final List pointList, + final BitSet internalEdgeFlags, + final boolean returnEdgeInternal) { this(planetModel, pointList, null, internalEdgeFlags, returnEdgeInternal); } /** - * Create a concave polygon from a list of points, keeping track of which boundaries - * are internal. This is used when creating a polygon as a building block for another shape. - *@param planetModel is the planet model. - *@param pointList is the set of points to create the polygon from. - *@param holes is the list of GeoPolygon objects that describe holes in the concave polygon. Null == no holes. - *@param internalEdgeFlags is a bitset describing whether each edge is internal or not. - *@param returnEdgeInternal is true when the final return edge is an internal one. + * Create a concave polygon from a list of points, keeping track of which boundaries are internal. + * This is used when creating a polygon as a building block for another shape. + * + * @param planetModel is the planet model. + * @param pointList is the set of points to create the polygon from. + * @param holes is the list of GeoPolygon objects that describe holes in the concave polygon. Null + * == no holes. + * @param internalEdgeFlags is a bitset describing whether each edge is internal or not. + * @param returnEdgeInternal is true when the final return edge is an internal one. 
*/ - public GeoConcavePolygon(final PlanetModel planetModel, - final List pointList, - final List holes, - final BitSet internalEdgeFlags, - final boolean returnEdgeInternal) { + public GeoConcavePolygon( + final PlanetModel planetModel, + final List pointList, + final List holes, + final BitSet internalEdgeFlags, + final boolean returnEdgeInternal) { super(planetModel); this.points = pointList; if (holes != null && holes.size() == 0) { @@ -128,30 +134,33 @@ class GeoConcavePolygon extends GeoBasePolygon { } /** - * Create a concave polygon, with a starting latitude and longitude. - * Accepts only values in the following ranges: lat: {@code -PI/2 -> PI/2}, lon: {@code -PI -> PI} - *@param planetModel is the planet model. - *@param startLatitude is the latitude of the first point. - *@param startLongitude is the longitude of the first point. + * Create a concave polygon, with a starting latitude and longitude. Accepts only values in the + * following ranges: lat: {@code -PI/2 -> PI/2}, lon: {@code -PI -> PI} + * + * @param planetModel is the planet model. + * @param startLatitude is the latitude of the first point. + * @param startLongitude is the longitude of the first point. */ - public GeoConcavePolygon(final PlanetModel planetModel, - final double startLatitude, - final double startLongitude) { + public GeoConcavePolygon( + final PlanetModel planetModel, final double startLatitude, final double startLongitude) { this(planetModel, startLatitude, startLongitude, null); } - + /** - * Create a concave polygon, with a starting latitude and longitude. - * Accepts only values in the following ranges: lat: {@code -PI/2 -> PI/2}, lon: {@code -PI -> PI} - *@param planetModel is the planet model. - *@param startLatitude is the latitude of the first point. - *@param startLongitude is the longitude of the first point. - *@param holes is the list of GeoPolygon objects that describe holes in the concave polygon. Null == no holes. + * Create a concave polygon, with a starting latitude and longitude. Accepts only values in the + * following ranges: lat: {@code -PI/2 -> PI/2}, lon: {@code -PI -> PI} + * + * @param planetModel is the planet model. + * @param startLatitude is the latitude of the first point. + * @param startLongitude is the longitude of the first point. + * @param holes is the list of GeoPolygon objects that describe holes in the concave polygon. Null + * == no holes. */ - public GeoConcavePolygon(final PlanetModel planetModel, - final double startLatitude, - final double startLongitude, - final List holes) { + public GeoConcavePolygon( + final PlanetModel planetModel, + final double startLatitude, + final double startLongitude, + final List holes) { super(planetModel); points = new ArrayList<>(); if (holes != null && holes.size() == 0) { @@ -164,38 +173,45 @@ class GeoConcavePolygon extends GeoBasePolygon { } /** - * Add a point to the polygon. - * Accepts only values in the following ranges: lat: {@code -PI/2 -> PI/2}, lon: {@code -PI -> PI} + * Add a point to the polygon. Accepts only values in the following ranges: lat: {@code -PI/2 -> + * PI/2}, lon: {@code -PI -> PI} * - * @param latitude is the latitude of the next point. - * @param longitude is the longitude of the next point. - * @param isInternalEdge is true if the edge just added with this point should be considered "internal", and not - * intersected as part of the intersects() operation. + * @param latitude is the latitude of the next point. + * @param longitude is the longitude of the next point. 
+ * @param isInternalEdge is true if the edge just added with this point should be considered + * "internal", and not intersected as part of the intersects() operation. */ - public void addPoint(final double latitude, final double longitude, final boolean isInternalEdge) { - if (isDone) + public void addPoint( + final double latitude, final double longitude, final boolean isInternalEdge) { + if (isDone) { throw new IllegalStateException("Can't call addPoint() if done() already called"); - if (isInternalEdge) + } + if (isInternalEdge) { isInternalEdges.set(points.size() - 1); + } points.add(new GeoPoint(planetModel, latitude, longitude)); } /** * Finish the polygon, by connecting the last added point with the starting point. - *@param isInternalReturnEdge is true if the return edge (back to start) is an internal one. + * + * @param isInternalReturnEdge is true if the return edge (back to start) is an internal one. */ public void done(final boolean isInternalReturnEdge) { - if (isDone) + if (isDone) { throw new IllegalStateException("Can't call done() more than once"); + } // If fewer than 3 points, can't do it. - if (points.size() < 3) + if (points.size() < 3) { throw new IllegalArgumentException("Polygon needs at least three points."); + } - if (isInternalReturnEdge) + if (isInternalReturnEdge) { isInternalEdges.set(points.size() - 1); + } isDone = true; - + // Time to construct the planes. If the polygon is truly concave then any adjacent point // to a segment can provide an exterior measurement. Note: We build the true planes // here and use the logic to return what *isn't* inside all of them. @@ -221,12 +237,13 @@ class GeoConcavePolygon extends GeoBasePolygon { throw new IllegalArgumentException("Polygon points are all coplanar"); } final GeoPoint check = points.get(endPointIndex); - //System.out.println("Created edge "+sp+" using start="+start+" end="+end+" check="+check); + // System.out.println("Created edge " + sp + " using start=" + start + // + " end=" + end + " check=" + check); edges[i] = new SidedPlane(check, false, start, end); invertedEdges[i] = new SidedPlane(edges[i]); - notableEdgePoints[i] = new GeoPoint[]{start, end}; + notableEdgePoints[i] = new GeoPoint[] {start, end}; } - + // For each edge, create a bounds object. 
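  // Aside: a minimal sketch (hypothetical name, plain booleans) of the membership rule the
  // comment above describes -- "return what *isn't* inside all of them" is De Morgan's law
  // applied over the true, exterior-sided planes:
  static boolean concaveWithin(boolean[] insideTrueEdge) {
    for (boolean inside : insideTrueEdge) {
      if (!inside) {
        return true; // outside at least one true plane => inside the concave shape
      }
    }
    return false; // inside every true plane => in the excluded convex pocket
  }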
eitherBounds = new HashMap<>(edges.length); prevBrotherMap = new HashMap<>(edges.length); @@ -234,41 +251,45 @@ class GeoConcavePolygon extends GeoBasePolygon { for (int edgeIndex = 0; edgeIndex < edges.length; edgeIndex++) { final SidedPlane edge = edges[edgeIndex]; final SidedPlane invertedEdge = invertedEdges[edgeIndex]; - int bound1Index = legalIndex(edgeIndex+1); + int bound1Index = legalIndex(edgeIndex + 1); while (invertedEdges[bound1Index].isNumericallyIdentical(invertedEdge)) { if (bound1Index == edgeIndex) { - throw new IllegalArgumentException("Constructed planes are all coplanar: "+points); + throw new IllegalArgumentException("Constructed planes are all coplanar: " + points); } bound1Index = legalIndex(bound1Index + 1); } - int bound2Index = legalIndex(edgeIndex-1); + int bound2Index = legalIndex(edgeIndex - 1); while (invertedEdges[bound2Index].isNumericallyIdentical(invertedEdge)) { if (bound2Index == edgeIndex) { - throw new IllegalArgumentException("Constructed planes are all coplanar: "+points); + throw new IllegalArgumentException("Constructed planes are all coplanar: " + points); } bound2Index = legalIndex(bound2Index - 1); } // Also confirm that all interior points are within the bounds int startingIndex = bound2Index; while (true) { - startingIndex = legalIndex(startingIndex+1); + startingIndex = legalIndex(startingIndex + 1); if (startingIndex == bound1Index) { break; } final GeoPoint interiorPoint = points.get(startingIndex); - if (!invertedEdges[bound1Index].isWithin(interiorPoint) || !invertedEdges[bound2Index].isWithin(interiorPoint)) { - throw new IllegalArgumentException("Concave polygon has a side that is more than 180 degrees"); + if (!invertedEdges[bound1Index].isWithin(interiorPoint) + || !invertedEdges[bound2Index].isWithin(interiorPoint)) { + throw new IllegalArgumentException( + "Concave polygon has a side that is more than 180 degrees"); } } - eitherBounds.put(edge, new EitherBound(invertedEdges[bound1Index], invertedEdges[bound2Index])); - // When we are done with this cycle, we'll need to build the intersection bound for each edge and its brother. + eitherBounds.put( + edge, new EitherBound(invertedEdges[bound1Index], invertedEdges[bound2Index])); + // When we are done with this cycle, we'll need to build the intersection bound for each edge + // and its brother. // For now, keep track of the relationships. nextBrotherMap.put(invertedEdge, invertedEdges[bound1Index]); prevBrotherMap.put(invertedEdge, invertedEdges[bound2Index]); } - // Pick an edge point arbitrarily from the outer polygon. Glom this together with all edge points from - // inner polygons. + // Pick an edge point arbitrarily from the outer polygon. Glom this together with all edge + // points from inner polygons. int edgePointCount = 1; if (holes != null) { for (final GeoPolygon hole : holes) { @@ -290,12 +311,13 @@ class GeoConcavePolygon extends GeoBasePolygon { if (isWithinHoles(points.get(0))) { throw new IllegalArgumentException("Polygon edge intersects a polygon hole; not allowed"); } - } - /** Check if a point is within the provided holes. - *@param point point to check. - *@return true if the point is within any of the holes. + /** + * Check if a point is within the provided holes. + * + * @param point point to check. + * @return true if the point is within any of the holes. 
*/ protected boolean isWithinHoles(final GeoPoint point) { if (holes != null) { @@ -308,13 +330,16 @@ class GeoConcavePolygon extends GeoBasePolygon { return false; } - /** Compute a legal point index from a possibly illegal one, that may have wrapped. - *@param index is the index. - *@return the normalized index. + /** + * Compute a legal point index from a possibly illegal one, that may have wrapped. + * + * @param index is the index. + * @return the normalized index. */ protected int legalIndex(int index) { - while (index >= points.size()) + while (index >= points.size()) { index -= points.size(); + } while (index < 0) { index += points.size(); } @@ -323,20 +348,24 @@ class GeoConcavePolygon extends GeoBasePolygon { /** * Constructor for deserialization. + * * @param planetModel is the planet model. * @param inputStream is the input stream. */ - public GeoConcavePolygon(final PlanetModel planetModel, final InputStream inputStream) throws IOException { + public GeoConcavePolygon(final PlanetModel planetModel, final InputStream inputStream) + throws IOException { super(planetModel); - this.points = java.util.Arrays.asList(SerializableObject.readPointArray(planetModel, inputStream)); - final List holes = java.util.Arrays.asList(SerializableObject.readPolygonArray(planetModel, inputStream)); + this.points = + java.util.Arrays.asList(SerializableObject.readPointArray(planetModel, inputStream)); + final List holes = + java.util.Arrays.asList(SerializableObject.readPolygonArray(planetModel, inputStream)); if (holes != null && holes.size() == 0) { this.holes = null; } else { this.holes = holes; } this.isInternalEdges = SerializableObject.readBitSet(inputStream); - done(this.isInternalEdges.get(points.size()-1)); + done(this.isInternalEdges.get(points.size() - 1)); } @Override @@ -383,7 +412,8 @@ class GeoConcavePolygon extends GeoBasePolygon { } @Override - public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) { + public boolean intersects( + final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) { // The bounding planes are inverted and complementary. For intersection computation, we // cannot use them as bounds. They are independent hemispheres. 
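  // Aside: a schematic (hypothetical inputs) of the edge filter used in the loop below --
  // internal edges are shared with a sibling building block, so crossing one does not leave
  // the composite shape and is skipped in the intersection test:
  static boolean anyBoundaryHit(boolean[] isInternal, boolean[] planeHitsEdge) {
    for (int i = 0; i < isInternal.length; i++) {
      if (!isInternal[i] && planeHitsEdge[i]) {
        return true; // a real (non-internal) boundary is crossed
      }
    }
    return false; // only internal edges (or nothing) were hit
  }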
for (int edgeIndex = 0; edgeIndex < edges.length; edgeIndex++) { @@ -391,22 +421,25 @@ class GeoConcavePolygon extends GeoBasePolygon { final SidedPlane invertedEdge = invertedEdges[edgeIndex]; final GeoPoint[] points = this.notableEdgePoints[edgeIndex]; if (!isInternalEdges.get(edgeIndex)) { - //System.err.println("Checking concave edge "+edge+" for intersection against plane "+p); - if (invertedEdge.intersects(planetModel, p, notablePoints, points, bounds, eitherBounds.get(edge))) { - //System.err.println(" intersects!"); + // System.err.println("Checking concave edge " + edge + // + " for intersection against plane " + p); + if (invertedEdge.intersects( + planetModel, p, notablePoints, points, bounds, eitherBounds.get(edge))) { + // System.err.println(" intersects!"); return true; } } } if (holes != null) { - // Each hole needs to be looked at for intersection too, since a shape can be entirely within the hole + // Each hole needs to be looked at for intersection too, since a shape can be entirely within + // the hole for (final GeoPolygon hole : holes) { if (hole.intersects(p, notablePoints, bounds)) { return true; } } } - //System.err.println(" no intersection"); + // System.err.println(" no intersection"); return false; } @@ -431,17 +464,18 @@ class GeoConcavePolygon extends GeoBasePolygon { return false; } - /** A membership implementation representing polygon edges that must apply. - */ + /** A membership implementation representing polygon edges that must apply. */ protected static class EitherBound implements Membership { - + protected final SidedPlane sideBound1; protected final SidedPlane sideBound2; - - /** Constructor. - * @param sideBound1 is the first side bound. - * @param sideBound2 is the second side bound. - */ + + /** + * Constructor. + * + * @param sideBound1 is the first side bound. + * @param sideBound2 is the second side bound. 
+ */ public EitherBound(final SidedPlane sideBound1, final SidedPlane sideBound2) { this.sideBound1 = sideBound1; this.sideBound2 = sideBound2; @@ -454,26 +488,23 @@ class GeoConcavePolygon extends GeoBasePolygon { @Override public boolean isWithin(final double x, final double y, final double z) { - return sideBound1.isWithin(x,y,z) && sideBound2.isWithin(x,y,z); + return sideBound1.isWithin(x, y, z) && sideBound2.isWithin(x, y, z); } - + @Override public String toString() { return "(" + sideBound1 + "," + sideBound2 + ")"; } - } @Override public void getBounds(Bounds bounds) { // Because of holes, we don't want to use superclass method if (localIsWithin(planetModel.NORTH_POLE)) { - bounds.noTopLatitudeBound().noLongitudeBound() - .addPoint(planetModel.NORTH_POLE); + bounds.noTopLatitudeBound().noLongitudeBound().addPoint(planetModel.NORTH_POLE); } if (localIsWithin(planetModel.SOUTH_POLE)) { - bounds.noBottomLatitudeBound().noLongitudeBound() - .addPoint(planetModel.SOUTH_POLE); + bounds.noBottomLatitudeBound().noLongitudeBound().addPoint(planetModel.SOUTH_POLE); } if (localIsWithin(planetModel.MIN_X_POLE)) { bounds.addPoint(planetModel.MIN_X_POLE); @@ -501,22 +532,29 @@ class GeoConcavePolygon extends GeoBasePolygon { } for (final SidedPlane invertedEdge : invertedEdges) { final SidedPlane nextEdge = nextBrotherMap.get(invertedEdge); - bounds.addIntersection(planetModel, invertedEdge, nextEdge, prevBrotherMap.get(invertedEdge), nextBrotherMap.get(nextEdge)); + bounds.addIntersection( + planetModel, + invertedEdge, + nextEdge, + prevBrotherMap.get(invertedEdge), + nextBrotherMap.get(nextEdge)); } - } @Override - protected double outsideDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { + protected double outsideDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { double minimumDistance = Double.POSITIVE_INFINITY; for (final GeoPoint edgePoint : points) { - final double newDist = distanceStyle.computeDistance(edgePoint, x,y,z); + final double newDist = distanceStyle.computeDistance(edgePoint, x, y, z); if (newDist < minimumDistance) { minimumDistance = newDist; } } for (final SidedPlane edgePlane : edges) { - final double newDist = distanceStyle.computeDistance(planetModel, edgePlane, x, y, z, eitherBounds.get(edgePlane)); + final double newDist = + distanceStyle.computeDistance( + planetModel, edgePlane, x, y, z, eitherBounds.get(edgePlane)); if (newDist < minimumDistance) { minimumDistance = newDist; } @@ -534,13 +572,16 @@ class GeoConcavePolygon extends GeoBasePolygon { @Override public boolean equals(Object o) { - if (!(o instanceof GeoConcavePolygon)) + if (!(o instanceof GeoConcavePolygon)) { return false; + } final GeoConcavePolygon other = (GeoConcavePolygon) o; - if (!super.equals(other)) + if (!super.equals(other)) { return false; - if (!other.isInternalEdges.equals(isInternalEdges)) + } + if (!other.isInternalEdges.equals(isInternalEdges)) { return false; + } if (other.holes != null || holes != null) { if (other.holes == null || holes == null) { return false; @@ -564,7 +605,13 @@ class GeoConcavePolygon extends GeoBasePolygon { @Override public String toString() { - return "GeoConcavePolygon: {planetmodel=" + planetModel + ", points=" + points + ", internalEdges=" + isInternalEdges + ((holes== null)?"":", holes=" + holes) + "}"; + return "GeoConcavePolygon: {planetmodel=" + + planetModel + + ", points=" + + points + + ", internalEdges=" + + isInternalEdges + + ((holes == null) ? 
"" : ", holes=" + holes) + + "}"; } } - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoConvexPolygon.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoConvexPolygon.java index 1fb2b0f15f5..9d2550b8180 100755 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoConvexPolygon.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoConvexPolygon.java @@ -16,20 +16,19 @@ */ package org.apache.lucene.spatial3d.geom; -import java.util.ArrayList; -import java.util.BitSet; -import java.util.List; -import java.util.HashMap; -import java.util.Map; +import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.io.IOException; +import java.util.ArrayList; +import java.util.BitSet; +import java.util.HashMap; +import java.util.List; +import java.util.Map; /** - * GeoConvexPolygon objects are generic building blocks of more complex structures. - * The only restrictions on these objects are: (1) they must be convex; (2) they must have - * a maximum extent no larger than PI. Violating either one of these limits will - * cause the logic to fail. + * GeoConvexPolygon objects are generic building blocks of more complex structures. The only + * restrictions on these objects are: (1) they must be convex; (2) they must have a maximum extent + * no larger than PI. Violating either one of these limits will cause the logic to fail. * * @lucene.internal */ @@ -38,7 +37,7 @@ class GeoConvexPolygon extends GeoBasePolygon { protected final List points; /** A bitset describing, for each edge, whether it is internal or not */ protected final BitSet isInternalEdges; - /** The list of holes. If a point is in the hole, it is *not* in the polygon */ + /** The list of holes. If a point is in the hole, it is *not* in the polygon */ protected final List holes; /** A list of edges */ @@ -55,25 +54,27 @@ class GeoConvexPolygon extends GeoBasePolygon { protected Map prevBrotherMap = null; /** Map from edge to its next non-coplanar brother */ protected Map nextBrotherMap = null; - + /** - * Create a convex polygon from a list of points. The first point must be on the - * external edge. - *@param planetModel is the planet model. - *@param pointList is the list of points to create the polygon from. + * Create a convex polygon from a list of points. The first point must be on the external edge. + * + * @param planetModel is the planet model. + * @param pointList is the list of points to create the polygon from. */ public GeoConvexPolygon(final PlanetModel planetModel, final List pointList) { this(planetModel, pointList, null); } - + /** - * Create a convex polygon from a list of points. The first point must be on the - * external edge. - *@param planetModel is the planet model. - *@param pointList is the list of points to create the polygon from. - *@param holes is the list of GeoPolygon objects that describe holes in the complex polygon. Null == no holes. + * Create a convex polygon from a list of points. The first point must be on the external edge. + * + * @param planetModel is the planet model. + * @param pointList is the list of points to create the polygon from. + * @param holes is the list of GeoPolygon objects that describe holes in the complex polygon. Null + * == no holes. 
*/ - public GeoConvexPolygon(final PlanetModel planetModel, final List pointList, final List holes) { + public GeoConvexPolygon( + final PlanetModel planetModel, final List pointList, final List holes) { super(planetModel); this.points = pointList; if (holes != null && holes.size() == 0) { @@ -86,34 +87,39 @@ class GeoConvexPolygon extends GeoBasePolygon { } /** - * Create a convex polygon from a list of points, keeping track of which boundaries - * are internal. This is used when creating a polygon as a building block for another shape. - *@param planetModel is the planet model. - *@param pointList is the set of points to create the polygon from. - *@param internalEdgeFlags is a bitset describing whether each edge is internal or not. - *@param returnEdgeInternal is true when the final return edge is an internal one. + * Create a convex polygon from a list of points, keeping track of which boundaries are internal. + * This is used when creating a polygon as a building block for another shape. + * + * @param planetModel is the planet model. + * @param pointList is the set of points to create the polygon from. + * @param internalEdgeFlags is a bitset describing whether each edge is internal or not. + * @param returnEdgeInternal is true when the final return edge is an internal one. */ - public GeoConvexPolygon(final PlanetModel planetModel, - final List pointList, - final BitSet internalEdgeFlags, - final boolean returnEdgeInternal) { + public GeoConvexPolygon( + final PlanetModel planetModel, + final List pointList, + final BitSet internalEdgeFlags, + final boolean returnEdgeInternal) { this(planetModel, pointList, null, internalEdgeFlags, returnEdgeInternal); } /** - * Create a convex polygon from a list of points, keeping track of which boundaries - * are internal. This is used when creating a polygon as a building block for another shape. - *@param planetModel is the planet model. - *@param pointList is the set of points to create the polygon from. - *@param holes is the list of GeoPolygon objects that describe holes in the complex polygon. Null == no holes. - *@param internalEdgeFlags is a bitset describing whether each edge is internal or not. - *@param returnEdgeInternal is true when the final return edge is an internal one. + * Create a convex polygon from a list of points, keeping track of which boundaries are internal. + * This is used when creating a polygon as a building block for another shape. + * + * @param planetModel is the planet model. + * @param pointList is the set of points to create the polygon from. + * @param holes is the list of GeoPolygon objects that describe holes in the complex polygon. Null + * == no holes. + * @param internalEdgeFlags is a bitset describing whether each edge is internal or not. + * @param returnEdgeInternal is true when the final return edge is an internal one. */ - public GeoConvexPolygon(final PlanetModel planetModel, - final List pointList, - final List holes, - final BitSet internalEdgeFlags, - final boolean returnEdgeInternal) { + public GeoConvexPolygon( + final PlanetModel planetModel, + final List pointList, + final List holes, + final BitSet internalEdgeFlags, + final boolean returnEdgeInternal) { super(planetModel); this.points = pointList; if (holes != null && holes.size() == 0) { @@ -126,30 +132,33 @@ class GeoConvexPolygon extends GeoBasePolygon { } /** - * Create a convex polygon, with a starting latitude and longitude. 
- * Accepts only values in the following ranges: lat: {@code -PI/2 -> PI/2}, lon: {@code -PI -> PI} - *@param planetModel is the planet model. - *@param startLatitude is the latitude of the first point. - *@param startLongitude is the longitude of the first point. + * Create a convex polygon, with a starting latitude and longitude. Accepts only values in the + * following ranges: lat: {@code -PI/2 -> PI/2}, lon: {@code -PI -> PI} + * + * @param planetModel is the planet model. + * @param startLatitude is the latitude of the first point. + * @param startLongitude is the longitude of the first point. */ - public GeoConvexPolygon(final PlanetModel planetModel, - final double startLatitude, - final double startLongitude) { + public GeoConvexPolygon( + final PlanetModel planetModel, final double startLatitude, final double startLongitude) { this(planetModel, startLatitude, startLongitude, null); } - + /** - * Create a convex polygon, with a starting latitude and longitude. - * Accepts only values in the following ranges: lat: {@code -PI/2 -> PI/2}, lon: {@code -PI -> PI} - *@param planetModel is the planet model. - *@param startLatitude is the latitude of the first point. - *@param startLongitude is the longitude of the first point. - *@param holes is the list of GeoPolygon objects that describe holes in the complex polygon. Null == no holes. + * Create a convex polygon, with a starting latitude and longitude. Accepts only values in the + * following ranges: lat: {@code -PI/2 -> PI/2}, lon: {@code -PI -> PI} + * + * @param planetModel is the planet model. + * @param startLatitude is the latitude of the first point. + * @param startLongitude is the longitude of the first point. + * @param holes is the list of GeoPolygon objects that describe holes in the complex polygon. Null + * == no holes. */ - public GeoConvexPolygon(final PlanetModel planetModel, - final double startLatitude, - final double startLongitude, - final List holes) { + public GeoConvexPolygon( + final PlanetModel planetModel, + final double startLatitude, + final double startLongitude, + final List holes) { super(planetModel); points = new ArrayList<>(); if (holes != null && holes.size() == 0) { @@ -162,38 +171,45 @@ class GeoConvexPolygon extends GeoBasePolygon { } /** - * Add a point to the polygon. - * Accepts only values in the following ranges: lat: {@code -PI/2 -> PI/2}, lon: {@code -PI -> PI} + * Add a point to the polygon. Accepts only values in the following ranges: lat: {@code -PI/2 -> + * PI/2}, lon: {@code -PI -> PI} * - * @param latitude is the latitude of the next point. - * @param longitude is the longitude of the next point. - * @param isInternalEdge is true if the edge just added with this point should be considered "internal", and not - * intersected as part of the intersects() operation. + * @param latitude is the latitude of the next point. + * @param longitude is the longitude of the next point. + * @param isInternalEdge is true if the edge just added with this point should be considered + * "internal", and not intersected as part of the intersects() operation. 
*/ - public void addPoint(final double latitude, final double longitude, final boolean isInternalEdge) { - if (isDone) + public void addPoint( + final double latitude, final double longitude, final boolean isInternalEdge) { + if (isDone) { throw new IllegalStateException("Can't call addPoint() if done() already called"); - if (isInternalEdge) + } + if (isInternalEdge) { isInternalEdges.set(points.size() - 1); + } points.add(new GeoPoint(planetModel, latitude, longitude)); } /** * Finish the polygon, by connecting the last added point with the starting point. - *@param isInternalReturnEdge is true if the return edge (back to start) is an internal one. + * + * @param isInternalReturnEdge is true if the return edge (back to start) is an internal one. */ public void done(final boolean isInternalReturnEdge) { - if (isDone) + if (isDone) { throw new IllegalStateException("Can't call done() more than once"); + } // If fewer than 3 points, can't do it. - if (points.size() < 3) + if (points.size() < 3) { throw new IllegalArgumentException("Polygon needs at least three points."); + } - if (isInternalReturnEdge) + if (isInternalReturnEdge) { isInternalEdges.set(points.size() - 1); + } isDone = true; - + // Time to construct the planes. If the polygon is truly convex, then any adjacent point // to a segment can provide an interior measurement. edges = new SidedPlane[points.size()]; @@ -214,56 +230,58 @@ class GeoConvexPolygon extends GeoBasePolygon { } } if (endPointIndex == -1) { - throw new IllegalArgumentException("Polygon points are all coplanar: "+points); + throw new IllegalArgumentException("Polygon points are all coplanar: " + points); } final GeoPoint check = points.get(endPointIndex); final SidedPlane sp = new SidedPlane(check, start, end); edges[i] = sp; - notableEdgePoints[i] = new GeoPoint[]{start, end}; + notableEdgePoints[i] = new GeoPoint[] {start, end}; } - + // For each edge, create a bounds object. 
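  // Aside: a worked sketch of the legalIndex() wrap-around relied on by the neighbor search
  // below, for a hypothetical polygon of n = points.size() vertices:
  static int legalIndexSketch(int index, int n) {
    while (index >= n) {
      index -= n; // e.g. legalIndexSketch(4, 4) -> 0: one past the end wraps to the start
    }
    while (index < 0) {
      index += n; // e.g. legalIndexSketch(-1, 4) -> 3: one before the start wraps to the end
    }
    return index;
  }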
eitherBounds = new HashMap<>(edges.length); prevBrotherMap = new HashMap<>(edges.length); nextBrotherMap = new HashMap<>(edges.length); for (int edgeIndex = 0; edgeIndex < edges.length; edgeIndex++) { final SidedPlane edge = edges[edgeIndex]; - int bound1Index = legalIndex(edgeIndex+1); + int bound1Index = legalIndex(edgeIndex + 1); while (edges[bound1Index].isNumericallyIdentical(edge)) { if (bound1Index == edgeIndex) { - throw new IllegalArgumentException("Constructed planes are all coplanar: "+points); + throw new IllegalArgumentException("Constructed planes are all coplanar: " + points); } bound1Index = legalIndex(bound1Index + 1); } - int bound2Index = legalIndex(edgeIndex-1); + int bound2Index = legalIndex(edgeIndex - 1); // Look for bound2 while (edges[bound2Index].isNumericallyIdentical(edge)) { if (bound2Index == edgeIndex) { - throw new IllegalArgumentException("Constructed planes are all coplanar: "+points); + throw new IllegalArgumentException("Constructed planes are all coplanar: " + points); } bound2Index = legalIndex(bound2Index - 1); } // Also confirm that all interior points are within the bounds int startingIndex = bound2Index; while (true) { - startingIndex = legalIndex(startingIndex+1); + startingIndex = legalIndex(startingIndex + 1); if (startingIndex == bound1Index) { break; } final GeoPoint interiorPoint = points.get(startingIndex); - if (!edges[bound1Index].isWithin(interiorPoint) || !edges[bound2Index].isWithin(interiorPoint)) { - throw new IllegalArgumentException("Convex polygon has a side that is more than 180 degrees"); + if (!edges[bound1Index].isWithin(interiorPoint) + || !edges[bound2Index].isWithin(interiorPoint)) { + throw new IllegalArgumentException( + "Convex polygon has a side that is more than 180 degrees"); } } eitherBounds.put(edge, new EitherBound(edges[bound1Index], edges[bound2Index])); - // When we are done with this cycle, we'll need to build the intersection bound for each edge and its brother. - // For now, keep track of the relationships. + // When we are done with this cycle, we'll need to build the intersection bound for each edge + // and its brother. For now, keep track of the relationships. nextBrotherMap.put(edge, edges[bound1Index]); prevBrotherMap.put(edge, edges[bound2Index]); } - // Pick an edge point arbitrarily from the outer polygon. Glom this together with all edge points from - // inner polygons. + // Pick an edge point arbitrarily from the outer polygon. Glom this together with all edge + // points from inner polygons. int edgePointCount = 1; if (holes != null) { for (final GeoPolygon hole : holes) { @@ -281,16 +299,17 @@ class GeoConvexPolygon extends GeoBasePolygon { } } } - + if (isWithinHoles(points.get(0))) { throw new IllegalArgumentException("Polygon edge intersects a polygon hole; not allowed"); } - } - /** Check if a point is within the provided holes. - *@param point point to check. - *@return true if the point is within any of the holes. + /** + * Check if a point is within the provided holes. + * + * @param point point to check. + * @return true if the point is within any of the holes. */ protected boolean isWithinHoles(final GeoPoint point) { if (holes != null) { @@ -302,10 +321,12 @@ class GeoConvexPolygon extends GeoBasePolygon { } return false; } - - /** Compute a legal point index from a possibly illegal one, that may have wrapped. - *@param index is the index. - *@return the normalized index. + + /** + * Compute a legal point index from a possibly illegal one, that may have wrapped. 
+ * + * @param index is the index. + * @return the normalized index. */ protected int legalIndex(int index) { while (index >= points.size()) { @@ -319,20 +340,24 @@ class GeoConvexPolygon extends GeoBasePolygon { /** * Constructor for deserialization. + * * @param planetModel is the planet model. * @param inputStream is the input stream. */ - public GeoConvexPolygon(final PlanetModel planetModel, final InputStream inputStream) throws IOException { + public GeoConvexPolygon(final PlanetModel planetModel, final InputStream inputStream) + throws IOException { super(planetModel); - this.points = java.util.Arrays.asList(SerializableObject.readPointArray(planetModel, inputStream)); - final List holes = java.util.Arrays.asList(SerializableObject.readPolygonArray(planetModel, inputStream)); + this.points = + java.util.Arrays.asList(SerializableObject.readPointArray(planetModel, inputStream)); + final List holes = + java.util.Arrays.asList(SerializableObject.readPolygonArray(planetModel, inputStream)); if (holes != null && holes.size() == 0) { this.holes = null; } else { this.holes = holes; } this.isInternalEdges = SerializableObject.readBitSet(inputStream); - done(this.isInternalEdges.get(points.size()-1)); + done(this.isInternalEdges.get(points.size() - 1)); } @Override @@ -356,47 +381,52 @@ class GeoConvexPolygon extends GeoBasePolygon { } return true; } - + protected boolean localIsWithin(final Vector v) { return localIsWithin(v.x, v.y, v.z); } protected boolean localIsWithin(final double x, final double y, final double z) { for (final SidedPlane edge : edges) { - if (!edge.isWithin(x, y, z)) + if (!edge.isWithin(x, y, z)) { return false; + } } return true; } - + @Override public GeoPoint[] getEdgePoints() { return edgePoints; } @Override - public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) { - //System.err.println("Checking for polygon intersection with plane "+p+"..."); + public boolean intersects( + final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) { + // System.err.println("Checking for polygon intersection with plane "+p+"..."); for (int edgeIndex = 0; edgeIndex < edges.length; edgeIndex++) { final SidedPlane edge = edges[edgeIndex]; final GeoPoint[] points = this.notableEdgePoints[edgeIndex]; if (!isInternalEdges.get(edgeIndex)) { - //System.err.println("Checking convex edge "+edge+" for intersection against plane "+p); - if (edge.intersects(planetModel, p, notablePoints, points, bounds, eitherBounds.get(edge))) { - //System.err.println(" intersects!"); + // System.err.println("Checking convex edge " + edge + // + " for intersection against plane " + p); + if (edge.intersects( + planetModel, p, notablePoints, points, bounds, eitherBounds.get(edge))) { + // System.err.println(" intersects!"); return true; } } } if (holes != null) { - // Each hole needs to be looked at for intersection too, since a shape can be entirely within the hole + // Each hole needs to be looked at for intersection too, since a shape can be entirely within + // the hole for (final GeoPolygon hole : holes) { if (hole.intersects(p, notablePoints, bounds)) { return true; } } } - //System.err.println(" no intersection"); + // System.err.println(" no intersection"); return false; } @@ -421,17 +451,18 @@ class GeoConvexPolygon extends GeoBasePolygon { return false; } - /** A membership implementation representing polygon edges that must apply. - */ + /** A membership implementation representing polygon edges that must apply. 
*/ protected static class EitherBound implements Membership { - + protected final SidedPlane sideBound1; protected final SidedPlane sideBound2; - - /** Constructor. - * @param sideBound1 is the first side bound. - * @param sideBound2 is the second side bound. - */ + + /** + * Constructor. + * + * @param sideBound1 is the first side bound. + * @param sideBound2 is the second side bound. + */ public EitherBound(final SidedPlane sideBound1, final SidedPlane sideBound2) { this.sideBound1 = sideBound1; this.sideBound2 = sideBound2; @@ -444,26 +475,23 @@ class GeoConvexPolygon extends GeoBasePolygon { @Override public boolean isWithin(final double x, final double y, final double z) { - return sideBound1.isWithin(x,y,z) && sideBound2.isWithin(x,y,z); + return sideBound1.isWithin(x, y, z) && sideBound2.isWithin(x, y, z); } - + @Override public String toString() { return "(" + sideBound1 + "," + sideBound2 + ")"; } } - @Override public void getBounds(Bounds bounds) { // Because of holes, we don't want to use superclass method if (localIsWithin(planetModel.NORTH_POLE)) { - bounds.noTopLatitudeBound().noLongitudeBound() - .addPoint(planetModel.NORTH_POLE); + bounds.noTopLatitudeBound().noLongitudeBound().addPoint(planetModel.NORTH_POLE); } if (localIsWithin(planetModel.SOUTH_POLE)) { - bounds.noBottomLatitudeBound().noLongitudeBound() - .addPoint(planetModel.SOUTH_POLE); + bounds.noBottomLatitudeBound().noLongitudeBound().addPoint(planetModel.SOUTH_POLE); } if (localIsWithin(planetModel.MIN_X_POLE)) { bounds.addPoint(planetModel.MIN_X_POLE); @@ -487,22 +515,25 @@ class GeoConvexPolygon extends GeoBasePolygon { for (final SidedPlane edge : edges) { bounds.addPlane(planetModel, edge, eitherBounds.get(edge)); final SidedPlane nextEdge = nextBrotherMap.get(edge); - bounds.addIntersection(planetModel, edge, nextEdge, prevBrotherMap.get(edge), nextBrotherMap.get(nextEdge)); + bounds.addIntersection( + planetModel, edge, nextEdge, prevBrotherMap.get(edge), nextBrotherMap.get(nextEdge)); } - } @Override - protected double outsideDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { + protected double outsideDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { double minimumDistance = Double.POSITIVE_INFINITY; for (final GeoPoint edgePoint : points) { - final double newDist = distanceStyle.computeDistance(edgePoint, x,y,z); + final double newDist = distanceStyle.computeDistance(edgePoint, x, y, z); if (newDist < minimumDistance) { minimumDistance = newDist; } } for (final SidedPlane edgePlane : edges) { - final double newDist = distanceStyle.computeDistance(planetModel, edgePlane, x, y, z, eitherBounds.get(edgePlane)); + final double newDist = + distanceStyle.computeDistance( + planetModel, edgePlane, x, y, z, eitherBounds.get(edgePlane)); if (newDist < minimumDistance) { minimumDistance = newDist; } @@ -520,13 +551,16 @@ class GeoConvexPolygon extends GeoBasePolygon { @Override public boolean equals(Object o) { - if (!(o instanceof GeoConvexPolygon)) + if (!(o instanceof GeoConvexPolygon)) { return false; + } final GeoConvexPolygon other = (GeoConvexPolygon) o; - if (!super.equals(other)) + if (!super.equals(other)) { return false; - if (!other.isInternalEdges.equals(isInternalEdges)) + } + if (!other.isInternalEdges.equals(isInternalEdges)) { return false; + } if (other.holes != null || holes != null) { if (other.holes == null || holes == null) { return false; @@ -550,7 +584,13 @@ class GeoConvexPolygon extends 
GeoBasePolygon { @Override public String toString() { - return "GeoConvexPolygon: {planetmodel=" + planetModel + ", points=" + points + ", internalEdges="+isInternalEdges+((holes== null)?"":", holes=" + holes) + "}"; + return "GeoConvexPolygon: {planetmodel=" + + planetModel + + ", points=" + + points + + ", internalEdges=" + + isInternalEdges + + ((holes == null) ? "" : ", holes=" + holes) + + "}"; } } - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDegenerateHorizontalLine.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDegenerateHorizontalLine.java index 9be5b37fe54..90d03166fb1 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDegenerateHorizontalLine.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDegenerateHorizontalLine.java @@ -16,14 +16,13 @@ */ package org.apache.lucene.spatial3d.geom; +import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.io.IOException; /** - * Degenerate bounding box limited on two sides (left lon, right lon). - * The left-right maximum extent for this shape is PI; for anything larger, use - * GeoWideDegenerateHorizontalLine. + * Degenerate bounding box limited on two sides (left lon, right lon). The left-right maximum extent + * for this shape is PI; for anything larger, use GeoWideDegenerateHorizontalLine. * * @lucene.internal */ @@ -57,12 +56,14 @@ class GeoDegenerateHorizontalLine extends GeoBaseBBox { /** * Accepts only values in the following ranges: lat: {@code -PI/2 -> PI/2}, lon: {@code -PI -> PI} - *@param planetModel is the planet model. - *@param latitude is the latitude of the line. - *@param leftLon is the left end longitude. - *@param rightLon is the right end longitude. + * + * @param planetModel is the planet model. + * @param latitude is the latitude of the line. + * @param leftLon is the left end longitude. + * @param rightLon is the right end longitude. 
*/ - public GeoDegenerateHorizontalLine(final PlanetModel planetModel, final double latitude, final double leftLon, double rightLon) { + public GeoDegenerateHorizontalLine( + final PlanetModel planetModel, final double latitude, final double leftLon, double rightLon) { super(planetModel); // Argument checking if (latitude > Math.PI * 0.5 || latitude < -Math.PI * 0.5) @@ -75,8 +76,9 @@ class GeoDegenerateHorizontalLine extends GeoBaseBBox { if (extent < 0.0) { extent += 2.0 * Math.PI; } - if (extent > Math.PI) + if (extent > Math.PI) { throw new IllegalArgumentException("Width of rectangle too great"); + } this.latitude = latitude; this.leftLon = leftLon; @@ -90,8 +92,12 @@ class GeoDegenerateHorizontalLine extends GeoBaseBBox { final double cosRightLon = Math.cos(rightLon); // Now build the two points - this.LHC = new GeoPoint(planetModel, sinLatitude, sinLeftLon, cosLatitude, cosLeftLon, latitude, leftLon); - this.RHC = new GeoPoint(planetModel, sinLatitude, sinRightLon, cosLatitude, cosRightLon, latitude, rightLon); + this.LHC = + new GeoPoint( + planetModel, sinLatitude, sinLeftLon, cosLatitude, cosLeftLon, latitude, leftLon); + this.RHC = + new GeoPoint( + planetModel, sinLatitude, sinRightLon, cosLatitude, cosRightLon, latitude, rightLon); this.plane = new Plane(planetModel, sinLatitude); @@ -103,22 +109,29 @@ class GeoDegenerateHorizontalLine extends GeoBaseBBox { final double sinMiddleLon = Math.sin(middleLon); final double cosMiddleLon = Math.cos(middleLon); - this.centerPoint = new GeoPoint(planetModel, sinLatitude, sinMiddleLon, cosLatitude, cosMiddleLon); + this.centerPoint = + new GeoPoint(planetModel, sinLatitude, sinMiddleLon, cosLatitude, cosMiddleLon); this.leftPlane = new SidedPlane(centerPoint, cosLeftLon, sinLeftLon); this.rightPlane = new SidedPlane(centerPoint, cosRightLon, sinRightLon); - this.planePoints = new GeoPoint[]{LHC, RHC}; + this.planePoints = new GeoPoint[] {LHC, RHC}; - this.edgePoints = new GeoPoint[]{centerPoint}; + this.edgePoints = new GeoPoint[] {centerPoint}; } /** * Constructor for deserialization. + * * @param planetModel is the planet model. * @param inputStream is the input stream. 
*/ - public GeoDegenerateHorizontalLine(final PlanetModel planetModel, final InputStream inputStream) throws IOException { - this(planetModel, SerializableObject.readDouble(inputStream), SerializableObject.readDouble(inputStream), SerializableObject.readDouble(inputStream)); + public GeoDegenerateHorizontalLine(final PlanetModel planetModel, final InputStream inputStream) + throws IOException { + this( + planetModel, + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream)); } @Override @@ -134,22 +147,24 @@ class GeoDegenerateHorizontalLine extends GeoBaseBBox { double newBottomLat = latitude - angle; // Figuring out when we escalate to a special case requires some prefiguring double currentLonSpan = rightLon - leftLon; - if (currentLonSpan < 0.0) + if (currentLonSpan < 0.0) { currentLonSpan += Math.PI * 2.0; + } double newLeftLon = leftLon - angle; double newRightLon = rightLon + angle; if (currentLonSpan + 2.0 * angle >= Math.PI * 2.0) { newLeftLon = -Math.PI; newRightLon = Math.PI; } - return GeoBBoxFactory.makeGeoBBox(planetModel, newTopLat, newBottomLat, newLeftLon, newRightLon); + return GeoBBoxFactory.makeGeoBBox( + planetModel, newTopLat, newBottomLat, newLeftLon, newRightLon); } @Override public boolean isWithin(final double x, final double y, final double z) { - return plane.evaluateIsZero(x, y, z) && - leftPlane.isWithin(x, y, z) && - rightPlane.isWithin(x, y, z); + return plane.evaluateIsZero(x, y, z) + && leftPlane.isWithin(x, y, z) + && rightPlane.isWithin(x, y, z); } @Override @@ -170,8 +185,10 @@ class GeoDegenerateHorizontalLine extends GeoBaseBBox { } @Override - public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) { - return p.intersects(planetModel, plane, notablePoints, planePoints, bounds, leftPlane, rightPlane); + public boolean intersects( + final Plane p, final GeoPoint[] notablePoints, final Membership... 
bounds) { + return p.intersects( + planetModel, plane, notablePoints, planePoints, bounds, leftPlane, rightPlane); } @Override @@ -182,43 +199,46 @@ class GeoDegenerateHorizontalLine extends GeoBaseBBox { @Override public void getBounds(Bounds bounds) { super.getBounds(bounds); - bounds.addHorizontalPlane(planetModel, latitude, plane, leftPlane, rightPlane) - .addPoint(LHC).addPoint(RHC); + bounds + .addHorizontalPlane(planetModel, latitude, plane, leftPlane, rightPlane) + .addPoint(LHC) + .addPoint(RHC); } @Override public int getRelationship(final GeoShape path) { - //System.err.println("getting relationship between "+this+" and "+path); + // System.err.println("getting relationship between " + this + " and " + path); if (intersects(path)) { - //System.err.println(" overlaps"); + // System.err.println(" overlaps"); return OVERLAPS; } if (path.isWithin(centerPoint)) { - //System.err.println(" contains"); + // System.err.println(" contains"); return CONTAINS; } - //System.err.println(" disjoint"); + // System.err.println(" disjoint"); return DISJOINT; } @Override - protected double outsideDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { - final double distance = distanceStyle.computeDistance(planetModel, plane, x,y,z, leftPlane, rightPlane); - - final double LHCDistance = distanceStyle.computeDistance(LHC, x,y,z); - final double RHCDistance = distanceStyle.computeDistance(RHC, x,y,z); - - return Math.min( - distance, - Math.min(LHCDistance, RHCDistance)); + protected double outsideDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { + final double distance = + distanceStyle.computeDistance(planetModel, plane, x, y, z, leftPlane, rightPlane); + + final double LHCDistance = distanceStyle.computeDistance(LHC, x, y, z); + final double RHCDistance = distanceStyle.computeDistance(RHC, x, y, z); + + return Math.min(distance, Math.min(LHCDistance, RHCDistance)); } @Override public boolean equals(Object o) { - if (!(o instanceof GeoDegenerateHorizontalLine)) + if (!(o instanceof GeoDegenerateHorizontalLine)) { return false; + } GeoDegenerateHorizontalLine other = (GeoDegenerateHorizontalLine) o; return super.equals(other) && other.LHC.equals(LHC) && other.RHC.equals(RHC); } @@ -233,8 +253,20 @@ class GeoDegenerateHorizontalLine extends GeoBaseBBox { @Override public String toString() { - return "GeoDegenerateHorizontalLine: {planetmodel="+planetModel+", latitude=" + latitude + "(" + latitude * 180.0 / Math.PI + "), leftlon=" + leftLon + "(" + leftLon * 180.0 / Math.PI + "), rightLon=" + rightLon + "(" + rightLon * 180.0 / Math.PI + ")}"; + return "GeoDegenerateHorizontalLine: {planetmodel=" + + planetModel + + ", latitude=" + + latitude + + "(" + + latitude * 180.0 / Math.PI + + "), leftlon=" + + leftLon + + "(" + + leftLon * 180.0 / Math.PI + + "), rightLon=" + + rightLon + + "(" + + rightLon * 180.0 / Math.PI + + ")}"; } } - - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDegenerateLatitudeZone.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDegenerateLatitudeZone.java index aa8798121bd..002830b0617 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDegenerateLatitudeZone.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDegenerateLatitudeZone.java @@ -16,13 +16,12 @@ */ package org.apache.lucene.spatial3d.geom; +import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import 
java.io.IOException; /** - * This GeoBBox represents an area rectangle of one specific latitude with - * no longitude bounds. + * This GeoBBox represents an area rectangle of one specific latitude with no longitude bounds. * * @lucene.internal */ @@ -38,11 +37,13 @@ class GeoDegenerateLatitudeZone extends GeoBaseBBox { /** An array consisting of the interiorPoint */ protected final GeoPoint[] edgePoints; /** No notable points */ - protected final static GeoPoint[] planePoints = new GeoPoint[0]; + protected static final GeoPoint[] planePoints = new GeoPoint[0]; - /** Constructor. - *@param planetModel is the planet model to use. - *@param latitude is the latitude of the latitude zone. + /** + * Constructor. + * + * @param planetModel is the planet model to use. + * @param latitude is the latitude of the latitude zone. */ public GeoDegenerateLatitudeZone(final PlanetModel planetModel, final double latitude) { super(planetModel); @@ -53,15 +54,17 @@ class GeoDegenerateLatitudeZone extends GeoBaseBBox { this.plane = new Plane(planetModel, sinLatitude); // Compute an interior point. interiorPoint = new GeoPoint(planetModel, sinLatitude, 0.0, cosLatitude, 1.0); - edgePoints = new GeoPoint[]{interiorPoint}; + edgePoints = new GeoPoint[] {interiorPoint}; } /** * Constructor for deserialization. + * * @param planetModel is the planet model. * @param inputStream is the input stream. */ - public GeoDegenerateLatitudeZone(final PlanetModel planetModel, final InputStream inputStream) throws IOException { + public GeoDegenerateLatitudeZone(final PlanetModel planetModel, final InputStream inputStream) + throws IOException { this(planetModel, SerializableObject.readDouble(inputStream)); } @@ -99,7 +102,8 @@ class GeoDegenerateLatitudeZone extends GeoBaseBBox { } @Override - public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) { + public boolean intersects( + final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) { return p.intersects(planetModel, plane, notablePoints, planePoints, bounds); } @@ -111,15 +115,14 @@ class GeoDegenerateLatitudeZone extends GeoBaseBBox { @Override public void getBounds(Bounds bounds) { super.getBounds(bounds); - bounds.noLongitudeBound() - .addHorizontalPlane(planetModel, latitude, plane); + bounds.noLongitudeBound().addHorizontalPlane(planetModel, latitude, plane); } @Override public int getRelationship(final GeoShape path) { // Second, the shortcut of seeing whether endpoints are in/out is not going to // work with no area endpoints. So we rely entirely on intersections. - //System.out.println("Got here! latitude="+latitude+" path="+path); + // System.out.println("Got here! 
latitude=" + latitude + " path=" + path); if (intersects(path)) { return OVERLAPS; @@ -133,14 +136,16 @@ class GeoDegenerateLatitudeZone extends GeoBaseBBox { } @Override - protected double outsideDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { - return distanceStyle.computeDistance(planetModel, plane, x,y,z); + protected double outsideDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { + return distanceStyle.computeDistance(planetModel, plane, x, y, z); } @Override public boolean equals(Object o) { - if (!(o instanceof GeoDegenerateLatitudeZone)) + if (!(o instanceof GeoDegenerateLatitudeZone)) { return false; + } GeoDegenerateLatitudeZone other = (GeoDegenerateLatitudeZone) o; return super.equals(other) && other.latitude == latitude; } @@ -155,7 +160,12 @@ class GeoDegenerateLatitudeZone extends GeoBaseBBox { @Override public String toString() { - return "GeoDegenerateLatitudeZone: {planetmodel="+planetModel+", lat=" + latitude + "(" + latitude * 180.0 / Math.PI + ")}"; + return "GeoDegenerateLatitudeZone: {planetmodel=" + + planetModel + + ", lat=" + + latitude + + "(" + + latitude * 180.0 / Math.PI + + ")}"; } } - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDegenerateLongitudeSlice.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDegenerateLongitudeSlice.java index 0d303358c48..6f1703fa617 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDegenerateLongitudeSlice.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDegenerateLongitudeSlice.java @@ -16,9 +16,9 @@ */ package org.apache.lucene.spatial3d.geom; +import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.io.IOException; /** * Degenerate longitude slice. @@ -40,9 +40,7 @@ class GeoDegenerateLongitudeSlice extends GeoBaseBBox { /** Notable points for the slice (north and south poles) */ protected final GeoPoint[] planePoints; - /** - * Accepts only values in the following ranges: lon: {@code -PI -> PI} - */ + /** Accepts only values in the following ranges: lon: {@code -PI -> PI} */ public GeoDegenerateLongitudeSlice(final PlanetModel planetModel, final double longitude) { super(planetModel); // Argument checking @@ -54,19 +52,22 @@ class GeoDegenerateLongitudeSlice extends GeoBaseBBox { final double cosLongitude = Math.cos(longitude); this.plane = new Plane(cosLongitude, sinLongitude); - // We need a bounding plane too, which is perpendicular to the longitude plane and sided so that the point (0.0, longitude) is inside. + // We need a bounding plane too, which is perpendicular to the longitude plane and sided so that + // the point (0.0, longitude) is inside. this.interiorPoint = new GeoPoint(planetModel, 0.0, sinLongitude, 1.0, cosLongitude); this.boundingPlane = new SidedPlane(interiorPoint, -sinLongitude, cosLongitude); - this.edgePoints = new GeoPoint[]{interiorPoint}; - this.planePoints = new GeoPoint[]{planetModel.NORTH_POLE, planetModel.SOUTH_POLE}; + this.edgePoints = new GeoPoint[] {interiorPoint}; + this.planePoints = new GeoPoint[] {planetModel.NORTH_POLE, planetModel.SOUTH_POLE}; } /** * Constructor for deserialization. + * * @param planetModel is the planet model. * @param inputStream is the input stream. 
*/ - public GeoDegenerateLongitudeSlice(final PlanetModel planetModel, final InputStream inputStream) throws IOException { + public GeoDegenerateLongitudeSlice(final PlanetModel planetModel, final InputStream inputStream) + throws IOException { this(planetModel, SerializableObject.readDouble(inputStream)); } @@ -85,13 +86,13 @@ class GeoDegenerateLongitudeSlice extends GeoBaseBBox { newLeftLon = -Math.PI; newRightLon = Math.PI; } - return GeoBBoxFactory.makeGeoBBox(planetModel, Math.PI * 0.5, -Math.PI * 0.5, newLeftLon, newRightLon); + return GeoBBoxFactory.makeGeoBBox( + planetModel, Math.PI * 0.5, -Math.PI * 0.5, newLeftLon, newRightLon); } @Override public boolean isWithin(final double x, final double y, final double z) { - return plane.evaluateIsZero(x, y, z) && - boundingPlane.isWithin(x, y, z); + return plane.evaluateIsZero(x, y, z) && boundingPlane.isWithin(x, y, z); } @Override @@ -110,7 +111,8 @@ class GeoDegenerateLongitudeSlice extends GeoBaseBBox { } @Override - public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) { + public boolean intersects( + final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) { return p.intersects(planetModel, plane, notablePoints, planePoints, bounds, boundingPlane); } @@ -123,38 +125,42 @@ class GeoDegenerateLongitudeSlice extends GeoBaseBBox { public void getBounds(Bounds bounds) { super.getBounds(bounds); bounds - .addVerticalPlane(planetModel, longitude, plane, boundingPlane) - .addPoint(planetModel.NORTH_POLE).addPoint(planetModel.SOUTH_POLE); + .addVerticalPlane(planetModel, longitude, plane, boundingPlane) + .addPoint(planetModel.NORTH_POLE) + .addPoint(planetModel.SOUTH_POLE); } @Override public int getRelationship(final GeoShape path) { // Look for intersections. 
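  // Aside: the decision below reduces to a fixed three-way test; a compact sketch with plain
  // booleans standing in for the real geometry (relationship constants assumed to be the
  // GeoArea values used throughout this package):
  static int relationshipSketch(boolean boundariesCross, boolean pathContainsInterior) {
    if (boundariesCross) {
      return GeoArea.OVERLAPS; // the shapes' boundaries intersect
    }
    if (pathContainsInterior) {
      return GeoArea.CONTAINS; // no crossing, and our interior point lies within the path
    }
    return GeoArea.DISJOINT; // no crossing and no containment
  }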
- if (intersects(path)) + if (intersects(path)) { return OVERLAPS; + } - if (path.isWithin(interiorPoint)) + if (path.isWithin(interiorPoint)) { return CONTAINS; + } return DISJOINT; } @Override - protected double outsideDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { - final double distance = distanceStyle.computeDistance(planetModel, plane, x,y,z, boundingPlane); - - final double northDistance = distanceStyle.computeDistance(planetModel.NORTH_POLE, x,y,z); - final double southDistance = distanceStyle.computeDistance(planetModel.SOUTH_POLE, x,y,z); - - return Math.min( - distance, - Math.min(northDistance, southDistance)); + protected double outsideDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { + final double distance = + distanceStyle.computeDistance(planetModel, plane, x, y, z, boundingPlane); + + final double northDistance = distanceStyle.computeDistance(planetModel.NORTH_POLE, x, y, z); + final double southDistance = distanceStyle.computeDistance(planetModel.SOUTH_POLE, x, y, z); + + return Math.min(distance, Math.min(northDistance, southDistance)); } @Override public boolean equals(Object o) { - if (!(o instanceof GeoDegenerateLongitudeSlice)) + if (!(o instanceof GeoDegenerateLongitudeSlice)) { return false; + } GeoDegenerateLongitudeSlice other = (GeoDegenerateLongitudeSlice) o; return super.equals(other) && other.longitude == longitude; } @@ -169,8 +175,12 @@ class GeoDegenerateLongitudeSlice extends GeoBaseBBox { @Override public String toString() { - return "GeoDegenerateLongitudeSlice: {planetmodel="+planetModel+", longitude=" + longitude + "(" + longitude * 180.0 / Math.PI + ")}"; + return "GeoDegenerateLongitudeSlice: {planetmodel=" + + planetModel + + ", longitude=" + + longitude + + "(" + + longitude * 180.0 / Math.PI + + ")}"; } } - - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDegeneratePath.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDegeneratePath.java index f2456b6970c..a6361cc3087 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDegeneratePath.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDegeneratePath.java @@ -16,10 +16,9 @@ */ package org.apache.lucene.spatial3d.geom; +import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.io.IOException; - import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -27,10 +26,9 @@ import java.util.List; import java.util.Map; /** - * GeoShape representing a path across the surface of the globe, - * with a specified half-width. Path is described by a series of points. - * Distances are measured from the starting point along the path, and then at right - * angles to the path. + * GeoShape representing a path across the surface of the globe, with a specified half-width. Path + * is described by a series of points. Distances are measured from the starting point along the + * path, and then at right angles to the path. * * @lucene.internal */ @@ -38,7 +36,7 @@ class GeoDegeneratePath extends GeoBasePath { /** The original list of path points */ protected final List points = new ArrayList(); - + /** A list of SegmentEndpoints */ protected List endPoints; /** A list of PathSegments */ @@ -49,10 +47,12 @@ class GeoDegeneratePath extends GeoBasePath { /** Set to true if path has been completely constructed */ protected boolean isDone = false; - - /** Constructor. 
- *@param planetModel is the planet model. - *@param pathPoints are the points in the path. + + /** + * Constructor. + * + * @param planetModel is the planet model. + * @param pathPoints are the points in the path. */ public GeoDegeneratePath(final PlanetModel planetModel, final GeoPoint[] pathPoints) { this(planetModel); @@ -60,35 +60,41 @@ class GeoDegeneratePath extends GeoBasePath { done(); } - /** Piece-wise constructor. Use in conjunction with addPoint() and done(). - *@param planetModel is the planet model. + /** + * Piece-wise constructor. Use in conjunction with addPoint() and done(). + * + * @param planetModel is the planet model. */ public GeoDegeneratePath(final PlanetModel planetModel) { super(planetModel); } - /** Add a point to the path. - *@param lat is the latitude of the point. - *@param lon is the longitude of the point. + /** + * Add a point to the path. + * + * @param lat is the latitude of the point. + * @param lon is the longitude of the point. */ public void addPoint(final double lat, final double lon) { - if (isDone) + if (isDone) { throw new IllegalStateException("Can't call addPoint() if done() already called"); + } points.add(new GeoPoint(planetModel, lat, lon)); } - - /** Complete the path. - */ + + /** Complete the path. */ public void done() { - if (isDone) + if (isDone) { throw new IllegalStateException("Can't call done() twice"); - if (points.size() == 0) + } + if (points.size() == 0) { throw new IllegalArgumentException("Path must have at least one point"); + } isDone = true; endPoints = new ArrayList<>(points.size()); segments = new ArrayList<>(points.size()); - + // First, build all segments. We'll then go back and build corresponding segment endpoints. GeoPoint lastPoint = null; for (final GeoPoint end : points) { @@ -101,49 +107,50 @@ class GeoDegeneratePath extends GeoBasePath { } lastPoint = end; } - + if (segments.size() == 0) { // Simple circle final GeoPoint point = points.get(0); - + final SegmentEndpoint onlyEndpoint = new SegmentEndpoint(point); endPoints.add(onlyEndpoint); - this.edgePoints = new GeoPoint[]{point}; + this.edgePoints = new GeoPoint[] {point}; return; } - + // Create segment endpoints. Use an appropriate constructor for the start and end of the path. for (int i = 0; i < segments.size(); i++) { final PathSegment currentSegment = segments.get(i); - + if (i == 0) { // Starting endpoint - final SegmentEndpoint startEndpoint = new SegmentEndpoint(currentSegment.start, - currentSegment.startCutoffPlane); + final SegmentEndpoint startEndpoint = + new SegmentEndpoint(currentSegment.start, currentSegment.startCutoffPlane); endPoints.add(startEndpoint); - this.edgePoints = new GeoPoint[]{currentSegment.start}; + this.edgePoints = new GeoPoint[] {currentSegment.start}; continue; } - - endPoints.add(new SegmentEndpoint(currentSegment.start, - segments.get(i-1).endCutoffPlane, - currentSegment.startCutoffPlane)); + + endPoints.add( + new SegmentEndpoint( + currentSegment.start, + segments.get(i - 1).endCutoffPlane, + currentSegment.startCutoffPlane)); } // Do final endpoint - final PathSegment lastSegment = segments.get(segments.size()-1); - endPoints.add(new SegmentEndpoint(lastSegment.end, - lastSegment.endCutoffPlane)); - + final PathSegment lastSegment = segments.get(segments.size() - 1); + endPoints.add(new SegmentEndpoint(lastSegment.end, lastSegment.endCutoffPlane)); } /** * Constructor for deserialization. + * * @param planetModel is the planet model. * @param inputStream is the input stream. 
*/ - public GeoDegeneratePath(final PlanetModel planetModel, final InputStream inputStream) throws IOException { - this(planetModel, - SerializableObject.readPointArray(planetModel, inputStream)); + public GeoDegeneratePath(final PlanetModel planetModel, final InputStream inputStream) + throws IOException { + this(planetModel, SerializableObject.readPointArray(planetModel, inputStream)); } @Override @@ -152,12 +159,14 @@ class GeoDegeneratePath extends GeoBasePath { } @Override - public double computePathCenterDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { + public double computePathCenterDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { // Walk along path and keep track of the closest distance we find double closestDistance = Double.POSITIVE_INFINITY; // Segments first for (PathSegment segment : segments) { - final double segmentDistance = segment.pathCenterDistance(planetModel, distanceStyle, x, y, z); + final double segmentDistance = + segment.pathCenterDistance(planetModel, distanceStyle, x, y, z); if (segmentDistance < closestDistance) { closestDistance = segmentDistance; } @@ -173,12 +182,13 @@ class GeoDegeneratePath extends GeoBasePath { } @Override - public double computeNearestDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { + public double computeNearestDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { double currentDistance = 0.0; double minPathCenterDistance = Double.POSITIVE_INFINITY; double bestDistance = Double.POSITIVE_INFINITY; int segmentIndex = 0; - + for (SegmentEndpoint endpoint : endPoints) { final double endpointPathCenterDistance = endpoint.pathCenterDistance(distanceStyle, x, y, z); if (endpointPathCenterDistance < minPathCenterDistance) { @@ -189,28 +199,38 @@ class GeoDegeneratePath extends GeoBasePath { // Look at the following segment, if any if (segmentIndex < segments.size()) { final PathSegment segment = segments.get(segmentIndex++); - final double segmentPathCenterDistance = segment.pathCenterDistance(planetModel, distanceStyle, x, y, z); + final double segmentPathCenterDistance = + segment.pathCenterDistance(planetModel, distanceStyle, x, y, z); if (segmentPathCenterDistance < minPathCenterDistance) { minPathCenterDistance = segmentPathCenterDistance; - bestDistance = distanceStyle.aggregateDistances(currentDistance, segment.nearestPathDistance(planetModel, distanceStyle, x, y, z)); + bestDistance = + distanceStyle.aggregateDistances( + currentDistance, + segment.nearestPathDistance(planetModel, distanceStyle, x, y, z)); } - currentDistance = distanceStyle.aggregateDistances(currentDistance, segment.fullPathDistance(distanceStyle)); + currentDistance = + distanceStyle.aggregateDistances( + currentDistance, segment.fullPathDistance(distanceStyle)); } } return bestDistance; } @Override - protected double distance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { + protected double distance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { // Algorithm: // (1) If the point is within any of the segments along the path, return that value. // (2) If the point is within any of the segment end circles along the path, return that value. 
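    // For example, if the point lies on the third of three segments, the result is
    // fullPathDistance(segment 1) + fullPathDistance(segment 2) + the partial distance along
    // segment 3, all accumulated in the distance style's aggregation form and converted back
    // via fromAggregationForm() at the end.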
double currentDistance = 0.0; for (PathSegment segment : segments) { - double distance = segment.pathDistance(planetModel, distanceStyle, x,y,z); + double distance = segment.pathDistance(planetModel, distanceStyle, x, y, z); if (distance != Double.POSITIVE_INFINITY) - return distanceStyle.fromAggregationForm(distanceStyle.aggregateDistances(currentDistance, distance)); - currentDistance = distanceStyle.aggregateDistances(currentDistance, segment.fullPathDistance(distanceStyle)); + return distanceStyle.fromAggregationForm( + distanceStyle.aggregateDistances(currentDistance, distance)); + currentDistance = + distanceStyle.aggregateDistances( + currentDistance, segment.fullPathDistance(distanceStyle)); } int segmentIndex = 0; @@ -218,38 +238,47 @@ class GeoDegeneratePath extends GeoBasePath { for (SegmentEndpoint endpoint : endPoints) { double distance = endpoint.pathDistance(distanceStyle, x, y, z); if (distance != Double.POSITIVE_INFINITY) - return distanceStyle.fromAggregationForm(distanceStyle.aggregateDistances(currentDistance, distance)); + return distanceStyle.fromAggregationForm( + distanceStyle.aggregateDistances(currentDistance, distance)); if (segmentIndex < segments.size()) - currentDistance = distanceStyle.aggregateDistances(currentDistance, segments.get(segmentIndex++).fullPathDistance(distanceStyle)); + currentDistance = + distanceStyle.aggregateDistances( + currentDistance, segments.get(segmentIndex++).fullPathDistance(distanceStyle)); } return Double.POSITIVE_INFINITY; } @Override - protected double deltaDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { - // Since this is always called when a point is within the degenerate path, delta distance is always zero by definition. + protected double deltaDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { + // Since this is always called when a point is within the degenerate path, delta distance is + // always zero by definition. return 0.0; } - + @Override - protected void distanceBounds(final Bounds bounds, final DistanceStyle distanceStyle, final double distanceValue) { + protected void distanceBounds( + final Bounds bounds, final DistanceStyle distanceStyle, final double distanceValue) { // TBD: Compute actual bounds based on distance getBounds(bounds); } @Override - protected double outsideDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { + protected double outsideDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { double minDistance = Double.POSITIVE_INFINITY; for (final SegmentEndpoint endpoint : endPoints) { - final double newDistance = endpoint.outsideDistance(distanceStyle, x,y,z); - if (newDistance < minDistance) + final double newDistance = endpoint.outsideDistance(distanceStyle, x, y, z); + if (newDistance < minDistance) { minDistance = newDistance; + } } for (final PathSegment segment : segments) { final double newDistance = segment.outsideDistance(planetModel, distanceStyle, x, y, z); - if (newDistance < minDistance) + if (newDistance < minDistance) { minDistance = newDistance; + } } return minDistance; } @@ -275,7 +304,8 @@ class GeoDegeneratePath extends GeoBasePath { } @Override - public boolean intersects(final Plane plane, final GeoPoint[] notablePoints, final Membership... bounds) { + public boolean intersects( + final Plane plane, final GeoPoint[] notablePoints, final Membership... 
bounds) { // We look for an intersection with any of the exterior edges of the path. // We also have to look for intersections with the cones described by the endpoints. // Return "true" if any such intersections are found. @@ -285,8 +315,8 @@ class GeoDegeneratePath extends GeoBasePath { // any of the intersection points are within the bounds, then we've detected an intersection. // Well, sort of. We can detect intersections also due to overlap of segments with each other. // But that's an edge case and we won't be optimizing for it. - //System.err.println(" Looking for intersection of plane "+plane+" with path "+this); - + // System.err.println(" Looking for intersection of plane " + plane + " with path " + this); + // Since the endpoints are included in the path segments, we only need to do this if there are // no path segments if (endPoints.size() == 1) { @@ -335,11 +365,13 @@ class GeoDegeneratePath extends GeoBasePath { @Override public boolean equals(Object o) { - if (!(o instanceof GeoDegeneratePath)) + if (!(o instanceof GeoDegeneratePath)) { return false; + } GeoDegeneratePath p = (GeoDegeneratePath) o; - if (!super.equals(p)) + if (!super.equals(p)) { return false; + } return points.equals(p.points); } @@ -352,15 +384,20 @@ class GeoDegeneratePath extends GeoBasePath { @Override public String toString() { - return "GeoDegeneratePath: {planetmodel=" + planetModel+", points={" + points + "}}"; + return "GeoDegeneratePath: {planetmodel=" + planetModel + ", points={" + points + "}}"; } /** - * This is precalculated data for segment endpoint. Since the path is degenerate, there are several different cases: - * (1) The path consists of a single endpoint. In this case, the degenerate path consists of this one point. - * (2) This is the end of a path. There is a bounding plane passed in which describes the part of the world that is considered - * to belong to this endpoint. - * (3) Intersection. There are two cutoff planes, one for each end of the intersection. + * This is precalculated data for segment endpoint. Since the path is degenerate, there are + * several different cases: + * + *
+   * <ol>
+   *   <li>The path consists of a single endpoint. In this case, the degenerate path consists of
+   *       this one point.
+   *   <li>This is the end of a path. There is a bounding plane passed in which describes the part
+   *       of the world that is considered to belong to this endpoint.
+   *   <li>Intersection. There are two cutoff planes, one for each end of the intersection.
+   * </ol>
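+   *
+   * <p>For example, a three-point path A-B-C produces endpoints at A (case 2), B (case 3, where
+   * segments A-B and B-C meet), and C (case 2), while a one-point path produces a single
+   * case-(1) endpoint.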
   */
  private static class SegmentEndpoint {
    /** The center point of the endpoint */
@@ -370,149 +407,185 @@ class GeoDegeneratePath extends GeoBasePath {
    /** Notable points for this segment endpoint */
    public final GeoPoint[] notablePoints;
    /** No notable points from the circle itself */
-    public final static GeoPoint[] circlePoints = new GeoPoint[0];
+    public static final GeoPoint[] circlePoints = new GeoPoint[0];
    /** Null membership */
-    public final static Membership[] NO_MEMBERSHIP = new Membership[0];
-
-    /** Constructor for case (1).
-     *@param point is the center point.
+    public static final Membership[] NO_MEMBERSHIP = new Membership[0];
+
+    /**
+     * Constructor for case (1).
+     *
+     * @param point is the center point.
     */
    public SegmentEndpoint(final GeoPoint point) {
      this.point = point;
      this.cutoffPlanes = NO_MEMBERSHIP;
      this.notablePoints = circlePoints;
    }
-
-    /** Constructor for case (2).
-     * Generate an endpoint, given a single cutoff plane plus upper and lower edge points.
-     *@param point is the center point.
-     *@param cutoffPlane is the plane from the adjoining path segment marking the boundary between this endpoint and that segment.
+
+    /**
+     * Constructor for case (2). Generate an endpoint, given a single cutoff plane plus upper and
+     * lower edge points.
+     *
+     * @param point is the center point.
+     * @param cutoffPlane is the plane from the adjoining path segment marking the boundary between
+     *     this endpoint and that segment.
     */
    public SegmentEndpoint(final GeoPoint point, final SidedPlane cutoffPlane) {
      this.point = point;
-      this.cutoffPlanes = new Membership[]{new SidedPlane(cutoffPlane)};
-      this.notablePoints = new GeoPoint[]{point};
+      this.cutoffPlanes = new Membership[] {new SidedPlane(cutoffPlane)};
+      this.notablePoints = new GeoPoint[] {point};
    }
 
-    /** Constructor for case (3).
-     * Generate an endpoint, given two cutoff planes.
-     *@param point is the center.
-     *@param cutoffPlane1 is one adjoining path segment cutoff plane.
-     *@param cutoffPlane2 is another adjoining path segment cutoff plane.
+    /**
+     * Constructor for case (3). Generate an endpoint, given two cutoff planes.
+     *
+     * @param point is the center.
+     * @param cutoffPlane1 is one adjoining path segment cutoff plane.
+     * @param cutoffPlane2 is another adjoining path segment cutoff plane.
     */
-    public SegmentEndpoint(final GeoPoint point,
-        final SidedPlane cutoffPlane1, final SidedPlane cutoffPlane2) {
+    public SegmentEndpoint(
+        final GeoPoint point, final SidedPlane cutoffPlane1, final SidedPlane cutoffPlane2) {
      this.point = point;
-      this.cutoffPlanes = new Membership[]{new SidedPlane(cutoffPlane1), new SidedPlane(cutoffPlane2)};
-      this.notablePoints = new GeoPoint[]{point};
+      this.cutoffPlanes =
+          new Membership[] {new SidedPlane(cutoffPlane1), new SidedPlane(cutoffPlane2)};
+      this.notablePoints = new GeoPoint[] {point};
    }
-
-    /** Check if point is within this endpoint.
-     *@param point is the point.
-     *@return true of within.
+
+    /**
+     * Check if point is within this endpoint.
+     *
+     * @param point is the point.
+     * @return true if within.
     */
    public boolean isWithin(final Vector point) {
      return this.point.isIdentical(point.x, point.y, point.z);
    }
 
-    /** Check if point is within this endpoint.
-     *@param x is the point x.
-     *@param y is the point y.
-     *@param z is the point z.
-     *@return true of within.
+    /**
+     * Check if point is within this endpoint.
+     *
+     * @param x is the point x.
+     * @param y is the point y.
+     * @param z is the point z.
+     * @return true if within.
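+     *
+     * <p>Since the endpoint is degenerate, "within" means the given point is identical to the
+     * endpoint's center point.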
*/ public boolean isWithin(final double x, final double y, final double z) { return this.point.isIdentical(x, y, z); } - /** Compute interior path distance. - *@param distanceStyle is the distance style. - *@param x is the point x. - *@param y is the point y. - *@param z is the point z. - *@return the distance metric, in aggregation form. + /** + * Compute interior path distance. + * + * @param distanceStyle is the distance style. + * @param x is the point x. + * @param y is the point y. + * @param z is the point z. + * @return the distance metric, in aggregation form. */ - public double pathDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { - if (!isWithin(x,y,z)) + public double pathDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { + if (!isWithin(x, y, z)) { return Double.POSITIVE_INFINITY; + } return distanceStyle.toAggregationForm(distanceStyle.computeDistance(this.point, x, y, z)); } - /** Compute nearest path distance. - *@param distanceStyle is the distance style. - *@param x is the point x. - *@param y is the point y. - *@param z is the point z. - *@return the distance metric (always value zero), in aggregation form, or POSITIVE_INFINITY - * if the point is not within the bounds of the endpoint. + /** + * Compute nearest path distance. + * + * @param distanceStyle is the distance style. + * @param x is the point x. + * @param y is the point y. + * @param z is the point z. + * @return the distance metric (always value zero), in aggregation form, or POSITIVE_INFINITY if + * the point is not within the bounds of the endpoint. */ - public double nearestPathDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { + public double nearestPathDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { for (final Membership m : cutoffPlanes) { - if (!m.isWithin(x,y,z)) { + if (!m.isWithin(x, y, z)) { return Double.POSITIVE_INFINITY; } } return distanceStyle.toAggregationForm(0.0); } - /** Compute path center distance. - *@param distanceStyle is the distance style. - *@param x is the point x. - *@param y is the point y. - *@param z is the point z. - *@return the distance metric, or POSITIVE_INFINITY - * if the point is not within the bounds of the endpoint. + /** + * Compute path center distance. + * + * @param distanceStyle is the distance style. + * @param x is the point x. + * @param y is the point y. + * @param z is the point z. + * @return the distance metric, or POSITIVE_INFINITY if the point is not within the bounds of + * the endpoint. */ - public double pathCenterDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { + public double pathCenterDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { for (final Membership m : cutoffPlanes) { - if (!m.isWithin(x,y,z)) { + if (!m.isWithin(x, y, z)) { return Double.POSITIVE_INFINITY; } } return distanceStyle.computeDistance(this.point, x, y, z); } - /** Compute external distance. - *@param distanceStyle is the distance style. - *@param x is the point x. - *@param y is the point y. - *@param z is the point z. - *@return the distance metric. + /** + * Compute external distance. + * + * @param distanceStyle is the distance style. + * @param x is the point x. + * @param y is the point y. + * @param z is the point z. + * @return the distance metric. 
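+     *
+     * <p>Unlike the path-distance methods above, this does not consult the cutoff planes; it is
+     * simply the distance to the endpoint's center point.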
     */
-    public double outsideDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) {
+    public double outsideDistance(
+        final DistanceStyle distanceStyle, final double x, final double y, final double z) {
      return distanceStyle.computeDistance(this.point, x, y, z);
    }
 
-    /** Determine if this endpoint intersects a specified plane.
-     *@param planetModel is the planet model.
-     *@param p is the plane.
-     *@param notablePoints are the points associated with the plane.
-     *@param bounds are any bounds which the intersection must lie within.
-     *@return true if there is a matching intersection.
+    /**
+     * Determine if this endpoint intersects a specified plane.
+     *
+     * @param planetModel is the planet model.
+     * @param p is the plane.
+     * @param notablePoints are the points associated with the plane.
+     * @param bounds are any bounds which the intersection must lie within.
+     * @return true if there is a matching intersection.
     */
-    public boolean intersects(final PlanetModel planetModel, final Plane p, final GeoPoint[] notablePoints, final Membership[] bounds) {
-      // If not on the plane, no intersection
-      if (!p.evaluateIsZero(point))
-        return false;
-
-      for (Membership m : bounds) {
-        if (!m.isWithin(point))
+    public boolean intersects(
+        final PlanetModel planetModel,
+        final Plane p,
+        final GeoPoint[] notablePoints,
+        final Membership[] bounds) {
+      // If not on the plane, no intersection
+      if (!p.evaluateIsZero(point)) {
          return false;
-      }
-      return true;
+      }
+
+      for (Membership m : bounds) {
+        if (!m.isWithin(point)) {
+          return false;
+        }
+      }
+      return true;
    }
 
-    /** Determine if this endpoint intersects a GeoShape.
-     *@param geoShape is the GeoShape.
-     *@return true if there is shape intersect this endpoint.
+    /**
+     * Determine if this endpoint intersects a GeoShape.
+     *
+     * @param geoShape is the GeoShape.
+     * @return true if the shape intersects this endpoint.
     */
    public boolean intersects(final GeoShape geoShape) {
      return geoShape.isWithin(point);
    }
 
-    /** Get the bounds for a segment endpoint.
-     *@param planetModel is the planet model.
-     *@param bounds are the bounds to be modified.
+    /**
+     * Get the bounds for a segment endpoint.
+     *
+     * @param planetModel is the planet model.
+     * @param bounds are the bounds to be modified.
     */
    public void getBounds(final PlanetModel planetModel, Bounds bounds) {
      bounds.addPoint(point);
@@ -520,8 +593,9 @@ class GeoDegeneratePath extends GeoBasePath {
 
    @Override
    public boolean equals(Object o) {
-      if (!(o instanceof SegmentEndpoint))
+      if (!(o instanceof SegmentEndpoint)) {
        return false;
+      }
      SegmentEndpoint other = (SegmentEndpoint) o;
      return point.equals(other.point);
    }
@@ -537,16 +611,14 @@ class GeoDegeneratePath extends GeoBasePath {
    }
  }
 
-  /**
-   * This is the pre-calculated data for a path segment.
-   */
+  /** This is the pre-calculated data for a path segment. */
  private static class PathSegment {
    /** Starting point of the segment */
    public final GeoPoint start;
    /** End point of the segment */
    public final GeoPoint end;
    /** Place to keep any complete segment distances we've calculated so far */
-    public final Map<DistanceStyle, Double> fullDistanceCache = new HashMap<DistanceStyle, Double>();
+    public final Map<DistanceStyle, Double> fullDistanceCache = new HashMap<>();
    /** Normalized plane connecting the two points and going through world center */
    public final Plane normalizedConnectingPlane;
    /** Plane going through the center and start point, marking the start edge of the segment */
@@ -556,239 +628,329 @@ class GeoDegeneratePath extends GeoBasePath {
    /** Notable points for the connecting plane */
    public final GeoPoint[] connectingPlanePoints;
 
-    /** Construct a path segment.
-     *@param planetModel is the planet model.
-     *@param start is the starting point.
-     *@param end is the ending point.
-     *@param normalizedConnectingPlane is the connecting plane.
+    /**
+     * Construct a path segment.
+     *
+     * @param planetModel is the planet model.
+     * @param start is the starting point.
+     * @param end is the ending point.
+     * @param normalizedConnectingPlane is the connecting plane.
     */
-    public PathSegment(final PlanetModel planetModel, final GeoPoint start, final GeoPoint end,
-        final Plane normalizedConnectingPlane) {
+    public PathSegment(
+        final PlanetModel planetModel,
+        final GeoPoint start,
+        final GeoPoint end,
+        final Plane normalizedConnectingPlane) {
      this.start = start;
      this.end = end;
      this.normalizedConnectingPlane = normalizedConnectingPlane;
-
+
      // Cutoff planes use opposite endpoints as correct side examples
      startCutoffPlane = new SidedPlane(end, normalizedConnectingPlane, start);
      endCutoffPlane = new SidedPlane(start, normalizedConnectingPlane, end);
-      connectingPlanePoints = new GeoPoint[]{start, end};
+      connectingPlanePoints = new GeoPoint[] {start, end};
    }
 
-    /** Compute the full distance along this path segment.
-     *@param distanceStyle is the distance style.
-     *@return the distance metric, in aggregation form.
+    /**
+     * Compute the full distance along this path segment.
+     *
+     * @param distanceStyle is the distance style.
+     * @return the distance metric, in aggregation form.
     */
    public double fullPathDistance(final DistanceStyle distanceStyle) {
      synchronized (fullDistanceCache) {
        Double dist = fullDistanceCache.get(distanceStyle);
        if (dist == null) {
-          dist = distanceStyle.toAggregationForm(distanceStyle.computeDistance(start, end.x, end.y, end.z));
+          dist =
+              distanceStyle.toAggregationForm(
+                  distanceStyle.computeDistance(start, end.x, end.y, end.z));
          fullDistanceCache.put(distanceStyle, dist);
        }
        return dist.doubleValue();
      }
    }
-
-    /** Check if point is within this segment.
-     *@param point is the point.
-     *@return true of within.
+
+    /**
+     * Check if point is within this segment.
+     *
+     * @param point is the point.
+     * @return true if within.
     */
    public boolean isWithin(final Vector point) {
-      return startCutoffPlane.isWithin(point) &&
-        endCutoffPlane.isWithin(point) &&
-        normalizedConnectingPlane.evaluateIsZero(point);
+      return startCutoffPlane.isWithin(point)
+          && endCutoffPlane.isWithin(point)
+          && normalizedConnectingPlane.evaluateIsZero(point);
    }
 
-    /** Check if point is within this segment.
-     *@param x is the point x.
-     *@param y is the point y.
-     *@param z is the point z.
-     *@return true of within.
+    /**
+     * Check if point is within this segment.
+     *
+     * @param x is the point x.
+     * @param y is the point y.
+     * @param z is the point z.
+     * @return true if within.
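+     *
+     * <p>A point is within the segment when it lies on the connecting plane, between the start
+     * and end cutoff planes.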
*/ public boolean isWithin(final double x, final double y, final double z) { - return startCutoffPlane.isWithin(x, y, z) && - endCutoffPlane.isWithin(x, y, z) && - normalizedConnectingPlane.evaluateIsZero(x, y, z); + return startCutoffPlane.isWithin(x, y, z) + && endCutoffPlane.isWithin(x, y, z) + && normalizedConnectingPlane.evaluateIsZero(x, y, z); } - /** Compute path center distance. - *@param planetModel is the planet model. - *@param distanceStyle is the distance style. - *@param x is the point x. - *@param y is the point y. - *@param z is the point z. - *@return the distance metric, or Double.POSITIVE_INFINITY if outside this segment + /** + * Compute path center distance. + * + * @param planetModel is the planet model. + * @param distanceStyle is the distance style. + * @param x is the point x. + * @param y is the point y. + * @param z is the point z. + * @return the distance metric, or Double.POSITIVE_INFINITY if outside this segment */ - public double pathCenterDistance(final PlanetModel planetModel, final DistanceStyle distanceStyle, final double x, final double y, final double z) { + public double pathCenterDistance( + final PlanetModel planetModel, + final DistanceStyle distanceStyle, + final double x, + final double y, + final double z) { // First, if this point is outside the endplanes of the segment, return POSITIVE_INFINITY. if (!startCutoffPlane.isWithin(x, y, z) || !endCutoffPlane.isWithin(x, y, z)) { return Double.POSITIVE_INFINITY; } - // (1) Compute normalizedPerpPlane. If degenerate, then there is no such plane, which means that the point given - // is insufficient to distinguish between a family of such planes. This can happen only if the point is one of the - // "poles", imagining the normalized plane to be the "equator". In that case, the distance returned should be zero. + // (1) Compute normalizedPerpPlane. If degenerate, then there is no such plane, which means + // that the point given is insufficient to distinguish between a family of such planes. + // This can happen only if the point is one of the "poles", imagining the normalized plane + // to be the "equator". In that case, the distance returned should be zero. // Want no allocations or expensive operations! 
so we do this the hard way final double perpX = normalizedConnectingPlane.y * z - normalizedConnectingPlane.z * y; final double perpY = normalizedConnectingPlane.z * x - normalizedConnectingPlane.x * z; final double perpZ = normalizedConnectingPlane.x * y - normalizedConnectingPlane.y * x; final double magnitude = Math.sqrt(perpX * perpX + perpY * perpY + perpZ * perpZ); - if (Math.abs(magnitude) < Vector.MINIMUM_RESOLUTION) + if (Math.abs(magnitude) < Vector.MINIMUM_RESOLUTION) { return distanceStyle.computeDistance(start, x, y, z); - final double normFactor = 1.0/magnitude; - final Plane normalizedPerpPlane = new Plane(perpX * normFactor, perpY * normFactor, perpZ * normFactor, 0.0); - - final GeoPoint[] intersectionPoints = normalizedConnectingPlane.findIntersections(planetModel, normalizedPerpPlane); + } + final double normFactor = 1.0 / magnitude; + final Plane normalizedPerpPlane = + new Plane(perpX * normFactor, perpY * normFactor, perpZ * normFactor, 0.0); + + final GeoPoint[] intersectionPoints = + normalizedConnectingPlane.findIntersections(planetModel, normalizedPerpPlane); GeoPoint thePoint; - if (intersectionPoints.length == 0) - throw new RuntimeException("Can't find world intersection for point x="+x+" y="+y+" z="+z); - else if (intersectionPoints.length == 1) + if (intersectionPoints.length == 0) { + throw new RuntimeException( + "Can't find world intersection for point x=" + x + " y=" + y + " z=" + z); + } else if (intersectionPoints.length == 1) { thePoint = intersectionPoints[0]; - else { - if (startCutoffPlane.isWithin(intersectionPoints[0]) && endCutoffPlane.isWithin(intersectionPoints[0])) + } else { + if (startCutoffPlane.isWithin(intersectionPoints[0]) + && endCutoffPlane.isWithin(intersectionPoints[0])) { thePoint = intersectionPoints[0]; - else if (startCutoffPlane.isWithin(intersectionPoints[1]) && endCutoffPlane.isWithin(intersectionPoints[1])) + } else if (startCutoffPlane.isWithin(intersectionPoints[1]) + && endCutoffPlane.isWithin(intersectionPoints[1])) { thePoint = intersectionPoints[1]; - else - throw new RuntimeException("Can't find world intersection for point x="+x+" y="+y+" z="+z); + } else { + throw new RuntimeException( + "Can't find world intersection for point x=" + x + " y=" + y + " z=" + z); + } } return distanceStyle.computeDistance(thePoint, x, y, z); } - - /** Compute nearest path distance. - *@param planetModel is the planet model. - *@param distanceStyle is the distance style. - *@param x is the point x. - *@param y is the point y. - *@param z is the point z. - *@return the distance metric, in aggregation form, or Double.POSITIVE_INFINITY if outside this segment + + /** + * Compute nearest path distance. + * + * @param planetModel is the planet model. + * @param distanceStyle is the distance style. + * @param x is the point x. + * @param y is the point y. + * @param z is the point z. + * @return the distance metric, in aggregation form, or Double.POSITIVE_INFINITY if outside this + * segment */ - public double nearestPathDistance(final PlanetModel planetModel, final DistanceStyle distanceStyle, final double x, final double y, final double z) { + public double nearestPathDistance( + final PlanetModel planetModel, + final DistanceStyle distanceStyle, + final double x, + final double y, + final double z) { // First, if this point is outside the endplanes of the segment, return POSITIVE_INFINITY. 
if (!startCutoffPlane.isWithin(x, y, z) || !endCutoffPlane.isWithin(x, y, z)) { return Double.POSITIVE_INFINITY; } - // (1) Compute normalizedPerpPlane. If degenerate, then there is no such plane, which means that the point given - // is insufficient to distinguish between a family of such planes. This can happen only if the point is one of the - // "poles", imagining the normalized plane to be the "equator". In that case, the distance returned should be zero. + // (1) Compute normalizedPerpPlane. If degenerate, then there is no such plane, which means + // that the point given is insufficient to distinguish between a family of such planes. + // This can happen only if the point is one of the "poles", imagining the normalized plane + // to be the "equator". In that case, the distance returned should be zero. // Want no allocations or expensive operations! so we do this the hard way final double perpX = normalizedConnectingPlane.y * z - normalizedConnectingPlane.z * y; final double perpY = normalizedConnectingPlane.z * x - normalizedConnectingPlane.x * z; final double perpZ = normalizedConnectingPlane.x * y - normalizedConnectingPlane.y * x; final double magnitude = Math.sqrt(perpX * perpX + perpY * perpY + perpZ * perpZ); - if (Math.abs(magnitude) < Vector.MINIMUM_RESOLUTION) + if (Math.abs(magnitude) < Vector.MINIMUM_RESOLUTION) { return distanceStyle.toAggregationForm(0.0); - final double normFactor = 1.0/magnitude; - final Plane normalizedPerpPlane = new Plane(perpX * normFactor, perpY * normFactor, perpZ * normFactor, 0.0); - - final GeoPoint[] intersectionPoints = normalizedConnectingPlane.findIntersections(planetModel, normalizedPerpPlane); - GeoPoint thePoint; - if (intersectionPoints.length == 0) - throw new RuntimeException("Can't find world intersection for point x="+x+" y="+y+" z="+z); - else if (intersectionPoints.length == 1) - thePoint = intersectionPoints[0]; - else { - if (startCutoffPlane.isWithin(intersectionPoints[0]) && endCutoffPlane.isWithin(intersectionPoints[0])) - thePoint = intersectionPoints[0]; - else if (startCutoffPlane.isWithin(intersectionPoints[1]) && endCutoffPlane.isWithin(intersectionPoints[1])) - thePoint = intersectionPoints[1]; - else - throw new RuntimeException("Can't find world intersection for point x="+x+" y="+y+" z="+z); } - return distanceStyle.toAggregationForm(distanceStyle.computeDistance(start, thePoint.x, thePoint.y, thePoint.z)); + final double normFactor = 1.0 / magnitude; + final Plane normalizedPerpPlane = + new Plane(perpX * normFactor, perpY * normFactor, perpZ * normFactor, 0.0); + + final GeoPoint[] intersectionPoints = + normalizedConnectingPlane.findIntersections(planetModel, normalizedPerpPlane); + GeoPoint thePoint; + if (intersectionPoints.length == 0) { + throw new RuntimeException( + "Can't find world intersection for point x=" + x + " y=" + y + " z=" + z); + } else if (intersectionPoints.length == 1) { + thePoint = intersectionPoints[0]; + } else { + if (startCutoffPlane.isWithin(intersectionPoints[0]) + && endCutoffPlane.isWithin(intersectionPoints[0])) { + thePoint = intersectionPoints[0]; + } else if (startCutoffPlane.isWithin(intersectionPoints[1]) + && endCutoffPlane.isWithin(intersectionPoints[1])) { + thePoint = intersectionPoints[1]; + } else { + throw new RuntimeException( + "Can't find world intersection for point x=" + x + " y=" + y + " z=" + z); + } + } + return distanceStyle.toAggregationForm( + distanceStyle.computeDistance(start, thePoint.x, thePoint.y, thePoint.z)); } - - /** Compute interior path distance. 
- *@param planetModel is the planet model. - *@param distanceStyle is the distance style. - *@param x is the point x. - *@param y is the point y. - *@param z is the point z. - *@return the distance metric, in aggregation form. + /** + * Compute interior path distance. + * + * @param planetModel is the planet model. + * @param distanceStyle is the distance style. + * @param x is the point x. + * @param y is the point y. + * @param z is the point z. + * @return the distance metric, in aggregation form. */ - public double pathDistance(final PlanetModel planetModel, final DistanceStyle distanceStyle, final double x, final double y, final double z) { - if (!isWithin(x,y,z)) - return Double.POSITIVE_INFINITY; + public double pathDistance( + final PlanetModel planetModel, + final DistanceStyle distanceStyle, + final double x, + final double y, + final double z) { + if (!isWithin(x, y, z)) return Double.POSITIVE_INFINITY; - // (1) Compute normalizedPerpPlane. If degenerate, then return point distance from start to point. + // (1) Compute normalizedPerpPlane. If degenerate, then return point distance from start to + // point. // Want no allocations or expensive operations! so we do this the hard way final double perpX = normalizedConnectingPlane.y * z - normalizedConnectingPlane.z * y; final double perpY = normalizedConnectingPlane.z * x - normalizedConnectingPlane.x * z; final double perpZ = normalizedConnectingPlane.x * y - normalizedConnectingPlane.y * x; final double magnitude = Math.sqrt(perpX * perpX + perpY * perpY + perpZ * perpZ); - if (Math.abs(magnitude) < Vector.MINIMUM_RESOLUTION) - return distanceStyle.toAggregationForm(distanceStyle.computeDistance(start, x,y,z)); - final double normFactor = 1.0/magnitude; - final Plane normalizedPerpPlane = new Plane(perpX * normFactor, perpY * normFactor, perpZ * normFactor, 0.0); - - // Old computation: too expensive, because it calculates the intersection point twice. 
- //return distanceStyle.computeDistance(planetModel, normalizedConnectingPlane, x, y, z, startCutoffPlane, endCutoffPlane) + - // distanceStyle.computeDistance(planetModel, normalizedPerpPlane, start.x, start.y, start.z, upperConnectingPlane, lowerConnectingPlane); - - final GeoPoint[] intersectionPoints = normalizedConnectingPlane.findIntersections(planetModel, normalizedPerpPlane); - GeoPoint thePoint; - if (intersectionPoints.length == 0) - throw new RuntimeException("Can't find world intersection for point x="+x+" y="+y+" z="+z); - else if (intersectionPoints.length == 1) - thePoint = intersectionPoints[0]; - else { - if (startCutoffPlane.isWithin(intersectionPoints[0]) && endCutoffPlane.isWithin(intersectionPoints[0])) - thePoint = intersectionPoints[0]; - else if (startCutoffPlane.isWithin(intersectionPoints[1]) && endCutoffPlane.isWithin(intersectionPoints[1])) - thePoint = intersectionPoints[1]; - else - throw new RuntimeException("Can't find world intersection for point x="+x+" y="+y+" z="+z); + if (Math.abs(magnitude) < Vector.MINIMUM_RESOLUTION) { + return distanceStyle.toAggregationForm(distanceStyle.computeDistance(start, x, y, z)); } - return distanceStyle.aggregateDistances(distanceStyle.toAggregationForm(distanceStyle.computeDistance(thePoint, x, y, z)), - distanceStyle.toAggregationForm(distanceStyle.computeDistance(start, thePoint.x, thePoint.y, thePoint.z))); + final double normFactor = 1.0 / magnitude; + final Plane normalizedPerpPlane = + new Plane(perpX * normFactor, perpY * normFactor, perpZ * normFactor, 0.0); + + // Old computation: too expensive, because it calculates the intersection point twice. + // return distanceStyle.computeDistance(planetModel, normalizedConnectingPlane, x, y, z, + // startCutoffPlane, endCutoffPlane) + + // distanceStyle.computeDistance(planetModel, normalizedPerpPlane, start.x, start.y, start.z, + // upperConnectingPlane, lowerConnectingPlane); + + final GeoPoint[] intersectionPoints = + normalizedConnectingPlane.findIntersections(planetModel, normalizedPerpPlane); + GeoPoint thePoint; + if (intersectionPoints.length == 0) { + throw new RuntimeException( + "Can't find world intersection for point x=" + x + " y=" + y + " z=" + z); + } else if (intersectionPoints.length == 1) { + thePoint = intersectionPoints[0]; + } else { + if (startCutoffPlane.isWithin(intersectionPoints[0]) + && endCutoffPlane.isWithin(intersectionPoints[0])) { + thePoint = intersectionPoints[0]; + } else if (startCutoffPlane.isWithin(intersectionPoints[1]) + && endCutoffPlane.isWithin(intersectionPoints[1])) { + thePoint = intersectionPoints[1]; + } else { + throw new RuntimeException( + "Can't find world intersection for point x=" + x + " y=" + y + " z=" + z); + } + } + return distanceStyle.aggregateDistances( + distanceStyle.toAggregationForm(distanceStyle.computeDistance(thePoint, x, y, z)), + distanceStyle.toAggregationForm( + distanceStyle.computeDistance(start, thePoint.x, thePoint.y, thePoint.z))); } - /** Compute external distance. - *@param planetModel is the planet model. - *@param distanceStyle is the distance style. - *@param x is the point x. - *@param y is the point y. - *@param z is the point z. - *@return the distance metric. + /** + * Compute external distance. + * + * @param planetModel is the planet model. + * @param distanceStyle is the distance style. + * @param x is the point x. + * @param y is the point y. + * @param z is the point z. + * @return the distance metric. 
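+     *
+     * <p>This is the minimum of the bounded distance to the connecting plane and the distances
+     * to the segment's two endpoints.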
     */
-    public double outsideDistance(final PlanetModel planetModel, final DistanceStyle distanceStyle, final double x, final double y, final double z) {
-      final double distance = distanceStyle.computeDistance(planetModel, normalizedConnectingPlane, x,y,z, startCutoffPlane, endCutoffPlane);
-      final double startDistance = distanceStyle.computeDistance(start, x,y,z);
-      final double endDistance = distanceStyle.computeDistance(end, x,y,z);
-      return Math.min(
-        Math.min(startDistance, endDistance),
-        distance);
+    public double outsideDistance(
+        final PlanetModel planetModel,
+        final DistanceStyle distanceStyle,
+        final double x,
+        final double y,
+        final double z) {
+      final double distance =
+          distanceStyle.computeDistance(
+              planetModel, normalizedConnectingPlane, x, y, z, startCutoffPlane, endCutoffPlane);
+      final double startDistance = distanceStyle.computeDistance(start, x, y, z);
+      final double endDistance = distanceStyle.computeDistance(end, x, y, z);
+      return Math.min(Math.min(startDistance, endDistance), distance);
    }
 
-    /** Determine if this endpoint intersects a specified plane.
-     *@param planetModel is the planet model.
-     *@param p is the plane.
-     *@param notablePoints are the points associated with the plane.
-     *@param bounds are any bounds which the intersection must lie within.
-     *@return true if there is a matching intersection.
+    /**
+     * Determine if this segment intersects a specified plane.
+     *
+     * @param planetModel is the planet model.
+     * @param p is the plane.
+     * @param notablePoints are the points associated with the plane.
+     * @param bounds are any bounds which the intersection must lie within.
+     * @return true if there is a matching intersection.
     */
-    public boolean intersects(final PlanetModel planetModel, final Plane p, final GeoPoint[] notablePoints, final Membership[] bounds) {
-      return normalizedConnectingPlane.intersects(planetModel, p, connectingPlanePoints, notablePoints, bounds, startCutoffPlane, endCutoffPlane);
+    public boolean intersects(
+        final PlanetModel planetModel,
+        final Plane p,
+        final GeoPoint[] notablePoints,
+        final Membership[] bounds) {
+      return normalizedConnectingPlane.intersects(
+          planetModel,
+          p,
+          connectingPlanePoints,
+          notablePoints,
+          bounds,
+          startCutoffPlane,
+          endCutoffPlane);
    }
 
-    /** Determine if this endpoint intersects a specified GeoShape.
-     *@param geoShape is the GeoShape.
-     *@return true if there GeoShape intersects this endpoint.
+    /**
+     * Determine if this segment intersects a specified GeoShape.
+     *
+     * @param geoShape is the GeoShape.
+     * @return true if the GeoShape intersects this segment.
     */
    public boolean intersects(final GeoShape geoShape) {
-      return geoShape.intersects(normalizedConnectingPlane, connectingPlanePoints, startCutoffPlane, endCutoffPlane);
+      return geoShape.intersects(
+          normalizedConnectingPlane, connectingPlanePoints, startCutoffPlane, endCutoffPlane);
    }
 
-    /** Get the bounds for a segment endpoint.
-     *@param planetModel is the planet model.
-     *@param bounds are the bounds to be modified.
+    /**
+     * Get the bounds for a path segment.
+     *
+     * @param planetModel is the planet model.
+     * @param bounds are the bounds to be modified.
*/ public void getBounds(final PlanetModel planetModel, Bounds bounds) { // We need to do all bounding planes as well as corner points - bounds.addPoint(start).addPoint(end) - .addPlane(planetModel, normalizedConnectingPlane, startCutoffPlane, endCutoffPlane); + bounds + .addPoint(start) + .addPoint(end) + .addPlane(planetModel, normalizedConnectingPlane, startCutoffPlane, endCutoffPlane); } - } - } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDegeneratePoint.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDegeneratePoint.java index fe9ac7921a7..9040be0212e 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDegeneratePoint.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDegeneratePoint.java @@ -16,12 +16,12 @@ */ package org.apache.lucene.spatial3d.geom; -import java.io.InputStream; import java.io.IOException; +import java.io.InputStream; /** - * This class represents a degenerate point bounding box. - * It is not a simple GeoPoint because we must have the latitude and longitude. + * This class represents a degenerate point bounding box. It is not a simple GeoPoint because we + * must have the latitude and longitude. * * @lucene.internal */ @@ -31,27 +31,32 @@ class GeoDegeneratePoint extends GeoPoint implements GeoPointShape { /** Edge point is an area containing just this */ protected final GeoPoint[] edgePoints; - /** Constructor. - *@param planetModel is the planet model to use. - *@param lat is the latitude. - *@param lon is the longitude. + /** + * Constructor. + * + * @param planetModel is the planet model to use. + * @param lat is the latitude. + * @param lon is the longitude. */ public GeoDegeneratePoint(final PlanetModel planetModel, final double lat, final double lon) { super(planetModel, lat, lon); this.planetModel = planetModel; - this.edgePoints = new GeoPoint[]{this}; + this.edgePoints = new GeoPoint[] {this}; } - /** Constructor for deserialization. - *@param planetModel is the planet model to use. - *@param inputStream is the input stream. + /** + * Constructor for deserialization. + * + * @param planetModel is the planet model to use. + * @param inputStream is the input stream. */ - public GeoDegeneratePoint(final PlanetModel planetModel, final InputStream inputStream) throws IOException { + public GeoDegeneratePoint(final PlanetModel planetModel, final InputStream inputStream) + throws IOException { super(planetModel, inputStream); this.planetModel = planetModel; - this.edgePoints = new GeoPoint[]{this}; + this.edgePoints = new GeoPoint[] {this}; } - + @Override public PlanetModel getPlanetModel() { return planetModel; @@ -63,7 +68,8 @@ class GeoDegeneratePoint extends GeoPoint implements GeoPointShape { final double newBottomLat = latitude - angle; final double newLeftLon = longitude - angle; final double newRightLon = longitude + angle; - return GeoBBoxFactory.makeGeoBBox(planetModel, newTopLat, newBottomLat, newLeftLon, newRightLon); + return GeoBBoxFactory.makeGeoBBox( + planetModel, newTopLat, newBottomLat, newLeftLon, newRightLon); } @Override @@ -72,24 +78,28 @@ class GeoDegeneratePoint extends GeoPoint implements GeoPointShape { } @Override - public boolean intersects(final Plane plane, final GeoPoint[] notablePoints, final Membership... bounds) { + public boolean intersects( + final Plane plane, final GeoPoint[] notablePoints, final Membership... 
bounds) { // If not on the plane, no intersection - if (!plane.evaluateIsZero(this)) + if (!plane.evaluateIsZero(this)) { return false; + } for (Membership m : bounds) { - if (!m.isWithin(this)) + if (!m.isWithin(this)) { return false; + } } return true; } @Override public boolean intersects(GeoShape geoShape) { - // We have no way of computing this properly, so return isWithin(), as we are allowed by contract. + // We have no way of computing this properly, so return isWithin(), as we are allowed by + // contract. return geoShape.isWithin(this); } - + @Override public void getBounds(Bounds bounds) { bounds.addPoint(this); @@ -101,21 +111,33 @@ class GeoDegeneratePoint extends GeoPoint implements GeoPointShape { } @Override - public double computeOutsideDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { - return distanceStyle.computeDistance(this, x,y,z); + public double computeOutsideDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { + return distanceStyle.computeDistance(this, x, y, z); } @Override public boolean equals(Object o) { - if (!(o instanceof GeoDegeneratePoint)) + if (!(o instanceof GeoDegeneratePoint)) { return false; + } GeoDegeneratePoint other = (GeoDegeneratePoint) o; return super.equals(other) && other.latitude == latitude && other.longitude == longitude; } @Override public String toString() { - return "GeoDegeneratePoint: {planetmodel="+planetModel+", lat=" + latitude + "(" + latitude * 180.0 / Math.PI + "), lon=" + longitude + "(" + longitude * 180.0 / Math.PI + ")}"; + return "GeoDegeneratePoint: {planetmodel=" + + planetModel + + ", lat=" + + latitude + + "(" + + latitude * 180.0 / Math.PI + + "), lon=" + + longitude + + "(" + + longitude * 180.0 / Math.PI + + ")}"; } @Override @@ -141,25 +163,26 @@ class GeoDegeneratePoint extends GeoPoint implements GeoPointShape { @Override public int getRelationship(final GeoShape shape) { if (shape.isWithin(this)) { - //System.err.println("Degenerate point "+this+" is WITHIN shape "+shape); + // System.err.println("Degenerate point " + this + " is WITHIN shape " + shape); return CONTAINS; } - //System.err.println("Degenerate point "+this+" is NOT within shape "+shape); + // System.err.println("Degenerate point " + this + " is NOT within shape " + shape); return DISJOINT; } @Override - public double computeDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { - if (isWithin(x,y,z)) + public double computeDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { + if (isWithin(x, y, z)) { return 0.0; + } return Double.POSITIVE_INFINITY; } - + @Override - public void getDistanceBounds(final Bounds bounds, final DistanceStyle distanceStyle, final double distanceValue) { + public void getDistanceBounds( + final Bounds bounds, final DistanceStyle distanceStyle, final double distanceValue) { getBounds(bounds); } - } - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDegenerateVerticalLine.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDegenerateVerticalLine.java index 22c0928d106..c1c114e7b14 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDegenerateVerticalLine.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDegenerateVerticalLine.java @@ -16,9 +16,9 @@ */ package org.apache.lucene.spatial3d.geom; +import java.io.IOException; import java.io.InputStream; import 
java.io.OutputStream; -import java.io.IOException; /** * Degenerate bounding box limited on two sides (top lat, bottom lat). @@ -54,9 +54,14 @@ public class GeoDegenerateVerticalLine extends GeoBaseBBox { protected final GeoPoint[] edgePoints; /** - * Accepts only values in the following ranges: lat: {@code -PI/2 -> PI/2}, longitude: {@code -PI -> PI} + * Accepts only values in the following ranges: lat: {@code -PI/2 -> PI/2}, longitude: {@code -PI + * -> PI} */ - public GeoDegenerateVerticalLine(final PlanetModel planetModel, final double topLat, final double bottomLat, final double longitude) { + public GeoDegenerateVerticalLine( + final PlanetModel planetModel, + final double topLat, + final double bottomLat, + final double longitude) { super(planetModel); // Argument checking if (topLat > Math.PI * 0.5 || topLat < -Math.PI * 0.5) @@ -80,8 +85,18 @@ public class GeoDegenerateVerticalLine extends GeoBaseBBox { final double cosLongitude = Math.cos(longitude); // Now build the two points - this.UHC = new GeoPoint(planetModel, sinTopLat, sinLongitude, cosTopLat, cosLongitude, topLat, longitude); - this.LHC = new GeoPoint(planetModel, sinBottomLat, sinLongitude, cosBottomLat, cosLongitude, bottomLat, longitude); + this.UHC = + new GeoPoint( + planetModel, sinTopLat, sinLongitude, cosTopLat, cosLongitude, topLat, longitude); + this.LHC = + new GeoPoint( + planetModel, + sinBottomLat, + sinLongitude, + cosBottomLat, + cosLongitude, + bottomLat, + longitude); this.plane = new Plane(cosLongitude, sinLongitude); @@ -89,25 +104,32 @@ public class GeoDegenerateVerticalLine extends GeoBaseBBox { final double sinMiddleLat = Math.sin(middleLat); final double cosMiddleLat = Math.cos(middleLat); - this.centerPoint = new GeoPoint(planetModel, sinMiddleLat, sinLongitude, cosMiddleLat, cosLongitude); + this.centerPoint = + new GeoPoint(planetModel, sinMiddleLat, sinLongitude, cosMiddleLat, cosLongitude); this.topPlane = new SidedPlane(centerPoint, planetModel, sinTopLat); this.bottomPlane = new SidedPlane(centerPoint, planetModel, sinBottomLat); this.boundingPlane = new SidedPlane(centerPoint, -sinLongitude, cosLongitude); - this.planePoints = new GeoPoint[]{UHC, LHC}; + this.planePoints = new GeoPoint[] {UHC, LHC}; - this.edgePoints = new GeoPoint[]{centerPoint}; + this.edgePoints = new GeoPoint[] {centerPoint}; } /** * Constructor for deserialization. + * * @param planetModel is the planet model. * @param inputStream is the input stream. 
*/ - public GeoDegenerateVerticalLine(final PlanetModel planetModel, final InputStream inputStream) throws IOException { - this(planetModel, SerializableObject.readDouble(inputStream), SerializableObject.readDouble(inputStream), SerializableObject.readDouble(inputStream)); + public GeoDegenerateVerticalLine(final PlanetModel planetModel, final InputStream inputStream) + throws IOException { + this( + planetModel, + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream)); } @Override @@ -128,22 +150,24 @@ public class GeoDegenerateVerticalLine extends GeoBaseBBox { newLeftLon = -Math.PI; newRightLon = Math.PI; } - return GeoBBoxFactory.makeGeoBBox(planetModel, newTopLat, newBottomLat, newLeftLon, newRightLon); + return GeoBBoxFactory.makeGeoBBox( + planetModel, newTopLat, newBottomLat, newLeftLon, newRightLon); } @Override public boolean isWithin(final double x, final double y, final double z) { - return plane.evaluateIsZero(x, y, z) && - boundingPlane.isWithin(x, y, z) && - topPlane.isWithin(x, y, z) && - bottomPlane.isWithin(x, y, z); + return plane.evaluateIsZero(x, y, z) + && boundingPlane.isWithin(x, y, z) + && topPlane.isWithin(x, y, z) + && bottomPlane.isWithin(x, y, z); } @Override public double getRadius() { - // Here we compute the distance from the middle point to one of the corners. However, we need to be careful - // to use the longest of three distances: the distance to a corner on the top; the distnace to a corner on the bottom, and - // the distance to the right or left edge from the center. + // Here we compute the distance from the middle point to one of the corners. However, we need + // to be careful to use the longest of three distances: the distance to a corner on the top; + // the distance to a corner on the bottom, and the distance to the right or left edge from the + // center. final double topAngle = centerPoint.arcDistance(UHC); final double bottomAngle = centerPoint.arcDistance(LHC); return Math.max(topAngle, bottomAngle); @@ -160,8 +184,17 @@ public class GeoDegenerateVerticalLine extends GeoBaseBBox { } @Override - public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) { - return p.intersects(planetModel, plane, notablePoints, planePoints, bounds, boundingPlane, topPlane, bottomPlane); + public boolean intersects( + final Plane p, final GeoPoint[] notablePoints, final Membership... 
bounds) { + return p.intersects( + planetModel, + plane, + notablePoints, + planePoints, + bounds, + boundingPlane, + topPlane, + bottomPlane); } @Override @@ -172,43 +205,47 @@ public class GeoDegenerateVerticalLine extends GeoBaseBBox { @Override public void getBounds(Bounds bounds) { super.getBounds(bounds); - bounds.addVerticalPlane(planetModel, longitude, plane, boundingPlane, topPlane, bottomPlane) - .addPoint(UHC).addPoint(LHC); + bounds + .addVerticalPlane(planetModel, longitude, plane, boundingPlane, topPlane, bottomPlane) + .addPoint(UHC) + .addPoint(LHC); } @Override public int getRelationship(final GeoShape path) { - //System.err.println(this+" relationship to "+path); + // System.err.println(this + " relationship to " + path); if (intersects(path)) { - //System.err.println(" overlaps"); + // System.err.println(" overlaps"); return OVERLAPS; } if (path.isWithin(centerPoint)) { - //System.err.println(" contains"); + // System.err.println(" contains"); return CONTAINS; } - //System.err.println(" disjoint"); + // System.err.println(" disjoint"); return DISJOINT; } @Override - protected double outsideDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { - final double distance = distanceStyle.computeDistance(planetModel, plane, x,y,z, topPlane, bottomPlane, boundingPlane); - - final double UHCDistance = distanceStyle.computeDistance(UHC, x,y,z); - final double LHCDistance = distanceStyle.computeDistance(LHC, x,y,z); - - return Math.min( - distance, - Math.min(UHCDistance, LHCDistance)); + protected double outsideDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { + final double distance = + distanceStyle.computeDistance( + planetModel, plane, x, y, z, topPlane, bottomPlane, boundingPlane); + + final double UHCDistance = distanceStyle.computeDistance(UHC, x, y, z); + final double LHCDistance = distanceStyle.computeDistance(LHC, x, y, z); + + return Math.min(distance, Math.min(UHCDistance, LHCDistance)); } @Override public boolean equals(Object o) { - if (!(o instanceof GeoDegenerateVerticalLine)) + if (!(o instanceof GeoDegenerateVerticalLine)) { return false; + } GeoDegenerateVerticalLine other = (GeoDegenerateVerticalLine) o; return super.equals(other) && other.UHC.equals(UHC) && other.LHC.equals(LHC); } @@ -223,8 +260,18 @@ public class GeoDegenerateVerticalLine extends GeoBaseBBox { @Override public String toString() { - return "GeoDegenerateVerticalLine: {longitude=" + longitude + "(" + longitude * 180.0 / Math.PI + "), toplat=" + topLat + "(" + topLat * 180.0 / Math.PI + "), bottomlat=" + bottomLat + "(" + bottomLat * 180.0 / Math.PI + ")}"; + return "GeoDegenerateVerticalLine: {longitude=" + + longitude + + "(" + + longitude * 180.0 / Math.PI + + "), toplat=" + + topLat + + "(" + + topLat * 180.0 / Math.PI + + "), bottomlat=" + + bottomLat + + "(" + + bottomLat * 180.0 / Math.PI + + ")}"; } } - - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDistance.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDistance.java index bf020fe5bf8..6f595ba4725 100755 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDistance.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDistance.java @@ -17,23 +17,22 @@ package org.apache.lucene.spatial3d.geom; /** - * An implementer of this interface is capable of computing the described "distance" values, - * which are meant to provide both actual distance values, as well as - * 
distance estimates that can be computed more cheaply. + * An implementer of this interface is capable of computing the described "distance" values, which + * are meant to provide both actual distance values, as well as distance estimates that can be + * computed more cheaply. * * @lucene.experimental */ public interface GeoDistance extends Membership { - + // The following methods compute distances from the shape to a point // expected to be INSIDE the shape. Typically a value of Double.POSITIVE_INFINITY // is returned for points that happen to be outside the shape. /** - * Compute this shape's internal "distance" to the GeoPoint. - * Implementations should clarify how this is computed when it's non-obvious. - * A return value of Double.POSITIVE_INFINITY should be returned for - * points outside of the shape. + * Compute this shape's internal "distance" to the GeoPoint. Implementations should + * clarify how this is computed when it's non-obvious. A return value of Double.POSITIVE_INFINITY + * should be returned for points outside of the shape. * * @param distanceStyle is the distance style. * @param point is the point to compute the distance to. @@ -44,10 +43,9 @@ public interface GeoDistance extends Membership { } /** - * Compute this shape's internal "distance" to the GeoPoint. - * Implementations should clarify how this is computed when it's non-obvious. - * A return value of Double.POSITIVE_INFINITY should be returned for - * points outside of the shape. + * Compute this shape's internal "distance" to the GeoPoint. Implementations should + * clarify how this is computed when it's non-obvious. A return value of Double.POSITIVE_INFINITY + * should be returned for points outside of the shape. * * @param distanceStyle is the distance style. * @param x is the point's unit x coordinate (using U.S. convention). @@ -55,31 +53,33 @@ public interface GeoDistance extends Membership { * @param z is the point's unit z coordinate (using U.S. convention). * @return the distance. */ - public double computeDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z); + public double computeDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z); /** - * Compute the shape's delta distance given a point. This is defined as the distance that someone traveling - * the "length" of the shape would have to go out of their way to include the point. - * For some shapes, e.g. paths, this makes perfect sense. For other shapes, e.g. circles, the "length" of the shape is zero, - * and the delta is computed as the distance from the center to the point and back. - * A return value of Double.POSITIVE_INFINITY should be returned for - * points outside of the shape. + * Compute the shape's delta distance given a point. This is defined as the distance that + * someone traveling the "length" of the shape would have to go out of their way to include the + * point. For some shapes, e.g. paths, this makes perfect sense. For other shapes, e.g. circles, + * the "length" of the shape is zero, and the delta is computed as the distance from the center to + * the point and back. A return value of Double.POSITIVE_INFINITY should be returned for points + * outside of the shape. * * @param distanceStyle is the distance style. * @param point is the point to compute the distance to. * @return the distance. 
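   * <p>For example, for a circle the delta distance is simply twice the ordinary distance: the
   * detour goes from the center out to the point and back.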
*/ - public default double computeDeltaDistance(final DistanceStyle distanceStyle, final GeoPoint point) { + public default double computeDeltaDistance( + final DistanceStyle distanceStyle, final GeoPoint point) { return computeDeltaDistance(distanceStyle, point.x, point.y, point.z); } /** - * Compute the shape's delta distance given a point. This is defined as the distance that someone traveling - * the "length" of the shape would have to go out of their way to include the point. - * For some shapes, e.g. paths, this makes perfect sense. For other shapes, e.g. circles, the "length" of the shape is zero, - * and the delta is computed as the distance from the center to the point and back. - * A return value of Double.POSITIVE_INFINITY should be returned for - * points outside of the shape. + * Compute the shape's delta distance given a point. This is defined as the distance that + * someone traveling the "length" of the shape would have to go out of their way to include the + * point. For some shapes, e.g. paths, this makes perfect sense. For other shapes, e.g. circles, + * the "length" of the shape is zero, and the delta is computed as the distance from the center to + * the point and back. A return value of Double.POSITIVE_INFINITY should be returned for points + * outside of the shape. * * @param distanceStyle is the distance style. * @param x is the point's unit x coordinate (using U.S. convention). @@ -87,9 +87,9 @@ public interface GeoDistance extends Membership { * @param z is the point's unit z coordinate (using U.S. convention). * @return the distance. */ - public default double computeDeltaDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { + public default double computeDeltaDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { // Default to standard distance * 2, which is fine for circles return computeDistance(distanceStyle, x, y, z) * 2.0; } - } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDistanceShape.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDistanceShape.java index c49ed30b950..20719f58cdc 100755 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDistanceShape.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDistanceShape.java @@ -17,23 +17,24 @@ package org.apache.lucene.spatial3d.geom; /** - * Distance shapes have capabilities of both geohashing and distance - * computation (which also includes point membership determination). + * Distance shapes have capabilities of both geohashing and distance computation (which also + * includes point membership determination). * * @lucene.experimental */ public interface GeoDistanceShape extends GeoAreaShape, GeoDistance { /** - * Compute a bound based on a provided distance measure. - * This method takes an input distance and distance metric and provides bounds on the - * shape if reduced to match that distance. The method is allowed to return - * bounds that are larger than the distance would indicate, but never smaller. + * Compute a bound based on a provided distance measure. This method takes an input distance and + * distance metric and provides bounds on the shape if reduced to match that distance. The method + * is allowed to return bounds that are larger than the distance would indicate, but never + * smaller. + * * @param bounds is the bounds object to update. * @param distanceStyle describes the type of distance metric provided. 
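 *
 * <p>Usage sketch (the shape, the XYZBounds choice, and the cutoff value are assumptions):
 *
 * <pre>{@code
 * XYZBounds bounds = new XYZBounds();
 * distanceShape.getDistanceBounds(bounds, DistanceStyle.ARC, 0.25);
 * // The bounds may be looser than the true 0.25 cut, but never tighter.
 * }</pre>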
- * @param distanceValue is the distance metric to use. It is presumed that the distance metric - * was produced with the same distance style as is provided to this method. + * @param distanceValue is the distance metric to use. It is presumed that the distance metric was + * produced with the same distance style as is provided to this method. */ - public void getDistanceBounds(final Bounds bounds, final DistanceStyle distanceStyle, final double distanceValue); - + public void getDistanceBounds( + final Bounds bounds, final DistanceStyle distanceStyle, final double distanceValue); } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoExactCircle.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoExactCircle.java index f25d74ce8e2..c4ad8b8bcd3 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoExactCircle.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoExactCircle.java @@ -16,18 +16,17 @@ */ package org.apache.lucene.spatial3d.geom; -import java.util.Arrays; -import java.util.List; -import java.util.ArrayList; - import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; /** - * Circular area with a center and a radius that represents the surface distance to the center. - * The circle is divided in sectors where the circle edge is approximated using Vincenty formulae. - * The higher is the precision the more sectors are needed to describe the shape and therefore a penalty + * Circular area with a center and a radius that represents the surface distance to the center. The + * circle is divided into sectors where the circle edge is approximated using Vincenty formulae. The + * higher the precision, the more sectors are needed to describe the shape, and therefore the greater the penalty * in performance. * * @lucene.experimental @@ -44,25 +43,40 @@ class GeoExactCircle extends GeoBaseCircle { /** Slices of the circle */ protected final List<CircleSlice> circleSlices; - /** Constructor. - *@param planetModel is the planet model. - *@param lat is the center latitude. - *@param lon is the center longitude. - *@param radius is the surface radius for the circle. - *@param accuracy is the allowed error value (linear distance). Maximum accuracy is 1e-12. + /** + * Constructor. + * + * @param planetModel is the planet model. + * @param lat is the center latitude. + * @param lon is the center longitude. + * @param radius is the surface radius for the circle. + * @param accuracy is the allowed error value (linear distance). Maximum accuracy is 1e-12.
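 *
 * <p>Because this class is package-private, an exact circle is normally obtained through the
 * circle factory; a minimal sketch (planet model, coordinates, and accuracy are illustrative
 * assumptions):
 *
 * <pre>{@code
 * GeoCircle circle =
 *     GeoCircleFactory.makeExactGeoCircle(PlanetModel.WGS84, 0.7, -2.1, 0.05, 1e-8);
 * }</pre>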
*/ - public GeoExactCircle(final PlanetModel planetModel, final double lat, final double lon, final double radius, final double accuracy) { + public GeoExactCircle( + final PlanetModel planetModel, + final double lat, + final double lon, + final double radius, + final double accuracy) { super(planetModel); - if (lat < -Math.PI * 0.5 || lat > Math.PI * 0.5) + if (lat < -Math.PI * 0.5 || lat > Math.PI * 0.5) { throw new IllegalArgumentException("Latitude out of bounds"); - if (lon < -Math.PI || lon > Math.PI) + } + if (lon < -Math.PI || lon > Math.PI) { throw new IllegalArgumentException("Longitude out of bounds"); - if (radius < 0.0) + } + if (radius < 0.0) { throw new IllegalArgumentException("Radius out of bounds"); - if (radius < Vector.MINIMUM_RESOLUTION) + } + if (radius < Vector.MINIMUM_RESOLUTION) { throw new IllegalArgumentException("Radius cannot be effectively zero"); - if (planetModel.minimumPoleDistance - radius < Vector.MINIMUM_RESOLUTION) - throw new IllegalArgumentException("Radius out of bounds. It cannot be bigger than " + planetModel.minimumPoleDistance + " for this planet model"); + } + if (planetModel.minimumPoleDistance - radius < Vector.MINIMUM_RESOLUTION) { + throw new IllegalArgumentException( + "Radius out of bounds. It cannot be bigger than " + + planetModel.minimumPoleDistance + + " for this planet model"); + } this.center = new GeoPoint(planetModel, lat, lon); this.radius = radius; @@ -84,65 +98,113 @@ class GeoExactCircle extends GeoBaseCircle { final GeoPoint edgePoint; if (planetModel.zScaling > planetModel.xyScaling) { // z can be greater than x or y, so ellipse is longer in height than width - slices.add(new ApproximationSlice(center, eastPoint, Math.PI * 0.5, westPoint, Math.PI * -0.5, northPoint, 0.0, true)); - slices.add(new ApproximationSlice(center, westPoint, Math.PI * 1.5, eastPoint, Math.PI * 0.5, southPoint, Math.PI, true)); + slices.add( + new ApproximationSlice( + center, eastPoint, Math.PI * 0.5, westPoint, Math.PI * -0.5, northPoint, 0.0, true)); + slices.add( + new ApproximationSlice( + center, + westPoint, + Math.PI * 1.5, + eastPoint, + Math.PI * 0.5, + southPoint, + Math.PI, + true)); edgePoint = eastPoint; } else { // z will be less than x or y, so ellipse is shorter than it is tall - slices.add(new ApproximationSlice(center, northPoint, 0.0, southPoint, Math.PI, eastPoint, Math.PI * 0.5, true)); - slices.add(new ApproximationSlice(center, southPoint, Math.PI, northPoint, Math.PI * 2.0, westPoint, Math.PI * 1.5, true)); + slices.add( + new ApproximationSlice( + center, northPoint, 0.0, southPoint, Math.PI, eastPoint, Math.PI * 0.5, true)); + slices.add( + new ApproximationSlice( + center, + southPoint, + Math.PI, + northPoint, + Math.PI * 2.0, + westPoint, + Math.PI * 1.5, + true)); edgePoint = northPoint; } - //System.out.println("Edgepoint = " + edgePoint); + // System.out.println("Edgepoint = " + edgePoint); this.circleSlices = new ArrayList<>(); - + // Now, iterate over slices until we have converted all of them into safe SidedPlanes. while (slices.size() > 0) { // Peel off a slice from the back - final ApproximationSlice thisSlice = slices.remove(slices.size()-1); + final ApproximationSlice thisSlice = slices.remove(slices.size() - 1); // Assess it to see if it is OK as it is, or needs to be split. // To do this, we need to look at the part of the circle that will have the greatest error. // We will need to compute bearing points for these. 
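// (Aside, for illustration: a "bearing" here is the angle around the center at which
// surfacePointOnBearing() walks outward by the radius, so for a slice with an endpoint at
// bearing 0.0 and a middle point at bearing Math.PI / 4, the interpolated test point
// computed below sits at bearing Math.PI / 8.)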
- final double interpPoint1Bearing = (thisSlice.point1Bearing + thisSlice.middlePointBearing) * 0.5; - final GeoPoint interpPoint1 = planetModel.surfacePointOnBearing(center, radius, interpPoint1Bearing); - final double interpPoint2Bearing = (thisSlice.point2Bearing + thisSlice.middlePointBearing) * 0.5; - final GeoPoint interpPoint2 = planetModel.surfacePointOnBearing(center, radius, interpPoint2Bearing); - + final double interpPoint1Bearing = + (thisSlice.point1Bearing + thisSlice.middlePointBearing) * 0.5; + final GeoPoint interpPoint1 = + planetModel.surfacePointOnBearing(center, radius, interpPoint1Bearing); + final double interpPoint2Bearing = + (thisSlice.point2Bearing + thisSlice.middlePointBearing) * 0.5; + final GeoPoint interpPoint2 = + planetModel.surfacePointOnBearing(center, radius, interpPoint2Bearing); + // Is this point on the plane? (that is, is the approximation good enough?) - if (!thisSlice.mustSplit && Math.abs(thisSlice.plane.evaluate(interpPoint1)) < actualAccuracy && Math.abs(thisSlice.plane.evaluate(interpPoint2)) < actualAccuracy) { - circleSlices.add(new CircleSlice(thisSlice.plane, thisSlice.endPoint1, thisSlice.endPoint2, center, thisSlice.middlePoint)); - //assert thisSlice.plane.isWithin(center); + if (!thisSlice.mustSplit + && Math.abs(thisSlice.plane.evaluate(interpPoint1)) < actualAccuracy + && Math.abs(thisSlice.plane.evaluate(interpPoint2)) < actualAccuracy) { + circleSlices.add( + new CircleSlice( + thisSlice.plane, + thisSlice.endPoint1, + thisSlice.endPoint2, + center, + thisSlice.middlePoint)); + // assert thisSlice.plane.isWithin(center); } else { // Split the plane into two, and add it back to the end - slices.add(new ApproximationSlice(center, - thisSlice.endPoint1, thisSlice.point1Bearing, - thisSlice.middlePoint, thisSlice.middlePointBearing, - interpPoint1, interpPoint1Bearing, false)); - slices.add(new ApproximationSlice(center, - thisSlice.middlePoint, thisSlice.middlePointBearing, - thisSlice.endPoint2, thisSlice.point2Bearing, - interpPoint2, interpPoint2Bearing, false)); + slices.add( + new ApproximationSlice( + center, + thisSlice.endPoint1, + thisSlice.point1Bearing, + thisSlice.middlePoint, + thisSlice.middlePointBearing, + interpPoint1, + interpPoint1Bearing, + false)); + slices.add( + new ApproximationSlice( + center, + thisSlice.middlePoint, + thisSlice.middlePointBearing, + thisSlice.endPoint2, + thisSlice.point2Bearing, + interpPoint2, + interpPoint2Bearing, + false)); } } - - this.edgePoints = new GeoPoint[]{edgePoint}; - //System.out.println("Is edgepoint within? "+isWithin(edgePoint)); + this.edgePoints = new GeoPoint[] {edgePoint}; + + // System.out.println("Is edgepoint within? " + isWithin(edgePoint)); } - /** * Constructor for deserialization. + * * @param planetModel is the planet model. * @param inputStream is the input stream. 
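 *
 * <p>(For intuition about the accuracy value read back here: the main constructor keeps
 * splitting slices until the plane error at the interpolated bearing points falls below it,
 * in the same spirit as this self-contained toy refinement over a sine arc, which is an
 * illustration only and not the actual implementation:)
 *
 * <pre>{@code
 * ArrayDeque<double[]> work = new ArrayDeque<>();
 * work.push(new double[] {0.0, Math.PI});
 * List<double[]> accepted = new ArrayList<>();
 * double accuracy = 1e-3;
 * while (!work.isEmpty()) {
 *   double[] seg = work.pop();
 *   double mid = (seg[0] + seg[1]) * 0.5;
 *   double chordMid = (Math.sin(seg[0]) + Math.sin(seg[1])) * 0.5;
 *   if (Math.abs(Math.sin(mid) - chordMid) < accuracy) {
 *     accepted.add(seg); // flat enough at this accuracy
 *   } else {
 *     work.push(new double[] {seg[0], mid}); // split at the midpoint and retry both halves
 *     work.push(new double[] {mid, seg[1]});
 *   }
 * }
 * }</pre>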
*/ - public GeoExactCircle(final PlanetModel planetModel, final InputStream inputStream) throws IOException { - this(planetModel, - SerializableObject.readDouble(inputStream), - SerializableObject.readDouble(inputStream), - SerializableObject.readDouble(inputStream), - SerializableObject.readDouble(inputStream)); + public GeoExactCircle(final PlanetModel planetModel, final InputStream inputStream) + throws IOException { + this( + planetModel, + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream)); } @Override @@ -164,21 +226,26 @@ class GeoExactCircle extends GeoBaseCircle { } @Override - protected double distance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { + protected double distance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { return distanceStyle.computeDistance(this.center, x, y, z); } @Override - protected void distanceBounds(final Bounds bounds, final DistanceStyle distanceStyle, final double distanceValue) { + protected void distanceBounds( + final Bounds bounds, final DistanceStyle distanceStyle, final double distanceValue) { // TBD: Compute actual bounds based on distance getBounds(bounds); } @Override - protected double outsideDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { + protected double outsideDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { double outsideDistance = Double.POSITIVE_INFINITY; for (final CircleSlice slice : circleSlices) { - final double distance = distanceStyle.computeDistance(planetModel, slice.circlePlane, x, y, z, slice.plane1, slice.plane2); + final double distance = + distanceStyle.computeDistance( + planetModel, slice.circlePlane, x, y, z, slice.plane1, slice.plane2); if (distance < outsideDistance) { outsideDistance = distance; } @@ -189,7 +256,9 @@ class GeoExactCircle extends GeoBaseCircle { @Override public boolean isWithin(final double x, final double y, final double z) { for (final CircleSlice slice : circleSlices) { - if (slice.circlePlane.isWithin(x, y, z) && slice.plane1.isWithin(x, y, z) && slice.plane2.isWithin(x, y, z)) { + if (slice.circlePlane.isWithin(x, y, z) + && slice.plane1.isWithin(x, y, z) + && slice.plane2.isWithin(x, y, z)) { return true; } } @@ -202,9 +271,17 @@ class GeoExactCircle extends GeoBaseCircle { } @Override - public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) { + public boolean intersects( + final Plane p, final GeoPoint[] notablePoints, final Membership... 
bounds) { for (final CircleSlice slice : circleSlices) { - if (slice.circlePlane.intersects(planetModel, p, notablePoints, slice.notableEdgePoints, bounds, slice.plane1, slice.plane2)) { + if (slice.circlePlane.intersects( + planetModel, + p, + notablePoints, + slice.notableEdgePoints, + bounds, + slice.plane1, + slice.plane2)) { return true; } } @@ -214,14 +291,14 @@ class GeoExactCircle extends GeoBaseCircle { @Override public boolean intersects(GeoShape geoShape) { for (final CircleSlice slice : circleSlices) { - if (geoShape.intersects(slice.circlePlane, slice.notableEdgePoints, slice.plane1, slice.plane2)) { + if (geoShape.intersects( + slice.circlePlane, slice.notableEdgePoints, slice.plane1, slice.plane2)) { return true; } } return false; } - @Override public void getBounds(Bounds bounds) { super.getBounds(bounds); @@ -236,10 +313,14 @@ class GeoExactCircle extends GeoBaseCircle { @Override public boolean equals(Object o) { - if (!(o instanceof GeoExactCircle)) + if (!(o instanceof GeoExactCircle)) { return false; + } GeoExactCircle other = (GeoExactCircle) o; - return super.equals(other) && other.center.equals(center) && other.radius == radius && other.actualAccuracy == actualAccuracy; + return super.equals(other) + && other.center.equals(center) + && other.radius == radius + && other.actualAccuracy == actualAccuracy; } @Override @@ -249,17 +330,26 @@ class GeoExactCircle extends GeoBaseCircle { long temp = Double.doubleToLongBits(radius); result = 31 * result + (int) (temp ^ (temp >>> 32)); temp = Double.doubleToLongBits(actualAccuracy); - result = 31 * result + (int) (temp ^ (temp >>> 32)); + result = 31 * result + (int) (temp ^ (temp >>> 32)); return result; } @Override public String toString() { - return "GeoExactCircle: {planetmodel=" + planetModel+", center=" + center + ", radius=" + radius + "(" + radius * 180.0 / Math.PI + "), accuracy=" + actualAccuracy + "}"; + return "GeoExactCircle: {planetmodel=" + + planetModel + + ", center=" + + center + + ", radius=" + + radius + + "(" + + radius * 180.0 / Math.PI + + "), accuracy=" + + actualAccuracy + + "}"; } - - /** A temporary description of a section of circle. - */ + + /** A temporary description of a section of circle. 
*/ protected static class ApproximationSlice { public final SidedPlane plane; public final GeoPoint endPoint1; @@ -269,11 +359,16 @@ class GeoExactCircle extends GeoBaseCircle { public final GeoPoint middlePoint; public final double middlePointBearing; public final boolean mustSplit; - - public ApproximationSlice(final GeoPoint center, - final GeoPoint endPoint1, final double point1Bearing, - final GeoPoint endPoint2, final double point2Bearing, - final GeoPoint middlePoint, final double middlePointBearing, final boolean mustSplit) { + + public ApproximationSlice( + final GeoPoint center, + final GeoPoint endPoint1, + final double point1Bearing, + final GeoPoint endPoint2, + final double point2Bearing, + final GeoPoint middlePoint, + final double middlePointBearing, + final boolean mustSplit) { this.endPoint1 = endPoint1; this.point1Bearing = point1Bearing; this.endPoint2 = endPoint2; @@ -282,33 +377,73 @@ class GeoExactCircle extends GeoBaseCircle { this.middlePointBearing = middlePointBearing; this.mustSplit = mustSplit; // Construct the plane going through the three given points - this.plane = SidedPlane.constructNormalizedThreePointSidedPlane(center, endPoint1, endPoint2, middlePoint); + this.plane = + SidedPlane.constructNormalizedThreePointSidedPlane( + center, endPoint1, endPoint2, middlePoint); if (this.plane == null) { - throw new IllegalArgumentException("Either circle is too small or accuracy is too high; could not construct a plane with endPoint1="+endPoint1+" bearing "+point1Bearing+", endPoint2="+endPoint2+" bearing "+point2Bearing+", middle="+middlePoint+" bearing "+middlePointBearing); + throw new IllegalArgumentException( + "Either circle is too small or accuracy is too high; could not construct a plane with endPoint1=" + + endPoint1 + + " bearing " + + point1Bearing + + ", endPoint2=" + + endPoint2 + + " bearing " + + point2Bearing + + ", middle=" + + middlePoint + + " bearing " + + middlePointBearing); } if (this.plane.isWithin(-center.x, -center.y, -center.z)) { - //Plane is bogus, we cannot build the circle - throw new IllegalArgumentException("Could not construct a valid plane for this planet model with endPoint1="+endPoint1+" bearing "+point1Bearing+", endPoint2="+endPoint2+" bearing "+point2Bearing+", middle="+middlePoint+" bearing "+middlePointBearing); + // Plane is bogus, we cannot build the circle + throw new IllegalArgumentException( + "Could not construct a valid plane for this planet model with endPoint1=" + + endPoint1 + + " bearing " + + point1Bearing + + ", endPoint2=" + + endPoint2 + + " bearing " + + point2Bearing + + ", middle=" + + middlePoint + + " bearing " + + middlePointBearing); } } @Override public String toString() { - return "{end point 1 = " + endPoint1 + " bearing 1 = "+point1Bearing + - " end point 2 = " + endPoint2 + " bearing 2 = " + point2Bearing + - " middle point = " + middlePoint + " middle bearing = " + middlePointBearing + "}"; + return "{end point 1 = " + + endPoint1 + + " bearing 1 = " + + point1Bearing + + " end point 2 = " + + endPoint2 + + " bearing 2 = " + + point2Bearing + + " middle point = " + + middlePoint + + " middle bearing = " + + middlePointBearing + + "}"; } } - /** A description of a section of circle. - */ + /** A description of a section of circle. 
*/ protected static class CircleSlice { final GeoPoint[] notableEdgePoints; public final SidedPlane circlePlane; public final SidedPlane plane1; public final SidedPlane plane2; - public CircleSlice(SidedPlane circlePlane, GeoPoint endPoint1, GeoPoint endPoint2, GeoPoint center, GeoPoint check) { + public CircleSlice( + SidedPlane circlePlane, + GeoPoint endPoint1, + GeoPoint endPoint2, + GeoPoint center, + GeoPoint check) { this.circlePlane = circlePlane; this.plane1 = new SidedPlane(check, endPoint1, center); this.plane2 = new SidedPlane(check, endPoint2, center); @@ -317,8 +452,15 @@ class GeoExactCircle extends GeoBaseCircle { @Override public String toString() { - return "{circle plane = " + circlePlane + " plane 1 = "+plane1 + - " plane 2 = " + plane2 + " notable edge points = " + Arrays.toString(notableEdgePoints) + "}"; + return "{circle plane = " + + circlePlane + + " plane 1 = " + + plane1 + + " plane 2 = " + + plane2 + + " notable edge points = " + + Arrays.toString(notableEdgePoints) + + "}"; } } } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoLatitudeZone.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoLatitudeZone.java index 8db77ed1af2..c00e40ba42e 100755 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoLatitudeZone.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoLatitudeZone.java @@ -16,9 +16,9 @@ */ package org.apache.lucene.spatial3d.geom; +import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.io.IOException; /** * This GeoBBox represents an area rectangle limited only in latitude. @@ -41,11 +41,12 @@ class GeoLatitudeZone extends GeoBaseBBox { /** An interior point */ protected final GeoPoint interiorPoint; /** Notable points (none) */ - protected final static GeoPoint[] planePoints = new GeoPoint[0]; + protected static final GeoPoint[] planePoints = new GeoPoint[0]; - // We need two additional points because a latitude zone's boundaries don't intersect. This is a very + // We need two additional points because a latitude zone's boundaries don't intersect. This is a + // very // special case that most GeoBBox's do not have. - + /** Top boundary point */ protected final GeoPoint topBoundaryPoint; /** Bottom boundary point */ @@ -53,12 +54,15 @@ class GeoLatitudeZone extends GeoBaseBBox { /** A point on each distinct edge */ protected final GeoPoint[] edgePoints; - /** Constructor. - *@param planetModel is the planet model to use. - *@param topLat is the top latitude. - *@param bottomLat is the bottom latitude. + /** + * Constructor. + * + * @param planetModel is the planet model to use. + * @param topLat is the top latitude. + * @param bottomLat is the bottom latitude. */ - public GeoLatitudeZone(final PlanetModel planetModel, final double topLat, final double bottomLat) { + public GeoLatitudeZone( + final PlanetModel planetModel, final double topLat, final double bottomLat) { super(planetModel); this.topLat = topLat; this.bottomLat = bottomLat; @@ -71,23 +75,33 @@ class GeoLatitudeZone extends GeoBaseBBox { // Compute an interior point. Pick one whose lat is between top and bottom. 
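// (For illustration: callers outside this package normally obtain such a zone through the
// bounding-box factory; with a full longitude range, e.g.
// GeoBBoxFactory.makeGeoBBox(planetModel, 0.5, -0.5, -Math.PI, Math.PI), a latitude-only
// zone of this kind is the expected result. The latitude values are assumptions.)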
final double middleLat = (topLat + bottomLat) * 0.5; final double sinMiddleLat = Math.sin(middleLat); - this.interiorPoint = new GeoPoint(planetModel, sinMiddleLat, 0.0, Math.sqrt(1.0 - sinMiddleLat * sinMiddleLat), 1.0); - this.topBoundaryPoint = new GeoPoint(planetModel, sinTopLat, 0.0, Math.sqrt(1.0 - sinTopLat * sinTopLat), 1.0); - this.bottomBoundaryPoint = new GeoPoint(planetModel, sinBottomLat, 0.0, Math.sqrt(1.0 - sinBottomLat * sinBottomLat), 1.0); + this.interiorPoint = + new GeoPoint( + planetModel, sinMiddleLat, 0.0, Math.sqrt(1.0 - sinMiddleLat * sinMiddleLat), 1.0); + this.topBoundaryPoint = + new GeoPoint(planetModel, sinTopLat, 0.0, Math.sqrt(1.0 - sinTopLat * sinTopLat), 1.0); + this.bottomBoundaryPoint = + new GeoPoint( + planetModel, sinBottomLat, 0.0, Math.sqrt(1.0 - sinBottomLat * sinBottomLat), 1.0); this.topPlane = new SidedPlane(interiorPoint, planetModel, sinTopLat); this.bottomPlane = new SidedPlane(interiorPoint, planetModel, sinBottomLat); - this.edgePoints = new GeoPoint[]{topBoundaryPoint, bottomBoundaryPoint}; + this.edgePoints = new GeoPoint[] {topBoundaryPoint, bottomBoundaryPoint}; } /** * Constructor for deserialization. + * * @param planetModel is the planet model. * @param inputStream is the input stream. */ - public GeoLatitudeZone(final PlanetModel planetModel, final InputStream inputStream) throws IOException { - this(planetModel, SerializableObject.readDouble(inputStream), SerializableObject.readDouble(inputStream)); + public GeoLatitudeZone(final PlanetModel planetModel, final InputStream inputStream) + throws IOException { + this( + planetModel, + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream)); } @Override @@ -105,19 +119,20 @@ class GeoLatitudeZone extends GeoBaseBBox { @Override public boolean isWithin(final double x, final double y, final double z) { - return topPlane.isWithin(x, y, z) && - bottomPlane.isWithin(x, y, z); + return topPlane.isWithin(x, y, z) && bottomPlane.isWithin(x, y, z); } @Override public double getRadius() { // This is a bit tricky. I guess we should interpret this as meaning the angle of a circle that // would contain all the bounding box points, when starting in the "center". - if (topLat > 0.0 && bottomLat < 0.0) + if (topLat > 0.0 && bottomLat < 0.0) { return Math.PI; + } double maxCosLat = cosTopLat; - if (maxCosLat < cosBottomLat) + if (maxCosLat < cosBottomLat) { maxCosLat = cosBottomLat; + } return maxCosLat * Math.PI; } @@ -133,39 +148,47 @@ class GeoLatitudeZone extends GeoBaseBBox { } @Override - public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) { - return p.intersects(planetModel, topPlane, notablePoints, planePoints, bounds, bottomPlane) || - p.intersects(planetModel, bottomPlane, notablePoints, planePoints, bounds, topPlane); + public boolean intersects( + final Plane p, final GeoPoint[] notablePoints, final Membership... 
bounds) { + return p.intersects(planetModel, topPlane, notablePoints, planePoints, bounds, bottomPlane) + || p.intersects(planetModel, bottomPlane, notablePoints, planePoints, bounds, topPlane); } @Override public boolean intersects(final GeoShape geoShape) { - return geoShape.intersects(topPlane, planePoints, bottomPlane) || - geoShape.intersects(bottomPlane, planePoints, topPlane); + return geoShape.intersects(topPlane, planePoints, bottomPlane) + || geoShape.intersects(bottomPlane, planePoints, topPlane); } @Override public void getBounds(Bounds bounds) { super.getBounds(bounds); - bounds.noLongitudeBound() - .addHorizontalPlane(planetModel, topLat, topPlane) - .addHorizontalPlane(planetModel, bottomLat, bottomPlane); + bounds + .noLongitudeBound() + .addHorizontalPlane(planetModel, topLat, topPlane) + .addHorizontalPlane(planetModel, bottomLat, bottomPlane); } @Override - protected double outsideDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { - final double topDistance = distanceStyle.computeDistance(planetModel, topPlane, x,y,z, bottomPlane); - final double bottomDistance = distanceStyle.computeDistance(planetModel, bottomPlane, x,y,z, topPlane); + protected double outsideDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { + final double topDistance = + distanceStyle.computeDistance(planetModel, topPlane, x, y, z, bottomPlane); + final double bottomDistance = + distanceStyle.computeDistance(planetModel, bottomPlane, x, y, z, topPlane); return Math.min(topDistance, bottomDistance); } @Override public boolean equals(Object o) { - if (!(o instanceof GeoLatitudeZone)) + if (!(o instanceof GeoLatitudeZone)) { return false; + } GeoLatitudeZone other = (GeoLatitudeZone) o; - return super.equals(other) && other.topBoundaryPoint.equals(topBoundaryPoint) && other.bottomBoundaryPoint.equals(bottomBoundaryPoint); + return super.equals(other) + && other.topBoundaryPoint.equals(topBoundaryPoint) + && other.bottomBoundaryPoint.equals(bottomBoundaryPoint); } @Override @@ -178,6 +201,16 @@ class GeoLatitudeZone extends GeoBaseBBox { @Override public String toString() { - return "GeoLatitudeZone: {planetmodel="+planetModel+", toplat=" + topLat + "(" + topLat * 180.0 / Math.PI + "), bottomlat=" + bottomLat + "(" + bottomLat * 180.0 / Math.PI + ")}"; + return "GeoLatitudeZone: {planetmodel=" + + planetModel + + ", toplat=" + + topLat + + "(" + + topLat * 180.0 / Math.PI + + "), bottomlat=" + + bottomLat + + "(" + + bottomLat * 180.0 / Math.PI + + ")}"; } } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoLongitudeSlice.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoLongitudeSlice.java index 4a8a60474ab..22db1441419 100755 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoLongitudeSlice.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoLongitudeSlice.java @@ -16,14 +16,13 @@ */ package org.apache.lucene.spatial3d.geom; +import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.io.IOException; /** - * Bounding box limited on left and right. - * The left-right maximum extent for this shape is PI; for anything larger, use - * {@link GeoWideLongitudeSlice}. + * Bounding box limited on left and right. The left-right maximum extent for this shape is PI; for + * anything larger, use {@link GeoWideLongitudeSlice}. 
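 *
 * <p>For example, a quarter-circumference slice spanning all latitudes; the values are
 * illustrative, and the factory call mirrors the one that {@code expand()} below already uses:
 *
 * <pre>{@code
 * GeoBBox slice =
 *     GeoBBoxFactory.makeGeoBBox(
 *         PlanetModel.SPHERE, Math.PI * 0.5, -Math.PI * 0.5, -Math.PI * 0.25, Math.PI * 0.25);
 * }</pre>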
* * @lucene.internal */ @@ -47,9 +46,10 @@ class GeoLongitudeSlice extends GeoBaseBBox { /** * Accepts only values in the following ranges: lon: {@code -PI -> PI} - *@param planetModel is the planet model. - *@param leftLon is the left longitude of the slice. - *@param rightLon is the right longitude of the slice. + * + * @param planetModel is the planet model. + * @param leftLon is the left longitude of the slice. + * @param rightLon is the right longitude of the slice. */ public GeoLongitudeSlice(final PlanetModel planetModel, final double leftLon, double rightLon) { super(planetModel); @@ -62,8 +62,9 @@ class GeoLongitudeSlice extends GeoBaseBBox { if (extent < 0.0) { extent += 2.0 * Math.PI; } - if (extent > Math.PI) + if (extent > Math.PI) { throw new IllegalArgumentException("Width of rectangle too great"); + } this.leftLon = leftLon; this.rightLon = rightLon; @@ -85,23 +86,29 @@ class GeoLongitudeSlice extends GeoBaseBBox { this.leftPlane = new SidedPlane(centerPoint, cosLeftLon, sinLeftLon); this.rightPlane = new SidedPlane(centerPoint, cosRightLon, sinRightLon); - + // Compute the backing plane - // The normal for this plane is a unit vector through the origin that goes through the middle lon. The plane's D is 0, + // The normal for this plane is a unit vector through the origin that goes through the middle + // lon. The plane's D is 0, // because it goes through the origin. this.backingPlane = new SidedPlane(this.centerPoint, cosMiddleLon, sinMiddleLon, 0.0, 0.0); - this.planePoints = new GeoPoint[]{planetModel.NORTH_POLE, planetModel.SOUTH_POLE}; - this.edgePoints = new GeoPoint[]{planetModel.NORTH_POLE}; + this.planePoints = new GeoPoint[] {planetModel.NORTH_POLE, planetModel.SOUTH_POLE}; + this.edgePoints = new GeoPoint[] {planetModel.NORTH_POLE}; } /** * Constructor for deserialization. + * * @param planetModel is the planet model. * @param inputStream is the input stream. 
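 *
 * <p>Round-trip sketch; the {@code SerializableObject.writeObject}/{@code readObject} static
 * helpers are assumed here as the companions of the {@code readDouble} calls below:
 *
 * <pre>{@code
 * ByteArrayOutputStream out = new ByteArrayOutputStream();
 * SerializableObject.writeObject(out, slice);
 * SerializableObject copy =
 *     SerializableObject.readObject(new ByteArrayInputStream(out.toByteArray()));
 * }</pre>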
*/ - public GeoLongitudeSlice(final PlanetModel planetModel, final InputStream inputStream) throws IOException { - this(planetModel, SerializableObject.readDouble(inputStream), SerializableObject.readDouble(inputStream)); + public GeoLongitudeSlice(final PlanetModel planetModel, final InputStream inputStream) + throws IOException { + this( + planetModel, + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream)); } @Override @@ -114,30 +121,33 @@ class GeoLongitudeSlice extends GeoBaseBBox { public GeoBBox expand(final double angle) { // Figuring out when we escalate to a special case requires some prefiguring double currentLonSpan = rightLon - leftLon; - if (currentLonSpan < 0.0) + if (currentLonSpan < 0.0) { currentLonSpan += Math.PI * 2.0; + } double newLeftLon = leftLon - angle; double newRightLon = rightLon + angle; if (currentLonSpan + 2.0 * angle >= Math.PI * 2.0) { newLeftLon = -Math.PI; newRightLon = Math.PI; } - return GeoBBoxFactory.makeGeoBBox(planetModel, Math.PI * 0.5, -Math.PI * 0.5, newLeftLon, newRightLon); + return GeoBBoxFactory.makeGeoBBox( + planetModel, Math.PI * 0.5, -Math.PI * 0.5, newLeftLon, newRightLon); } @Override public boolean isWithin(final double x, final double y, final double z) { - return backingPlane.isWithin(x, y, z) && - leftPlane.isWithin(x, y, z) && - rightPlane.isWithin(x, y, z); + return backingPlane.isWithin(x, y, z) + && leftPlane.isWithin(x, y, z) + && rightPlane.isWithin(x, y, z); } @Override public double getRadius() { // Compute the extent and divide by two double extent = rightLon - leftLon; - if (extent < 0.0) + if (extent < 0.0) { extent += Math.PI * 2.0; + } return Math.max(Math.PI * 0.5, extent * 0.5); } @@ -152,46 +162,48 @@ class GeoLongitudeSlice extends GeoBaseBBox { } @Override - public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) { - return p.intersects(planetModel, leftPlane, notablePoints, planePoints, bounds, rightPlane) || - p.intersects(planetModel, rightPlane, notablePoints, planePoints, bounds, leftPlane); + public boolean intersects( + final Plane p, final GeoPoint[] notablePoints, final Membership... 
bounds) { + return p.intersects(planetModel, leftPlane, notablePoints, planePoints, bounds, rightPlane) + || p.intersects(planetModel, rightPlane, notablePoints, planePoints, bounds, leftPlane); } @Override public boolean intersects(final GeoShape geoShape) { - return geoShape.intersects(leftPlane, planePoints, rightPlane) || - geoShape.intersects(rightPlane, planePoints, leftPlane); + return geoShape.intersects(leftPlane, planePoints, rightPlane) + || geoShape.intersects(rightPlane, planePoints, leftPlane); } @Override public void getBounds(Bounds bounds) { super.getBounds(bounds); bounds - .addVerticalPlane(planetModel, leftLon, leftPlane, rightPlane) - .addVerticalPlane(planetModel, rightLon, rightPlane, leftPlane) - //.addIntersection(planetModel, rightPlane, leftPlane) - .addPoint(planetModel.NORTH_POLE) - .addPoint(planetModel.SOUTH_POLE); + .addVerticalPlane(planetModel, leftLon, leftPlane, rightPlane) + .addVerticalPlane(planetModel, rightLon, rightPlane, leftPlane) + // .addIntersection(planetModel, rightPlane, leftPlane) + .addPoint(planetModel.NORTH_POLE) + .addPoint(planetModel.SOUTH_POLE); } @Override - protected double outsideDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { - final double leftDistance = distanceStyle.computeDistance(planetModel, leftPlane, x,y,z, rightPlane); - final double rightDistance = distanceStyle.computeDistance(planetModel, rightPlane, x,y,z, leftPlane); - - final double northDistance = distanceStyle.computeDistance(planetModel.NORTH_POLE, x,y,z); - final double southDistance = distanceStyle.computeDistance(planetModel.SOUTH_POLE, x,y,z); - - return - Math.min( - Math.min(northDistance, southDistance), - Math.min(leftDistance, rightDistance)); + protected double outsideDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { + final double leftDistance = + distanceStyle.computeDistance(planetModel, leftPlane, x, y, z, rightPlane); + final double rightDistance = + distanceStyle.computeDistance(planetModel, rightPlane, x, y, z, leftPlane); + + final double northDistance = distanceStyle.computeDistance(planetModel.NORTH_POLE, x, y, z); + final double southDistance = distanceStyle.computeDistance(planetModel.SOUTH_POLE, x, y, z); + + return Math.min(Math.min(northDistance, southDistance), Math.min(leftDistance, rightDistance)); } @Override public boolean equals(Object o) { - if (!(o instanceof GeoLongitudeSlice)) + if (!(o instanceof GeoLongitudeSlice)) { return false; + } GeoLongitudeSlice other = (GeoLongitudeSlice) o; return super.equals(other) && other.leftLon == leftLon && other.rightLon == rightLon; } @@ -208,7 +220,16 @@ class GeoLongitudeSlice extends GeoBaseBBox { @Override public String toString() { - return "GeoLongitudeSlice: {planetmodel="+planetModel+", leftlon=" + leftLon + "(" + leftLon * 180.0 / Math.PI + "), rightlon=" + rightLon + "(" + rightLon * 180.0 / Math.PI + ")}"; + return "GeoLongitudeSlice: {planetmodel=" + + planetModel + + ", leftlon=" + + leftLon + + "(" + + leftLon * 180.0 / Math.PI + + "), rightlon=" + + rightLon + + "(" + + rightLon * 180.0 / Math.PI + + ")}"; } } - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoMembershipShape.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoMembershipShape.java index 2c479714ec3..9ee092d96c3 100755 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoMembershipShape.java +++ 
b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoMembershipShape.java @@ -17,11 +17,8 @@ package org.apache.lucene.spatial3d.geom; /** - * Membership shapes have capabilities of both geohashing and membership - * determination. + * Membership shapes have capabilities of both geohashing and membership determination. * * @lucene.experimental */ -public interface GeoMembershipShape extends GeoShape, GeoOutsideDistance, Membership { - -} +public interface GeoMembershipShape extends GeoShape, GeoOutsideDistance, Membership {} diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoNorthLatitudeZone.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoNorthLatitudeZone.java index 43711266083..bdf8d1fff07 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoNorthLatitudeZone.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoNorthLatitudeZone.java @@ -16,9 +16,9 @@ */ package org.apache.lucene.spatial3d.geom; +import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.io.IOException; /** * This GeoBBox represents an area rectangle limited only in south latitude. @@ -35,15 +35,17 @@ class GeoNorthLatitudeZone extends GeoBaseBBox { /** An interior point of the zone */ protected final GeoPoint interiorPoint; /** Notable points: none */ - protected final static GeoPoint[] planePoints = new GeoPoint[0]; + protected static final GeoPoint[] planePoints = new GeoPoint[0]; /** A point on the bottom boundary */ protected final GeoPoint bottomBoundaryPoint; /** A reference to the point on the boundary */ protected final GeoPoint[] edgePoints; - /** Constructor. - *@param planetModel is the planet model. - *@param bottomLat is the bottom latitude. + /** + * Constructor. + * + * @param planetModel is the planet model. + * @param bottomLat is the bottom latitude. */ public GeoNorthLatitudeZone(final PlanetModel planetModel, final double bottomLat) { super(planetModel); @@ -55,20 +57,26 @@ class GeoNorthLatitudeZone extends GeoBaseBBox { // Compute an interior point. Pick one whose lat is between top and bottom. final double middleLat = (Math.PI * 0.5 + bottomLat) * 0.5; final double sinMiddleLat = Math.sin(middleLat); - this.interiorPoint = new GeoPoint(planetModel, sinMiddleLat, 0.0, Math.sqrt(1.0 - sinMiddleLat * sinMiddleLat), 1.0); - this.bottomBoundaryPoint = new GeoPoint(planetModel, sinBottomLat, 0.0, Math.sqrt(1.0 - sinBottomLat * sinBottomLat), 1.0); + this.interiorPoint = + new GeoPoint( + planetModel, sinMiddleLat, 0.0, Math.sqrt(1.0 - sinMiddleLat * sinMiddleLat), 1.0); + this.bottomBoundaryPoint = + new GeoPoint( + planetModel, sinBottomLat, 0.0, Math.sqrt(1.0 - sinBottomLat * sinBottomLat), 1.0); this.bottomPlane = new SidedPlane(interiorPoint, planetModel, sinBottomLat); - this.edgePoints = new GeoPoint[]{bottomBoundaryPoint}; + this.edgePoints = new GeoPoint[] {bottomBoundaryPoint}; } /** * Constructor for deserialization. + * * @param planetModel is the planet model. * @param inputStream is the input stream. 
*/ - public GeoNorthLatitudeZone(final PlanetModel planetModel, final InputStream inputStream) throws IOException { + public GeoNorthLatitudeZone(final PlanetModel planetModel, final InputStream inputStream) + throws IOException { this(planetModel, SerializableObject.readDouble(inputStream)); } @@ -86,16 +94,16 @@ class GeoNorthLatitudeZone extends GeoBaseBBox { @Override public boolean isWithin(final double x, final double y, final double z) { - return - bottomPlane.isWithin(x, y, z); + return bottomPlane.isWithin(x, y, z); } @Override public double getRadius() { // This is a bit tricky. I guess we should interpret this as meaning the angle of a circle that // would contain all the bounding box points, when starting in the "center". - if (bottomLat < 0.0) + if (bottomLat < 0.0) { return Math.PI; + } double maxCosLat = cosBottomLat; return maxCosLat * Math.PI; } @@ -111,33 +119,33 @@ class GeoNorthLatitudeZone extends GeoBaseBBox { } @Override - public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) { - return - p.intersects(planetModel, bottomPlane, notablePoints, planePoints, bounds); + public boolean intersects( + final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) { + return p.intersects(planetModel, bottomPlane, notablePoints, planePoints, bounds); } @Override public boolean intersects(final GeoShape geoShape) { - return - geoShape.intersects(bottomPlane, planePoints); + return geoShape.intersects(bottomPlane, planePoints); } @Override public void getBounds(Bounds bounds) { super.getBounds(bounds); - bounds - .addHorizontalPlane(planetModel, bottomLat, bottomPlane); + bounds.addHorizontalPlane(planetModel, bottomLat, bottomPlane); } @Override - protected double outsideDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { - return distanceStyle.computeDistance(planetModel, bottomPlane, x,y,z); + protected double outsideDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { + return distanceStyle.computeDistance(planetModel, bottomPlane, x, y, z); } @Override public boolean equals(Object o) { - if (!(o instanceof GeoNorthLatitudeZone)) + if (!(o instanceof GeoNorthLatitudeZone)) { return false; + } GeoNorthLatitudeZone other = (GeoNorthLatitudeZone) o; return super.equals(other) && other.bottomBoundaryPoint.equals(bottomBoundaryPoint); } @@ -151,7 +159,12 @@ class GeoNorthLatitudeZone extends GeoBaseBBox { @Override public String toString() { - return "GeoNorthLatitudeZone: {planetmodel="+planetModel+", bottomlat=" + bottomLat + "(" + bottomLat * 180.0 / Math.PI + ")}"; + return "GeoNorthLatitudeZone: {planetmodel=" + + planetModel + + ", bottomlat=" + + bottomLat + + "(" + + bottomLat * 180.0 / Math.PI + + ")}"; } } - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoNorthRectangle.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoNorthRectangle.java index 93645c15f2d..a8ee7ed9949 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoNorthRectangle.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoNorthRectangle.java @@ -16,15 +16,14 @@ */ package org.apache.lucene.spatial3d.geom; +import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.io.IOException; /** - * Bounding box limited on three sides (bottom lat, left lon, right lon), including - * the north pole. 
- * The left-right maximum extent for this shape is PI; for anything larger, use - * {@link GeoWideNorthRectangle}. + * Bounding box limited on three sides (bottom lat, left lon, right lon), including the north pole. + * The left-right maximum extent for this shape is PI; for anything larger, use {@link + * GeoWideNorthRectangle}. * * @lucene.internal */ @@ -62,26 +61,35 @@ class GeoNorthRectangle extends GeoBaseBBox { /** * Accepts only values in the following ranges: lat: {@code -PI/2 -> PI/2}, lon: {@code -PI -> PI} - *@param planetModel is the planet model. - *@param bottomLat is the bottom latitude. - *@param leftLon is the left longitude. - *@param rightLon is the right longitude. + * + * @param planetModel is the planet model. + * @param bottomLat is the bottom latitude. + * @param leftLon is the left longitude. + * @param rightLon is the right longitude. */ - public GeoNorthRectangle(final PlanetModel planetModel, final double bottomLat, final double leftLon, double rightLon) { + public GeoNorthRectangle( + final PlanetModel planetModel, + final double bottomLat, + final double leftLon, + double rightLon) { super(planetModel); // Argument checking - if (bottomLat > Math.PI * 0.5 || bottomLat < -Math.PI * 0.5) + if (bottomLat > Math.PI * 0.5 || bottomLat < -Math.PI * 0.5) { throw new IllegalArgumentException("Bottom latitude out of range"); - if (leftLon < -Math.PI || leftLon > Math.PI) + } + if (leftLon < -Math.PI || leftLon > Math.PI) { throw new IllegalArgumentException("Left longitude out of range"); - if (rightLon < -Math.PI || rightLon > Math.PI) + } + if (rightLon < -Math.PI || rightLon > Math.PI) { throw new IllegalArgumentException("Right longitude out of range"); + } double extent = rightLon - leftLon; if (extent < 0.0) { extent += 2.0 * Math.PI; } - if (extent > Math.PI) + if (extent > Math.PI) { throw new IllegalArgumentException("Width of rectangle too great"); + } this.bottomLat = bottomLat; this.leftLon = leftLon; @@ -95,8 +103,12 @@ class GeoNorthRectangle extends GeoBaseBBox { final double cosRightLon = Math.cos(rightLon); // Now build the points - this.LRHC = new GeoPoint(planetModel, sinBottomLat, sinRightLon, cosBottomLat, cosRightLon, bottomLat, rightLon); - this.LLHC = new GeoPoint(planetModel, sinBottomLat, sinLeftLon, cosBottomLat, cosLeftLon, bottomLat, leftLon); + this.LRHC = + new GeoPoint( + planetModel, sinBottomLat, sinRightLon, cosBottomLat, cosRightLon, bottomLat, rightLon); + this.LLHC = + new GeoPoint( + planetModel, sinBottomLat, sinLeftLon, cosBottomLat, cosLeftLon, bottomLat, leftLon); final double middleLat = (Math.PI * 0.5 + bottomLat) * 0.5; final double sinMiddleLat = Math.sin(middleLat); @@ -109,37 +121,45 @@ class GeoNorthRectangle extends GeoBaseBBox { final double sinMiddleLon = Math.sin(middleLon); final double cosMiddleLon = Math.cos(middleLon); - this.centerPoint = new GeoPoint(planetModel, sinMiddleLat, sinMiddleLon, cosMiddleLat, cosMiddleLon); + this.centerPoint = + new GeoPoint(planetModel, sinMiddleLat, sinMiddleLon, cosMiddleLat, cosMiddleLon); this.bottomPlane = new SidedPlane(centerPoint, planetModel, sinBottomLat); this.leftPlane = new SidedPlane(centerPoint, cosLeftLon, sinLeftLon); this.rightPlane = new SidedPlane(centerPoint, cosRightLon, sinRightLon); - assert(bottomPlane.isWithin(centerPoint)); - assert(leftPlane.isWithin(centerPoint)); - assert(rightPlane.isWithin(centerPoint)); - + assert (bottomPlane.isWithin(centerPoint)); + assert (leftPlane.isWithin(centerPoint)); + assert (rightPlane.isWithin(centerPoint)); + // 
Compute the backing plane - // The normal for this plane is a unit vector through the origin that goes through the middle lon. The plane's D is 0, + // The normal for this plane is a unit vector through the origin that goes through the middle + // lon. The plane's D is 0, // because it goes through the origin. this.backingPlane = new SidedPlane(this.centerPoint, cosMiddleLon, sinMiddleLon, 0.0, 0.0); - - this.bottomPlanePoints = new GeoPoint[]{LLHC, LRHC}; - this.leftPlanePoints = new GeoPoint[]{planetModel.NORTH_POLE, LLHC}; - this.rightPlanePoints = new GeoPoint[]{planetModel.NORTH_POLE, LRHC}; - this.edgePoints = new GeoPoint[]{planetModel.NORTH_POLE}; - - //System.out.println("LLHC = "+LLHC+" LRHC = "+LRHC); + this.bottomPlanePoints = new GeoPoint[] {LLHC, LRHC}; + this.leftPlanePoints = new GeoPoint[] {planetModel.NORTH_POLE, LLHC}; + this.rightPlanePoints = new GeoPoint[] {planetModel.NORTH_POLE, LRHC}; + + this.edgePoints = new GeoPoint[] {planetModel.NORTH_POLE}; + + // System.out.println("LLHC = " + LLHC + " LRHC = " + LRHC); } /** * Constructor for deserialization. + * * @param planetModel is the planet model. * @param inputStream is the input stream. */ - public GeoNorthRectangle(final PlanetModel planetModel, final InputStream inputStream) throws IOException { - this(planetModel, SerializableObject.readDouble(inputStream), SerializableObject.readDouble(inputStream), SerializableObject.readDouble(inputStream)); + public GeoNorthRectangle(final PlanetModel planetModel, final InputStream inputStream) + throws IOException { + this( + planetModel, + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream)); } @Override @@ -155,31 +175,33 @@ class GeoNorthRectangle extends GeoBaseBBox { final double newBottomLat = bottomLat - angle; // Figuring out when we escalate to a special case requires some prefiguring double currentLonSpan = rightLon - leftLon; - if (currentLonSpan < 0.0) + if (currentLonSpan < 0.0) { currentLonSpan += Math.PI * 2.0; + } double newLeftLon = leftLon - angle; double newRightLon = rightLon + angle; if (currentLonSpan + 2.0 * angle >= Math.PI * 2.0) { newLeftLon = -Math.PI; newRightLon = Math.PI; } - return GeoBBoxFactory.makeGeoBBox(planetModel, newTopLat, newBottomLat, newLeftLon, newRightLon); + return GeoBBoxFactory.makeGeoBBox( + planetModel, newTopLat, newBottomLat, newLeftLon, newRightLon); } @Override public boolean isWithin(final double x, final double y, final double z) { - return - backingPlane.isWithin(x, y, z) && - bottomPlane.isWithin(x, y, z) && - leftPlane.isWithin(x, y, z) && - rightPlane.isWithin(x, y, z); + return backingPlane.isWithin(x, y, z) + && bottomPlane.isWithin(x, y, z) + && leftPlane.isWithin(x, y, z) + && rightPlane.isWithin(x, y, z); } @Override public double getRadius() { - // Here we compute the distance from the middle point to one of the corners. However, we need to be careful - // to use the longest of three distances: the distance to a corner on the top; the distnace to a corner on the bottom, and - // the distance to the right or left edge from the center. + // Here we compute the distance from the middle point to one of the corners. However, we need + // to be careful to use the longest of three distances: the distance to a corner on the top; + // the distance to a corner on the bottom, and the distance to the right or left edge from the + // center. 
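// (Worked illustration with assumed values: for leftLon = -0.4 and rightLon = 0.4, the
// half-width term below is (0.4 - 0.0) * cos(middleLat); with middleLat = 0.6 that is
// roughly 0.4 * 0.825 = 0.33, which is then compared against the center-to-LLHC arc.)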
final double centerAngle = (rightLon - (rightLon + leftLon) * 0.5) * cosMiddleLat; final double bottomAngle = centerPoint.arcDistance(LLHC); return Math.max(centerAngle, bottomAngle); @@ -201,53 +223,71 @@ class GeoNorthRectangle extends GeoBaseBBox { } @Override - public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) { - return - p.intersects(planetModel, bottomPlane, notablePoints, bottomPlanePoints, bounds, leftPlane, rightPlane) || - p.intersects(planetModel, leftPlane, notablePoints, leftPlanePoints, bounds, rightPlane, bottomPlane) || - p.intersects(planetModel, rightPlane, notablePoints, rightPlanePoints, bounds, leftPlane, bottomPlane); + public boolean intersects( + final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) { + return p.intersects( + planetModel, + bottomPlane, + notablePoints, + bottomPlanePoints, + bounds, + leftPlane, + rightPlane) + || p.intersects( + planetModel, leftPlane, notablePoints, leftPlanePoints, bounds, rightPlane, bottomPlane) + || p.intersects( + planetModel, + rightPlane, + notablePoints, + rightPlanePoints, + bounds, + leftPlane, + bottomPlane); } @Override public boolean intersects(final GeoShape geoShape) { - return - geoShape.intersects(bottomPlane, bottomPlanePoints, leftPlane, rightPlane) || - geoShape.intersects(leftPlane, leftPlanePoints, rightPlane, bottomPlane) || - geoShape.intersects(rightPlane, rightPlanePoints, leftPlane, bottomPlane); + return geoShape.intersects(bottomPlane, bottomPlanePoints, leftPlane, rightPlane) + || geoShape.intersects(leftPlane, leftPlanePoints, rightPlane, bottomPlane) + || geoShape.intersects(rightPlane, rightPlanePoints, leftPlane, bottomPlane); } @Override public void getBounds(Bounds bounds) { super.getBounds(bounds); bounds - .addHorizontalPlane(planetModel, bottomLat, bottomPlane, leftPlane, rightPlane) - .addVerticalPlane(planetModel, leftLon, leftPlane, bottomPlane, rightPlane) - .addVerticalPlane(planetModel, rightLon, rightPlane, bottomPlane, leftPlane) - //.addIntersection(planetModel, rightPlane, leftPlane, bottomPlane) - .addPoint(LLHC).addPoint(LRHC).addPoint(planetModel.NORTH_POLE); + .addHorizontalPlane(planetModel, bottomLat, bottomPlane, leftPlane, rightPlane) + .addVerticalPlane(planetModel, leftLon, leftPlane, bottomPlane, rightPlane) + .addVerticalPlane(planetModel, rightLon, rightPlane, bottomPlane, leftPlane) + // .addIntersection(planetModel, rightPlane, leftPlane, bottomPlane) + .addPoint(LLHC) + .addPoint(LRHC) + .addPoint(planetModel.NORTH_POLE); } @Override - protected double outsideDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { - final double bottomDistance = distanceStyle.computeDistance(planetModel, bottomPlane, x,y,z, leftPlane, rightPlane); - final double leftDistance = distanceStyle.computeDistance(planetModel, leftPlane, x,y,z, rightPlane, bottomPlane); - final double rightDistance = distanceStyle.computeDistance(planetModel, rightPlane, x,y,z, leftPlane, bottomPlane); - - final double LRHCDistance = distanceStyle.computeDistance(LRHC, x,y,z); - final double LLHCDistance = distanceStyle.computeDistance(LLHC, x,y,z); - - return - Math.min( + protected double outsideDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { + final double bottomDistance = + distanceStyle.computeDistance(planetModel, bottomPlane, x, y, z, leftPlane, rightPlane); + final double leftDistance = + distanceStyle.computeDistance(planetModel, leftPlane, 
x, y, z, rightPlane, bottomPlane); + final double rightDistance = + distanceStyle.computeDistance(planetModel, rightPlane, x, y, z, leftPlane, bottomPlane); + + final double LRHCDistance = distanceStyle.computeDistance(LRHC, x, y, z); + final double LLHCDistance = distanceStyle.computeDistance(LLHC, x, y, z); + + return Math.min( bottomDistance, - Math.min( - Math.min(leftDistance, rightDistance), - Math.min(LRHCDistance, LLHCDistance))); + Math.min(Math.min(leftDistance, rightDistance), Math.min(LRHCDistance, LLHCDistance))); } @Override public boolean equals(Object o) { - if (!(o instanceof GeoNorthRectangle)) + if (!(o instanceof GeoNorthRectangle)) { return false; + } GeoNorthRectangle other = (GeoNorthRectangle) o; return super.equals(other) && other.LLHC.equals(LLHC) && other.LRHC.equals(LRHC); } @@ -262,8 +302,20 @@ class GeoNorthRectangle extends GeoBaseBBox { @Override public String toString() { - return "GeoNorthRectangle: {planetmodel="+planetModel+", bottomlat=" + bottomLat + "(" + bottomLat * 180.0 / Math.PI + "), leftlon=" + leftLon + "(" + leftLon * 180.0 / Math.PI + "), rightlon=" + rightLon + "(" + rightLon * 180.0 / Math.PI + ")}"; + return "GeoNorthRectangle: {planetmodel=" + + planetModel + + ", bottomlat=" + + bottomLat + + "(" + + bottomLat * 180.0 / Math.PI + + "), leftlon=" + + leftLon + + "(" + + leftLon * 180.0 / Math.PI + + "), rightlon=" + + rightLon + + "(" + + rightLon * 180.0 / Math.PI + + ")}"; } } - - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoOutsideDistance.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoOutsideDistance.java index 717854c542f..bee0dd49386 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoOutsideDistance.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoOutsideDistance.java @@ -17,39 +17,40 @@ package org.apache.lucene.spatial3d.geom; /** - * Implemented by Geo3D shapes that can compute the distance from a point to the closest outside edge. + * Implemented by Geo3D shapes that can compute the distance from a point to the closest outside + * edge. * * @lucene.experimental */ public interface GeoOutsideDistance extends Membership { - + // The following methods compute distances from the shape to a point // expected to be OUTSIDE the shape. Typically a value of 0.0 // is returned for points that happen to be within the shape. - + /** - * Compute this shape's distance to the GeoPoint. - * A return value of 0.0 should be returned for + * Compute this shape's distance to the GeoPoint. A return value of 0.0 should be returned for * points inside of the shape. + * * @param distanceStyle is the distance style. * @param point is the point to compute the distance to. * @return the distance. */ - public default double computeOutsideDistance(final DistanceStyle distanceStyle, final GeoPoint point) { + public default double computeOutsideDistance( + final DistanceStyle distanceStyle, final GeoPoint point) { return computeOutsideDistance(distanceStyle, point.x, point.y, point.z); } /** - * Compute this shape's distance to the GeoPoint. - * A return value of 0.0 should be returned for + * Compute this shape's distance to the GeoPoint. A return value of 0.0 should be returned for * points inside of the shape. + * * @param distanceStyle is the distance style. * @param x is the point's unit x coordinate (using U.S. convention). * @param y is the point's unit y coordinate (using U.S. convention). * @param z is the point's unit z coordinate (using U.S. 
convention). * @return the distance. */ - public double computeOutsideDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z); - + public double computeOutsideDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z); } - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPath.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPath.java index 8fe56326e89..09d92fd8ad0 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPath.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPath.java @@ -28,49 +28,50 @@ public interface GeoPath extends GeoDistanceShape { // itself to the point is not included in the calculation. /** - * Compute the nearest path distance to the GeoPoint. - * The path distance will not include the distance from the path itself to the - * point, but just the distance along the path to the nearest point on the path. + * Compute the nearest path distance to the GeoPoint. The path distance will not include the + * distance from the path itself to the point, but just the distance along the path to the nearest + * point on the path. * * @param distanceStyle is the distance style. * @param point is the point to compute the distance to. * @return the distance to the nearest path point. */ - public default double computeNearestDistance(final DistanceStyle distanceStyle, final GeoPoint point) { + public default double computeNearestDistance( + final DistanceStyle distanceStyle, final GeoPoint point) { return computeNearestDistance(distanceStyle, point.x, point.y, point.z); } /** - * Compute the nearest path distance to the GeoPoint. - * The path distance will not include the distance from the path itself to the - * point, but just the distance along the path to the nearest point on the path. + * Compute the nearest path distance to the GeoPoint. The path distance will not include the + * distance from the path itself to the point, but just the distance along the path to the nearest + * point on the path. * * @param x is the point's unit x coordinate (using U.S. convention). * @param y is the point's unit y coordinate (using U.S. convention). * @param z is the point's unit z coordinate (using U.S. convention). * @return the distance to the nearest path point. */ - public double computeNearestDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z); + public double computeNearestDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z); // The following methods compute the best distance from the path center to the point. - + /** - * Compute the shortest distance from the path center to the GeoPoint. - * The distance is meant to allow comparisons between different - * paths to find the one that goes closest to a point. + * Compute the shortest distance from the path center to the GeoPoint. The distance is meant to + * allow comparisons between different paths to find the one that goes closest to a point. * * @param distanceStyle is the distance style. * @param point is the point to compute the distance to. * @return the shortest distance from the path center to the point. 
*/ - public default double computePathCenterDistance(final DistanceStyle distanceStyle, final GeoPoint point) { + public default double computePathCenterDistance( + final DistanceStyle distanceStyle, final GeoPoint point) { return computePathCenterDistance(distanceStyle, point.x, point.y, point.z); } /** - * Compute the shortest distance from the path center to the GeoPoint. - * The distance is meant to allow comparisons between different - * paths to find the one that goes closest to a point. + * Compute the shortest distance from the path center to the GeoPoint. The distance is meant to + * allow comparisons between different paths to find the one that goes closest to a point. * * @param distanceStyle is the distance style. * @param x is the point's unit x coordinate (using U.S. convention). @@ -78,6 +79,6 @@ public interface GeoPath extends GeoDistanceShape { * @param z is the point's unit z coordinate (using U.S. convention). * @return the shortest distance from the path center to the point. */ - public double computePathCenterDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z); - + public double computePathCenterDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z); } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPathFactory.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPathFactory.java index 6389f57b70a..444d8422418 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPathFactory.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPathFactory.java @@ -25,24 +25,27 @@ import java.util.List; * @lucene.experimental */ public class GeoPathFactory { - private GeoPathFactory() { - } + private GeoPathFactory() {} /** * Create a GeoPath of the right kind given the specified information. + * * @param planetModel is the planet model. * @param maxCutoffAngle is the width of the path, measured as an angle. * @param pathPoints are the points in the path. * @return a GeoPath corresponding to what was specified. */ - public static GeoPath makeGeoPath(final PlanetModel planetModel, final double maxCutoffAngle, final GeoPoint[] pathPoints) { + public static GeoPath makeGeoPath( + final PlanetModel planetModel, final double maxCutoffAngle, final GeoPoint[] pathPoints) { if (maxCutoffAngle < Vector.MINIMUM_ANGULAR_RESOLUTION) { return new GeoDegeneratePath(planetModel, filterPoints(pathPoints)); } return new GeoStandardPath(planetModel, maxCutoffAngle, filterPoints(pathPoints)); } - /** Filter duplicate points. + /** + * Filter duplicate points. + * * @param pathPoints with the array of points. * @return the filtered array. */ @@ -56,5 +59,4 @@ public class GeoPathFactory { noIdenticalPoints.add(pathPoints[pathPoints.length - 1]); return noIdenticalPoints.toArray(new GeoPoint[noIdenticalPoints.size()]); } - } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPoint.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPoint.java index e058fe7bb64..72f7f0048a6 100755 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPoint.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPoint.java @@ -16,9 +16,9 @@ */ package org.apache.lucene.spatial3d.geom; +import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.io.IOException; /** * This class represents a point on the surface of a sphere or ellipsoid.
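To make the reformatted GeoPath and GeoPathFactory APIs above concrete, a minimal usage sketch follows. It sticks to the signatures visible in this patch (makeGeoPath, computeNearestDistance, computePathCenterDistance); the GeoPathSketch class name, the 0.1 path width, and the radian coordinates are illustrative assumptions, not part of the patch.

import org.apache.lucene.spatial3d.geom.DistanceStyle;
import org.apache.lucene.spatial3d.geom.GeoPath;
import org.apache.lucene.spatial3d.geom.GeoPathFactory;
import org.apache.lucene.spatial3d.geom.GeoPoint;
import org.apache.lucene.spatial3d.geom.PlanetModel;

public class GeoPathSketch { // hypothetical driver class, not part of this patch
  public static void main(String[] args) {
    // All angles are radians. A width below Vector.MINIMUM_ANGULAR_RESOLUTION would
    // select GeoDegeneratePath; this width selects GeoStandardPath.
    final PlanetModel planetModel = PlanetModel.SPHERE;
    final GeoPoint[] pathPoints =
        new GeoPoint[] {
          new GeoPoint(planetModel, 0.0, 0.0),
          new GeoPoint(planetModel, 0.0, 0.5),
          new GeoPoint(planetModel, 0.25, 1.0)
        };
    final GeoPath path = GeoPathFactory.makeGeoPath(planetModel, 0.1, pathPoints);
    final GeoPoint query = new GeoPoint(planetModel, 0.05, 0.25);
    // Distance along the path to the nearest path point; the perpendicular leg from
    // the path to the query point itself is excluded, per the javadoc above.
    final double nearest = path.computeNearestDistance(DistanceStyle.ARC, query);
    // Distance from the path center to the point, for comparing different paths.
    final double center = path.computePathCenterDistance(DistanceStyle.ARC, query);
    System.out.println("nearest=" + nearest + ", center=" + center);
  }
}

DistanceStyle.ARC reports angular distances in radians; other styles would change the units but not the comparison logic.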
@@ -26,23 +26,31 @@ import java.io.IOException; * @lucene.experimental */ public class GeoPoint extends Vector implements SerializableObject { - + // By making lazily-evaluated variables be "volatile", we guarantee atomicity when they // are updated. This is necessary if we are using these classes in a multi-thread fashion, // because we don't try to synchronize for the lazy computation. - - /** This is the lazily-evaluated magnitude. Some constructors include it, but others don't, and - * we try not to create extra computation by always computing it. Does not need to be - * synchronized for thread safety, because depends wholly on immutable variables of this class. */ + + /** + * This is the lazily-evaluated magnitude. Some constructors include it, but others don't, and we + * try not to create extra computation by always computing it. Does not need to be synchronized + * for thread safety, because it depends wholly on immutable variables of this class. + */ protected volatile double magnitude = Double.NEGATIVE_INFINITY; - /** Lazily-evaluated latitude. Does not need to be - * synchronized for thread safety, because depends wholly on immutable variables of this class. */ + /** + * Lazily-evaluated latitude. Does not need to be synchronized for thread safety, because it + * depends wholly on immutable variables of this class. + */ protected volatile double latitude = Double.NEGATIVE_INFINITY; - /** Lazily-evaluated longitude. Does not need to be - * synchronized for thread safety, because depends wholly on immutable variables of this class. */ + /** + * Lazily-evaluated longitude. Does not need to be synchronized for thread safety, because it + * depends wholly on immutable variables of this class. + */ protected volatile double longitude = Double.NEGATIVE_INFINITY; - /** Construct a GeoPoint from the trig functions of a lat and lon pair. + /** + * Construct a GeoPoint from the trig functions of a lat and lon pair. + * * @param planetModel is the planetModel to put the point on. * @param sinLat is the sin of the latitude. * @param sinLon is the sin of the longitude. @@ -51,24 +59,48 @@ public class GeoPoint extends Vector implements SerializableObject { * @param lat is the latitude. * @param lon is the longitude. */ - public GeoPoint(final PlanetModel planetModel, final double sinLat, final double sinLon, final double cosLat, final double cosLon, final double lat, final double lon) { - this(computeDesiredEllipsoidMagnitude(planetModel, cosLat * cosLon, cosLat * sinLon, sinLat), - cosLat * cosLon, cosLat * sinLon, sinLat, lat, lon); + public GeoPoint( + final PlanetModel planetModel, + final double sinLat, + final double sinLon, + final double cosLat, + final double cosLon, + final double lat, + final double lon) { + this( + computeDesiredEllipsoidMagnitude(planetModel, cosLat * cosLon, cosLat * sinLon, sinLat), + cosLat * cosLon, + cosLat * sinLon, + sinLat, + lat, + lon); } - - /** Construct a GeoPoint from the trig functions of a lat and lon pair. + + /** + * Construct a GeoPoint from the trig functions of a lat and lon pair. + * * @param planetModel is the planetModel to put the point on. * @param sinLat is the sin of the latitude. * @param sinLon is the sin of the longitude. * @param cosLat is the cos of the latitude. * @param cosLon is the cos of the longitude.
*/ - public GeoPoint(final PlanetModel planetModel, final double sinLat, final double sinLon, final double cosLat, final double cosLon) { - this(computeDesiredEllipsoidMagnitude(planetModel, cosLat * cosLon, cosLat * sinLon, sinLat), - cosLat * cosLon, cosLat * sinLon, sinLat); + public GeoPoint( + final PlanetModel planetModel, + final double sinLat, + final double sinLon, + final double cosLat, + final double cosLon) { + this( + computeDesiredEllipsoidMagnitude(planetModel, cosLat * cosLon, cosLat * sinLon, sinLat), + cosLat * cosLon, + cosLat * sinLon, + sinLat); } - /** Construct a GeoPoint from a latitude/longitude pair. + /** + * Construct a GeoPoint from a latitude/longitude pair. + * * @param planetModel is the planetModel to put the point on. * @param lat is the latitude. * @param lon is the longitude. @@ -76,43 +108,54 @@ public class GeoPoint extends Vector implements SerializableObject { public GeoPoint(final PlanetModel planetModel, final double lat, final double lon) { this(planetModel, Math.sin(lat), Math.sin(lon), Math.cos(lat), Math.cos(lon), lat, lon); } - - /** Construct a GeoPoint from an input stream. + + /** + * Construct a GeoPoint from an input stream. + * * @param planetModel is the planet model * @param inputStream is the input stream */ public GeoPoint(final PlanetModel planetModel, final InputStream inputStream) throws IOException { this(inputStream); } - - /** Construct a GeoPoint from an input stream with no planet model. + + /** + * Construct a GeoPoint from an input stream with no planet model. + * * @param inputStream is the input stream */ public GeoPoint(final InputStream inputStream) throws IOException { - // Note: this relies on left-right parameter execution order!! Much code depends on that though and - // it is apparently in a java spec: https://stackoverflow.com/questions/2201688/order-of-execution-of-parameters-guarantees-in-java - this(SerializableObject.readDouble(inputStream), - SerializableObject.readDouble(inputStream), - SerializableObject.readDouble(inputStream), - SerializableObject.readDouble(inputStream), - SerializableObject.readDouble(inputStream)); + // Note: this relies on left-right parameter execution order!! Much code depends on that though + // and it is apparently in a java spec: + // https://stackoverflow.com/questions/2201688/order-of-execution-of-parameters-guarantees-in-java + this( + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream)); } - - /** Construct a GeoPoint from five unchecked parameters: lat, lon, x, y, z. This is primarily used for deserialization, - * but can also be used to fully initialize a point externally. + + /** + * Construct a GeoPoint from five unchecked parameters: lat, lon, x, y, z. This is primarily used + * for deserialization, but can also be used to fully initialize a point externally. + * * @param lat is the latitude in radians * @param lon is the longitude in radians * @param x is the unit x value * @param y is the unit y value * @param z is the unit z value */ - public GeoPoint(final double lat, final double lon, final double x, final double y, final double z) { + public GeoPoint( + final double lat, final double lon, final double x, final double y, final double z) { super(x, y, z); this.latitude = lat; this.longitude = lon; } - - /** Construct a GeoPoint from a unit (x,y,z) vector and a magnitude. 
+ + /** + * Construct a GeoPoint from a unit (x,y,z) vector and a magnitude. + * * @param magnitude is the desired magnitude, provided to put the point on the ellipsoid. * @param x is the unit x value. * @param y is the unit y value. @@ -120,20 +163,30 @@ public class GeoPoint extends Vector implements SerializableObject { * @param lat is the latitude. * @param lon is the longitude. */ - public GeoPoint(final double magnitude, final double x, final double y, final double z, double lat, double lon) { + public GeoPoint( + final double magnitude, + final double x, + final double y, + final double z, + double lat, + double lon) { super(x * magnitude, y * magnitude, z * magnitude); this.magnitude = magnitude; if (lat > Math.PI * 0.5 || lat < -Math.PI * 0.5) { - throw new IllegalArgumentException("Latitude " + lat + " is out of range: must range from -Math.PI/2 to Math.PI/2"); + throw new IllegalArgumentException( + "Latitude " + lat + " is out of range: must range from -Math.PI/2 to Math.PI/2"); } if (lon < -Math.PI || lon > Math.PI) { - throw new IllegalArgumentException("Longitude " + lon + " is out of range: must range from -Math.PI to Math.PI"); + throw new IllegalArgumentException( + "Longitude " + lon + " is out of range: must range from -Math.PI to Math.PI"); } this.latitude = lat; this.longitude = lon; } - /** Construct a GeoPoint from a unit (x,y,z) vector and a magnitude. + /** + * Construct a GeoPoint from a unit (x,y,z) vector and a magnitude. + * * @param magnitude is the desired magnitude, provided to put the point on the ellipsoid. * @param x is the unit x value. * @param y is the unit y value. @@ -143,9 +196,10 @@ public class GeoPoint extends Vector implements SerializableObject { super(x * magnitude, y * magnitude, z * magnitude); this.magnitude = magnitude; } - - /** Construct a GeoPoint from an (x,y,z) value. - * The (x,y,z) tuple must be on the desired ellipsoid. + + /** + * Construct a GeoPoint from an (x,y,z) value. The (x,y,z) tuple must be on the desired ellipsoid. + * * @param x is the ellipsoid point x value. * @param y is the ellipsoid point y value. * @param z is the ellipsoid point z value. @@ -163,89 +217,103 @@ public class GeoPoint extends Vector implements SerializableObject { SerializableObject.writeDouble(outputStream, z); } - /** Compute an arc distance between two points. - * Note: this is an angular distance, and not a surface distance, and is therefore independent of planet model. - * For surface distance, see {@link PlanetModel#surfaceDistance(GeoPoint, GeoPoint)} + /** + * Compute an arc distance between two points. Note: this is an angular distance, and not a + * surface distance, and is therefore independent of planet model. For surface distance, see + * {@link PlanetModel#surfaceDistance(GeoPoint, GeoPoint)} + * * @param v is the second point. * @return the angle, in radians, between the two points. */ public double arcDistance(final Vector v) { - return Tools.safeAcos(dotProduct(v)/(magnitude() * v.magnitude())); + return Tools.safeAcos(dotProduct(v) / (magnitude() * v.magnitude())); } - /** Compute an arc distance between two points. + /** + * Compute an arc distance between two points. + * * @param x is the x part of the second point. * @param y is the y part of the second point. * @param z is the z part of the second point. * @return the angle, in radians, between the two points. 
*/ public double arcDistance(final double x, final double y, final double z) { - return Tools.safeAcos(dotProduct(x,y,z)/(magnitude() * Vector.magnitude(x,y,z))); + return Tools.safeAcos(dotProduct(x, y, z) / (magnitude() * Vector.magnitude(x, y, z))); } - /** Compute the latitude for the point. + /** + * Compute the latitude for the point. + * * @return the latitude. */ public double getLatitude() { - double lat = this.latitude;//volatile-read once - if (lat == Double.NEGATIVE_INFINITY) + double lat = this.latitude; // volatile-read once + if (lat == Double.NEGATIVE_INFINITY) { this.latitude = lat = Math.asin(z / magnitude()); + } return lat; } - - /** Compute the longitude for the point. - * @return the longitude value. Uses 0.0 if there is no computable longitude. + + /** + * Compute the longitude for the point. + * + * @return the longitude value. Uses 0.0 if there is no computable longitude. */ public double getLongitude() { - double lon = this.longitude;//volatile-read once + double lon = this.longitude; // volatile-read once if (lon == Double.NEGATIVE_INFINITY) { - if (Math.abs(x) < MINIMUM_RESOLUTION && Math.abs(y) < MINIMUM_RESOLUTION) + if (Math.abs(x) < MINIMUM_RESOLUTION && Math.abs(y) < MINIMUM_RESOLUTION) { this.longitude = lon = 0.0; - else - this.longitude = lon = Math.atan2(y,x); + } else { + this.longitude = lon = Math.atan2(y, x); + } } return lon; } - - /** Compute the linear magnitude of the point. + + /** + * Compute the linear magnitude of the point. + * * @return the magnitude. */ @Override public double magnitude() { - double mag = this.magnitude;//volatile-read once + double mag = this.magnitude; // volatile-read once if (mag == Double.NEGATIVE_INFINITY) { this.magnitude = mag = super.magnitude(); } return mag; } - - /** Compute whether point matches another. - *@param p is the other point. - *@return true if the same. + + /** + * Compute whether point matches another. + * + * @param p is the other point. + * @return true if the same. */ public boolean isIdentical(final GeoPoint p) { return isIdentical(p.x, p.y, p.z); } - - /** Compute whether point matches another. - *@param x is the x value - *@param y is the y value - *@param z is the z value - *@return true if the same. + + /** + * Compute whether point matches another. + * + * @param x is the x value + * @param y is the y value + * @param z is the z value + * @return true if the same. 
*/ public boolean isIdentical(final double x, final double y, final double z) { - return Math.abs(this.x - x) < MINIMUM_RESOLUTION && - Math.abs(this.y - y) < MINIMUM_RESOLUTION && - Math.abs(this.z - z) < MINIMUM_RESOLUTION; + return Math.abs(this.x - x) < MINIMUM_RESOLUTION + && Math.abs(this.y - y) < MINIMUM_RESOLUTION + && Math.abs(this.z - z) < MINIMUM_RESOLUTION; } - + @Override public String toString() { if (this.longitude == Double.NEGATIVE_INFINITY) { return super.toString(); } - return "[lat="+getLatitude()+", lon="+getLongitude()+"("+super.toString()+")]"; + return "[lat=" + getLatitude() + ", lon=" + getLongitude() + "(" + super.toString() + ")]"; } - - } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPointShape.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPointShape.java index f0bb7214fdd..cb3a41749fe 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPointShape.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPointShape.java @@ -18,8 +18,7 @@ package org.apache.lucene.spatial3d.geom; /** - * Interface describing a GeoPointShape shape.It may represents a degenerated - * bounding box or a degenerated circle, hence it extends such interfaces. + * Interface describing a GeoPointShape shape. It may represent a degenerate bounding box or a + * degenerate circle, hence it extends both interfaces. */ -public interface GeoPointShape extends GeoCircle, GeoBBox { -} +public interface GeoPointShape extends GeoCircle, GeoBBox {} diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPointShapeFactory.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPointShapeFactory.java index 4de0d05c6f1..65357c460f7 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPointShapeFactory.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPointShapeFactory.java @@ -17,22 +17,21 @@ package org.apache.lucene.spatial3d.geom; -/** - * Class which constructs a GeoPointShape. - */ +/** Class which constructs a GeoPointShape. */ public class GeoPointShapeFactory { - private GeoPointShapeFactory() { - } + private GeoPointShapeFactory() {} /** * Create a GeoPointShape with the provided information. + * * @param planetModel the planet model * @param lat the latitude * @param lon the longitude * @return a GeoPointShape corresponding to what was specified.
*/ - public static GeoPointShape makeGeoPointShape(final PlanetModel planetModel, final double lat, final double lon) { + public static GeoPointShape makeGeoPointShape( + final PlanetModel planetModel, final double lat, final double lon) { return new GeoDegeneratePoint(planetModel, lat, lon); } } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPolygon.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPolygon.java index 9125d14fc92..991c1e595d2 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPolygon.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPolygon.java @@ -21,6 +21,4 @@ package org.apache.lucene.spatial3d.geom; * * @lucene.experimental */ -public interface GeoPolygon extends GeoAreaShape { - -} +public interface GeoPolygon extends GeoAreaShape {} diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPolygonFactory.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPolygonFactory.java index 1937b071d06..1b834a8dee5 100755 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPolygonFactory.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPolygonFactory.java @@ -19,13 +19,13 @@ package org.apache.lucene.spatial3d.geom; import java.util.ArrayList; import java.util.BitSet; import java.util.Collections; -import java.util.List; -import java.util.Random; -import java.util.Iterator; -import java.util.Set; -import java.util.HashSet; -import java.util.Map; import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.Set; /** * Class which constructs a GeoMembershipShape representing an arbitrary polygon. @@ -33,133 +33,145 @@ import java.util.HashMap; * @lucene.experimental */ public class GeoPolygonFactory { - private GeoPolygonFactory() { - } + private GeoPolygonFactory() {} private static final int SMALL_POLYGON_CUTOFF_EDGES = 100; - - /** Create a GeoConcavePolygon using the specified points. The polygon must have - * a maximum extent larger than PI. The siding of the polygon is chosen so that any - * adjacent point to a segment provides an exterior measurement and therefore, - * the polygon is a truly concave polygon. Note that this method should only be used when there is certainty - * that we are dealing with a concave polygon, e.g. the polygon has been serialized. - * If there is not such certainty, please refer to @{@link GeoPolygonFactory#makeGeoPolygon(PlanetModel, List)}. + + /** + * Create a GeoConcavePolygon using the specified points. The polygon must have a maximum extent + * larger than PI. The siding of the polygon is chosen so that any adjacent point to a segment + * provides an exterior measurement and therefore, the polygon is a truly concave polygon. Note + * that this method should only be used when there is certainty that we are dealing with a concave + * polygon, e.g. the polygon has been serialized. If there is not such certainty, please refer + * to @{@link GeoPolygonFactory#makeGeoPolygon(PlanetModel, List)}. + * * @param pointList is a list of the GeoPoints to build an arbitrary polygon out of. * @return a GeoPolygon corresponding to what was specified. 
*/ - public static GeoPolygon makeGeoConcavePolygon(final PlanetModel planetModel, - final List pointList) { + public static GeoPolygon makeGeoConcavePolygon( + final PlanetModel planetModel, final List pointList) { return new GeoConcavePolygon(planetModel, pointList); } - /** Create a GeoConvexPolygon using the specified points. The polygon must have - * a maximum extent no larger than PI. The siding of the polygon is chosen so that any adjacent - * point to a segment provides an interior measurement and therefore - * the polygon is a truly convex polygon. Note that this method should only be used when - * there is certainty that we are dealing with a convex polygon, e.g. the polygon has been serialized. - * If there is not such certainty, please refer to @{@link GeoPolygonFactory#makeGeoPolygon(PlanetModel, List)}. + /** + * Create a GeoConvexPolygon using the specified points. The polygon must have a maximum extent no + * larger than PI. The siding of the polygon is chosen so that any adjacent point to a segment + * provides an interior measurement and therefore the polygon is a truly convex polygon. Note that + * this method should only be used when there is certainty that we are dealing with a convex + * polygon, e.g. the polygon has been serialized. If there is not such certainty, please refer + * to @{@link GeoPolygonFactory#makeGeoPolygon(PlanetModel, List)}. + * * @param pointList is a list of the GeoPoints to build an arbitrary polygon out of. * @return a GeoPolygon corresponding to what was specified. */ - public static GeoPolygon makeGeoConvexPolygon(final PlanetModel planetModel, - final List pointList) { + public static GeoPolygon makeGeoConvexPolygon( + final PlanetModel planetModel, final List pointList) { return new GeoConvexPolygon(planetModel, pointList); } - - /** Create a GeoConcavePolygon using the specified points and holes. The polygon must have - * a maximum extent larger than PI. The siding of the polygon is chosen so that any adjacent - * point to a segment provides an exterior measurement and therefore - * the polygon is a truly concave polygon. Note that this method should only be used when - * there is certainty that we are dealing with a concave polygon, e.g. the polygon has been serialized. - * If there is not such certainty, please refer to {@link GeoPolygonFactory#makeGeoPolygon(PlanetModel, List, List)}. + /** + * Create a GeoConcavePolygon using the specified points and holes. The polygon must have a + * maximum extent larger than PI. The siding of the polygon is chosen so that any adjacent point + * to a segment provides an exterior measurement and therefore the polygon is a truly concave + * polygon. Note that this method should only be used when there is certainty that we are dealing + * with a concave polygon, e.g. the polygon has been serialized. If there is not such certainty, + * please refer to {@link GeoPolygonFactory#makeGeoPolygon(PlanetModel, List, List)}. + * * @param pointList is a list of the GeoPoints to build an arbitrary polygon out of. - * @param holes is a list of polygons representing "holes" in the outside polygon. Holes describe the area outside - * each hole as being "in set". Null == none. + * @param holes is a list of polygons representing "holes" in the outside polygon. Holes describe + * the area outside each hole as being "in set". Null == none. * @return a GeoPolygon corresponding to what was specified. 
*/ - public static GeoPolygon makeGeoConcavePolygon(final PlanetModel planetModel, - final List pointList, - final List holes) { - return new GeoConcavePolygon(planetModel,pointList, holes); + public static GeoPolygon makeGeoConcavePolygon( + final PlanetModel planetModel, final List pointList, final List holes) { + return new GeoConcavePolygon(planetModel, pointList, holes); } - /** Create a GeoConvexPolygon using the specified points and holes. The polygon must have - * a maximum extent no larger than PI. The siding of the polygon is chosen so that any adjacent - * point to a segment provides an interior measurement and therefore - * the polygon is a truly convex polygon. Note that this method should only be used when - * there is certainty that we are dealing with a convex polygon, e.g. the polygon has been serialized. - * If there is not such certainty, please refer to {@link GeoPolygonFactory#makeGeoPolygon(PlanetModel, List, List)}. + /** + * Create a GeoConvexPolygon using the specified points and holes. The polygon must have a maximum + * extent no larger than PI. The siding of the polygon is chosen so that any adjacent point to a + * segment provides an interior measurement and therefore the polygon is a truly convex polygon. + * Note that this method should only be used when there is certainty that we are dealing with a + * convex polygon, e.g. the polygon has been serialized. If there is not such certainty, please + * refer to {@link GeoPolygonFactory#makeGeoPolygon(PlanetModel, List, List)}. + * * @param pointList is a list of the GeoPoints to build an arbitrary polygon out of. - * @param holes is a list of polygons representing "holes" in the outside polygon. Holes describe the area outside - * each hole as being "in set". Null == none. + * @param holes is a list of polygons representing "holes" in the outside polygon. Holes describe + * the area outside each hole as being "in set". Null == none. * @return a GeoPolygon corresponding to what was specified. */ - public static GeoPolygon makeGeoConvexPolygon(final PlanetModel planetModel, - final List pointList, - final List holes) { - return new GeoConvexPolygon(planetModel,pointList, holes); + public static GeoPolygon makeGeoConvexPolygon( + final PlanetModel planetModel, final List pointList, final List holes) { + return new GeoConvexPolygon(planetModel, pointList, holes); } - /** Use this class to specify a polygon with associated holes. - */ + /** Use this class to specify a polygon with associated holes. */ public static class PolygonDescription { /** The list of points */ public final List points; /** The list of holes */ public final List holes; - - /** Instantiate the polygon description. + + /** + * Instantiate the polygon description. + * * @param points is the list of points. */ public PolygonDescription(final List points) { this(points, new ArrayList<>()); } - /** Instantiate the polygon description. + /** + * Instantiate the polygon description. + * * @param points is the list of points. * @param holes is the list of holes. */ - public PolygonDescription(final List points, final List holes) { + public PolygonDescription( + final List points, final List holes) { this.points = points; this.holes = holes; } - } - /** Create a GeoPolygon using the specified points and holes, using order to determine - * siding of the polygon. 
Much like ESRI, this method uses clockwise to indicate the space - * on the same side of the shape as being inside, and counter-clockwise to indicate the - * space on the opposite side as being inside. - * @param description describes the polygon and its associated holes. If points go - * clockwise from a given pole, then that pole should be within the polygon. If points go - * counter-clockwise, then that pole should be outside the polygon. - * @return a GeoPolygon corresponding to what was specified, or null if a valid polygon cannot be generated - * from this input. + /** + * Create a GeoPolygon using the specified points and holes, using order to determine siding of + * the polygon. Much like ESRI, this method uses clockwise to indicate the space on the same side + * of the shape as being inside, and counter-clockwise to indicate the space on the opposite side + * as being inside. + * + * @param description describes the polygon and its associated holes. If points go clockwise from + * a given pole, then that pole should be within the polygon. If points go counter-clockwise, + * then that pole should be outside the polygon. + * @return a GeoPolygon corresponding to what was specified, or null if a valid polygon cannot be + * generated from this input. */ - public static GeoPolygon makeGeoPolygon(final PlanetModel planetModel, - final PolygonDescription description) { + public static GeoPolygon makeGeoPolygon( + final PlanetModel planetModel, final PolygonDescription description) { return makeGeoPolygon(planetModel, description, 0.0); } - /** Create a GeoPolygon using the specified points and holes, using order to determine - * siding of the polygon. Much like ESRI, this method uses clockwise to indicate the space - * on the same side of the shape as being inside, and counter-clockwise to indicate the - * space on the opposite side as being inside. - * @param description describes the polygon and its associated holes. If points go - * clockwise from a given pole, then that pole should be within the polygon. If points go - * counter-clockwise, then that pole should be outside the polygon. - * @param leniencyValue is the maximum distance (in units) that a point can be from the plane and still be considered as - * belonging to the plane. Any value greater than zero may cause some of the provided points that are in fact outside - * the strict definition of co-planarity, but are within this distance, to be discarded for the purposes of creating a - * "safe" polygon. - * @return a GeoPolygon corresponding to what was specified, or null if a valid polygon cannot be generated - * from this input. + /** + * Create a GeoPolygon using the specified points and holes, using order to determine siding of + * the polygon. Much like ESRI, this method uses clockwise to indicate the space on the same side + * of the shape as being inside, and counter-clockwise to indicate the space on the opposite side + * as being inside. + * + * @param description describes the polygon and its associated holes. If points go clockwise from + * a given pole, then that pole should be within the polygon. If points go counter-clockwise, + * then that pole should be outside the polygon. + * @param leniencyValue is the maximum distance (in units) that a point can be from the plane and + * still be considered as belonging to the plane. 
Any value greater than zero may cause some + * of the provided points that are in fact outside the strict definition of co-planarity, but + * are within this distance, to be discarded for the purposes of creating a "safe" polygon. + * @return a GeoPolygon corresponding to what was specified, or null if a valid polygon cannot be + * generated from this input. */ - public static GeoPolygon makeGeoPolygon(final PlanetModel planetModel, - final PolygonDescription description, - final double leniencyValue) { - + public static GeoPolygon makeGeoPolygon( + final PlanetModel planetModel, + final PolygonDescription description, + final double leniencyValue) { + // First, convert the holes to polygons in their own right. final List holes; if (description.holes != null && description.holes.size() > 0) { @@ -176,46 +188,52 @@ public class GeoPolygonFactory { } if (description.points.size() <= SMALL_POLYGON_CUTOFF_EDGES) { - // First, exercise a sanity filter on the provided pointList, and remove identical points, linear points, and backtracks - //System.err.println(" filtering "+pointList.size()+" points..."); - //final long startTime = System.currentTimeMillis(); + // First, exercise a sanity filter on the provided pointList, and remove identical points, + // linear points, and backtracks + // System.err.println(" filtering "+pointList.size()+" points..."); + // final long startTime = System.currentTimeMillis(); final List firstFilteredPointList = filterPoints(description.points); if (firstFilteredPointList == null) { return null; } final List filteredPointList = filterEdges(firstFilteredPointList, leniencyValue); - //System.err.println(" ...done in "+(System.currentTimeMillis()-startTime)+"ms ("+((filteredPointList==null)?"degenerate":(filteredPointList.size()+" points"))+")"); + // System.err.println(" ...done in " + (System.currentTimeMillis() - startTime) + // + "ms (" + ((filteredPointList == null) ? "degenerate" : + // (filteredPointList.size() + " points")) + ")"); if (filteredPointList == null) { return null; } try { - //First approximation to find a point + // First approximation to find a point final GeoPoint centerOfMass = getCenterOfMass(planetModel, filteredPointList); final Boolean isCenterOfMassInside = isInsidePolygon(centerOfMass, filteredPointList); if (isCenterOfMassInside != null) { - return generateGeoPolygon(planetModel, filteredPointList, holes, centerOfMass, isCenterOfMassInside); + return generateGeoPolygon( + planetModel, filteredPointList, holes, centerOfMass, isCenterOfMassInside); } - - //System.err.println("points="+pointList); - // Create a random number generator. Effectively this furnishes us with a repeatable sequence - // of points to use for poles. + + // System.err.println("points=" + pointList); + // Create a random number generator. Effectively this furnishes us with a repeatable + // sequence of points to use for poles. final Random generator = new Random(1234); for (int counter = 0; counter < 1000000; counter++) { - //counter++; + // counter++; // Pick the next random pole final GeoPoint pole = pickPole(generator, planetModel, filteredPointList); // Is it inside or outside? 
final Boolean isPoleInside = isInsidePolygon(pole, filteredPointList); if (isPoleInside != null) { // Legal pole - //System.out.println("Took "+counter+" iterations to find pole"); - //System.out.println("Pole = "+pole+"; isInside="+isPoleInside+"; pointList = "+pointList); + // System.out.println("Took " + counter + " iterations to find pole"); + // System.out.println("Pole = " + pole + "; isInside=" + isPoleInside + // + "; pointList = " + pointList); return generateGeoPolygon(planetModel, filteredPointList, holes, pole, isPoleInside); } // If pole choice was illegal, try another one } - throw new IllegalArgumentException("cannot find a point that is inside the polygon "+filteredPointList); + throw new IllegalArgumentException( + "cannot find a point that is inside the polygon " + filteredPointList); } catch (TileException e) { // Couldn't tile the polygon; use GeoComplexPolygon instead, if we can. } @@ -226,103 +244,116 @@ public class GeoPolygonFactory { return makeLargeGeoPolygon(planetModel, pd); } - /** Create a GeoPolygon using the specified points and holes, using order to determine - * siding of the polygon. Much like ESRI, this method uses clockwise to indicate the space - * on the same side of the shape as being inside, and counter-clockwise to indicate the - * space on the opposite side as being inside. - * @param pointList is a list of the GeoPoints to build an arbitrary polygon out of. If points go - * clockwise from a given pole, then that pole should be within the polygon. If points go - * counter-clockwise, then that pole should be outside the polygon. + /** + * Create a GeoPolygon using the specified points and holes, using order to determine siding of + * the polygon. Much like ESRI, this method uses clockwise to indicate the space on the same side + * of the shape as being inside, and counter-clockwise to indicate the space on the opposite side + * as being inside. + * + * @param pointList is a list of the GeoPoints to build an arbitrary polygon out of. If points go + * clockwise from a given pole, then that pole should be within the polygon. If points go + * counter-clockwise, then that pole should be outside the polygon. * @return a GeoPolygon corresponding to what was specified. */ - public static GeoPolygon makeGeoPolygon(final PlanetModel planetModel, - final List pointList) { + public static GeoPolygon makeGeoPolygon( + final PlanetModel planetModel, final List pointList) { return makeGeoPolygon(planetModel, pointList, null); } - /** Create a GeoPolygon using the specified points and holes, using order to determine - * siding of the polygon. Much like ESRI, this method uses clockwise to indicate the space - * on the same side of the shape as being inside, and counter-clockwise to indicate the - * space on the opposite side as being inside. - * @param pointList is a list of the GeoPoints to build an arbitrary polygon out of. If points go - * clockwise from a given pole, then that pole should be within the polygon. If points go - * counter-clockwise, then that pole should be outside the polygon. - * @param holes is a list of polygons representing "holes" in the outside polygon. Holes describe the area outside - * each hole as being "in set". Null == none. - * @return a GeoPolygon corresponding to what was specified, or null if a valid polygon cannot be generated - * from this input. + /** + * Create a GeoPolygon using the specified points and holes, using order to determine siding of + * the polygon. 
Much like ESRI, this method uses clockwise to indicate the space on the same side + * of the shape as being inside, and counter-clockwise to indicate the space on the opposite side + * as being inside. + * + * @param pointList is a list of the GeoPoints to build an arbitrary polygon out of. If points go + * clockwise from a given pole, then that pole should be within the polygon. If points go + * counter-clockwise, then that pole should be outside the polygon. + * @param holes is a list of polygons representing "holes" in the outside polygon. Holes describe + * the area outside each hole as being "in set". Null == none. + * @return a GeoPolygon corresponding to what was specified, or null if a valid polygon cannot be + * generated from this input. */ - public static GeoPolygon makeGeoPolygon(final PlanetModel planetModel, - final List pointList, - final List holes) { + public static GeoPolygon makeGeoPolygon( + final PlanetModel planetModel, final List pointList, final List holes) { return makeGeoPolygon(planetModel, pointList, holes, 0.0); } - /** Create a GeoPolygon using the specified points and holes, using order to determine - * siding of the polygon. Much like ESRI, this method uses clockwise to indicate the space - * on the same side of the shape as being inside, and counter-clockwise to indicate the - * space on the opposite side as being inside. - * @param pointList is a list of the GeoPoints to build an arbitrary polygon out of. If points go - * clockwise from a given pole, then that pole should be within the polygon. If points go - * counter-clockwise, then that pole should be outside the polygon. - * @param holes is a list of polygons representing "holes" in the outside polygon. Holes describe the area outside - * each hole as being "in set". Null == none. - * @param leniencyValue is the maximum distance (in units) that a point can be from the plane and still be considered as - * belonging to the plane. Any value greater than zero may cause some of the provided points that are in fact outside - * the strict definition of co-planarity, but are within this distance, to be discarded for the purposes of creating a - * "safe" polygon. - * @return a GeoPolygon corresponding to what was specified, or null if a valid polygon cannot be generated - * from this input. + /** + * Create a GeoPolygon using the specified points and holes, using order to determine siding of + * the polygon. Much like ESRI, this method uses clockwise to indicate the space on the same side + * of the shape as being inside, and counter-clockwise to indicate the space on the opposite side + * as being inside. + * + * @param pointList is a list of the GeoPoints to build an arbitrary polygon out of. If points go + * clockwise from a given pole, then that pole should be within the polygon. If points go + * counter-clockwise, then that pole should be outside the polygon. + * @param holes is a list of polygons representing "holes" in the outside polygon. Holes describe + * the area outside each hole as being "in set". Null == none. + * @param leniencyValue is the maximum distance (in units) that a point can be from the plane and + * still be considered as belonging to the plane. Any value greater than zero may cause some + * of the provided points that are in fact outside the strict definition of co-planarity, but + * are within this distance, to be discarded for the purposes of creating a "safe" polygon. 
+ * @return a GeoPolygon corresponding to what was specified, or null if a valid polygon cannot be + * generated from this input. */ - public static GeoPolygon makeGeoPolygon(final PlanetModel planetModel, - final List pointList, - final List holes, - final double leniencyValue) { - // First, exercise a sanity filter on the provided pointList, and remove identical points, linear points, and backtracks - //System.err.println(" filtering "+pointList.size()+" points..."); - //final long startTime = System.currentTimeMillis(); + public static GeoPolygon makeGeoPolygon( + final PlanetModel planetModel, + final List pointList, + final List holes, + final double leniencyValue) { + // First, exercise a sanity filter on the provided pointList, and remove identical points, + // linear points, and backtracks + // System.err.println(" filtering " + pointList.size() + " points..."); + // final long startTime = System.currentTimeMillis(); final List firstFilteredPointList = filterPoints(pointList); if (firstFilteredPointList == null) { return null; } final List filteredPointList = filterEdges(firstFilteredPointList, leniencyValue); - //System.err.println(" ...done in "+(System.currentTimeMillis()-startTime)+"ms ("+((filteredPointList==null)?"degenerate":(filteredPointList.size()+" points"))+")"); + // System.err.println(" ...done in " + (System.currentTimeMillis() - startTime) + // + "ms (" + ((filteredPointList==null) ? "degenerate" : + // (filteredPointList.size() + " points")) + ")"); if (filteredPointList == null) { return null; } try { - //First approximation to find a point + // First approximation to find a point final GeoPoint centerOfMass = getCenterOfMass(planetModel, filteredPointList); final Boolean isCenterOfMassInside = isInsidePolygon(centerOfMass, filteredPointList); if (isCenterOfMassInside != null) { - return generateGeoPolygon(planetModel, filteredPointList, holes, centerOfMass, isCenterOfMassInside); + return generateGeoPolygon( + planetModel, filteredPointList, holes, centerOfMass, isCenterOfMassInside); } - - //System.err.println("points="+pointList); + + // System.err.println("points=" + pointList); // Create a random number generator. Effectively this furnishes us with a repeatable sequence // of points to use for poles. final Random generator = new Random(1234); for (int counter = 0; counter < 1000000; counter++) { - //counter++; + // counter++; // Pick the next random pole final GeoPoint pole = pickPole(generator, planetModel, filteredPointList); // Is it inside or outside? final Boolean isPoleInside = isInsidePolygon(pole, filteredPointList); if (isPoleInside != null) { // Legal pole - //System.out.println("Took "+counter+" iterations to find pole"); - //System.out.println("Pole = "+pole+"; isInside="+isPoleInside+"; pointList = "+pointList); + // System.out.println("Took " + counter + " iterations to find pole"); + // System.out.println("Pole = " + pole + "; isInside=" + isPoleInside + // + "; pointList = " + pointList); return generateGeoPolygon(planetModel, filteredPointList, holes, pole, isPoleInside); } // If pole choice was illegal, try another one } - throw new IllegalArgumentException("cannot find a point that is inside the polygon "+filteredPointList); + throw new IllegalArgumentException( + "cannot find a point that is inside the polygon " + filteredPointList); } catch (TileException e) { // Couldn't tile the polygon; use GeoComplexPolygon instead, if we can. 
if (holes != null && holes.size() > 0) { - // We currently cannot get the list of points that went into making a hole back out, so don't allow this case. - // In order to support it, we really need to change the API contract, which is a bigger deal. + // We currently cannot get the list of points that went into making a hole back out, so + // don't allow this case. In order to support it, we really need to change the API + // contract, which is a bigger deal. throw new IllegalArgumentException(e.getMessage()); } final List description = new ArrayList<>(1); @@ -331,13 +362,13 @@ public class GeoPolygonFactory { } } - /** Generate a point at the center of mass of a list of points. - */ - private static GeoPoint getCenterOfMass(final PlanetModel planetModel, final List points) { + /** Generate a point at the center of mass of a list of points. */ + private static GeoPoint getCenterOfMass( + final PlanetModel planetModel, final List points) { double x = 0; double y = 0; double z = 0; - //get center of mass + // get center of mass for (final GeoPoint point : points) { x += point.x; y += point.y; @@ -346,47 +377,52 @@ public class GeoPolygonFactory { // Normalization is not needed because createSurfacePoint does the scaling anyway. return planetModel.createSurfacePoint(x, y, z); } - - /** Create a large GeoPolygon. This is one which has more than 100 sides and/or may have resolution problems - * with very closely spaced points, which often occurs when the polygon was constructed to approximate curves. No tiling - * is done, and intersections and membership are optimized for having large numbers of sides. + + /** + * Create a large GeoPolygon. This is one which has more than 100 sides and/or may have resolution + * problems with very closely spaced points, which often occurs when the polygon was constructed + * to approximate curves. No tiling is done, and intersections and membership are optimized for + * having large numbers of sides. * - * This method does very little checking for legality. It expects the incoming shapes to not intersect - * each other. The shapes can be disjoint or nested. If the shapes listed are nested, then we are describing holes. - * There is no limit to the depth of holes. However, if a shape is nested within another it must be explicitly - * described as being a child of the other shape. + *
<p>
    This method does very little checking for legality. It expects the incoming shapes to not + * intersect each other. The shapes can be disjoint or nested. If the shapes listed are nested, + * then we are describing holes. There is no limit to the depth of holes. However, if a shape is + * nested within another it must be explicitly described as being a child of the other shape. + * + *
<p>
    Membership in any given shape is described by the clockwise/counterclockwise direction of + * the points. The clockwise direction indicates that a point inside is "in-set", while a + * counter-clockwise direction implies that a point inside is "out-of-set". * - * Membership in any given shape is described by the clockwise/counterclockwise direction of the points. The - * clockwise direction indicates that a point inside is "in-set", while a counter-clockwise direction implies that - * a point inside is "out-of-set". - * * @param planetModel is the planet model. * @param shapesList is the list of polygons we should be making. * @return the GeoPolygon, or null if it cannot be constructed. */ - public static GeoPolygon makeLargeGeoPolygon(final PlanetModel planetModel, - final List shapesList) { - - // We're going to be building a single-level list of shapes in the end, with a single point that we know to be inside/outside, which is - // not on an edge. - + public static GeoPolygon makeLargeGeoPolygon( + final PlanetModel planetModel, final List shapesList) { + + // We're going to be building a single-level list of shapes in the end, with a single point that + // we know to be inside/outside, which is not on an edge. + final List> pointsList = new ArrayList<>(); - + BestShape testPointShape = null; for (final PolygonDescription shape : shapesList) { - // Convert this shape and its holes to a general list of shapes. We also need to identify exactly one - // legal, non-degenerate shape with no children that we can use to find a test point. We also optimize - // to choose as small as possible a polygon for determining the in-set-ness of the test point. + // Convert this shape and its holes to a general list of shapes. We also need to identify + // exactly one legal, non-degenerate shape with no children that we can use to find a test + // point. We also optimize to choose as small as possible a polygon for determining the + // in-set-ness of the test point. testPointShape = convertPolygon(pointsList, shape, testPointShape, true); } - + // If there's no polygon we can use to determine a test point, we throw up. if (testPointShape == null) { - throw new IllegalArgumentException("couldn't find a non-degenerate polygon for in-set determination"); + throw new IllegalArgumentException( + "couldn't find a non-degenerate polygon for in-set determination"); } final GeoPoint centerOfMass = getCenterOfMass(planetModel, testPointShape.points); - final GeoComplexPolygon comRval = testPointShape.createGeoComplexPolygon(planetModel, pointsList, centerOfMass); + final GeoComplexPolygon comRval = + testPointShape.createGeoComplexPolygon(planetModel, pointsList, centerOfMass); if (comRval != null) { return comRval; } @@ -398,29 +434,37 @@ public class GeoPolygonFactory { for (int counter = 0; counter < 1000000; counter++) { // Pick the next random pole final GeoPoint pole = pickPole(generator, planetModel, testPointShape.points); - final GeoComplexPolygon rval = testPointShape.createGeoComplexPolygon(planetModel, pointsList, pole); + final GeoComplexPolygon rval = + testPointShape.createGeoComplexPolygon(planetModel, pointsList, pole); if (rval != null) { - return rval; + return rval; } // If pole choice was illegal, try another one } - throw new IllegalArgumentException("cannot find a point that is inside the polygon "+testPointShape); - + throw new IllegalArgumentException( + "cannot find a point that is inside the polygon " + testPointShape); } - /** Convert a polygon description to a list of shapes. 
Also locate an optimal shape for evaluating a test point. + /** + * Convert a polygon description to a list of shapes. Also locate an optimal shape for evaluating + * a test point. + * * @param pointsList is the structure to add new polygons to. * @param shape is the current polygon description. * @param testPointShape is the current best choice for a low-level polygon to evaluate. * @return an updated best-choice for a test point polygon, and update the points list. */ - private static BestShape convertPolygon(final List> pointsList, final PolygonDescription shape, BestShape testPointShape, final boolean mustBeInside) { + private static BestShape convertPolygon( + final List> pointsList, + final PolygonDescription shape, + BestShape testPointShape, + final boolean mustBeInside) { // First, remove duplicate points. If degenerate, just ignore the shape. final List filteredPoints = filterPoints(shape.points); if (filteredPoints == null) { return testPointShape; } - + // Non-degenerate. Check if this is a candidate for in-set determination. if (shape.holes.size() == 0) { // This shape is a candidate for a test point. @@ -428,33 +472,35 @@ public class GeoPolygonFactory { testPointShape = new BestShape(filteredPoints, mustBeInside); } } - + pointsList.add(filteredPoints); - + // Now, do all holes too for (final PolygonDescription hole : shape.holes) { testPointShape = convertPolygon(pointsList, hole, testPointShape, !mustBeInside); } - + // Done; return the updated test point shape. return testPointShape; } - + /** - * Class for tracking the best shape for finding a pole, and whether or not the pole - * must be inside or outside of the shape. + * Class for tracking the best shape for finding a pole, and whether or not the pole must be + * inside or outside of the shape. */ private static class BestShape { public final List points; public boolean poleMustBeInside; - + public BestShape(final List points, final boolean poleMustBeInside) { this.points = points; this.poleMustBeInside = poleMustBeInside; } - - public GeoComplexPolygon createGeoComplexPolygon(final PlanetModel planetModel, - final List> pointsList, final GeoPoint testPoint) { + + public GeoComplexPolygon createGeoComplexPolygon( + final PlanetModel planetModel, + final List> pointsList, + final GeoPoint testPoint) { // Is it inside or outside? final Boolean isTestPointInside = isInsidePolygon(testPoint, points); if (isTestPointInside != null) { @@ -463,7 +509,11 @@ public class GeoPolygonFactory { if (isTestPointInside == poleMustBeInside) { return new GeoComplexPolygon(planetModel, pointsList, testPoint, isTestPointInside); } else { - return new GeoComplexPolygon(planetModel, pointsList, new GeoPoint(-testPoint.x, -testPoint.y, -testPoint.z), !isTestPointInside); + return new GeoComplexPolygon( + planetModel, + pointsList, + new GeoPoint(-testPoint.x, -testPoint.y, -testPoint.z), + !isTestPointInside); } } catch (IllegalArgumentException e) { // Probably bad choice of test point. @@ -473,45 +523,79 @@ public class GeoPolygonFactory { // If pole choice was illegal, try another one return null; } - } - + /** * Create a GeoPolygon using the specified points and holes and a test point. * - * @param filteredPointList is a filtered list of the GeoPoints to build an arbitrary polygon out of. - * @param holes is a list of polygons representing "holes" in the outside polygon. Null == none. + * @param filteredPointList is a filtered list of the GeoPoints to build an arbitrary polygon out + * of. 
+ * @param holes is a list of polygons representing "holes" in the outside polygon. Null == none. * @param testPoint is a test point that is either known to be within the polygon area, or not. * @param testPointInside is true if the test point is within the area, false otherwise. - * @return a GeoPolygon corresponding to what was specified, or null if what was specified - * cannot be turned into a valid non-degenerate polygon. + * @return a GeoPolygon corresponding to what was specified, or null if what was specified cannot + * be turned into a valid non-degenerate polygon. */ - static GeoPolygon generateGeoPolygon(final PlanetModel planetModel, - final List filteredPointList, - final List holes, - final GeoPoint testPoint, - final boolean testPointInside) throws TileException { - // We will be trying twice to find the right GeoPolygon, using alternate siding choices for the first polygon - // side. While this looks like it might be 2x as expensive as it could be, there's really no other choice I can - // find. - final SidedPlane initialPlane = new SidedPlane(testPoint, filteredPointList.get(0), filteredPointList.get(1)); - // We don't know if this is the correct siding choice. We will only know as we build the complex polygon. - // So we need to be prepared to try both possibilities. + static GeoPolygon generateGeoPolygon( + final PlanetModel planetModel, + final List filteredPointList, + final List holes, + final GeoPoint testPoint, + final boolean testPointInside) + throws TileException { + // We will be trying twice to find the right GeoPolygon, using alternate siding choices for the + // first polygon side. While this looks like it might be 2x as expensive as it could be, + // there's really no other choice I can find. + final SidedPlane initialPlane = + new SidedPlane(testPoint, filteredPointList.get(0), filteredPointList.get(1)); + // We don't know if this is the correct siding choice. We will only know as we build the + // complex polygon. So we need to be prepared to try both possibilities. GeoCompositePolygon rval = new GeoCompositePolygon(planetModel); MutableBoolean seenConcave = new MutableBoolean(); - if (buildPolygonShape(rval, seenConcave, planetModel, filteredPointList, new BitSet(), 0, 1, initialPlane, holes, testPoint) == false) { + if (buildPolygonShape( + rval, + seenConcave, + planetModel, + filteredPointList, + new BitSet(), + 0, + 1, + initialPlane, + holes, + testPoint) + == false) { // The testPoint was within the shape. Was that intended? if (testPointInside) { // Yes: build it for real rval = new GeoCompositePolygon(planetModel); seenConcave = new MutableBoolean(); - buildPolygonShape(rval, seenConcave, planetModel, filteredPointList, new BitSet(), 0, 1, initialPlane, holes, null); + buildPolygonShape( + rval, + seenConcave, + planetModel, + filteredPointList, + new BitSet(), + 0, + 1, + initialPlane, + holes, + null); return rval; } // No: do the complement and return that. rval = new GeoCompositePolygon(planetModel); seenConcave = new MutableBoolean(); - buildPolygonShape(rval, seenConcave, planetModel, filteredPointList, new BitSet(), 0, 1, new SidedPlane(initialPlane), holes, null); + buildPolygonShape( + rval, + seenConcave, + planetModel, + filteredPointList, + new BitSet(), + 0, + 1, + new SidedPlane(initialPlane), + holes, + null); return rval; } else { // The testPoint was outside the shape. Was that intended? 
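The control flow above reduces to a guess-and-verify pattern: build once with the guessed siding of the initial plane, and if the verification pass reports the test point on the wrong side, rebuild from the complement. A minimal sketch of just that pattern, with hypothetical names (this is not the actual Lucene API):

    /** Hypothetical sketch of the two-try siding pattern. */
    final class SidingFallback {
      interface ShapeBuilder {
        /** Returns null when the guessed siding places the test point on the wrong side. */
        Object tryBuild(boolean complementSiding);
      }

      static Object build(ShapeBuilder builder) {
        final Object firstTry = builder.tryBuild(false); // the guessed siding
        if (firstTry != null) {
          return firstTry;
        }
        // The guess was backwards, so the complement siding must be the right one.
        return builder.tryBuild(true);
      }
    }

As the comment in generateGeoPolygon notes, trying both sidings looks up to twice as expensive, but only the failing attempt is discarded.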
@@ -522,24 +606,36 @@ public class GeoPolygonFactory { // No: return the complement rval = new GeoCompositePolygon(planetModel); seenConcave = new MutableBoolean(); - buildPolygonShape(rval, seenConcave, planetModel, filteredPointList, new BitSet(), 0, 1, new SidedPlane(initialPlane), holes, null); + buildPolygonShape( + rval, + seenConcave, + planetModel, + filteredPointList, + new BitSet(), + 0, + 1, + new SidedPlane(initialPlane), + holes, + null); return rval; } } - /** Filter duplicate points. + /** + * Filter duplicate points. + * * @param input with input list of points * @return the filtered list, or null if we can't get a legit polygon from the input. */ static List filterPoints(final List input) { - + final List noIdenticalPoints = new ArrayList<>(input.size()); - + // Backtrack to find something different from the first point int startIndex = -1; final GeoPoint comparePoint = input.get(0); - for (int i = 0; i < input.size()-1; i++) { - final GeoPoint thePoint = input.get(getLegalIndex(- i - 1, input.size())); + for (int i = 0; i < input.size() - 1; i++) { + final GeoPoint thePoint = input.get(getLegalIndex(-i - 1, input.size())); if (!thePoint.isNumericallyIdentical(comparePoint)) { startIndex = getLegalIndex(-i, input.size()); break; @@ -548,7 +644,7 @@ public class GeoPolygonFactory { if (startIndex == -1) { return null; } - + // Now we can start the process of walking around, removing duplicate points. int currentIndex = startIndex; while (true) { @@ -568,27 +664,31 @@ public class GeoPolygonFactory { break; } } - + if (noIdenticalPoints.size() < 3) { return null; } - + return noIdenticalPoints; } - - /** Filter coplanar points. + + /** + * Filter coplanar points. + * * @param noIdenticalPoints with input list of points - * @param leniencyValue is the allowed distance of a point from the plane for cleanup of overly detailed polygons + * @param leniencyValue is the allowed distance of a point from the plane for cleanup of overly + * detailed polygons * @return the filtered list, or null if we can't get a legit polygon from the input. */ - static List filterEdges(final List noIdenticalPoints, final double leniencyValue) { - + static List filterEdges( + final List noIdenticalPoints, final double leniencyValue) { + // Now, do the search needed to find a path that has no coplanarities in it. // It is important to check coplanarities using the points that are further away so the // plane is more precise. - - for (int i = 0; i < noIdenticalPoints.size(); i++) { - //Search starting for current index. + + for (int i = 0; i < noIdenticalPoints.size(); i++) { + // Search starting for current index. final SafePath resultPath = findSafePath(noIdenticalPoints, i, leniencyValue); if (resultPath != null && resultPath.previous != null) { // Read out result, maintaining ordering @@ -601,71 +701,78 @@ public class GeoPolygonFactory { return null; } - /** Iterative path search through ordered list of points. The method merges together - * all consecutive coplanar points and builds the plane using the first and the last point. - * It does not converge if the starting point is coplanar with the last and next point of the path. + /** + * Iterative path search through ordered list of points. The method merges together all + * consecutive coplanar points and builds the plane using the first and the last point. It does + * not converge if the starting point is coplanar with the last and next point of the path. * * @param points is the ordered raw list of points under consideration. 
- * @param startIndex is index of the point that starts the current path, so that we can know when we are done.
- * @param leniencyValue is the allowed distance of a point from the plane to be considered coplanar.
+ * @param startIndex is the index of the point that starts the current path, so that we can know
+ *     when we are done.
+ * @param leniencyValue is the allowed distance of a point from the plane to be considered
+ *     coplanar.
  * @return null if the starting point is coplanar with the last and next point of the path.
  */
-  private static SafePath findSafePath(final List<GeoPoint> points, final int startIndex, final double leniencyValue) {
+  private static SafePath findSafePath(
+      final List<GeoPoint> points, final int startIndex, final double leniencyValue) {
     SafePath safePath = null;
     for (int i = startIndex; i < startIndex + points.size(); i++) {
-      //get start point, always the same for an iteration
-      final int startPointIndex = getLegalIndex(i -1, points.size());
+      // get start point, always the same for an iteration
+      final int startPointIndex = getLegalIndex(i - 1, points.size());
       final GeoPoint startPoint = points.get(startPointIndex);
-      //get end point, can be coplanar and therefore change
+      // get end point, can be coplanar and therefore change
       int endPointIndex = getLegalIndex(i, points.size());
       GeoPoint endPoint = points.get(endPointIndex);
       if (startPoint.isNumericallyIdentical(endPoint)) {
-        //go to next if identical
+        // go to next if identical
         continue;
       }
-      //Check if nextPoints are co-planar, if so advance to next point.
-      //if we go over the start index then we have no succeed.
+      // Check if the next points are coplanar; if so, advance to the next point.
+      // If we advance past the start index, we have not succeeded.
       while (true) {
         int nextPointIndex = getLegalIndex(endPointIndex + 1, points.size());
         final GeoPoint nextPoint = points.get(nextPointIndex);
         if (startPoint.isNumericallyIdentical(nextPoint)) {
-          //all coplanar
+          // all points coplanar
           return null;
         }
         if (!Plane.arePointsCoplanar(startPoint, endPoint, nextPoint)) {
-          //no coplanar.
+          // not coplanar.
           break;
         }
         if (endPointIndex == startIndex) {
-          //we are over the path, we fail.
+          // we have walked over the whole path; we fail.
          return null;
         }
-        //advance
+        // advance
         endPointIndex = nextPointIndex;
         endPoint = nextPoint;
         i++;
       }
       if (safePath != null && endPointIndex == startIndex) {
-        //We are already at the start, current point is coplanar with
-        //start point, no need to add this node.
+        // We are already at the start; the current point is coplanar with
+        // the start point, so there is no need to add this node.
         break;
       }
-      //Create node and move to next one
+      // Create a node and move to the next one
       Plane currentPlane = new Plane(startPoint, endPoint);
       safePath = new SafePath(safePath, endPoint, endPointIndex, currentPlane);
     }
     return safePath;
   }

-  /** Pick a random pole that has a good chance of being inside the polygon described by the points.
+  /**
+   * Pick a random pole that has a good chance of being inside the polygon described by the points.
+   *
   * @param generator is the random number generator to use.
   * @param planetModel is the planet model to use.
   * @param points is the list of points available.
   * @return the randomly-determined pole selection.
*/ - private static GeoPoint pickPole(final Random generator, final PlanetModel planetModel, final List points) { + private static GeoPoint pickPole( + final Random generator, final PlanetModel planetModel, final List points) { final int pointIndex = generator.nextInt(points.size()); final GeoPoint closePoint = points.get(pointIndex); // We pick a random angle and random arc distance, then generate a point based on closePoint @@ -676,7 +783,8 @@ public class GeoPolygonFactory { maxArcDistance = trialArcDistance; } final double arcDistance = maxArcDistance - generator.nextDouble() * maxArcDistance; - // We come up with a unit circle (x,y,z) coordinate given the random angle and arc distance. The point is centered around the positive x axis. + // We come up with a unit circle (x,y,z) coordinate given the random angle and arc distance. + // The point is centered around the positive x axis. final double x = Math.cos(arcDistance); final double sinArcDistance = Math.sin(arcDistance); final double y = Math.cos(angle) * sinArcDistance; @@ -686,7 +794,8 @@ public class GeoPolygonFactory { final double cosLatitude = Math.cos(closePoint.getLatitude()); final double sinLongitude = Math.sin(closePoint.getLongitude()); final double cosLongitude = Math.cos(closePoint.getLongitude()); - // This transformation should take the point (1,0,0) and transform it to the closepoint's actual (x,y,z) coordinates. + // This transformation should take the point (1,0,0) and transform it to the closepoint's actual + // (x,y,z) coordinates. // Coordinate rotation formula: // x1 = x0 cos T - y0 sin T // y1 = x0 sin T + y0 cos T @@ -698,7 +807,8 @@ public class GeoPolygonFactory { // y2 = y1 // z2 = - x1 sin al + z1 cos al // So, we reverse the order of the transformations, AND we transform backwards. - // Transforming backwards means using these identities: sin(-angle) = -sin(angle), cos(-angle) = cos(angle) + // Transforming backwards means using these identities: sin(-angle) = -sin(angle), cos(-angle) = + // cos(angle) // So: // x1 = x0 cos al - z0 sin al // y1 = y0 @@ -715,13 +825,15 @@ public class GeoPolygonFactory { // Finally, scale to put the point on the surface return planetModel.createSurfacePoint(x2, y2, z2); } - - /** For a specified point and a list of poly points, determine based on point order whether the + + /** + * For a specified point and a list of poly points, determine based on point order whether the * point should be considered in or out of the polygon. + * * @param point is the point to check. * @param polyPoints is the list of points comprising the polygon. - * @return null if the point is illegal, otherwise false if the point is inside and true if the point is outside - * of the polygon. + * @return null if the point is illegal, otherwise false if the point is inside and true if the + * point is outside of the polygon. 
*/ private static Boolean isInsidePolygon(final GeoPoint point, final List polyPoints) { // First, compute sine and cosine of pole point latitude and longitude @@ -731,17 +843,18 @@ public class GeoPolygonFactory { final double cosLatitude = Math.cos(latitude); final double sinLongitude = Math.sin(longitude); final double cosLongitude = Math.cos(longitude); - + // Now, compute the incremental arc distance around the points of the polygon double arcDistance = 0.0; Double prevAngle = null; - //System.out.println("Computing angles:"); + // System.out.println("Computing angles:"); for (final GeoPoint polyPoint : polyPoints) { - final Double angle = computeAngle(polyPoint, sinLatitude, cosLatitude, sinLongitude, cosLongitude); + final Double angle = + computeAngle(polyPoint, sinLatitude, cosLatitude, sinLongitude, cosLongitude); if (angle == null) { return null; } - //System.out.println("Computed angle: "+angle); + // System.out.println("Computed angle: " + angle); if (prevAngle != null) { // Figure out delta between prevAngle and current angle, and add it to arcDistance double angleDelta = angle - prevAngle; @@ -754,18 +867,20 @@ public class GeoPolygonFactory { if (Math.abs(angleDelta - Math.PI) < Vector.MINIMUM_ANGULAR_RESOLUTION) { return null; } - //System.out.println(" angle delta = "+angleDelta); + // System.out.println(" angle delta = " + angleDelta); arcDistance += angleDelta; - //System.out.println(" For point "+polyPoint+" angle is "+angle+"; delta is "+angleDelta+"; arcDistance is "+arcDistance); + // System.out.println(" For point " + polyPoint + " angle is " + angle + // + "; delta is " + angleDelta + "; arcDistance is " + arcDistance); } prevAngle = angle; } if (prevAngle != null) { - final Double lastAngle = computeAngle(polyPoints.get(0), sinLatitude, cosLatitude, sinLongitude, cosLongitude); + final Double lastAngle = + computeAngle(polyPoints.get(0), sinLatitude, cosLatitude, sinLongitude, cosLongitude); if (lastAngle == null) { return null; } - //System.out.println("Computed last angle: "+lastAngle); + // System.out.println("Computed last angle: " + lastAngle); // Figure out delta and add it double angleDelta = lastAngle - prevAngle; if (angleDelta < -Math.PI) { @@ -777,39 +892,44 @@ public class GeoPolygonFactory { if (Math.abs(angleDelta - Math.PI) < Vector.MINIMUM_ANGULAR_RESOLUTION) { return null; } - //System.out.println(" angle delta = "+angleDelta); + // System.out.println(" angle delta = " + angleDelta); arcDistance += angleDelta; - //System.out.println(" For point "+polyPoints.get(0)+" angle is "+lastAngle+"; delta is "+angleDelta+"; arcDistance is "+arcDistance); + // System.out.println(" For point " + polyPoints.get(0) + " angle is " + lastAngle + // + "; delta is " + angleDelta + "; arcDistance is " + arcDistance); } // Clockwise == inside == negative - //System.out.println("Arcdistance = "+arcDistance); + // System.out.println("Arcdistance = " + arcDistance); if (Math.abs(arcDistance) < Vector.MINIMUM_ANGULAR_RESOLUTION) { // No idea what direction, so try another pole. return null; } return arcDistance > 0.0; } - - /** Compute the angle for a point given rotation information. 
- * @param point is the point to assess - * @param sinLatitude the sine of the latitude - * @param cosLatitude the cosine of the latitude - * @param sinLongitude the sine of the longitude - * @param cosLongitude the cosine of the longitude - * @return the angle of rotation, or null if not computable - */ - private static Double computeAngle(final GeoPoint point, - final double sinLatitude, - final double cosLatitude, - final double sinLongitude, - final double cosLongitude) { + + /** + * Compute the angle for a point given rotation information. + * + * @param point is the point to assess + * @param sinLatitude the sine of the latitude + * @param cosLatitude the cosine of the latitude + * @param sinLongitude the sine of the longitude + * @param cosLongitude the cosine of the longitude + * @return the angle of rotation, or null if not computable + */ + private static Double computeAngle( + final GeoPoint point, + final double sinLatitude, + final double cosLatitude, + final double sinLongitude, + final double cosLongitude) { // Coordinate rotation formula: // x1 = x0 cos T - y0 sin T // y1 = x0 sin T + y0 cos T // We need to rotate the point in question into the coordinate frame specified by // the lat and lon trig functions. - // To do this we need to do two rotations on it. First rotation is in x/y. Second rotation is in x/z. + // To do this we need to do two rotations on it. First rotation is in x/y. Second rotation is + // in x/z. // And we rotate in the negative direction. // So: // x1 = x0 cos az + y0 sin az @@ -818,25 +938,29 @@ public class GeoPolygonFactory { // x2 = x1 cos al + z1 sin al // y2 = y1 // z2 = - x1 sin al + z1 cos al - + final double x1 = point.x * cosLongitude + point.y * sinLongitude; - final double y1 = - point.x * sinLongitude + point.y * cosLongitude; + final double y1 = -point.x * sinLongitude + point.y * cosLongitude; final double z1 = point.z; - + // final double x2 = x1 * cosLatitude + z1 * sinLatitude; final double y2 = y1; - final double z2 = - x1 * sinLatitude + z1 * cosLatitude; - - // Now we should be looking down the X axis; the original point has rotated coordinates (N, 0, 0). - // So we can just compute the angle using y2 and z2. (If Math.sqrt(y2*y2 + z2 * z2) is 0.0, then the point is on the pole and we need another one). - if (Math.sqrt(y2*y2 + z2*z2) < Vector.MINIMUM_RESOLUTION) { + final double z2 = -x1 * sinLatitude + z1 * cosLatitude; + + // Now we should be looking down the X axis; the original point has rotated coordinates (N, 0, + // 0). So we can just compute the angle using y2 and z2. (If Math.sqrt(y2*y2 + z2 * z2) + // is 0.0, then the point is on the pole and we need another one). + if (Math.sqrt(y2 * y2 + z2 * z2) < Vector.MINIMUM_RESOLUTION) { return null; } - + return Math.atan2(z2, y2); } - /** Build a GeoPolygon out of one concave part and multiple convex parts given points, starting edge, and whether starting edge is internal or not. + /** + * Build a GeoPolygon out of one concave part and multiple convex parts given points, starting + * edge, and whether starting edge is internal or not. + * * @param rval is the composite polygon to add to. * @param seenConcave is true if a concave polygon has been seen in this generation yet. * @param planetModel is the planet model. @@ -845,45 +969,49 @@ public class GeoPolygonFactory { * @param startPointIndex is the first of the points, constituting the starting edge. * @param startingEdge is the plane describing the starting edge. 
* @param holes is the list of holes in the polygon, or null if none. - * @param testPoint is an (optional) test point, which will be used to determine if we are generating - * a shape with the proper sidedness. It is passed in only when the test point is supposed to be outside - * of the generated polygon. In this case, if the generated polygon is found to contain the point, the - * method exits early with a null return value. - * This only makes sense in the context of evaluating both possible choices and using logic to determine - * which result to use. If the test point is supposed to be within the shape, then it must be outside of the - * complement shape. If the test point is supposed to be outside the shape, then it must be outside of the - * original shape. Either way, we can figure out the right thing to use. - * @return false if what was specified - * was inconsistent with what we generated. Specifically, if we specify an exterior point that is - * found in the interior of the shape we create here we return false, which is a signal that we chose - * our initial plane sidedness backwards. + * @param testPoint is an (optional) test point, which will be used to determine if we are + * generating a shape with the proper sidedness. It is passed in only when the test point is + * supposed to be outside of the generated polygon. In this case, if the generated polygon is + * found to contain the point, the method exits early with a null return value. This only + * makes sense in the context of evaluating both possible choices and using logic to determine + * which result to use. If the test point is supposed to be within the shape, then it must be + * outside of the complement shape. If the test point is supposed to be outside the shape, + * then it must be outside of the original shape. Either way, we can figure out the right + * thing to use. + * @return false if what was specified was inconsistent with what we generated. Specifically, if + * we specify an exterior point that is found in the interior of the shape we create here we + * return false, which is a signal that we chose our initial plane sidedness backwards. */ static boolean buildPolygonShape( - final GeoCompositePolygon rval, - final MutableBoolean seenConcave, - final PlanetModel planetModel, - final List pointsList, - final BitSet internalEdges, - final int startPointIndex, - final int endPointIndex, - final SidedPlane startingEdge, - final List holes, - final GeoPoint testPoint) throws TileException { + final GeoCompositePolygon rval, + final MutableBoolean seenConcave, + final PlanetModel planetModel, + final List pointsList, + final BitSet internalEdges, + final int startPointIndex, + final int endPointIndex, + final SidedPlane startingEdge, + final List holes, + final GeoPoint testPoint) + throws TileException { - // It could be the case that we need a concave polygon. So we need to try and look for that case - // as part of the general code for constructing complex polygons. + // It could be the case that we need a concave polygon. So we need to try and look for that + // case as part of the general code for constructing complex polygons. - // Note that there can be only one concave polygon. This code will enforce that condition and will return - // false if it is violated. - - // The code here must keep track of two lists of sided planes. The first list contains the planes consistent with - // a concave polygon. This list will grow and shrink. 
The second list is built starting at the current edge that - // was last consistent with the concave polygon, and contains all edges consistent with a convex polygon. - // When that sequence of edges is done, then an internal edge is created and the identified points are converted to a - // convex polygon. That internal edge is used to extend the list of edges in the concave polygon edge list. + // Note that there can be only one concave polygon. This code will enforce that condition and + // will return false if it is violated. + + // The code here must keep track of two lists of sided planes. The first list contains the + // planes consistent with a concave polygon. This list will grow and shrink. The second list + // is built starting at the current edge that was last consistent with the concave polygon, + // and contains all edges consistent with a convex polygon. + // When that sequence of edges is done, then an internal edge is created and the identified + // points are converted to a convex polygon. That internal edge is used to extend the list + // of edges in the concave polygon edge list. // The edge buffer. - final EdgeBuffer edgeBuffer = new EdgeBuffer(pointsList, internalEdges, startPointIndex, endPointIndex, startingEdge); + final EdgeBuffer edgeBuffer = + new EdgeBuffer(pointsList, internalEdges, startPointIndex, endPointIndex, startingEdge); /* // Verify that the polygon does not self-intersect @@ -946,13 +1074,13 @@ public class GeoPolygonFactory { } } */ - + // Starting state: // The stopping point Edge stoppingPoint = edgeBuffer.pickOne(); // The current edge Edge currentEdge = stoppingPoint; - + // Progressively look for convex sections. If we find one, we emit it and replace it. // Keep going until we have been around once and nothing needed to change, and then // do the concave polygon, if necessary. @@ -962,13 +1090,14 @@ public class GeoPolygonFactory { // We're done! break; } - + // Find convexity around the current edge, if any - final Boolean foundIt = findConvexPolygon(planetModel, currentEdge, rval, edgeBuffer, holes, testPoint); + final Boolean foundIt = + findConvexPolygon(planetModel, currentEdge, rval, edgeBuffer, holes, testPoint); if (foundIt == null) { return false; } - + if (foundIt) { // New start point stoppingPoint = edgeBuffer.pickOne(); @@ -976,27 +1105,28 @@ public class GeoPolygonFactory { // back around continue; } - + // Otherwise, go on to the next currentEdge = edgeBuffer.getNext(currentEdge); if (currentEdge == stoppingPoint) { break; } } - + // Look for any reason that the concave polygon cannot be created. // This test is really the converse of the one for a convex polygon. // Points on the edge of a convex polygon MUST be inside all the other // edges. For a concave polygon, this check is still the same, except we have // to look at the reverse sided planes, not the forward ones. - + // If we find a point that is outside of the complementary edges, it means that // the point is in fact able to form a convex polygon with the edge it is - // offending. - - // If what is left has any plane/point pair that is on the wrong side, we have to split using one of the plane endpoints and the - // point in question. This is best structured as a recursion, if detected. - + // offending. + + // If what is left has any plane/point pair that is on the wrong side, we have to split using + // one of the plane endpoints and the point in question. This is best structured as a + // recursion, if detected. + // Note: Any edge that fails means (I think!!) 
that there's another edge that will also fail. // This is because each point is included in two edges. // So, when we look for a non-conforming edge, and we can find one (but can't use it), we @@ -1015,11 +1145,16 @@ public class GeoPolygonFactory { if (confirmEdge == checkEdge) { continue; } - // Look for a point that is on the wrong side of the check edge. This means that we can't build the polygon. + // Look for a point that is on the wrong side of the check edge. This means that we can't + // build the polygon. final GeoPoint thePoint; - if (checkEdge.startPoint != confirmEdge.startPoint && checkEdge.endPoint != confirmEdge.startPoint && !flippedPlane.isWithin(confirmEdge.startPoint)) { + if (checkEdge.startPoint != confirmEdge.startPoint + && checkEdge.endPoint != confirmEdge.startPoint + && !flippedPlane.isWithin(confirmEdge.startPoint)) { thePoint = confirmEdge.startPoint; - } else if (checkEdge.startPoint != confirmEdge.endPoint && checkEdge.endPoint != confirmEdge.endPoint && !flippedPlane.isWithin(confirmEdge.endPoint)) { + } else if (checkEdge.startPoint != confirmEdge.endPoint + && checkEdge.endPoint != confirmEdge.endPoint + && !flippedPlane.isWithin(confirmEdge.endPoint)) { thePoint = confirmEdge.endPoint; } else { thePoint = null; @@ -1027,20 +1162,23 @@ public class GeoPolygonFactory { if (thePoint != null) { // Note that we found a problem. foundBadEdge = true; - // thePoint is on the wrong side of the complementary plane. That means we cannot build a concave polygon, because the complement would not - // be a legal convex polygon. - // But we can take advantage of the fact that the distance between the edge and thePoint is less than 180 degrees, and so we can split the - // would-be concave polygon into three segments. The first segment includes the edge and thePoint, and uses the sense of the edge to determine the sense - // of the polygon. - + // thePoint is on the wrong side of the complementary plane. That means we cannot build a + // concave polygon, because the complement would not be a legal convex polygon. + // But we can take advantage of the fact that the distance between the edge and thePoint + // is less than 180 degrees, and so we can split the would-be concave polygon into three + // segments. The first segment includes the edge and thePoint, and uses the sense of the + // edge to determine the sense of the polygon. + // This should be the only problematic part of the polygon. - // We know that thePoint is on the "wrong" side of the edge -- that is, it's on the side that the - // edge is pointing at. - - // The proposed tiling generates two new edges -- one from thePoint to the start point of the edge we found, and the other from thePoint - // to the end point of the edge. We generate that as a triangle convex polygon, and tile the two remaining pieces. + // We know that thePoint is on the "wrong" side of the edge -- that is, it's on the side + // that the edge is pointing at. + + // The proposed tiling generates two new edges -- one from thePoint to the start point of + // the edge we found, and the other from thePoint to the end point of the edge. We + // generate that as a triangle convex polygon, and tile the two remaining pieces. if (Plane.arePointsCoplanar(checkEdge.startPoint, checkEdge.endPoint, thePoint)) { - // Can't build this particular tile because of colinearity, so advance to another that maybe we can build. + // Can't build this particular tile because of colinearity, so advance to another that + // maybe we can build. 
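The tiling described above replaces the ring with one triangle plus two smaller rings, which are then tiled recursively. A hedged, index-only sketch of that split (a hypothetical helper working on vertex indices rather than GeoPoints, where the bad edge runs s to e and a is the offending apex):

    import java.util.ArrayList;
    import java.util.List;

    /** Hypothetical sketch of splitting a ring at a bad edge (s -> e) with apex a. */
    final class RingSplit {
      static List<List<Integer>> split(int ringSize, int s, int e, int a) {
        final List<Integer> triangle = List.of(s, e, a);
        final List<Integer> first = new ArrayList<>(); // walk a -> ... -> s
        for (int i = a; ; i = (i + 1) % ringSize) {
          first.add(i);
          if (i == s) {
            break;
          }
        }
        final List<Integer> second = new ArrayList<>(); // walk e -> ... -> a
        for (int i = e; ; i = (i + 1) % ringSize) {
          second.add(i);
          if (i == a) {
            break;
          }
        }
        return List.of(triangle, first, second);
      }
    }

Each sub-ring is closed by an edge of the emitted triangle, so together the three parts cover exactly the original ring.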
continue; } final List thirdPartPoints = new ArrayList<>(3); @@ -1050,10 +1188,13 @@ public class GeoPolygonFactory { thirdPartPoints.add(checkEdge.endPoint); thirdPartInternal.set(1, true); thirdPartPoints.add(thePoint); - assert checkEdge.plane.isWithin(thePoint) : "Point was on wrong side of complementary plane, so must be on the right side of the non-complementary plane!"; - // Check for illegal argument using try/catch rather than pre-emptive check, since it cuts down on building objects for a rare case - final GeoPolygon convexPart = new GeoConvexPolygon(planetModel, thirdPartPoints, holes, thirdPartInternal, true); - //System.out.println("convex part = "+convexPart); + assert checkEdge.plane.isWithin(thePoint) + : "Point was on wrong side of complementary plane, so must be on the right side of the non-complementary plane!"; + // Check for illegal argument using try/catch rather than pre-emptive check, since it cuts + // down on building objects for a rare case + final GeoPolygon convexPart = + new GeoConvexPolygon(planetModel, thirdPartPoints, holes, thirdPartInternal, true); + // System.out.println("convex part = "+convexPart); rval.addShape(convexPart); // The part preceding the bad edge, back to thePoint, needs to be recursively @@ -1071,21 +1212,23 @@ public class GeoPolygonFactory { loopEdge = edgeBuffer.getPrevious(loopEdge); } firstPartInternal.set(i, true); - //System.out.println("Doing first part..."); - if (buildPolygonShape(rval, - seenConcave, - planetModel, - firstPartPoints, - firstPartInternal, - firstPartPoints.size()-1, - 0, - new SidedPlane(checkEdge.endPoint, false, checkEdge.startPoint, thePoint), - holes, - testPoint) == false) { + // System.out.println("Doing first part..."); + if (buildPolygonShape( + rval, + seenConcave, + planetModel, + firstPartPoints, + firstPartInternal, + firstPartPoints.size() - 1, + 0, + new SidedPlane(checkEdge.endPoint, false, checkEdge.startPoint, thePoint), + holes, + testPoint) + == false) { return false; } - //System.out.println("...done first part."); - + // System.out.println("...done first part."); + final List secondPartPoints = new ArrayList<>(); final BitSet secondPartInternal = new BitSet(); loopEdge = edgeBuffer.getNext(checkEdge); @@ -1099,21 +1242,23 @@ public class GeoPolygonFactory { loopEdge = edgeBuffer.getNext(loopEdge); } secondPartInternal.set(i, true); - //System.out.println("Doing second part..."); - if (buildPolygonShape(rval, - seenConcave, - planetModel, - secondPartPoints, - secondPartInternal, - secondPartPoints.size()-1, - 0, - new SidedPlane(checkEdge.startPoint, false, checkEdge.endPoint, thePoint), - holes, - testPoint) == false) { + // System.out.println("Doing second part..."); + if (buildPolygonShape( + rval, + seenConcave, + planetModel, + secondPartPoints, + secondPartInternal, + secondPartPoints.size() - 1, + 0, + new SidedPlane(checkEdge.startPoint, false, checkEdge.endPoint, thePoint), + holes, + testPoint) + == false) { return false; } - //System.out.println("... done second part"); - + // System.out.println("... done second part"); + return true; } } @@ -1121,21 +1266,24 @@ public class GeoPolygonFactory { if (foundBadEdge) { // Unaddressed bad edge - throw new TileException("Could not tile polygon; found a pathological coplanarity that couldn't be addressed"); + throw new TileException( + "Could not tile polygon; found a pathological coplanarity that couldn't be addressed"); } - + // No violations found: we know it's a legal concave polygon. 
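Each recursive part above also carries a BitSet recording which of its edges are internal, with the synthetic edge that closes the sub-path always marked internal. A minimal sketch of that bookkeeping (a hypothetical helper, not the actual code):

    import java.util.BitSet;
    import java.util.List;

    /** Hypothetical sketch: copy a sub-path of a ring, marking its closing edge internal. */
    final class SubPath {
      static <T> BitSet collect(List<T> ring, int from, int to, List<T> outPoints) {
        final BitSet internal = new BitSet();
        int i = from;
        while (true) {
          outPoints.add(ring.get(i));
          if (i == to) {
            break;
          }
          i = (i + 1) % ring.size();
        }
        // the synthetic edge that closes the sub-path is internal to the decomposition
        internal.set(outPoints.size() - 1, true);
        return internal;
      }
    }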
- + // If there's anything left in the edge buffer, convert to concave polygon. - //System.out.println("adding concave part"); + // System.out.println("adding concave part"); if (makeConcavePolygon(planetModel, rval, seenConcave, edgeBuffer, holes, testPoint) == false) { return false; } return true; } - - /** Look for a concave polygon in the remainder of the edgebuffer. - * By this point, if there are any edges in the edgebuffer, they represent a concave polygon. + + /** + * Look for a concave polygon in the remainder of the edgebuffer. By this point, if there are any + * edges in the edgebuffer, they represent a concave polygon. + * * @param planetModel is the planet model. * @param rval is the composite polygon we're building. * @param seenConcave is true if we've already seen a concave polygon. @@ -1144,23 +1292,25 @@ public class GeoPolygonFactory { * @param testPoint is the optional test point. * @return true unless the testPoint caused failure. */ - private static boolean makeConcavePolygon(final PlanetModel planetModel, - final GeoCompositePolygon rval, - final MutableBoolean seenConcave, - final EdgeBuffer edgeBuffer, - final List holes, - final GeoPoint testPoint) throws TileException { - + private static boolean makeConcavePolygon( + final PlanetModel planetModel, + final GeoCompositePolygon rval, + final MutableBoolean seenConcave, + final EdgeBuffer edgeBuffer, + final List holes, + final GeoPoint testPoint) + throws TileException { + if (edgeBuffer.size() == 0) { return true; } - + if (seenConcave.value) { throw new IllegalArgumentException("Illegal polygon; polygon edges intersect each other"); } seenConcave.value = true; - + // If there are less than three edges, something got messed up somehow. Don't know how this // can happen but check. if (edgeBuffer.size() < 3) { @@ -1168,16 +1318,18 @@ public class GeoPolygonFactory { // Here we can emit GeoWorld, but probably this means we had a broken poly to start with. throw new IllegalArgumentException("Illegal polygon; polygon edges intersect each other"); } - + // Create the list of points final List points = new ArrayList(edgeBuffer.size()); - final BitSet internalEdges = new BitSet(edgeBuffer.size()-1); + final BitSet internalEdges = new BitSet(edgeBuffer.size() - 1); - //System.out.println("Concave polygon points:"); + // System.out.println("Concave polygon points:"); Edge edge = edgeBuffer.pickOne(); boolean isInternal = false; for (int i = 0; i < edgeBuffer.size(); i++) { - //System.out.println(" "+edge.plane+": "+edge.startPoint+"->"+edge.endPoint+"; previous? "+(edge.plane.isWithin(edgeBuffer.getPrevious(edge).startPoint)?"in":"out")+" next? "+(edge.plane.isWithin(edgeBuffer.getNext(edge).endPoint)?"in":"out")); + // System.out.println(" "+edge.plane+": "+edge.startPoint+"->"+edge.endPoint+"; previous? + // "+(edge.plane.isWithin(edgeBuffer.getPrevious(edge).startPoint)?"in":"out")+" next? 
+ // "+(edge.plane.isWithin(edgeBuffer.getNext(edge).endPoint)?"in":"out")); points.add(edge.startPoint); if (i < edgeBuffer.size() - 1) { internalEdges.set(i, edge.isInternal); @@ -1186,48 +1338,57 @@ public class GeoPolygonFactory { } edge = edgeBuffer.getNext(edge); } - + try { if (testPoint != null && holes != null && holes.size() > 0) { // No holes, for test - final GeoPolygon testPolygon = new GeoConcavePolygon(planetModel, points, null, internalEdges, isInternal); + final GeoPolygon testPolygon = + new GeoConcavePolygon(planetModel, points, null, internalEdges, isInternal); if (testPolygon.isWithin(testPoint)) { return false; } } - - final GeoPolygon realPolygon = new GeoConcavePolygon(planetModel, points, holes, internalEdges, isInternal); + + final GeoPolygon realPolygon = + new GeoConcavePolygon(planetModel, points, holes, internalEdges, isInternal); if (testPoint != null && (holes == null || holes.size() == 0)) { if (realPolygon.isWithin(testPoint)) { return false; } } - + rval.addShape(realPolygon); return true; } catch (IllegalArgumentException e) { throw new TileException(e.getMessage()); } } - - /** Look for a convex polygon at the specified edge. If we find it, create one and adjust the edge buffer. + + /** + * Look for a convex polygon at the specified edge. If we find it, create one and adjust the edge + * buffer. + * * @param planetModel is the planet model. * @param currentEdge is the current edge to use starting the search. * @param rval is the composite polygon to build. * @param edgeBuffer is the edge buffer. * @param holes is the optional list of holes. * @param testPoint is the optional test point. - * @return null if the testPoint is within any polygon detected, otherwise true if a convex polygon was created. + * @return null if the testPoint is within any polygon detected, otherwise true if a convex + * polygon was created. */ - private static Boolean findConvexPolygon(final PlanetModel planetModel, - final Edge currentEdge, - final GeoCompositePolygon rval, - final EdgeBuffer edgeBuffer, - final List holes, - final GeoPoint testPoint) throws TileException { - - //System.out.println("Looking at edge "+currentEdge+" with startpoint "+currentEdge.startPoint+" endpoint "+currentEdge.endPoint); - + private static Boolean findConvexPolygon( + final PlanetModel planetModel, + final Edge currentEdge, + final GeoCompositePolygon rval, + final EdgeBuffer edgeBuffer, + final List holes, + final GeoPoint testPoint) + throws TileException { + + // System.out.println("Looking at edge "+currentEdge+" with startpoint + // "+currentEdge.startPoint+" endpoint "+currentEdge.endPoint); + // Initialize the structure. // We don't keep track of order here; we just care about membership. // The only exception is the head and tail pointers. @@ -1235,7 +1396,7 @@ public class GeoPolygonFactory { includedEdges.add(currentEdge); Edge firstEdge = currentEdge; Edge lastEdge = currentEdge; - + // First, walk towards the end until we need to stop while (true) { if (firstEdge.startPoint == lastEdge.endPoint) { @@ -1245,27 +1406,34 @@ public class GeoPolygonFactory { if (Plane.arePointsCoplanar(lastEdge.startPoint, lastEdge.endPoint, newLastEdge.endPoint)) { break; } - // Planes that are almost identical cannot be properly handled by the standard polygon logic. Detect this case and, if found, - // give up on the tiling -- we'll need to create a large poly instead. + // Planes that are almost identical cannot be properly handled by the standard polygon logic. 
+ // Detect this case and, if found, give up on the tiling -- we'll need to create a large poly + // instead. if (lastEdge.plane.isFunctionallyIdentical(newLastEdge.plane)) { - throw new TileException("Two adjacent edge planes are effectively parallel despite filtering; give up on tiling"); + throw new TileException( + "Two adjacent edge planes are effectively parallel despite filtering; give up on tiling"); } if (isWithin(newLastEdge.endPoint, includedEdges)) { - //System.out.println(" maybe can extend to next edge"); - // Found a candidate for extension. But do some other checks first. Basically, we need to know if we construct a polygon - // here will overlap with other remaining points? + // System.out.println(" maybe can extend to next edge"); + // Found a candidate for extension. But do some other checks first. Basically, we need to + // know if we construct a polygon here will overlap with other remaining points? final SidedPlane returnBoundary; if (firstEdge.startPoint != newLastEdge.endPoint) { - if (Plane.arePointsCoplanar(firstEdge.endPoint, firstEdge.startPoint, newLastEdge.endPoint) || - Plane.arePointsCoplanar(firstEdge.startPoint, newLastEdge.endPoint, newLastEdge.startPoint)) { + if (Plane.arePointsCoplanar( + firstEdge.endPoint, firstEdge.startPoint, newLastEdge.endPoint) + || Plane.arePointsCoplanar( + firstEdge.startPoint, newLastEdge.endPoint, newLastEdge.startPoint)) { break; } - returnBoundary = new SidedPlane(firstEdge.endPoint, firstEdge.startPoint, newLastEdge.endPoint); + returnBoundary = + new SidedPlane(firstEdge.endPoint, firstEdge.startPoint, newLastEdge.endPoint); } else { returnBoundary = null; } - // The complete set of sided planes for the tentative new polygon include the ones in includedEdges, plus the one from newLastEdge, - // plus the new tentative return boundary. We have to make sure there are no points from elsewhere within the tentative convex polygon. + // The complete set of sided planes for the tentative new polygon include the ones in + // includedEdges, plus the one from newLastEdge, plus the new tentative return boundary. + // We have to make sure there are no points from elsewhere within the tentative convex + // polygon. boolean foundPointInside = false; final Iterator edgeIterator = edgeBuffer.iterator(); while (edgeIterator.hasNext()) { @@ -1275,7 +1443,7 @@ public class GeoPolygonFactory { if (edge.startPoint != newLastEdge.endPoint) { // look at edge.startPoint if (isWithin(edge.startPoint, includedEdges, newLastEdge, returnBoundary)) { - //System.out.println(" nope; point within found: "+edge.startPoint); + // System.out.println(" nope; point within found: " + edge.startPoint); foundPointInside = true; break; } @@ -1283,16 +1451,16 @@ public class GeoPolygonFactory { if (edge.endPoint != firstEdge.startPoint) { // look at edge.endPoint if (isWithin(edge.endPoint, includedEdges, newLastEdge, returnBoundary)) { - //System.out.println(" nope; point within found: "+edge.endPoint); + // System.out.println(" nope; point within found: " + edge.endPoint); foundPointInside = true; break; } } } } - + if (!foundPointInside) { - //System.out.println(" extending!"); + // System.out.println(" extending!"); // Extend the polygon by the new last edge includedEdges.add(newLastEdge); lastEdge = newLastEdge; @@ -1303,37 +1471,45 @@ public class GeoPolygonFactory { // We can't extend any more in this direction, so break from the loop. 
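The membership scan above is the heart of the extension test: a candidate extension is rejected as soon as any leftover vertex lies inside every bounding plane of the tentative polygon. A planar sketch of the same scan, using half-planes in place of SidedPlanes (all names hypothetical):

    import java.util.List;

    /** Hypothetical planar sketch of the containment scan for a tentative convex extension. */
    final class ConvexExtensionCheck {
      static final class HalfPlane {
        final double a, b, c; // a*x + b*y + c >= 0 is the "inside"

        HalfPlane(double a, double b, double c) {
          this.a = a;
          this.b = b;
          this.c = c;
        }

        boolean contains(double[] p) {
          return a * p[0] + b * p[1] + c >= 0.0;
        }
      }

      static boolean safeToExtend(List<HalfPlane> tentativeBounds, List<double[]> leftovers) {
        for (double[] p : leftovers) {
          boolean insideAll = true;
          for (HalfPlane plane : tentativeBounds) {
            if (!plane.contains(p)) {
              insideAll = false;
              break;
            }
          }
          if (insideAll) {
            return false; // this vertex would be swallowed by the tentative polygon
          }
        }
        return true;
      }
    }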
break; } - + // Now, walk towards the beginning until we need to stop while (true) { if (firstEdge.startPoint == lastEdge.endPoint) { break; } final Edge newFirstEdge = edgeBuffer.getPrevious(firstEdge); - if (Plane.arePointsCoplanar(newFirstEdge.startPoint, newFirstEdge.endPoint, firstEdge.endPoint)) { + if (Plane.arePointsCoplanar( + newFirstEdge.startPoint, newFirstEdge.endPoint, firstEdge.endPoint)) { break; } - // Planes that are almost identical cannot be properly handled by the standard polygon logic. Detect this case and, if found, - // give up on the tiling -- we'll need to create a large poly instead. + // Planes that are almost identical cannot be properly handled by the standard polygon logic. + // Detect this case and, if found, give up on the tiling -- we'll need to create a large poly + // instead. if (firstEdge.plane.isFunctionallyIdentical(newFirstEdge.plane)) { - throw new TileException("Two adjacent edge planes are effectively parallel despite filtering; give up on tiling"); + throw new TileException( + "Two adjacent edge planes are effectively parallel despite filtering; give up on tiling"); } if (isWithin(newFirstEdge.startPoint, includedEdges)) { - //System.out.println(" maybe can extend to previous edge"); - // Found a candidate for extension. But do some other checks first. Basically, we need to know if we construct a polygon - // here will overlap with other remaining points? + // System.out.println(" maybe can extend to previous edge"); + // Found a candidate for extension. But do some other checks first. Basically, we need to + // know if we construct a polygon here will overlap with other remaining points? final SidedPlane returnBoundary; if (newFirstEdge.startPoint != lastEdge.endPoint) { - if(Plane.arePointsCoplanar(lastEdge.startPoint, lastEdge.endPoint, newFirstEdge.startPoint) || - Plane.arePointsCoplanar(lastEdge.endPoint, newFirstEdge.startPoint, newFirstEdge.endPoint)) { + if (Plane.arePointsCoplanar( + lastEdge.startPoint, lastEdge.endPoint, newFirstEdge.startPoint) + || Plane.arePointsCoplanar( + lastEdge.endPoint, newFirstEdge.startPoint, newFirstEdge.endPoint)) { break; } - returnBoundary = new SidedPlane(lastEdge.startPoint, lastEdge.endPoint, newFirstEdge.startPoint); + returnBoundary = + new SidedPlane(lastEdge.startPoint, lastEdge.endPoint, newFirstEdge.startPoint); } else { returnBoundary = null; } - // The complete set of sided planes for the tentative new polygon include the ones in includedEdges, plus the one from newLastEdge, - // plus the new tentative return boundary. We have to make sure there are no points from elsewhere within the tentative convex polygon. + // The complete set of sided planes for the tentative new polygon include the ones in + // includedEdges, plus the one from newLastEdge, plus the new tentative return boundary. + // We have to make sure there are no points from elsewhere within the tentative convex + // polygon. 
boolean foundPointInside = false; final Iterator edgeIterator = edgeBuffer.iterator(); while (edgeIterator.hasNext()) { @@ -1343,7 +1519,7 @@ public class GeoPolygonFactory { if (edge.startPoint != lastEdge.endPoint) { // look at edge.startPoint if (isWithin(edge.startPoint, includedEdges, newFirstEdge, returnBoundary)) { - //System.out.println(" nope; point within found: "+edge.startPoint); + // System.out.println(" nope; point within found: " + edge.startPoint); foundPointInside = true; break; } @@ -1351,16 +1527,16 @@ public class GeoPolygonFactory { if (edge.endPoint != newFirstEdge.startPoint) { // look at edge.endPoint if (isWithin(edge.endPoint, includedEdges, newFirstEdge, returnBoundary)) { - //System.out.println(" nope; point within found: "+edge.endPoint); + // System.out.println(" nope; point within found: " + edge.endPoint); foundPointInside = true; break; } } } } - + if (!foundPointInside) { - //System.out.println(" extending!"); + // System.out.println(" extending!"); // Extend the polygon by the new last edge includedEdges.add(newFirstEdge); firstEdge = newFirstEdge; @@ -1373,23 +1549,24 @@ public class GeoPolygonFactory { } // Ok, figure out what we've accumulated. If it is enough for a polygon, build it. - + if (includedEdges.size() < 2) { - //System.out.println("Done edge "+currentEdge+": no poly found"); + // System.out.println("Done edge " + currentEdge + ": no poly found"); return false; } // It's enough to build a convex polygon - //System.out.println("Edge "+currentEdge+": Found complex poly"); - - // Create the point list and edge list, starting with the first edge and going to the last. The return edge will be between - // the start point of the first edge and the end point of the last edge. If the first edge start point is the same as the last edge end point, - // it's a degenerate case and we want to just clean out the edge buffer entirely. - - final List points = new ArrayList(includedEdges.size()+1); + // System.out.println("Edge " + currentEdge + ": Found complex poly"); + + // Create the point list and edge list, starting with the first edge and going to the last. The + // return edge will be between the start point of the first edge and the end point of the last + // edge. If the first edge start point is the same as the last edge end point, it's a + // degenerate case and we want to just clean out the edge buffer entirely. + + final List points = new ArrayList(includedEdges.size() + 1); final BitSet internalEdges = new BitSet(includedEdges.size()); final boolean returnIsInternal; - + if (firstEdge.startPoint == lastEdge.endPoint) { // Degenerate case!! There is no return edge -- or rather, we already have it. if (includedEdges.size() < 3) { @@ -1399,9 +1576,10 @@ public class GeoPolygonFactory { } if (firstEdge.plane.isFunctionallyIdentical(lastEdge.plane)) { - throw new TileException("Two adjacent edge planes are effectively parallel despite filtering; give up on tiling"); + throw new TileException( + "Two adjacent edge planes are effectively parallel despite filtering; give up on tiling"); } - + // Now look for completely planar points. This too is a degeneracy condition that we should // return "false" for. 
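The planarity sweep that follows walks consecutive triples around the ring; if every vertex sits on a plane through its neighbors, no valid polygon can be anchored there. A planar analog of that sweep, with a hypothetical tolerance in place of the coplanarity test:

    import java.util.List;

    /** Hypothetical planar analog of the degeneracy sweep over consecutive vertex triples. */
    final class DegeneracySweep {
      static boolean hasCollinearTriple(List<double[]> ring, double eps) {
        final int n = ring.size();
        for (int i = 0; i < n; i++) {
          final double[] a = ring.get(i);
          final double[] b = ring.get((i + 1) % n);
          final double[] c = ring.get((i + 2) % n);
          final double cross = (b[0] - a[0]) * (c[1] - a[1]) - (b[1] - a[1]) * (c[0] - a[0]);
          if (Math.abs(cross) < eps) {
            return true; // a flat corner: this triple cannot anchor a distinct plane
          }
        }
        return false;
      }
    }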
Edge edge = firstEdge; @@ -1419,11 +1597,14 @@ public class GeoPolygonFactory { edgeBuffer.clear(); } else { // Build the return edge (internal, of course) - final SidedPlane returnSidedPlane = new SidedPlane(firstEdge.endPoint, false, firstEdge.startPoint, lastEdge.endPoint); - final Edge returnEdge = new Edge(firstEdge.startPoint, lastEdge.endPoint, returnSidedPlane, true); - if (returnEdge.plane.isFunctionallyIdentical(lastEdge.plane) || - returnEdge.plane.isFunctionallyIdentical(firstEdge.plane)) { - throw new TileException("Two adjacent edge planes are effectively parallel despite filtering; give up on tiling"); + final SidedPlane returnSidedPlane = + new SidedPlane(firstEdge.endPoint, false, firstEdge.startPoint, lastEdge.endPoint); + final Edge returnEdge = + new Edge(firstEdge.startPoint, lastEdge.endPoint, returnSidedPlane, true); + if (returnEdge.plane.isFunctionallyIdentical(lastEdge.plane) + || returnEdge.plane.isFunctionallyIdentical(firstEdge.plane)) { + throw new TileException( + "Two adjacent edge planes are effectively parallel despite filtering; give up on tiling"); } // Build point list and edge list final List edges = new ArrayList(includedEdges.size()); @@ -1446,42 +1627,49 @@ public class GeoPolygonFactory { // Modify the edge buffer edgeBuffer.replace(edges, returnEdge); } - + // Now, construct the polygon // Failures in construction mean we have a polygon that is too large (>180 degrees) try { if (testPoint != null && holes != null && holes.size() > 0) { // No holes, for test - final GeoPolygon testPolygon = new GeoConvexPolygon(planetModel, points, null, internalEdges, returnIsInternal); + final GeoPolygon testPolygon = + new GeoConvexPolygon(planetModel, points, null, internalEdges, returnIsInternal); if (testPolygon.isWithin(testPoint)) { return null; } } - - final GeoPolygon realPolygon = new GeoConvexPolygon(planetModel, points, holes, internalEdges, returnIsInternal); + + final GeoPolygon realPolygon = + new GeoConvexPolygon(planetModel, points, holes, internalEdges, returnIsInternal); if (testPoint != null && (holes == null || holes.size() == 0)) { if (realPolygon.isWithin(testPoint)) { return null; } } - + rval.addShape(realPolygon); return true; } catch (IllegalArgumentException e) { throw new TileException(e.getMessage()); } - } - - /** Check if a point is within a set of edges. - * @param point is the point - * @param edgeSet is the set of edges - * @param extension is the new edge - * @param returnBoundary is the return edge - * @return true if within - */ - private static boolean isWithin(final GeoPoint point, final Set edgeSet, final Edge extension, final SidedPlane returnBoundary) { + + /** + * Check if a point is within a set of edges. + * + * @param point is the point + * @param edgeSet is the set of edges + * @param extension is the new edge + * @param returnBoundary is the return edge + * @return true if within + */ + private static boolean isWithin( + final GeoPoint point, + final Set edgeSet, + final Edge extension, + final SidedPlane returnBoundary) { if (!extension.plane.isWithin(point)) { return false; } @@ -1490,12 +1678,14 @@ public class GeoPolygonFactory { } return isWithin(point, edgeSet); } - - /** Check if a point is within a set of edges. - * @param point is the point - * @param edgeSet is the set of edges - * @return true if within - */ + + /** + * Check if a point is within a set of edges. 
+ * + * @param point is the point + * @param edgeSet is the set of edges + * @return true if within + */ private static boolean isWithin(final GeoPoint point, final Set edgeSet) { for (final Edge edge : edgeSet) { if (!edge.plane.isWithin(point)) { @@ -1504,11 +1694,13 @@ public class GeoPolygonFactory { } return true; } - - /** Convert raw point index into valid array position. - *@param index is the array index. - *@param size is the array size. - *@return an updated index. + + /** + * Convert raw point index into valid array position. + * + * @param index is the array index. + * @param size is the array size. + * @return an updated index. */ private static int getLegalIndex(int index, int size) { while (index < 0) { @@ -1520,8 +1712,7 @@ public class GeoPolygonFactory { return index; } - /** Class representing a single (unused) edge. - */ + /** Class representing a single (unused) edge. */ private static class Edge { /** Plane */ public final SidedPlane plane; @@ -1532,32 +1723,37 @@ public class GeoPolygonFactory { /** Internal edge flag */ public final boolean isInternal; - /** Constructor. - * @param startPoint the edge start point - * @param endPoint the edge end point - * @param plane the edge plane - * @param isInternal true if internal edge - */ - public Edge(final GeoPoint startPoint, final GeoPoint endPoint, final SidedPlane plane, final boolean isInternal) { + /** + * Constructor. + * + * @param startPoint the edge start point + * @param endPoint the edge end point + * @param plane the edge plane + * @param isInternal true if internal edge + */ + public Edge( + final GeoPoint startPoint, + final GeoPoint endPoint, + final SidedPlane plane, + final boolean isInternal) { this.startPoint = startPoint; this.endPoint = endPoint; this.plane = plane; this.isInternal = isInternal; } - + @Override public int hashCode() { return System.identityHashCode(this); } - + @Override public boolean equals(final Object o) { return o == this; } } - - /** Class representing an iterator over an EdgeBuffer. - */ + + /** Class representing an iterator over an EdgeBuffer. */ private static class EdgeBufferIterator implements Iterator { /** Edge buffer */ protected final EdgeBuffer edgeBuffer; @@ -1565,21 +1761,23 @@ public class GeoPolygonFactory { protected final Edge firstEdge; /** Current edge */ protected Edge currentEdge; - - /** Constructor. - * @param edgeBuffer the edge buffer - */ + + /** + * Constructor. + * + * @param edgeBuffer the edge buffer + */ public EdgeBufferIterator(final EdgeBuffer edgeBuffer) { this.edgeBuffer = edgeBuffer; this.currentEdge = edgeBuffer.pickOne(); this.firstEdge = currentEdge; } - + @Override public boolean hasNext() { return currentEdge != null; } - + @Override public Edge next() { final Edge rval = currentEdge; @@ -1591,15 +1789,14 @@ public class GeoPolygonFactory { } return rval; } - + @Override public void remove() { throw new RuntimeException("Unsupported operation"); } } - - /** Class representing a pool of unused edges, all linked together by vertices. - */ + + /** Class representing a pool of unused edges, all linked together by vertices. */ private static class EdgeBuffer { /** Starting edge */ protected Edge oneEdge; @@ -1610,22 +1807,34 @@ public class GeoPolygonFactory { /** Map to next edge */ protected final Map nextEdges = new HashMap<>(); - /** Constructor. - * @param pointList is the list of points. 
- * @param internalEdges is the list of edges that are internal (includes return edge) - * @param startPlaneStartIndex is the index of the startPlane's starting point - * @param startPlaneEndIndex is the index of the startPlane's ending point - * @param startPlane is the starting plane - */ - public EdgeBuffer(final List pointList, final BitSet internalEdges, final int startPlaneStartIndex, final int startPlaneEndIndex, final SidedPlane startPlane) { + /** + * Constructor. + * + * @param pointList is the list of points. + * @param internalEdges is the list of edges that are internal (includes return edge) + * @param startPlaneStartIndex is the index of the startPlane's starting point + * @param startPlaneEndIndex is the index of the startPlane's ending point + * @param startPlane is the starting plane + */ + public EdgeBuffer( + final List pointList, + final BitSet internalEdges, + final int startPlaneStartIndex, + final int startPlaneEndIndex, + final SidedPlane startPlane) { /* System.out.println("Start plane index: "+startPlaneStartIndex+" End plane index: "+startPlaneEndIndex+" Initial points:"); for (final GeoPoint p : pointList) { System.out.println(" "+p); } */ - - final Edge startEdge = new Edge(pointList.get(startPlaneStartIndex), pointList.get(startPlaneEndIndex), startPlane, internalEdges.get(startPlaneStartIndex)); + + final Edge startEdge = + new Edge( + pointList.get(startPlaneStartIndex), + pointList.get(startPlaneEndIndex), + startPlane, + internalEdges.get(startPlaneStartIndex)); // Fill in the EdgeBuffer by walking around creating more stuff Edge currentEdge = startEdge; int startIndex = startPlaneStartIndex; @@ -1637,7 +1846,7 @@ public class GeoPolygonFactory { System.out.println(" "+p+" is: "+(currentEdge.plane.isWithin(p)?"in":"out")); } */ - + // Check termination condition if (currentEdge.endPoint == startEdge.startPoint) { // We finish here. Link the current edge to the start edge, and exit @@ -1656,29 +1865,35 @@ public class GeoPolygonFactory { // Get the next point final GeoPoint newPoint = pointList.get(endIndex); // Build the new edge - // We need to know the sidedness of the new plane. The point we're going to be presenting to it has - // a certain relationship with the sided plane we already have for the current edge. If the current edge - // is colinear with the new edge, then we want to maintain the same relationship. If the new edge - // is not colinear, then we can use the new point's relationship with the current edge as our guide. - + // We need to know the sidedness of the new plane. The point we're going to be presenting + // to it has a certain relationship with the sided plane we already have for the current + // edge. If the current edge is colinear with the new edge, then we want to maintain the + // same relationship. If the new edge is not colinear, then we can use the new point's + // relationship with the current edge as our guide. 
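EdgeBuffer stores the ring implicitly: a set of member edges plus previous/next hash maps, which works because Edge's equals and hashCode are identity-based. A reduced sketch of that structure and of the replace operation used when a run of edges is collapsed into a single return edge (hypothetical, with a generic element type):

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    /** Hypothetical sketch of a circular buffer maintained through previous/next maps. */
    final class Ring<T> {
      private final Set<T> members = new HashSet<>();
      private final Map<T, T> prev = new HashMap<>();
      private final Map<T, T> next = new HashMap<>();

      void link(T before, T after) {
        members.add(before);
        members.add(after);
        next.put(before, after);
        prev.put(after, before);
      }

      /** Splice out a consecutive run and wire a single replacement into its place. */
      void replace(List<T> run, T replacement) {
        final T before = prev.get(run.get(0));
        final T after = next.get(run.get(run.size() - 1));
        link(before, replacement);
        link(replacement, after);
        for (T t : run) {
          members.remove(t);
          prev.remove(t);
          next.remove(t);
        }
      }

      int size() {
        return members.size();
      }
    }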
+
      final boolean isNewPointWithin = currentEdge.plane.isWithin(newPoint);
      final GeoPoint pointToPresent = currentEdge.startPoint;
-      final SidedPlane newPlane = new SidedPlane(pointToPresent, isNewPointWithin, pointList.get(startIndex), newPoint);
-      final Edge newEdge = new Edge(pointList.get(startIndex), pointList.get(endIndex), newPlane, internalEdges.get(startIndex));
-
+      final SidedPlane newPlane =
+          new SidedPlane(pointToPresent, isNewPointWithin, pointList.get(startIndex), newPoint);
+      final Edge newEdge =
+          new Edge(
+              pointList.get(startIndex),
+              pointList.get(endIndex),
+              newPlane,
+              internalEdges.get(startIndex));
+
      // Link it in
      previousEdges.put(newEdge, currentEdge);
      nextEdges.put(currentEdge, newEdge);
      edges.add(newEdge);
      currentEdge = newEdge;
-
    }
-
+
    oneEdge = startEdge;
-
-    // Verify the structure.
-    //verify();
+
+    // Verify the structure.
+    // verify();
  }

  /*
@@ -1711,27 +1926,33 @@ public class GeoPolygonFactory {
    }
  }
  */
-
-    /** Get the previous edge.
-     * @param currentEdge is the current edge.
-     * @return the previous edge, if found.
-     */
+
+    /**
+     * Get the previous edge.
+     *
+     * @param currentEdge is the current edge.
+     * @return the previous edge, if found.
+     */
    public Edge getPrevious(final Edge currentEdge) {
      return previousEdges.get(currentEdge);
    }
-
-    /** Get the next edge.
-     * @param currentEdge is the current edge.
-     * @return the next edge, if found.
-     */
+
+    /**
+     * Get the next edge.
+     *
+     * @param currentEdge is the current edge.
+     * @return the next edge, if found.
+     */
    public Edge getNext(final Edge currentEdge) {
      return nextEdges.get(currentEdge);
    }
-
-    /** Replace a list of edges with a new edge.
-     * @param removeList is the list of edges to remove.
-     * @param newEdge is the edge to add.
-     */
+
+    /**
+     * Replace a list of edges with a new edge.
+     *
+     * @param removeList is the list of edges to remove.
+     * @param newEdge is the edge to add.
+     */
    public void replace(final List<Edge> removeList, final Edge newEdge) {
      /*
      System.out.println("Replacing: ");
@@ -1741,7 +1962,7 @@ public class GeoPolygonFactory {
      for (final Edge e : removeList) {
        System.out.println("  "+e.startPoint+"-->"+e.endPoint);
      }
      System.out.println("...with: "+newEdge.startPoint+"-->"+newEdge.endPoint);
      */
      final Edge previous = previousEdges.get(removeList.get(0));
-      final Edge next = nextEdges.get(removeList.get(removeList.size()-1));
+      final Edge next = nextEdges.get(removeList.get(removeList.size() - 1));
      edges.add(newEdge);
      previousEdges.put(newEdge, previous);
      nextEdges.put(previous, newEdge);
@@ -1755,67 +1976,72 @@ public class GeoPolygonFactory {
        previousEdges.remove(edge);
        nextEdges.remove(edge);
      }
-      //verify();
+      // verify();
    }

-    /** Clear all edges.
-     */
+    /** Clear all edges. */
    public void clear() {
      edges.clear();
      previousEdges.clear();
      nextEdges.clear();
      oneEdge = null;
    }
-
-    /** Get the size of the edge buffer.
-     * @return the size.
-     */
+
+    /**
+     * Get the size of the edge buffer.
+     *
+     * @return the size.
+     */
    public int size() {
      return edges.size();
    }
-
-    /** Get an iterator to iterate over edges.
-     * @return the iterator.
-     */
+
+    /**
+     * Get an iterator to iterate over edges.
+     *
+     * @return the iterator.
+     */
    public Iterator<Edge> iterator() {
      return new EdgeBufferIterator(this);
    }
-
-    /** Return a first edge.
-     * @return the edge.
-     */
+
+    /**
+     * Return a first edge.
+     *
+     * @return the edge.
+     */
    public Edge pickOne() {
      return oneEdge;
    }
-
  }
-
-  /** An instance of this class represents a known-good
-   * path of nodes that contains no coplanar points , no matter
-   * how assessed.  It's used in the depth-first search that
-   * must be executed to
-   * find a valid complete polygon without
-   * coplanarities.
+
+  /**
+   * An instance of this class represents a known-good path of nodes that contains no coplanar
+   * points, no matter how assessed. It's used in the depth-first search that must be executed to
+   * find a valid complete polygon without coplanarities.
   */
  private static class SafePath {
    public final GeoPoint lastPoint;
    public final int lastPointIndex;
    public final Plane lastPlane;
    public final SafePath previous;
-
-    /** Create a new safe end point.
-     */
-    public SafePath(final SafePath previous, final GeoPoint lastPoint, final int lastPointIndex, final Plane lastPlane) {
+
+    /** Create a new safe end point. */
+    public SafePath(
+        final SafePath previous,
+        final GeoPoint lastPoint,
+        final int lastPointIndex,
+        final Plane lastPlane) {
      this.lastPoint = lastPoint;
      this.lastPointIndex = lastPointIndex;
      this.lastPlane = lastPlane;
      this.previous = previous;
    }

-    /** Fill in a list, in order, of safe points.
-     */
+    /** Fill in a list, in order, of safe points. */
    public void fillInList(final List<GeoPoint> pointList) {
-      //we don't use recursion because it can be problematic
-      //for polygons with many points.
+      // we don't use recursion because it can be problematic
+      // for polygons with many points.
      SafePath safePath = this;
      while (safePath.previous != null) {
        pointList.add(safePath.lastPoint);
@@ -1825,13 +2051,12 @@ public class GeoPolygonFactory {
      Collections.reverse(pointList);
    }
  }
-
+
  static class MutableBoolean {
    public boolean value = false;
  }
-
-  /** Exception we throw when we can't tile a polygon due to numerical precision issues.
-   */
+
+  /** Exception we throw when we can't tile a polygon due to numerical precision issues. */
  private static class TileException extends Exception {
    public TileException(final String msg) {
      super(msg);
diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoRectangle.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoRectangle.java
index 16073cd03da..668e198775b 100755
--- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoRectangle.java
+++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoRectangle.java
@@ -16,14 +16,13 @@
  */
 package org.apache.lucene.spatial3d.geom;
 
+import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
-import java.io.IOException;
 
 /**
- * Bounding box limited on four sides (top lat, bottom lat, left lon, right lon).
- * The left-right maximum extent for this shape is PI; for anything larger, use
- * GeoWideRectangle.
+ * Bounding box limited on four sides (top lat, bottom lat, left lon, right lon). The left-right
+ * maximum extent for this shape is PI; for anything larger, use GeoWideRectangle.
  *
  * @lucene.internal
  */
@@ -76,13 +75,19 @@ class GeoRectangle extends GeoBaseBBox {
   /**
    * Accepts only values in the following ranges: lat: {@code -PI/2 -> PI/2}, lon: {@code -PI -> PI}
-   *@param planetModel is the planet model.
-   *@param topLat is the top latitude.
-   *@param bottomLat is the bottom latitude.
-   *@param leftLon is the left longitude.
-   *@param rightLon is the right longitude.
+   *
+   * @param planetModel is the planet model.
+   * @param topLat is the top latitude.
+   * @param bottomLat is the bottom latitude.
+   * @param leftLon is the left longitude.
+   * @param rightLon is the right longitude.
    */
-  public GeoRectangle(final PlanetModel planetModel, final double topLat, final double bottomLat, final double leftLon, double rightLon) {
+  public GeoRectangle(
+      final PlanetModel planetModel,
+      final double topLat,
+      final double bottomLat,
+      final double leftLon,
+      double rightLon) {
     super(planetModel);
     // Argument checking
     if (topLat > Math.PI * 0.5 || topLat < -Math.PI * 0.5)
@@ -99,8 +104,7 @@ class GeoRectangle extends GeoBaseBBox {
     if (extent < 0.0) {
       extent += 2.0 * Math.PI;
     }
-    if (extent > Math.PI)
-      throw new IllegalArgumentException("Width of rectangle too great");
+    if (extent > Math.PI) throw new IllegalArgumentException("Width of rectangle too great");
 
     this.topLat = topLat;
     this.bottomLat = bottomLat;
@@ -117,10 +121,16 @@ class GeoRectangle extends GeoBaseBBox {
     final double cosRightLon = Math.cos(rightLon);
 
     // Now build the four points
-    this.ULHC = new GeoPoint(planetModel, sinTopLat, sinLeftLon, cosTopLat, cosLeftLon, topLat, leftLon);
-    this.URHC = new GeoPoint(planetModel, sinTopLat, sinRightLon, cosTopLat, cosRightLon, topLat, rightLon);
-    this.LRHC = new GeoPoint(planetModel, sinBottomLat, sinRightLon, cosBottomLat, cosRightLon, bottomLat, rightLon);
-    this.LLHC = new GeoPoint(planetModel, sinBottomLat, sinLeftLon, cosBottomLat, cosLeftLon, bottomLat, leftLon);
+    this.ULHC =
+        new GeoPoint(planetModel, sinTopLat, sinLeftLon, cosTopLat, cosLeftLon, topLat, leftLon);
+    this.URHC =
+        new GeoPoint(planetModel, sinTopLat, sinRightLon, cosTopLat, cosRightLon, topLat, rightLon);
+    this.LRHC =
+        new GeoPoint(
+            planetModel, sinBottomLat, sinRightLon, cosBottomLat, cosRightLon, bottomLat, rightLon);
+    this.LLHC =
+        new GeoPoint(
+            planetModel, sinBottomLat, sinLeftLon, cosBottomLat, cosLeftLon, bottomLat, leftLon);
 
     final double middleLat = (topLat + bottomLat) * 0.5;
     final double sinMiddleLat = Math.sin(middleLat);
@@ -133,7 +143,8 @@ class GeoRectangle extends GeoBaseBBox {
     final double sinMiddleLon = Math.sin(middleLon);
     final double cosMiddleLon = Math.cos(middleLon);
 
-    this.centerPoint = new GeoPoint(planetModel, sinMiddleLat, sinMiddleLon, cosMiddleLat, cosMiddleLon);
+    this.centerPoint =
+        new GeoPoint(planetModel, sinMiddleLat, sinMiddleLon, cosMiddleLat, cosMiddleLon);
 
     this.topPlane = new SidedPlane(centerPoint, planetModel, sinTopLat);
     this.bottomPlane = new SidedPlane(centerPoint, planetModel, sinBottomLat);
@@ -141,25 +152,32 @@ class GeoRectangle extends GeoBaseBBox {
     this.rightPlane = new SidedPlane(centerPoint, cosRightLon, sinRightLon);
 
     // Compute the backing plane
-    // The normal for this plane is a unit vector through the origin that goes through the middle lon.  The plane's D is 0,
-    // because it goes through the origin.
+    // The normal for this plane is a unit vector through the origin that goes through the middle
+    // lon. The plane's D is 0, because it goes through the origin.
     this.backingPlane = new SidedPlane(this.centerPoint, cosMiddleLon, sinMiddleLon, 0.0, 0.0);
 
-    this.topPlanePoints = new GeoPoint[]{ULHC, URHC};
-    this.bottomPlanePoints = new GeoPoint[]{LLHC, LRHC};
-    this.leftPlanePoints = new GeoPoint[]{ULHC, LLHC};
-    this.rightPlanePoints = new GeoPoint[]{URHC, LRHC};
+    this.topPlanePoints = new GeoPoint[] {ULHC, URHC};
+    this.bottomPlanePoints = new GeoPoint[] {LLHC, LRHC};
+    this.leftPlanePoints = new GeoPoint[] {ULHC, LLHC};
+    this.rightPlanePoints = new GeoPoint[] {URHC, LRHC};
 
-    this.edgePoints = new GeoPoint[]{ULHC};
+    this.edgePoints = new GeoPoint[] {ULHC};
   }
 
   /**
    * Constructor for deserialization.
+   *
    * @param planetModel is the planet model.
    * @param inputStream is the input stream.
    */
-  public GeoRectangle(final PlanetModel planetModel, final InputStream inputStream) throws IOException {
-    this(planetModel, SerializableObject.readDouble(inputStream), SerializableObject.readDouble(inputStream), SerializableObject.readDouble(inputStream), SerializableObject.readDouble(inputStream));
+  public GeoRectangle(final PlanetModel planetModel, final InputStream inputStream)
+      throws IOException {
+    this(
+        planetModel,
+        SerializableObject.readDouble(inputStream),
+        SerializableObject.readDouble(inputStream),
+        SerializableObject.readDouble(inputStream),
+        SerializableObject.readDouble(inputStream));
   }
 
   @Override
@@ -176,31 +194,34 @@ class GeoRectangle extends GeoBaseBBox {
     final double newBottomLat = bottomLat - angle;
     // Figuring out when we escalate to a special case requires some prefiguring
     double currentLonSpan = rightLon - leftLon;
-    if (currentLonSpan < 0.0)
+    if (currentLonSpan < 0.0) {
       currentLonSpan += Math.PI * 2.0;
+    }
     double newLeftLon = leftLon - angle;
     double newRightLon = rightLon + angle;
     if (currentLonSpan + 2.0 * angle >= Math.PI * 2.0) {
       newLeftLon = -Math.PI;
       newRightLon = Math.PI;
     }
-    return GeoBBoxFactory.makeGeoBBox(planetModel, newTopLat, newBottomLat, newLeftLon, newRightLon);
+    return GeoBBoxFactory.makeGeoBBox(
+        planetModel, newTopLat, newBottomLat, newLeftLon, newRightLon);
   }
 
   @Override
   public boolean isWithin(final double x, final double y, final double z) {
-    return backingPlane.isWithin(x,y,z) &&
-        topPlane.isWithin(x, y, z) &&
-        bottomPlane.isWithin(x, y, z) &&
-        leftPlane.isWithin(x, y, z) &&
-        rightPlane.isWithin(x, y, z);
+    return backingPlane.isWithin(x, y, z)
+        && topPlane.isWithin(x, y, z)
+        && bottomPlane.isWithin(x, y, z)
+        && leftPlane.isWithin(x, y, z)
+        && rightPlane.isWithin(x, y, z);
   }
 
   @Override
   public double getRadius() {
-    // Here we compute the distance from the middle point to one of the corners.  However, we need to be careful
-    // to use the longest of three distances: the distance to a corner on the top; the distnace to a corner on the bottom, and
-    // the distance to the right or left edge from the center.
+    // Here we compute the distance from the middle point to one of the corners. However, we need
+    // to be careful to use the longest of three distances: the distance to a corner on the top;
+    // the distance to a corner on the bottom, and the distance to the right or left edge from the
+    // center.
     final double centerAngle = (rightLon - (rightLon + leftLon) * 0.5) * cosMiddleLat;
     final double topAngle = centerPoint.arcDistance(URHC);
     final double bottomAngle = centerPoint.arcDistance(LLHC);
@@ -218,57 +239,100 @@ class GeoRectangle extends GeoBaseBBox {
   }
 
   @Override
-  public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) {
-    return p.intersects(planetModel, topPlane, notablePoints, topPlanePoints, bounds, bottomPlane, leftPlane, rightPlane) ||
-        p.intersects(planetModel, bottomPlane, notablePoints, bottomPlanePoints, bounds, topPlane, leftPlane, rightPlane) ||
-        p.intersects(planetModel, leftPlane, notablePoints, leftPlanePoints, bounds, rightPlane, topPlane, bottomPlane) ||
-        p.intersects(planetModel, rightPlane, notablePoints, rightPlanePoints, bounds, leftPlane, topPlane, bottomPlane);
+  public boolean intersects(
+      final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) {
+    return p.intersects(
+            planetModel,
+            topPlane,
+            notablePoints,
+            topPlanePoints,
+            bounds,
+            bottomPlane,
+            leftPlane,
+            rightPlane)
+        || p.intersects(
+            planetModel,
+            bottomPlane,
+            notablePoints,
+            bottomPlanePoints,
+            bounds,
+            topPlane,
+            leftPlane,
+            rightPlane)
+        || p.intersects(
+            planetModel,
+            leftPlane,
+            notablePoints,
+            leftPlanePoints,
+            bounds,
+            rightPlane,
+            topPlane,
+            bottomPlane)
+        || p.intersects(
+            planetModel,
+            rightPlane,
+            notablePoints,
+            rightPlanePoints,
+            bounds,
+            leftPlane,
+            topPlane,
+            bottomPlane);
   }
 
   @Override
   public boolean intersects(GeoShape geoShape) {
-    return geoShape.intersects(topPlane, topPlanePoints, bottomPlane, leftPlane, rightPlane) ||
-        geoShape.intersects(bottomPlane, bottomPlanePoints, topPlane, leftPlane, rightPlane) ||
-        geoShape.intersects(leftPlane, leftPlanePoints, rightPlane, topPlane, bottomPlane) ||
-        geoShape.intersects(rightPlane, rightPlanePoints, leftPlane, topPlane, bottomPlane);
+    return geoShape.intersects(topPlane, topPlanePoints, bottomPlane, leftPlane, rightPlane)
+        || geoShape.intersects(bottomPlane, bottomPlanePoints, topPlane, leftPlane, rightPlane)
+        || geoShape.intersects(leftPlane, leftPlanePoints, rightPlane, topPlane, bottomPlane)
+        || geoShape.intersects(rightPlane, rightPlanePoints, leftPlane, topPlane, bottomPlane);
   }
 
   @Override
   public void getBounds(Bounds bounds) {
     super.getBounds(bounds);
-    bounds.addHorizontalPlane(planetModel, topLat, topPlane, bottomPlane, leftPlane, rightPlane)
-        .addVerticalPlane(planetModel, rightLon, rightPlane, topPlane, bottomPlane, leftPlane)
-        .addHorizontalPlane(planetModel, bottomLat, bottomPlane, topPlane, leftPlane, rightPlane)
-        .addVerticalPlane(planetModel, leftLon, leftPlane, topPlane, bottomPlane, rightPlane)
-        //.addIntersection(planetModel, leftPlane, rightPlane, topPlane, bottomPlane)
-        .addPoint(ULHC).addPoint(URHC).addPoint(LLHC).addPoint(LRHC);
+    bounds
+        .addHorizontalPlane(planetModel, topLat, topPlane, bottomPlane, leftPlane, rightPlane)
+        .addVerticalPlane(planetModel, rightLon, rightPlane, topPlane, bottomPlane, leftPlane)
+        .addHorizontalPlane(planetModel, bottomLat, bottomPlane, topPlane, leftPlane, rightPlane)
+        .addVerticalPlane(planetModel, leftLon, leftPlane, topPlane, bottomPlane, rightPlane)
+        // .addIntersection(planetModel, leftPlane, rightPlane, topPlane, bottomPlane)
+        .addPoint(ULHC)
+        .addPoint(URHC)
+        .addPoint(LLHC)
+        .addPoint(LRHC);
   }
 
   @Override
-  protected double outsideDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) {
-    final double topDistance = distanceStyle.computeDistance(planetModel, topPlane, x,y,z, bottomPlane, leftPlane, rightPlane);
-    final double bottomDistance = distanceStyle.computeDistance(planetModel, bottomPlane, x,y,z, topPlane, leftPlane, rightPlane);
-    final double leftDistance = distanceStyle.computeDistance(planetModel, leftPlane, x,y,z, rightPlane, topPlane, bottomPlane);
-    final double rightDistance = distanceStyle.computeDistance(planetModel, rightPlane, x,y,z, leftPlane, topPlane, bottomPlane);
-
-    final double ULHCDistance = distanceStyle.computeDistance(ULHC, x,y,z);
-    final double URHCDistance = distanceStyle.computeDistance(URHC, x,y,z);
-    final double LRHCDistance = distanceStyle.computeDistance(LRHC, x,y,z);
-    final double LLHCDistance = distanceStyle.computeDistance(LLHC, x,y,z);
-
+  protected double outsideDistance(
+      final DistanceStyle distanceStyle, final double x, final double y, final double z) {
+    final double topDistance =
+        distanceStyle.computeDistance(
+            planetModel, topPlane, x, y, z, bottomPlane, leftPlane, rightPlane);
+    final double bottomDistance =
+        distanceStyle.computeDistance(
+            planetModel, bottomPlane, x, y, z, topPlane, leftPlane, rightPlane);
+    final double leftDistance =
+        distanceStyle.computeDistance(
+            planetModel, leftPlane, x, y, z, rightPlane, topPlane, bottomPlane);
+    final double rightDistance =
+        distanceStyle.computeDistance(
+            planetModel, rightPlane, x, y, z, leftPlane, topPlane, bottomPlane);
+
+    final double ULHCDistance = distanceStyle.computeDistance(ULHC, x, y, z);
+    final double URHCDistance = distanceStyle.computeDistance(URHC, x, y, z);
+    final double LRHCDistance = distanceStyle.computeDistance(LRHC, x, y, z);
+    final double LLHCDistance = distanceStyle.computeDistance(LLHC, x, y, z);
+
     return Math.min(
-        Math.min(
-            Math.min(topDistance, bottomDistance),
-            Math.min(leftDistance, rightDistance)),
-        Math.min(
-            Math.min(ULHCDistance, URHCDistance),
-            Math.min(LRHCDistance, LLHCDistance)));
+        Math.min(Math.min(topDistance, bottomDistance), Math.min(leftDistance, rightDistance)),
+        Math.min(Math.min(ULHCDistance, URHCDistance), Math.min(LRHCDistance, LLHCDistance)));
   }
 
   @Override
   public boolean equals(Object o) {
-    if (!(o instanceof GeoRectangle))
+    if (!(o instanceof GeoRectangle)) {
       return false;
+    }
     GeoRectangle other = (GeoRectangle) o;
     return super.equals(other) && other.ULHC.equals(ULHC) && other.LRHC.equals(LRHC);
   }
@@ -276,14 +340,31 @@ class GeoRectangle extends GeoBaseBBox {
   @Override
   public int hashCode() {
     int result = super.hashCode();
-    result = 31 * result  + ULHC.hashCode();
-    result = 31 * result  + LRHC.hashCode();
+    result = 31 * result + ULHC.hashCode();
+    result = 31 * result + LRHC.hashCode();
     return result;
   }
 
   @Override
   public String toString() {
-    return "GeoRectangle: {planetmodel="+planetModel+", toplat=" + topLat + "(" + topLat * 180.0 / Math.PI + "), bottomlat=" + bottomLat + "(" + bottomLat * 180.0 / Math.PI + "), leftlon=" + leftLon + "(" + leftLon * 180.0 / Math.PI + "), rightlon=" + rightLon + "(" + rightLon * 180.0 / Math.PI + ")}";
+    return "GeoRectangle: {planetmodel="
+        + planetModel
+        + ", toplat="
+        + topLat
+        + "("
+        + topLat * 180.0 / Math.PI
+        + "), bottomlat="
+        + bottomLat
+        + "("
+        + bottomLat * 180.0 / Math.PI
+        + "), leftlon="
+        + leftLon
+        + "("
+        + leftLon * 180.0 / Math.PI
+        + "), rightlon="
+        + rightLon
+        + "("
+        + rightLon * 180.0 / Math.PI
+        + ")}";
   }
 }
-
diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoS2Shape.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoS2Shape.java
index b4c5d06ec99..6e0347de0b8 100644
--- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoS2Shape.java
+++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoS2Shape.java
@@ -22,181 +22,205 @@ import java.io.InputStream;
 import java.io.OutputStream;
 
 /**
- * Fast implementation of a polygon representing S2 geometry cell. There are no checks validating that
- * points are convex therefore users must be provide four points in CCW or the logic will fail.
+ * Fast implementation of a polygon representing S2 geometry cell. There are no checks validating
+ * that points are convex, therefore users must provide four points in CCW or the logic will fail.
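+ * (For instance, the four corner points of an S2 cell, supplied in counter-clockwise order,
+ * satisfy this requirement.)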
 *
 * @lucene.internal
 */
class GeoS2Shape extends GeoBasePolygon {
-    /** The first point */
-    protected final GeoPoint point1;
-    /** The second point */
-    protected final GeoPoint point2;
-    /** The third point */
-    protected final GeoPoint point3;
-    /** The fourth point */
-    protected final GeoPoint point4;
+  /** The first point */
+  protected final GeoPoint point1;
+  /** The second point */
+  protected final GeoPoint point2;
+  /** The third point */
+  protected final GeoPoint point3;
+  /** The fourth point */
+  protected final GeoPoint point4;

-    /** The first plane */
-    protected final SidedPlane plane1;
-    /** The second plane */
-    protected final SidedPlane plane2;
-    /** The third plane */
-    protected final SidedPlane plane3;
-    /** The fourth plane */
-    protected final SidedPlane plane4;
+  /** The first plane */
+  protected final SidedPlane plane1;
+  /** The second plane */
+  protected final SidedPlane plane2;
+  /** The third plane */
+  protected final SidedPlane plane3;
+  /** The fourth plane */
+  protected final SidedPlane plane4;

-    /** Notable points for the first plane */
-    protected final GeoPoint[] plane1Points;
-    /** Notable points for second plane */
-    protected final GeoPoint[] plane2Points;
-    /** Notable points for third plane */
-    protected final GeoPoint[] plane3Points;
-    /** Notable points for fourth plane */
-    protected final GeoPoint[] plane4Points;
+  /** Notable points for the first plane */
+  protected final GeoPoint[] plane1Points;
+  /** Notable points for second plane */
+  protected final GeoPoint[] plane2Points;
+  /** Notable points for third plane */
+  protected final GeoPoint[] plane3Points;
+  /** Notable points for fourth plane */
+  protected final GeoPoint[] plane4Points;

-    /** Edge point for this S2 cell */
-    protected final GeoPoint[] edgePoints;
+  /** Edge point for this S2 cell */
+  protected final GeoPoint[] edgePoints;

-    /**
-     * It builds from 4 points given in CCW. It must be convex or logic will fail.
-     *
-     *@param planetModel is the planet model.
-     *@param point1 the first point.
-     *@param point2 the second point.
-     *@param point3 the third point.
-     *@param point4 the four point.
-     */
-    public GeoS2Shape(final PlanetModel planetModel, GeoPoint point1, GeoPoint point2, GeoPoint point3, GeoPoint point4) {
-        super(planetModel);
-        this.point1 = point1;
-        this.point2 = point2;
-        this.point3 = point3;
-        this.point4 = point4;
+  /**
+   * It builds from 4 points given in CCW. It must be convex or logic will fail.
+   *
+   * @param planetModel is the planet model.
+   * @param point1 the first point.
+   * @param point2 the second point.
+   * @param point3 the third point.
+   * @param point4 the fourth point.
+   */
+  public GeoS2Shape(
+      final PlanetModel planetModel,
+      GeoPoint point1,
+      GeoPoint point2,
+      GeoPoint point3,
+      GeoPoint point4) {
+    super(planetModel);
+    this.point1 = point1;
+    this.point2 = point2;
+    this.point3 = point3;
+    this.point4 = point4;

-        // Now build the four planes
-        this.plane1 = new SidedPlane(point4, point1, point2);
-        this.plane2 = new SidedPlane(point1, point2, point3);
-        this.plane3 = new SidedPlane(point2, point3, point4);
-        this.plane4 = new SidedPlane(point3, point4, point1);
+    // Now build the four planes
+    this.plane1 = new SidedPlane(point4, point1, point2);
+    this.plane2 = new SidedPlane(point1, point2, point3);
+    this.plane3 = new SidedPlane(point2, point3, point4);
+    this.plane4 = new SidedPlane(point3, point4, point1);

-        //collect the notable points for the planes
-        this.plane1Points = new GeoPoint[]{point1, point2};
-        this.plane2Points = new GeoPoint[]{point2, point3};
-        this.plane3Points = new GeoPoint[]{point3, point4};
-        this.plane4Points = new GeoPoint[]{point4, point1};
+    // collect the notable points for the planes
+    this.plane1Points = new GeoPoint[] {point1, point2};
+    this.plane2Points = new GeoPoint[] {point2, point3};
+    this.plane3Points = new GeoPoint[] {point3, point4};
+    this.plane4Points = new GeoPoint[] {point4, point1};

-        this.edgePoints = new GeoPoint[]{point1};
+    this.edgePoints = new GeoPoint[] {point1};
+  }
+
+  /**
+   * Constructor for deserialization.
+   *
+   * @param planetModel is the planet model.
+   * @param inputStream is the input stream.
+   */
+  public GeoS2Shape(final PlanetModel planetModel, final InputStream inputStream)
+      throws IOException {
+    this(
+        planetModel,
+        (GeoPoint) SerializableObject.readObject(inputStream),
+        (GeoPoint) SerializableObject.readObject(inputStream),
+        (GeoPoint) SerializableObject.readObject(inputStream),
+        (GeoPoint) SerializableObject.readObject(inputStream));
+  }
+
+  @Override
+  public void write(final OutputStream outputStream) throws IOException {
+    SerializableObject.writeObject(outputStream, point1);
+    SerializableObject.writeObject(outputStream, point2);
+    SerializableObject.writeObject(outputStream, point3);
+    SerializableObject.writeObject(outputStream, point4);
+  }
+
+  @Override
+  public boolean isWithin(final double x, final double y, final double z) {
+    return plane1.isWithin(x, y, z)
+        && plane2.isWithin(x, y, z)
+        && plane3.isWithin(x, y, z)
+        && plane4.isWithin(x, y, z);
+  }
+
+  @Override
+  public GeoPoint[] getEdgePoints() {
+    return edgePoints;
+  }
+
+  @Override
+  public boolean intersects(
+      final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) {
+    return p.intersects(planetModel, plane1, notablePoints, plane1Points, bounds, plane2, plane4)
+        || p.intersects(planetModel, plane2, notablePoints, plane2Points, bounds, plane3, plane1)
+        || p.intersects(planetModel, plane3, notablePoints, plane3Points, bounds, plane4, plane2)
+        || p.intersects(planetModel, plane4, notablePoints, plane4Points, bounds, plane1, plane3);
+  }
+
+  @Override
+  public boolean intersects(GeoShape geoShape) {
+    return geoShape.intersects(plane1, plane1Points, plane2, plane4)
+        || geoShape.intersects(plane2, plane2Points, plane3, plane1)
+        || geoShape.intersects(plane3, plane3Points, plane4, plane2)
+        || geoShape.intersects(plane4, plane4Points, plane1, plane3);
+  }
+
+  @Override
+  public void getBounds(Bounds bounds) {
+    super.getBounds(bounds);
+    bounds
+        .addPlane(planetModel, plane1, plane2, plane4)
+        .addPlane(planetModel, plane2, plane3, plane1)
+        .addPlane(planetModel, plane3, plane4, plane2)
+        .addPlane(planetModel, plane4, plane1, plane3)
+        .addPoint(point1)
+        .addPoint(point2)
+        .addPoint(point3)
+        .addPoint(point4);
+  }
+
+  @Override
+  public double outsideDistance(DistanceStyle distanceStyle, double x, double y, double z) {
+    final double planeDistance1 =
+        distanceStyle.computeDistance(planetModel, plane1, x, y, z, plane2, plane4);
+    final double planeDistance2 =
+        distanceStyle.computeDistance(planetModel, plane2, x, y, z, plane3, plane1);
+    final double planeDistance3 =
+        distanceStyle.computeDistance(planetModel, plane3, x, y, z, plane4, plane2);
+    final double planeDistance4 =
+        distanceStyle.computeDistance(planetModel, plane4, x, y, z, plane1, plane3);
+
+    final double pointDistance1 = distanceStyle.computeDistance(point1, x, y, z);
+    final double pointDistance2 = distanceStyle.computeDistance(point2, x, y, z);
+    final double pointDistance3 = distanceStyle.computeDistance(point3, x, y, z);
+    final double pointDistance4 = distanceStyle.computeDistance(point4, x, y, z);
+
+    return Math.min(
+        Math.min(
+            Math.min(planeDistance1, planeDistance2), Math.min(planeDistance3, planeDistance4)),
+        Math.min(
+            Math.min(pointDistance1, pointDistance2), Math.min(pointDistance3, pointDistance4)));
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (!(o instanceof GeoS2Shape)) {
+      return false;
+    }
+    GeoS2Shape other = (GeoS2Shape) o;
+    return super.equals(other)
+        && other.point1.equals(point1)
+        && other.point2.equals(point2)
+        && other.point3.equals(point3)
+        && other.point4.equals(point4);
+  }
-
-    /**
-     * Constructor for deserialization.
-     * @param planetModel is the planet model.
-     * @param inputStream is the input stream.
-     */
-    public GeoS2Shape(final PlanetModel planetModel, final InputStream inputStream) throws IOException {
-        this(planetModel,
-            (GeoPoint) SerializableObject.readObject(inputStream),
-            (GeoPoint) SerializableObject.readObject(inputStream),
-            (GeoPoint) SerializableObject.readObject(inputStream),
-            (GeoPoint) SerializableObject.readObject(inputStream));
-    }
-
-    @Override
-    public void write(final OutputStream outputStream) throws IOException {
-        SerializableObject.writeObject(outputStream, point1);
-        SerializableObject.writeObject(outputStream, point2);
-        SerializableObject.writeObject(outputStream, point3);
-        SerializableObject.writeObject(outputStream, point4);
-    }
-
-
-    @Override
-    public boolean isWithin(final double x, final double y, final double z) {
-        return plane1.isWithin(x, y, z) &&
-            plane2.isWithin(x, y, z) &&
-            plane3.isWithin(x, y, z) &&
-            plane4.isWithin(x, y, z);
-    }
-
-
-    @Override
-    public GeoPoint[] getEdgePoints() {
-        return edgePoints;
-    }
-
-    @Override
-    public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) {
-        return p.intersects(planetModel, plane1, notablePoints, plane1Points, bounds, plane2, plane4) ||
-            p.intersects(planetModel, plane2, notablePoints, plane2Points, bounds, plane3, plane1) ||
-            p.intersects(planetModel, plane3, notablePoints, plane3Points, bounds, plane4, plane2) ||
-            p.intersects(planetModel, plane4, notablePoints, plane4Points, bounds, plane1, plane3);
-    }
-
-    @Override
-    public boolean intersects(GeoShape geoShape) {
-        return geoShape.intersects(plane1, plane1Points, plane2, plane4) ||
-            geoShape.intersects(plane2, plane2Points, plane3, plane1) ||
-            geoShape.intersects(plane3, plane3Points, plane4, plane2) ||
-            geoShape.intersects(plane4, plane4Points, plane1, plane3);
-    }
-
-    @Override
-    public void getBounds(Bounds bounds) {
-        super.getBounds(bounds);
-        bounds.addPlane(planetModel, plane1, plane2, plane4)
-            .addPlane(planetModel, plane2, plane3, plane1)
-            .addPlane(planetModel, plane3, plane4, plane2)
-            .addPlane(planetModel, plane4, plane1, plane3)
-            .addPoint(point1).addPoint(point2).addPoint(point3).addPoint(point4);
-    }
-
-    @Override
-    public double outsideDistance(DistanceStyle distanceStyle, double x, double y, double z) {
-        final double planeDistance1 = distanceStyle.computeDistance(planetModel, plane1, x,y,z, plane2, plane4);
-        final double planeDistance2 = distanceStyle.computeDistance(planetModel, plane2, x,y,z, plane3, plane1);
-        final double planeDistance3 = distanceStyle.computeDistance(planetModel, plane3, x,y,z, plane4, plane2);
-        final double planeDistance4 = distanceStyle.computeDistance(planetModel, plane4, x,y,z, plane1, plane3);
-
-        final double pointDistance1 = distanceStyle.computeDistance(point1, x,y,z);
-        final double pointDistance2 = distanceStyle.computeDistance(point2, x,y,z);
-        final double pointDistance3 = distanceStyle.computeDistance(point3, x,y,z);
-        final double pointDistance4 = distanceStyle.computeDistance(point4, x,y,z);
-
-        return Math.min(
-            Math.min(
-                Math.min(planeDistance1, planeDistance2),
-                Math.min(planeDistance3, planeDistance4)),
-            Math.min(
-                Math.min(pointDistance1, pointDistance2),
-                Math.min(pointDistance3, pointDistance4)));
-    }
-
-    @Override
-    public boolean equals(Object o) {
-        if (!(o instanceof GeoS2Shape))
-            return false;
-        GeoS2Shape other = (GeoS2Shape) o;
-        return super.equals(other) && other.point1.equals(point1)
-            && other.point2.equals(point2) && other.point3.equals(point3)
-            && other.point4.equals(point4);
-    }
-
-    @Override
-    public int hashCode() {
-        int result = super.hashCode();
-        result = 31 * result + point1.hashCode();
-        result = 31 * result + point2.hashCode();
-        result = 31 * result + point3.hashCode();
-        result = 31 * result + point4.hashCode();
-        return result;
-    }
-
-    @Override
-    public String toString() {
-        return "GeoS2Shape: {planetmodel="+planetModel+", point1=" + point1 +", point2=" + point2 +", point3=" + point3 +", point4=" + point4+ "}";
-    }
+  @Override
+  public int hashCode() {
+    int result = super.hashCode();
+    result = 31 * result + point1.hashCode();
+    result = 31 * result + point2.hashCode();
+    result = 31 * result + point3.hashCode();
+    result = 31 * result + point4.hashCode();
+    return result;
+  }
+
+  @Override
+  public String toString() {
+    return "GeoS2Shape: {planetmodel="
+        + planetModel
+        + ", point1="
+        + point1
+        + ", point2="
+        + point2
+        + ", point3="
+        + point3
+        + ", point4="
+        + point4
+        + "}";
+  }
 }
-
diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoS2ShapeFactory.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoS2ShapeFactory.java
index 848b2e68db5..0d5d5bd9a52 100644
--- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoS2ShapeFactory.java
+++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoS2ShapeFactory.java
@@ -24,13 +24,11 @@ package org.apache.lucene.spatial3d.geom;
  */
 public class GeoS2ShapeFactory {
 
-  private GeoS2ShapeFactory() {
-  }
+  private GeoS2ShapeFactory() {}
 
   /**
-   * Creates a convex polygon with 4 planes by providing 4 points in CCW.
-   * This is a very fast shape and there are no checks that the points currently define
-   * a convex shape.
+   * Creates a convex polygon with 4 planes by providing 4 points in CCW. This is a very fast shape
+   * and there are no checks that the points currently define a convex shape.
    *
    * @param planetModel The planet model
    * @param point1 the first point.
@@ -39,12 +37,12 @@ public class GeoS2ShapeFactory {
    * @param point4 the four point.
    * @return the generated shape.
    */
-  public static GeoPolygon makeGeoS2Shape(final PlanetModel planetModel,
-      final GeoPoint point1,
-      final GeoPoint point2,
-      final GeoPoint point3,
-      final GeoPoint point4) {
+  public static GeoPolygon makeGeoS2Shape(
+      final PlanetModel planetModel,
+      final GeoPoint point1,
+      final GeoPoint point2,
+      final GeoPoint point3,
+      final GeoPoint point4) {
     return new GeoS2Shape(planetModel, point1, point2, point3, point4);
   }
 }
-
diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoShape.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoShape.java
index 5cb07a5625f..8262ecf5a9e 100755
--- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoShape.java
+++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoShape.java
@@ -17,9 +17,8 @@
 package org.apache.lucene.spatial3d.geom;
 
 /**
- * Generic shape. This describes methods that help GeoAreas figure out
- * how they interact with a shape, for the purposes of coming up with a
- * set of geo hash values.
+ * Generic shape. This describes methods that help GeoAreas figure out how they interact with a
+ * shape, for the purposes of coming up with a set of geo hash values.
  *
  * @lucene.experimental
  */
@@ -28,29 +27,27 @@ public interface GeoShape extends Bounded, Membership, PlanetObject {
   /**
    * Return a sample point that is on the outside edge/boundary of the shape.
    *
-   * @return samples of all edge points from distinct edge sections.  Typically one point
-   * is returned, but zero or two are also possible.
+   * @return samples of all edge points from distinct edge sections. Typically one point is
+   *     returned, but zero or two are also possible.
    */
   public GeoPoint[] getEdgePoints();
 
   /**
-   * Assess whether a plane, within the provided bounds, intersects
-   * with the shape's edges.  Any overlap, even a single point, is considered to be an
-   * intersection.  Note well that this method is allowed to return "true"
-   * if there are internal edges of a composite shape which intersect the plane.
-   * Doing this can cause getRelationship() for most GeoBBox shapes to return
-   * OVERLAPS rather than the more correct CONTAINS, but that cannot be
-   * helped for some complex shapes that are built out of overlapping parts.
+   * Assess whether a plane, within the provided bounds, intersects with the shape's edges. Any
+   * overlap, even a single point, is considered to be an intersection. Note well that this method
+   * is allowed to return "true" if there are internal edges of a composite shape which intersect
+   * the plane. Doing this can cause getRelationship() for most GeoBBox shapes to return OVERLAPS
+   * rather than the more correct CONTAINS, but that cannot be helped for some complex shapes that
+   * are built out of overlapping parts.
    *
-   * @param plane is the plane to assess for intersection with the shape's edges or
-   *  bounding curves.
-   * @param notablePoints represents the intersections of the plane with the supplied
-   *  bounds.  These are used to disambiguate when two planes are identical and it needs
-   *  to be determined whether any points exist that fulfill all the bounds.
-   * @param bounds are a set of bounds that define an area that an
-   *  intersection must be within in order to qualify (provided by a GeoArea).
+   * @param plane is the plane to assess for intersection with the shape's edges or bounding curves.
+   * @param notablePoints represents the intersections of the plane with the supplied bounds. These
+   *     are used to disambiguate when two planes are identical and it needs to be determined
+   *     whether any points exist that fulfill all the bounds.
+   * @param bounds are a set of bounds that define an area that an intersection must be within in
+   *     order to qualify (provided by a GeoArea).
    * @return true if there's such an intersection, false if not.
    */
-  public boolean intersects(final Plane plane, final GeoPoint[] notablePoints, final Membership... bounds);
-
+  public boolean intersects(
+      final Plane plane, final GeoPoint[] notablePoints, final Membership... bounds);
 }
diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoSizeable.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoSizeable.java
index 3c7e2efce0e..fbcaa35c9ed 100755
--- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoSizeable.java
+++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoSizeable.java
@@ -23,8 +23,7 @@ package org.apache.lucene.spatial3d.geom;
  */
 public interface GeoSizeable {
   /**
-   * Returns the radius of a circle into which the GeoSizeable area can
-   * be inscribed.
+   * Returns the radius of a circle into which the GeoSizeable area can be inscribed.
    *
    * @return the radius.
    */
@@ -36,5 +35,4 @@ public interface GeoSizeable {
    * @return the center.
    */
   public GeoPoint getCenter();
-
 }
diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoSouthLatitudeZone.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoSouthLatitudeZone.java
index 4b6a549b05a..cfb44d1ad48 100644
--- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoSouthLatitudeZone.java
+++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoSouthLatitudeZone.java
@@ -16,9 +16,9 @@
  */
 package org.apache.lucene.spatial3d.geom;
 
+import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
-import java.io.IOException;
 
 /**
  * This GeoBBox represents an area rectangle limited only in north latitude.
@@ -35,15 +35,17 @@ class GeoSouthLatitudeZone extends GeoBaseBBox {
   /** An interior point of the zone */
   protected final GeoPoint interiorPoint;
   /** Notable points for the plane (none) */
-  protected final static GeoPoint[] planePoints = new GeoPoint[0];
+  protected static final GeoPoint[] planePoints = new GeoPoint[0];
   /** A point on the top boundary */
   protected final GeoPoint topBoundaryPoint;
   /** Edge points; a reference to the topBoundaryPoint */
   protected final GeoPoint[] edgePoints;
 
-  /** Constructor.
-   *@param planetModel is the planet model.
-   *@param topLat is the top latitude of the zone.
+  /**
+   * Constructor.
+   *
+   * @param planetModel is the planet model.
+   * @param topLat is the top latitude of the zone.
    */
   public GeoSouthLatitudeZone(final PlanetModel planetModel, final double topLat) {
     super(planetModel);
@@ -55,20 +57,25 @@ class GeoSouthLatitudeZone extends GeoBaseBBox {
     // Compute an interior point.  Pick one whose lat is between top and bottom.
     final double middleLat = (topLat - Math.PI * 0.5) * 0.5;
     final double sinMiddleLat = Math.sin(middleLat);
-    this.interiorPoint = new GeoPoint(planetModel, sinMiddleLat, 0.0, Math.sqrt(1.0 - sinMiddleLat * sinMiddleLat), 1.0);
-    this.topBoundaryPoint = new GeoPoint(planetModel, sinTopLat, 0.0, Math.sqrt(1.0 - sinTopLat * sinTopLat), 1.0);
+    this.interiorPoint =
+        new GeoPoint(
+            planetModel, sinMiddleLat, 0.0, Math.sqrt(1.0 - sinMiddleLat * sinMiddleLat), 1.0);
+    this.topBoundaryPoint =
+        new GeoPoint(planetModel, sinTopLat, 0.0, Math.sqrt(1.0 - sinTopLat * sinTopLat), 1.0);
 
     this.topPlane = new SidedPlane(interiorPoint, planetModel, sinTopLat);
 
-    this.edgePoints = new GeoPoint[]{topBoundaryPoint};
+    this.edgePoints = new GeoPoint[] {topBoundaryPoint};
   }
 
   /**
    * Constructor for deserialization.
+   *
    * @param planetModel is the planet model.
    * @param inputStream is the input stream.
    */
-  public GeoSouthLatitudeZone(final PlanetModel planetModel, final InputStream inputStream) throws IOException {
+  public GeoSouthLatitudeZone(final PlanetModel planetModel, final InputStream inputStream)
+      throws IOException {
     this(planetModel, SerializableObject.readDouble(inputStream));
   }
 
@@ -93,8 +100,7 @@ class GeoSouthLatitudeZone extends GeoBaseBBox {
   public double getRadius() {
     // This is a bit tricky. I guess we should interpret this as meaning the angle of a circle that
     // would contain all the bounding box points, when starting in the "center".
-    if (topLat > 0.0)
-      return Math.PI;
+    if (topLat > 0.0) return Math.PI;
     double maxCosLat = cosTopLat;
     return maxCosLat * Math.PI;
   }
@@ -115,7 +121,8 @@ class GeoSouthLatitudeZone extends GeoBaseBBox {
   }
 
   @Override
-  public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) {
+  public boolean intersects(
+      final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) {
     return p.intersects(planetModel, topPlane, notablePoints, planePoints, bounds);
   }
 
@@ -127,19 +134,18 @@ class GeoSouthLatitudeZone extends GeoBaseBBox {
   @Override
   public void getBounds(Bounds bounds) {
     super.getBounds(bounds);
-    bounds
-        .addHorizontalPlane(planetModel, topLat, topPlane);
+    bounds.addHorizontalPlane(planetModel, topLat, topPlane);
   }
 
   @Override
-  protected double outsideDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) {
-    return distanceStyle.computeDistance(planetModel, topPlane, x,y,z);
+  protected double outsideDistance(
+      final DistanceStyle distanceStyle, final double x, final double y, final double z) {
+    return distanceStyle.computeDistance(planetModel, topPlane, x, y, z);
   }
 
   @Override
   public boolean equals(Object o) {
-    if (!(o instanceof GeoSouthLatitudeZone))
-      return false;
+    if (!(o instanceof GeoSouthLatitudeZone)) return false;
     GeoSouthLatitudeZone other = (GeoSouthLatitudeZone) o;
     return super.equals(other) && other.topBoundaryPoint.equals(topBoundaryPoint);
   }
@@ -153,7 +159,12 @@ class GeoSouthLatitudeZone extends GeoBaseBBox {
 
   @Override
   public String toString() {
-    return "GeoSouthLatitudeZone: {planetmodel="+planetModel+", toplat=" + topLat + "(" + topLat * 180.0 / Math.PI + ")}";
+    return "GeoSouthLatitudeZone: {planetmodel="
+        + planetModel
+        + ", toplat="
+        + topLat
+        + "("
+        + topLat * 180.0 / Math.PI
+        + ")}";
   }
 }
-
diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoSouthRectangle.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoSouthRectangle.java
index fc05fc4f8b9..eafd56ff397 100644
--- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoSouthRectangle.java
+++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoSouthRectangle.java
@@ -16,15 +16,14 @@
  */
 package org.apache.lucene.spatial3d.geom;
 
+import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
-import java.io.IOException;
 
 /**
- * Bounding box limited on three sides (top lat, left lon, right lon). The
- * other corner is the south pole.
- * The left-right maximum extent for this shape is PI; for anything larger, use
- * {@link GeoWideSouthRectangle}.
+ * Bounding box limited on three sides (top lat, left lon, right lon). The other corner is the south
+ * pole. The left-right maximum extent for this shape is PI; for anything larger, use {@link
+ * GeoWideSouthRectangle}.
  *
  * @lucene.internal
  */
@@ -66,12 +65,14 @@ class GeoSouthRectangle extends GeoBaseBBox {
 
   /**
    * Accepts only values in the following ranges: lat: {@code -PI/2 -> PI/2}, lon: {@code -PI -> PI}
-   *@param planetModel is the planet model.
-   *@param topLat is the top latitude.
-   *@param leftLon is the left longitude.
-   *@param rightLon is the right longitude.
+   *
+   * @param planetModel is the planet model.
+   * @param topLat is the top latitude.
+   * @param leftLon is the left longitude.
+   * @param rightLon is the right longitude.
    */
-  public GeoSouthRectangle(final PlanetModel planetModel, final double topLat, final double leftLon, double rightLon) {
+  public GeoSouthRectangle(
+      final PlanetModel planetModel, final double topLat, final double leftLon, double rightLon) {
     super(planetModel);
     // Argument checking
     if (topLat > Math.PI * 0.5 || topLat < -Math.PI * 0.5)
@@ -84,8 +85,9 @@ class GeoSouthRectangle extends GeoBaseBBox {
     if (extent < 0.0) {
       extent += 2.0 * Math.PI;
     }
-    if (extent > Math.PI)
+    if (extent > Math.PI) {
       throw new IllegalArgumentException("Width of rectangle too great");
+    }
 
     this.topLat = topLat;
     this.leftLon = leftLon;
@@ -99,8 +101,10 @@ class GeoSouthRectangle extends GeoBaseBBox {
     final double cosRightLon = Math.cos(rightLon);
 
     // Now build the four points
-    this.ULHC = new GeoPoint(planetModel, sinTopLat, sinLeftLon, cosTopLat, cosLeftLon, topLat, leftLon);
-    this.URHC = new GeoPoint(planetModel, sinTopLat, sinRightLon, cosTopLat, cosRightLon, topLat, rightLon);
+    this.ULHC =
+        new GeoPoint(planetModel, sinTopLat, sinLeftLon, cosTopLat, cosLeftLon, topLat, leftLon);
+    this.URHC =
+        new GeoPoint(planetModel, sinTopLat, sinRightLon, cosTopLat, cosRightLon, topLat, rightLon);
 
     final double middleLat = (topLat - Math.PI * 0.5) * 0.5;
     final double sinMiddleLat = Math.sin(middleLat);
@@ -113,36 +117,43 @@ class GeoSouthRectangle extends GeoBaseBBox {
     final double sinMiddleLon = Math.sin(middleLon);
     final double cosMiddleLon = Math.cos(middleLon);
 
-    this.centerPoint = new GeoPoint(planetModel, sinMiddleLat, sinMiddleLon, cosMiddleLat, cosMiddleLon);
+    this.centerPoint =
+        new GeoPoint(planetModel, sinMiddleLat, sinMiddleLon, cosMiddleLat, cosMiddleLon);
 
     this.topPlane = new SidedPlane(centerPoint, planetModel, sinTopLat);
     this.leftPlane = new SidedPlane(centerPoint, cosLeftLon, sinLeftLon);
     this.rightPlane = new SidedPlane(centerPoint, cosRightLon, sinRightLon);
 
-    assert(topPlane.isWithin(centerPoint));
-    assert(leftPlane.isWithin(centerPoint));
-    assert(rightPlane.isWithin(centerPoint));
+    assert (topPlane.isWithin(centerPoint));
+    assert (leftPlane.isWithin(centerPoint));
+    assert (rightPlane.isWithin(centerPoint));
 
     // Compute the backing plane
-    // The normal for this plane is a unit vector through the origin that goes through the middle lon. The plane's D is 0,
+    // The normal for this plane is a unit vector through the origin that goes through the middle
+    // lon. The plane's D is 0,
     // because it goes through the origin.
     this.backingPlane = new SidedPlane(this.centerPoint, cosMiddleLon, sinMiddleLon, 0.0, 0.0);
 
-    this.topPlanePoints = new GeoPoint[]{ULHC, URHC};
-    this.leftPlanePoints = new GeoPoint[]{ULHC, planetModel.SOUTH_POLE};
-    this.rightPlanePoints = new GeoPoint[]{URHC, planetModel.SOUTH_POLE};
-
-    this.edgePoints = new GeoPoint[]{planetModel.SOUTH_POLE};
+    this.topPlanePoints = new GeoPoint[] {ULHC, URHC};
+    this.leftPlanePoints = new GeoPoint[] {ULHC, planetModel.SOUTH_POLE};
+    this.rightPlanePoints = new GeoPoint[] {URHC, planetModel.SOUTH_POLE};
+    this.edgePoints = new GeoPoint[] {planetModel.SOUTH_POLE};
   }
 
   /**
    * Constructor for deserialization.
+   *
    * @param planetModel is the planet model.
    * @param inputStream is the input stream.
    */
-  public GeoSouthRectangle(final PlanetModel planetModel, final InputStream inputStream) throws IOException {
-    this(planetModel, SerializableObject.readDouble(inputStream), SerializableObject.readDouble(inputStream), SerializableObject.readDouble(inputStream));
+  public GeoSouthRectangle(final PlanetModel planetModel, final InputStream inputStream)
+      throws IOException {
+    this(
+        planetModel,
+        SerializableObject.readDouble(inputStream),
+        SerializableObject.readDouble(inputStream),
+        SerializableObject.readDouble(inputStream));
   }
 
   @Override
@@ -158,30 +169,31 @@ class GeoSouthRectangle extends GeoBaseBBox {
     final double newBottomLat = -Math.PI * 0.5;
     // Figuring out when we escalate to a special case requires some prefiguring
     double currentLonSpan = rightLon - leftLon;
-    if (currentLonSpan < 0.0)
-      currentLonSpan += Math.PI * 2.0;
+    if (currentLonSpan < 0.0) currentLonSpan += Math.PI * 2.0;
     double newLeftLon = leftLon - angle;
     double newRightLon = rightLon + angle;
     if (currentLonSpan + 2.0 * angle >= Math.PI * 2.0) {
       newLeftLon = -Math.PI;
       newRightLon = Math.PI;
     }
-    return GeoBBoxFactory.makeGeoBBox(planetModel, newTopLat, newBottomLat, newLeftLon, newRightLon);
+    return GeoBBoxFactory.makeGeoBBox(
+        planetModel, newTopLat, newBottomLat, newLeftLon, newRightLon);
   }
 
   @Override
   public boolean isWithin(final double x, final double y, final double z) {
-    return backingPlane.isWithin(x, y, z) &&
-        topPlane.isWithin(x, y, z) &&
-        leftPlane.isWithin(x, y, z) &&
-        rightPlane.isWithin(x, y, z);
+    return backingPlane.isWithin(x, y, z)
+        && topPlane.isWithin(x, y, z)
+        && leftPlane.isWithin(x, y, z)
+        && rightPlane.isWithin(x, y, z);
   }
 
   @Override
   public double getRadius() {
-    // Here we compute the distance from the middle point to one of the corners.  However, we need to be careful
-    // to use the longest of three distances: the distance to a corner on the top; the distnace to a corner on the bottom, and
-    // the distance to the right or left edge from the center.
+    // Here we compute the distance from the middle point to one of the corners. However, we need
+    // to be careful to use the longest of three distances: the distance to a corner on the top;
+    // the distance to a corner on the bottom, and the distance to the right or left edge from the
+    // center.
     final double centerAngle = (rightLon - (rightLon + leftLon) * 0.5) * cosMiddleLat;
     final double topAngle = centerPoint.arcDistance(URHC);
     return Math.max(centerAngle, topAngle);
@@ -198,50 +210,59 @@ class GeoSouthRectangle extends GeoBaseBBox {
   }
 
   @Override
-  public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) {
-    return p.intersects(planetModel, topPlane, notablePoints, topPlanePoints, bounds, leftPlane, rightPlane) ||
-        p.intersects(planetModel, leftPlane, notablePoints, leftPlanePoints, bounds, rightPlane, topPlane) ||
-        p.intersects(planetModel, rightPlane, notablePoints, rightPlanePoints, bounds, leftPlane, topPlane);
+  public boolean intersects(
+      final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) {
+    return p.intersects(
+            planetModel, topPlane, notablePoints, topPlanePoints, bounds, leftPlane, rightPlane)
+        || p.intersects(
+            planetModel, leftPlane, notablePoints, leftPlanePoints, bounds, rightPlane, topPlane)
+        || p.intersects(
+            planetModel, rightPlane, notablePoints, rightPlanePoints, bounds, leftPlane, topPlane);
   }
 
   @Override
   public boolean intersects(final GeoShape geoShape) {
-    return geoShape.intersects(topPlane, topPlanePoints, leftPlane, rightPlane) ||
-        geoShape.intersects(leftPlane, leftPlanePoints, rightPlane, topPlane) ||
-        geoShape.intersects(rightPlane, rightPlanePoints, leftPlane, topPlane);
+    return geoShape.intersects(topPlane, topPlanePoints, leftPlane, rightPlane)
+        || geoShape.intersects(leftPlane, leftPlanePoints, rightPlane, topPlane)
+        || geoShape.intersects(rightPlane, rightPlanePoints, leftPlane, topPlane);
   }
 
   @Override
   public void getBounds(Bounds bounds) {
     super.getBounds(bounds);
     bounds
-        .addHorizontalPlane(planetModel, topLat, topPlane, leftPlane, rightPlane)
-        .addVerticalPlane(planetModel, leftLon, leftPlane, topPlane, rightPlane)
-        .addVerticalPlane(planetModel, rightLon, rightPlane, topPlane, leftPlane)
-        //.addIntersection(planetModel, rightPlane, leftPlane, topPlane)
-        .addPoint(URHC).addPoint(ULHC).addPoint(planetModel.SOUTH_POLE);
+        .addHorizontalPlane(planetModel, topLat, topPlane, leftPlane, rightPlane)
+        .addVerticalPlane(planetModel, leftLon, leftPlane, topPlane, rightPlane)
+        .addVerticalPlane(planetModel, rightLon, rightPlane, topPlane, leftPlane)
+        // .addIntersection(planetModel, rightPlane, leftPlane, topPlane)
+        .addPoint(URHC)
+        .addPoint(ULHC)
+        .addPoint(planetModel.SOUTH_POLE);
   }
 
   @Override
-  protected double outsideDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) {
-    final double topDistance = distanceStyle.computeDistance(planetModel, topPlane, x,y,z, leftPlane, rightPlane);
-    final double leftDistance = distanceStyle.computeDistance(planetModel, leftPlane, x,y,z, rightPlane, topPlane);
-    final double rightDistance = distanceStyle.computeDistance(planetModel, rightPlane, x,y,z, leftPlane, topPlane);
-
-    final double ULHCDistance = distanceStyle.computeDistance(ULHC, x,y,z);
-    final double URHCDistance = distanceStyle.computeDistance(URHC, x,y,z);
-
+  protected double outsideDistance(
+      final DistanceStyle distanceStyle, final double x, final double y, final double z) {
+    final double topDistance =
+        distanceStyle.computeDistance(planetModel, topPlane, x, y, z, leftPlane, rightPlane);
+    final double leftDistance =
+        distanceStyle.computeDistance(planetModel, leftPlane, x, y, z, rightPlane, topPlane);
+    final double rightDistance =
+        distanceStyle.computeDistance(planetModel, rightPlane, x, y, z, leftPlane, topPlane);
+
+    final double ULHCDistance = distanceStyle.computeDistance(ULHC, x, y, z);
+    final double URHCDistance = distanceStyle.computeDistance(URHC, x, y, z);
+
     return Math.min(
-        Math.min(
-            topDistance,
-            Math.min(leftDistance, rightDistance)),
-        Math.min(ULHCDistance, URHCDistance));
+        Math.min(topDistance, Math.min(leftDistance, rightDistance)),
+        Math.min(ULHCDistance, URHCDistance));
   }
 
   @Override
   public boolean equals(Object o) {
-    if (!(o instanceof GeoSouthRectangle))
+    if (!(o instanceof GeoSouthRectangle)) {
       return false;
+    }
     GeoSouthRectangle other = (GeoSouthRectangle) o;
     return super.equals(other) && other.ULHC.equals(ULHC) && other.URHC.equals(URHC);
   }
@@ -256,8 +277,20 @@ class GeoSouthRectangle extends GeoBaseBBox {
 
   @Override
   public String toString() {
-    return "GeoSouthRectangle: {planetmodel="+planetModel+", toplat=" + topLat + "(" + topLat * 180.0 / Math.PI + "), leftlon=" + leftLon + "(" + leftLon * 180.0 / Math.PI + "), rightlon=" + rightLon + "(" + rightLon * 180.0 / Math.PI + ")}";
+    return "GeoSouthRectangle: {planetmodel="
+        + planetModel
+        + ", toplat="
+        + topLat
+        + "("
+        + topLat * 180.0 / Math.PI
+        + "), leftlon="
+        + leftLon
+        + "("
+        + leftLon * 180.0 / Math.PI
+        + "), rightlon="
+        + rightLon
+        + "("
+        + rightLon * 180.0 / Math.PI
+        + ")}";
   }
 }
-
-
diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoStandardCircle.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoStandardCircle.java
index a3c0d8954f7..adb9e734d7a 100755
--- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoStandardCircle.java
+++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoStandardCircle.java
@@ -16,9 +16,9 @@
  */
 package org.apache.lucene.spatial3d.geom;
 
+import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
-import java.io.IOException;
 
 /**
  * Circular area with a center and cutoff angle that represents the latitude and longitude distance
@@ -39,13 +39,16 @@ class GeoStandardCircle extends GeoBaseCircle {
   /** Notable points for a circle -- there aren't any */
   protected static final GeoPoint[] circlePoints = new GeoPoint[0];
 
-  /** Constructor.
-   *@param planetModel is the planet model.
-   *@param lat is the center latitude.
-   *@param lon is the center longitude.
-   *@param cutoffAngle is the cutoff angle for the circle.
+  /**
+   * Constructor.
+   *
+   * @param planetModel is the planet model.
+   * @param lat is the center latitude.
+   * @param lon is the center longitude.
+   * @param cutoffAngle is the cutoff angle for the circle.
    */
-  public GeoStandardCircle(final PlanetModel planetModel, final double lat, final double lon, final double cutoffAngle) {
+  public GeoStandardCircle(
+      final PlanetModel planetModel, final double lat, final double lon, final double cutoffAngle) {
     super(planetModel);
     if (lat < -Math.PI * 0.5 || lat > Math.PI * 0.5)
       throw new IllegalArgumentException("Latitude out of bounds");
@@ -65,16 +68,18 @@ class GeoStandardCircle extends GeoBaseCircle {
     double upperLon = lon;
     if (upperLat > Math.PI * 0.5) {
       upperLon += Math.PI;
-      if (upperLon > Math.PI)
+      if (upperLon > Math.PI) {
         upperLon -= 2.0 * Math.PI;
+      }
       upperLat = Math.PI - upperLat;
     }
     double lowerLat = lat - cutoffAngle;
     double lowerLon = lon;
     if (lowerLat < -Math.PI * 0.5) {
       lowerLon += Math.PI;
-      if (lowerLon > Math.PI)
+      if (lowerLon > Math.PI) {
         lowerLon -= 2.0 * Math.PI;
+      }
       lowerLat = -Math.PI - lowerLat;
     }
     final GeoPoint upperPoint = new GeoPoint(planetModel, upperLat, upperLon);
@@ -86,27 +91,44 @@ class GeoStandardCircle extends GeoBaseCircle {
     } else {
       // Construct normal plane
       final Plane normalPlane = Plane.constructNormalizedZPlane(upperPoint, lowerPoint, center);
-      // Construct a sided plane that goes through the two points and whose normal is in the normalPlane.
-      this.circlePlane = SidedPlane.constructNormalizedPerpendicularSidedPlane(center, normalPlane, upperPoint, lowerPoint);
-      if (circlePlane == null)
-        throw new IllegalArgumentException("Couldn't construct circle plane, probably too small? Cutoff angle = "+cutoffAngle+"; upperPoint = "+upperPoint+"; lowerPoint = "+lowerPoint);
-      final GeoPoint recomputedIntersectionPoint = circlePlane.getSampleIntersectionPoint(planetModel, normalPlane);
-      if (recomputedIntersectionPoint == null)
-        throw new IllegalArgumentException("Couldn't construct intersection point, probably circle too small? Plane = "+circlePlane);
-      this.edgePoints = new GeoPoint[]{recomputedIntersectionPoint};
+      // Construct a sided plane that goes through the two points and whose normal is in the
+      // normalPlane.
+      this.circlePlane =
+          SidedPlane.constructNormalizedPerpendicularSidedPlane(
+              center, normalPlane, upperPoint, lowerPoint);
+      if (circlePlane == null) {
+        throw new IllegalArgumentException(
+            "Couldn't construct circle plane, probably too small? Cutoff angle = "
+                + cutoffAngle
+                + "; upperPoint = "
+                + upperPoint
+                + "; lowerPoint = "
+                + lowerPoint);
+      }
+      final GeoPoint recomputedIntersectionPoint =
+          circlePlane.getSampleIntersectionPoint(planetModel, normalPlane);
+      if (recomputedIntersectionPoint == null) {
+        throw new IllegalArgumentException(
+            "Couldn't construct intersection point, probably circle too small? Plane = "
+                + circlePlane);
+      }
+      this.edgePoints = new GeoPoint[] {recomputedIntersectionPoint};
     }
   }
 
   /**
    * Constructor for deserialization.
+   *
    * @param planetModel is the planet model.
    * @param inputStream is the input stream.
    */
-  public GeoStandardCircle(final PlanetModel planetModel, final InputStream inputStream) throws IOException {
-    this(planetModel,
-        SerializableObject.readDouble(inputStream),
-        SerializableObject.readDouble(inputStream),
-        SerializableObject.readDouble(inputStream));
+  public GeoStandardCircle(final PlanetModel planetModel, final InputStream inputStream)
+      throws IOException {
+    this(
+        planetModel,
+        SerializableObject.readDouble(inputStream),
+        SerializableObject.readDouble(inputStream),
+        SerializableObject.readDouble(inputStream));
   }
 
   @Override
@@ -127,18 +149,21 @@ class GeoStandardCircle extends GeoBaseCircle {
   }
 
   @Override
-  protected double distance(final DistanceStyle distanceStyle, final double x, final double y, final double z) {
+  protected double distance(
+      final DistanceStyle distanceStyle, final double x, final double y, final double z) {
     return distanceStyle.computeDistance(this.center, x, y, z);
   }
 
   @Override
-  protected void distanceBounds(final Bounds bounds, final DistanceStyle distanceStyle, final double distanceValue) {
+  protected void distanceBounds(
+      final Bounds bounds, final DistanceStyle distanceStyle, final double distanceValue) {
     // TBD: Compute actual bounds based on distance
     getBounds(bounds);
   }
 
   @Override
-  protected double outsideDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) {
+  protected double outsideDistance(
+      final DistanceStyle distanceStyle, final double x, final double y, final double z) {
     return distanceStyle.computeDistance(planetModel, circlePlane, x, y, z);
   }
 
@@ -157,7 +182,8 @@ class GeoStandardCircle extends GeoBaseCircle {
   }
 
   @Override
-  public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) {
+  public boolean intersects(
+      final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) {
     if (circlePlane == null) {
       return false;
     }
@@ -175,7 +201,7 @@ class GeoStandardCircle extends GeoBaseCircle {
   @Override
   public int getRelationship(GeoShape geoShape) {
     if (circlePlane == null) {
-      //same as GeoWorld
+      // same as GeoWorld
       if (geoShape.getEdgePoints().length > 0) {
         return WITHIN;
       }
@@ -197,8 +223,9 @@ class GeoStandardCircle extends GeoBaseCircle {
 
   @Override
   public boolean equals(Object o) {
-    if (!(o instanceof GeoStandardCircle))
+    if (!(o instanceof GeoStandardCircle)) {
       return false;
+    }
     GeoStandardCircle other = (GeoStandardCircle) o;
     return super.equals(other) && other.center.equals(center) && other.cutoffAngle == cutoffAngle;
   }
@@ -214,6 +241,14 @@ class GeoStandardCircle extends GeoBaseCircle {
 
   @Override
   public String toString() {
-    return "GeoStandardCircle: {planetmodel=" + planetModel+", center=" + center + ", radius=" + cutoffAngle + "(" + cutoffAngle * 180.0 / Math.PI + ")}";
+    return "GeoStandardCircle: {planetmodel="
+        + planetModel
+        + ", center="
+        + center
+        + ", radius="
+        + cutoffAngle
+        + "("
+        + cutoffAngle * 180.0 / Math.PI
+        + ")}";
   }
 }
diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoStandardPath.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoStandardPath.java
index bc01cd91e8f..b49588d23ac 100755
--- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoStandardPath.java
+++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoStandardPath.java
@@ -16,10 +16,9 @@
  */
 package org.apache.lucene.spatial3d.geom;
 
+import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
-import java.io.IOException;
-
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
@@ -27,10 +26,9 @@ import java.util.List;
 import java.util.Map;
 
 /**
- * GeoShape representing a path across the surface of the globe,
- * with a specified half-width. Path is described by a series of points.
- * Distances are measured from the starting point along the path, and then at right
- * angles to the path.
+ * GeoShape representing a path across the surface of the globe, with a specified half-width. Path
+ * is described by a series of points. Distances are measured from the starting point along the
+ * path, and then at right angles to the path.
  *
  * @lucene.internal
  */
@@ -45,7 +43,7 @@ class GeoStandardPath extends GeoBasePath {
 
   /** The original list of path points */
   protected final List<GeoPoint> points = new ArrayList<>();
-
+
   /** A list of SegmentEndpoints */
   protected List<SegmentEndpoint> endPoints;
   /** A list of PathSegments */
@@ -56,48 +54,58 @@ class GeoStandardPath extends GeoBasePath {
   /** Set to true if path has been completely constructed */
   protected boolean isDone = false;
-
-  /** Constructor.
-   *@param planetModel is the planet model.
-   *@param maxCutoffAngle is the width of the path, measured as an angle.
-   *@param pathPoints are the points in the path.
+
+  /**
+   * Constructor.
+   *
+   * @param planetModel is the planet model.
+   * @param maxCutoffAngle is the width of the path, measured as an angle.
+   * @param pathPoints are the points in the path.
    */
-  public GeoStandardPath(final PlanetModel planetModel, final double maxCutoffAngle, final GeoPoint[] pathPoints) {
+  public GeoStandardPath(
+      final PlanetModel planetModel, final double maxCutoffAngle, final GeoPoint[] pathPoints) {
     this(planetModel, maxCutoffAngle);
     Collections.addAll(points, pathPoints);
     done();
   }
 
-  /** Piece-wise constructor.  Use in conjunction with addPoint() and done().
- *@param planetModel is the planet model. - *@param maxCutoffAngle is the width of the path, measured as an angle. + /** + * Piece-wise constructor. Use in conjunction with addPoint() and done(). + * + * @param planetModel is the planet model. + * @param maxCutoffAngle is the width of the path, measured as an angle. */ public GeoStandardPath(final PlanetModel planetModel, final double maxCutoffAngle) { super(planetModel); - if (maxCutoffAngle <= 0.0 || maxCutoffAngle > Math.PI * 0.5) + if (maxCutoffAngle <= 0.0 || maxCutoffAngle > Math.PI * 0.5) { throw new IllegalArgumentException("Cutoff angle out of bounds"); + } this.cutoffAngle = maxCutoffAngle; this.cosAngle = Math.cos(maxCutoffAngle); this.sinAngle = Math.sin(maxCutoffAngle); } - /** Add a point to the path. - *@param lat is the latitude of the point. - *@param lon is the longitude of the point. + /** + * Add a point to the path. + * + * @param lat is the latitude of the point. + * @param lon is the longitude of the point. */ public void addPoint(final double lat, final double lon) { - if (isDone) + if (isDone) { throw new IllegalStateException("Can't call addPoint() if done() already called"); + } points.add(new GeoPoint(planetModel, lat, lon)); } - - /** Complete the path. - */ + + /** Complete the path. */ public void done() { - if (isDone) + if (isDone) { throw new IllegalStateException("Can't call done() twice"); - if (points.size() == 0) + } + if (points.size() == 0) { throw new IllegalArgumentException("Path must have at least one point"); + } isDone = true; endPoints = new ArrayList<>(points.size()); @@ -105,7 +113,7 @@ class GeoStandardPath extends GeoBasePath { // Compute an offset to use for all segments. This will be based on the minimum magnitude of // the entire ellipsoid. final double cutoffOffset = this.sinAngle * planetModel.getMinimumMagnitude(); - + // First, build all segments. We'll then go back and build corresponding segment endpoints. 
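As context for the construction hunks above, a minimal usage sketch of the piece-wise API (GeoStandardPath is package-private, so the direct calls below assume same-package access; external callers would normally go through a factory such as GeoPathFactory, and the waypoint values are invented for illustration):

    package org.apache.lucene.spatial3d.geom;

    /** Hypothetical driver illustrating the addPoint()/done() contract above. */
    public class GeoStandardPathUsageSketch {
      public static void main(String[] args) {
        // Path width as an angle in radians; must lie in (0, PI/2] or the
        // constructor throws IllegalArgumentException.
        final double cutoffAngle = Math.toRadians(1.0);
        final GeoStandardPath path = new GeoStandardPath(PlanetModel.SPHERE, cutoffAngle);
        // Waypoints are (latitude, longitude) in radians, in path order.
        path.addPoint(Math.toRadians(40.0), Math.toRadians(-75.0));
        path.addPoint(Math.toRadians(41.0), Math.toRadians(-74.0));
        path.addPoint(Math.toRadians(42.0), Math.toRadians(-73.0));
        // done() builds the segments and endpoints; afterwards addPoint() and a
        // second done() both throw IllegalStateException.
        path.done();
        final GeoPoint probe =
            new GeoPoint(PlanetModel.SPHERE, Math.toRadians(40.5), Math.toRadians(-74.5));
        System.out.println("within = " + path.isWithin(probe));
      }
    }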
GeoPoint lastPoint = null; for (final GeoPoint end : points) { @@ -114,11 +122,12 @@ class GeoStandardPath extends GeoBasePath { if (normalizedConnectingPlane == null) { continue; } - segments.add(new PathSegment(planetModel, lastPoint, end, normalizedConnectingPlane, cutoffOffset)); + segments.add( + new PathSegment(planetModel, lastPoint, end, normalizedConnectingPlane, cutoffOffset)); } lastPoint = end; } - + if (segments.size() == 0) { // Simple circle double lat = points.get(0).getLatitude(); @@ -129,77 +138,102 @@ class GeoStandardPath extends GeoBasePath { double upperLon = lon; if (upperLat > Math.PI * 0.5) { upperLon += Math.PI; - if (upperLon > Math.PI) + if (upperLon > Math.PI) { upperLon -= 2.0 * Math.PI; + } upperLat = Math.PI - upperLat; } double lowerLat = lat - cutoffAngle; double lowerLon = lon; if (lowerLat < -Math.PI * 0.5) { lowerLon += Math.PI; - if (lowerLon > Math.PI) + if (lowerLon > Math.PI) { lowerLon -= 2.0 * Math.PI; + } lowerLat = -Math.PI - lowerLat; } final GeoPoint upperPoint = new GeoPoint(planetModel, upperLat, upperLon); final GeoPoint lowerPoint = new GeoPoint(planetModel, lowerLat, lowerLon); final GeoPoint point = points.get(0); - + // Construct normal plane final Plane normalPlane = Plane.constructNormalizedZPlane(upperPoint, lowerPoint, point); - final CircleSegmentEndpoint onlyEndpoint = new CircleSegmentEndpoint(point, normalPlane, upperPoint, lowerPoint); + final CircleSegmentEndpoint onlyEndpoint = + new CircleSegmentEndpoint(point, normalPlane, upperPoint, lowerPoint); endPoints.add(onlyEndpoint); - this.edgePoints = new GeoPoint[]{onlyEndpoint.circlePlane.getSampleIntersectionPoint(planetModel, normalPlane)}; + this.edgePoints = + new GeoPoint[] { + onlyEndpoint.circlePlane.getSampleIntersectionPoint(planetModel, normalPlane) + }; return; } - + // Create segment endpoints. Use an appropriate constructor for the start and end of the path. for (int i = 0; i < segments.size(); i++) { final PathSegment currentSegment = segments.get(i); - + if (i == 0) { // Starting endpoint - final SegmentEndpoint startEndpoint = new CutoffSingleCircleSegmentEndpoint(currentSegment.start, - currentSegment.startCutoffPlane, currentSegment.ULHC, currentSegment.LLHC); + final SegmentEndpoint startEndpoint = + new CutoffSingleCircleSegmentEndpoint( + currentSegment.start, + currentSegment.startCutoffPlane, + currentSegment.ULHC, + currentSegment.LLHC); endPoints.add(startEndpoint); - this.edgePoints = new GeoPoint[]{currentSegment.ULHC}; + this.edgePoints = new GeoPoint[] {currentSegment.ULHC}; continue; } - + // General intersection case - final PathSegment prevSegment = segments.get(i-1); - if (prevSegment.endCutoffPlane.isWithin(currentSegment.ULHC) && prevSegment.endCutoffPlane.isWithin(currentSegment.LLHC) && - currentSegment.startCutoffPlane.isWithin(prevSegment.URHC) && currentSegment.startCutoffPlane.isWithin(prevSegment.LRHC)) { + final PathSegment prevSegment = segments.get(i - 1); + if (prevSegment.endCutoffPlane.isWithin(currentSegment.ULHC) + && prevSegment.endCutoffPlane.isWithin(currentSegment.LLHC) + && currentSegment.startCutoffPlane.isWithin(prevSegment.URHC) + && currentSegment.startCutoffPlane.isWithin(prevSegment.LRHC)) { // The planes are identical. We wouldn't need a circle at all except for the possibility of // backing up, which is hard to detect here. 
- final SegmentEndpoint midEndpoint = new CutoffSingleCircleSegmentEndpoint(currentSegment.start, - prevSegment.endCutoffPlane, currentSegment.startCutoffPlane, currentSegment.ULHC, currentSegment.LLHC); - //don't need a circle at all. Special constructor... + final SegmentEndpoint midEndpoint = + new CutoffSingleCircleSegmentEndpoint( + currentSegment.start, + prevSegment.endCutoffPlane, + currentSegment.startCutoffPlane, + currentSegment.ULHC, + currentSegment.LLHC); + // don't need a circle at all. Special constructor... endPoints.add(midEndpoint); } else { - endPoints.add(new CutoffDualCircleSegmentEndpoint(currentSegment.start, - prevSegment.endCutoffPlane, currentSegment.startCutoffPlane, - prevSegment.URHC, prevSegment.LRHC, - currentSegment.ULHC, currentSegment.LLHC)); + endPoints.add( + new CutoffDualCircleSegmentEndpoint( + currentSegment.start, + prevSegment.endCutoffPlane, + currentSegment.startCutoffPlane, + prevSegment.URHC, + prevSegment.LRHC, + currentSegment.ULHC, + currentSegment.LLHC)); } } // Do final endpoint - final PathSegment lastSegment = segments.get(segments.size()-1); - endPoints.add(new CutoffSingleCircleSegmentEndpoint(lastSegment.end, - lastSegment.endCutoffPlane, lastSegment.URHC, lastSegment.LRHC)); - + final PathSegment lastSegment = segments.get(segments.size() - 1); + endPoints.add( + new CutoffSingleCircleSegmentEndpoint( + lastSegment.end, lastSegment.endCutoffPlane, lastSegment.URHC, lastSegment.LRHC)); } /** * Constructor for deserialization. + * * @param planetModel is the planet model. * @param inputStream is the input stream. */ - public GeoStandardPath(final PlanetModel planetModel, final InputStream inputStream) throws IOException { - this(planetModel, - SerializableObject.readDouble(inputStream), - SerializableObject.readPointArray(planetModel, inputStream)); + public GeoStandardPath(final PlanetModel planetModel, final InputStream inputStream) + throws IOException { + this( + planetModel, + SerializableObject.readDouble(inputStream), + SerializableObject.readPointArray(planetModel, inputStream)); } @Override @@ -209,12 +243,14 @@ class GeoStandardPath extends GeoBasePath { } @Override - public double computePathCenterDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { + public double computePathCenterDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { // Walk along path and keep track of the closest distance we find double closestDistance = Double.POSITIVE_INFINITY; // Segments first for (PathSegment segment : segments) { - final double segmentDistance = segment.pathCenterDistance(planetModel, distanceStyle, x, y, z); + final double segmentDistance = + segment.pathCenterDistance(planetModel, distanceStyle, x, y, z); if (segmentDistance < closestDistance) { closestDistance = segmentDistance; } @@ -230,12 +266,13 @@ class GeoStandardPath extends GeoBasePath { } @Override - public double computeNearestDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { + public double computeNearestDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { double currentDistance = 0.0; double minPathCenterDistance = Double.POSITIVE_INFINITY; double bestDistance = Double.POSITIVE_INFINITY; int segmentIndex = 0; - + for (final SegmentEndpoint endpoint : endPoints) { final double endpointPathCenterDistance = endpoint.pathCenterDistance(distanceStyle, x, y, z); if (endpointPathCenterDistance < 
minPathCenterDistance) { @@ -246,35 +283,46 @@ class GeoStandardPath extends GeoBasePath { // Look at the following segment, if any if (segmentIndex < segments.size()) { final PathSegment segment = segments.get(segmentIndex++); - final double segmentPathCenterDistance = segment.pathCenterDistance(planetModel, distanceStyle, x, y, z); + final double segmentPathCenterDistance = + segment.pathCenterDistance(planetModel, distanceStyle, x, y, z); if (segmentPathCenterDistance < minPathCenterDistance) { minPathCenterDistance = segmentPathCenterDistance; - bestDistance = distanceStyle.aggregateDistances(currentDistance, segment.nearestPathDistance(planetModel, distanceStyle, x, y, z)); + bestDistance = + distanceStyle.aggregateDistances( + currentDistance, + segment.nearestPathDistance(planetModel, distanceStyle, x, y, z)); } - currentDistance = distanceStyle.aggregateDistances(currentDistance, segment.fullPathDistance(distanceStyle)); + currentDistance = + distanceStyle.aggregateDistances( + currentDistance, segment.fullPathDistance(distanceStyle)); } } return bestDistance; } @Override - protected double distance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { + protected double distance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { // Algorithm: // (1) If the point is within any of the segments along the path, return that value. // (2) If the point is within any of the segment end circles along the path, return that value. // The algorithm loops over the whole path to get the shortest distance double bestDistance = Double.POSITIVE_INFINITY; - + double currentDistance = 0.0; for (final PathSegment segment : segments) { - double distance = segment.pathDistance(planetModel, distanceStyle, x,y,z); + double distance = segment.pathDistance(planetModel, distanceStyle, x, y, z); if (distance != Double.POSITIVE_INFINITY) { - final double thisDistance = distanceStyle.fromAggregationForm(distanceStyle.aggregateDistances(currentDistance, distance)); + final double thisDistance = + distanceStyle.fromAggregationForm( + distanceStyle.aggregateDistances(currentDistance, distance)); if (thisDistance < bestDistance) { bestDistance = thisDistance; } } - currentDistance = distanceStyle.aggregateDistances(currentDistance, segment.fullPathDistance(distanceStyle)); + currentDistance = + distanceStyle.aggregateDistances( + currentDistance, segment.fullPathDistance(distanceStyle)); } int segmentIndex = 0; @@ -282,26 +330,31 @@ class GeoStandardPath extends GeoBasePath { for (final SegmentEndpoint endpoint : endPoints) { double distance = endpoint.pathDistance(distanceStyle, x, y, z); if (distance != Double.POSITIVE_INFINITY) { - final double thisDistance = distanceStyle.fromAggregationForm(distanceStyle.aggregateDistances(currentDistance, distance)); + final double thisDistance = + distanceStyle.fromAggregationForm( + distanceStyle.aggregateDistances(currentDistance, distance)); if (thisDistance < bestDistance) { bestDistance = thisDistance; } } if (segmentIndex < segments.size()) - currentDistance = distanceStyle.aggregateDistances(currentDistance, segments.get(segmentIndex++).fullPathDistance(distanceStyle)); + currentDistance = + distanceStyle.aggregateDistances( + currentDistance, segments.get(segmentIndex++).fullPathDistance(distanceStyle)); } return bestDistance; } @Override - protected double deltaDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { + protected double deltaDistance( + final 
DistanceStyle distanceStyle, final double x, final double y, final double z) { // Algorithm: // (1) If the point is within any of the segments along the path, return that value. // (2) If the point is within any of the segment end circles along the path, return that value. // Finds best distance double bestDistance = Double.POSITIVE_INFINITY; - + for (final PathSegment segment : segments) { final double distance = segment.pathDeltaDistance(planetModel, distanceStyle, x, y, z); if (distance != Double.POSITIVE_INFINITY) { @@ -324,25 +377,29 @@ class GeoStandardPath extends GeoBasePath { return bestDistance; } - + @Override - protected void distanceBounds(final Bounds bounds, final DistanceStyle distanceStyle, final double distanceValue) { + protected void distanceBounds( + final Bounds bounds, final DistanceStyle distanceStyle, final double distanceValue) { // TBD: Compute actual bounds based on distance getBounds(bounds); } @Override - protected double outsideDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { + protected double outsideDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { double minDistance = Double.POSITIVE_INFINITY; for (final SegmentEndpoint endpoint : endPoints) { - final double newDistance = endpoint.outsideDistance(distanceStyle, x,y,z); - if (newDistance < minDistance) + final double newDistance = endpoint.outsideDistance(distanceStyle, x, y, z); + if (newDistance < minDistance) { minDistance = newDistance; + } } for (final PathSegment segment : segments) { final double newDistance = segment.outsideDistance(planetModel, distanceStyle, x, y, z); - if (newDistance < minDistance) + if (newDistance < minDistance) { minDistance = newDistance; + } } return minDistance; } @@ -368,7 +425,8 @@ class GeoStandardPath extends GeoBasePath { } @Override - public boolean intersects(final Plane plane, final GeoPoint[] notablePoints, final Membership... bounds) { + public boolean intersects( + final Plane plane, final GeoPoint[] notablePoints, final Membership... bounds) { // We look for an intersection with any of the exterior edges of the path. // We also have to look for intersections with the cones described by the endpoints. // Return "true" if any such intersections are found. @@ -378,7 +436,7 @@ class GeoStandardPath extends GeoBasePath { // any of the intersection points are within the bounds, then we've detected an intersection. // Well, sort of. We can detect intersections also due to overlap of segments with each other. // But that's an edge case and we won't be optimizing for it. 
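An aside on the distance() hunk above: the traversal is easier to verify in distilled form. The sketch below keeps only the walk-and-aggregate skeleton, with plain doubles standing in for DistanceStyle aggregation and invented names; the real code also folds in the segment-endpoint circles and the aggregation-form conversions:

    /** Distilled, illustrative restatement of the distance() traversal above. */
    final class PathDistanceSketch {
      /**
       * @param perpDistances distance from the query point to each segment, or
       *     Double.POSITIVE_INFINITY when the point lies outside that segment
       * @param segmentLengths full along-path length of each segment
       * @return shortest total distance: along the path to a segment, then over to the point
       */
      static double distance(double[] perpDistances, double[] segmentLengths) {
        double best = Double.POSITIVE_INFINITY;
        double alongPath = 0.0; // running distance from the start of the path
        for (int i = 0; i < perpDistances.length; i++) {
          if (perpDistances[i] != Double.POSITIVE_INFINITY) {
            // Plain addition here; DistanceStyle.aggregateDistances() generalizes it.
            best = Math.min(best, alongPath + perpDistances[i]);
          }
          alongPath += segmentLengths[i];
        }
        return best;
      }
    }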
- //System.err.println(" Looking for intersection of plane "+plane+" with path "+this); + // System.err.println(" Looking for intersection of plane " + plane + " with path " + this); for (final SegmentEndpoint pathPoint : endPoints) { if (pathPoint.intersects(planetModel, plane, notablePoints, bounds)) { return true; @@ -427,13 +485,16 @@ @Override public boolean equals(Object o) { - if (!(o instanceof GeoStandardPath)) + if (!(o instanceof GeoStandardPath)) { return false; + } GeoStandardPath p = (GeoStandardPath) o; - if (!super.equals(p)) + if (!super.equals(p)) { return false; - if (cutoffAngle != p.cutoffAngle) + } + if (cutoffAngle != p.cutoffAngle) { return false; + } return points.equals(p.points); } @@ -448,114 +509,151 @@ @Override public String toString() { - return "GeoStandardPath: {planetmodel=" + planetModel+", width=" + cutoffAngle + "(" + cutoffAngle * 180.0 / Math.PI + "), points={" + points + "}}"; + return "GeoStandardPath: {planetmodel=" + + planetModel + + ", width=" + + cutoffAngle + + "(" + + cutoffAngle * 180.0 / Math.PI + + "), points={" + + points + + "}}"; } /** - * Internal interface describing segment endpoint implementations. - * There are several different such implementations, each corresponding to a different geometric conformation. - * Note well: This is not necessarily a circle. There are four cases: - * (1) The path consists of a single endpoint. In this case, we build a simple circle with the proper cutoff offset. - * (2) This is the end of a path. The circle plane must be constructed to go through two supplied points and be perpendicular to a connecting plane. - * (2.5) Intersection, but the path on both sides is linear. We generate a circle, but we use the cutoff planes to limit its influence in the straight line case. - * (3) This is an intersection in a path. We are supplied FOUR planes. If there are intersections within bounds for both upper and lower, then - * we generate no circle at all. If there is one intersection only, then we generate a plane that includes that intersection, as well as the remaining - * cutoff plane/edge plane points. + * Internal interface describing segment endpoint implementations. There are several different + * such implementations, each corresponding to a different geometric conformation. Note well: This + * is not necessarily a circle. There are four cases: (1) The path consists of a single endpoint. + * In this case, we build a simple circle with the proper cutoff offset. (2) This is the end of a + * path. The circle plane must be constructed to go through two supplied points and be + * perpendicular to a connecting plane. (2.5) Intersection, but the path on both sides is linear. + * We generate a circle, but we use the cutoff planes to limit its influence in the straight line + * case. (3) This is an intersection in a path. We are supplied FOUR planes. If there are + * intersections within bounds for both upper and lower, then we generate no circle at all. If + * there is one intersection only, then we generate a plane that includes that intersection, as + * well as the remaining cutoff plane/edge plane points. */ private interface SegmentEndpoint { - - /** Check if point is within this endpoint. - *@param point is the point. - *@return true of within. + + /** + * Check if point is within this endpoint. + * + * @param point is the point. + * @return true if within.
*/ boolean isWithin(final Vector point); - /** Check if point is within this endpoint. - *@param x is the point x. - *@param y is the point y. - *@param z is the point z. - *@return true of within. + /** + * Check if point is within this endpoint. + * + * @param x is the point x. + * @param y is the point y. + * @param z is the point z. + * @return true if within. */ boolean isWithin(final double x, final double y, final double z); - - /** Compute delta path distance. - *@param distanceStyle is the distance style. - *@param x is the point x. - *@param y is the point y. - *@param z is the point z. - *@return the distance metric, in aggregation form. - */ - double pathDeltaDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z); - /** Compute interior path distance. - *@param distanceStyle is the distance style. - *@param x is the point x. - *@param y is the point y. - *@param z is the point z. - *@return the distance metric, in aggregation form. + /** + * Compute delta path distance. + * + * @param distanceStyle is the distance style. + * @param x is the point x. + * @param y is the point y. + * @param z is the point z. + * @return the distance metric, in aggregation form. */ - double pathDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z); + double pathDeltaDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z); - /** Compute nearest path distance. - *@param distanceStyle is the distance style. - *@param x is the point x. - *@param y is the point y. - *@param z is the point z. - *@return the distance metric (always value zero), in aggregation form, or POSITIVE_INFINITY - * if the point is not within the bounds of the endpoint. + /** + * Compute interior path distance. + * + * @param distanceStyle is the distance style. + * @param x is the point x. + * @param y is the point y. + * @param z is the point z. + * @return the distance metric, in aggregation form. */ - double nearestPathDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z); + double pathDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z); - /** Compute path center distance. - *@param distanceStyle is the distance style. - *@param x is the point x. - *@param y is the point y. - *@param z is the point z. - *@return the distance metric, or POSITIVE_INFINITY - * if the point is not within the bounds of the endpoint. + /** + * Compute nearest path distance. + * + * @param distanceStyle is the distance style. + * @param x is the point x. + * @param y is the point y. + * @param z is the point z. + * @return the distance metric (always value zero), in aggregation form, or POSITIVE_INFINITY if + * the point is not within the bounds of the endpoint. */ - double pathCenterDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z); + double nearestPathDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z); - /** Compute external distance. - *@param distanceStyle is the distance style. - *@param x is the point x. - *@param y is the point y. - *@param z is the point z. - *@return the distance metric. + /** + * Compute path center distance. + * + * @param distanceStyle is the distance style. + * @param x is the point x. + * @param y is the point y. + * @param z is the point z.
+ * @return the distance metric, or POSITIVE_INFINITY if the point is not within the bounds of + * the endpoint. */ - double outsideDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z); + double pathCenterDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z); - /** Determine if this endpoint intersects a specified plane. - *@param planetModel is the planet model. - *@param p is the plane. - *@param notablePoints are the points associated with the plane. - *@param bounds are any bounds which the intersection must lie within. - *@return true if there is a matching intersection. + /** + * Compute external distance. + * + * @param distanceStyle is the distance style. + * @param x is the point x. + * @param y is the point y. + * @param z is the point z. + * @return the distance metric. */ - boolean intersects(final PlanetModel planetModel, final Plane p, final GeoPoint[] notablePoints, final Membership[] bounds); + double outsideDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z); - /** Determine if this endpoint intersects a GeoShape. - *@param geoShape is the GeoShape. - *@return true if there is shape intersect this endpoint. + /** + * Determine if this endpoint intersects a specified plane. + * + * @param planetModel is the planet model. + * @param p is the plane. + * @param notablePoints are the points associated with the plane. + * @param bounds are any bounds which the intersection must lie within. + * @return true if there is a matching intersection. + */ + boolean intersects( + final PlanetModel planetModel, + final Plane p, + final GeoPoint[] notablePoints, + final Membership[] bounds); + + /** + * Determine if this endpoint intersects a GeoShape. + * + * @param geoShape is the GeoShape. + * @return true if the shape intersects this endpoint. */ boolean intersects(final GeoShape geoShape); - /** Get the bounds for a segment endpoint. - *@param planetModel is the planet model. - *@param bounds are the bounds to be modified. + /** + * Get the bounds for a segment endpoint. + * + * @param planetModel is the planet model. + * @param bounds are the bounds to be modified.
*/ void getBounds(final PlanetModel planetModel, Bounds bounds); } - /** - * Base implementation of SegmentEndpoint - */ + /** Base implementation of SegmentEndpoint */ private static class BaseSegmentEndpoint implements SegmentEndpoint { /** The center point of the endpoint */ protected final GeoPoint point; /** Null membership */ - protected final static Membership[] NO_MEMBERSHIP = new Membership[0]; - + protected static final Membership[] NO_MEMBERSHIP = new Membership[0]; + public BaseSegmentEndpoint(final GeoPoint point) { this.point = point; } @@ -571,37 +669,49 @@ class GeoStandardPath extends GeoBasePath { } @Override - public double pathDeltaDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { - if (!isWithin(x,y,z)) + public double pathDeltaDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { + if (!isWithin(x, y, z)) { return Double.POSITIVE_INFINITY; - final double theDistance = distanceStyle.toAggregationForm(distanceStyle.computeDistance(this.point, x, y, z)); + } + final double theDistance = + distanceStyle.toAggregationForm(distanceStyle.computeDistance(this.point, x, y, z)); return distanceStyle.aggregateDistances(theDistance, theDistance); } @Override - public double pathDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { - if (!isWithin(x,y,z)) + public double pathDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { + if (!isWithin(x, y, z)) { return Double.POSITIVE_INFINITY; + } return distanceStyle.toAggregationForm(distanceStyle.computeDistance(this.point, x, y, z)); } @Override - public double nearestPathDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { + public double nearestPathDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { return distanceStyle.toAggregationForm(0.0); } @Override - public double pathCenterDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { + public double pathCenterDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { return distanceStyle.computeDistance(this.point, x, y, z); } @Override - public double outsideDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { + public double outsideDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { return distanceStyle.computeDistance(this.point, x, y, z); } @Override - public boolean intersects(final PlanetModel planetModel, final Plane p, final GeoPoint[] notablePoints, final Membership[] bounds) { + public boolean intersects( + final PlanetModel planetModel, + final Plane p, + final GeoPoint[] notablePoints, + final Membership[] bounds) { return false; } @@ -617,65 +727,70 @@ class GeoStandardPath extends GeoBasePath { @Override public boolean equals(final Object o) { - if (!(o instanceof BaseSegmentEndpoint)) + if (!(o instanceof BaseSegmentEndpoint)) { return false; + } final BaseSegmentEndpoint other = (BaseSegmentEndpoint) o; return point.equals(other.point); } - + @Override public int hashCode() { return point.hashCode(); } - + @Override public String toString() { return point.toString(); } - } - - /** - * Simplest possible implementation of segment endpoint: a single point. - */ + + /** Simplest possible implementation of segment endpoint: a single point. 
*/ private static class DegenerateSegmentEndpoint extends BaseSegmentEndpoint { public DegenerateSegmentEndpoint(final GeoPoint point) { super(point); } - } - - /** - * Endpoint that's a simple circle. - */ + + /** Endpoint that's a simple circle. */ private static class CircleSegmentEndpoint extends BaseSegmentEndpoint { /** A plane describing the circle */ protected final SidedPlane circlePlane; /** No notable points from the circle itself */ - protected final static GeoPoint[] circlePoints = new GeoPoint[0]; + protected static final GeoPoint[] circlePoints = new GeoPoint[0]; - /** Constructor for case (1). - * Generate a simple circle cutoff plane. - *@param point is the center point. - *@param upperPoint is a point that must be on the circle plane. - *@param lowerPoint is another point that must be on the circle plane. + /** + * Constructor for case (1). Generate a simple circle cutoff plane. + * + * @param point is the center point. + * @param normalPlane is the plane in which the circle plane's normal must lie. + * @param upperPoint is a point that must be on the circle plane. + * @param lowerPoint is another point that must be on the circle plane. */ - public CircleSegmentEndpoint(final GeoPoint point, final Plane normalPlane, final GeoPoint upperPoint, final GeoPoint lowerPoint) { + public CircleSegmentEndpoint( + final GeoPoint point, + final Plane normalPlane, + final GeoPoint upperPoint, + final GeoPoint lowerPoint) { super(point); - // Construct a sided plane that goes through the two points and whose normal is in the normalPlane. - this.circlePlane = SidedPlane.constructNormalizedPerpendicularSidedPlane(point, normalPlane, upperPoint, lowerPoint); + // Construct a sided plane that goes through the two points and whose normal is in the + // normalPlane. + this.circlePlane = + SidedPlane.constructNormalizedPerpendicularSidedPlane( + point, normalPlane, upperPoint, lowerPoint); } - /** Constructor for case (3). Called by superclass only. - *@param point is the center point. - *@param circlePlane is the circle plane. + /** + * Constructor for case (3). Called by superclass only. + * + * @param point is the center point. + * @param circlePlane is the circle plane. */ protected CircleSegmentEndpoint(final GeoPoint point, final SidedPlane circlePlane) { super(point); this.circlePlane = circlePlane; } - + @Override public boolean isWithin(final Vector point) { return circlePlane.isWithin(point); @@ -687,7 +802,11 @@ } @Override - public boolean intersects(final PlanetModel planetModel, final Plane p, final GeoPoint[] notablePoints, final Membership[] bounds) { + public boolean intersects( + final PlanetModel planetModel, + final Plane p, + final GeoPoint[] notablePoints, + final Membership[] bounds) { return circlePlane.intersects(planetModel, p, notablePoints, circlePoints, bounds); } @@ -701,46 +820,58 @@ super.getBounds(planetModel, bounds); bounds.addPlane(planetModel, circlePlane); } - } - - /** - * Endpoint that's a single circle with cutoff(s). - */ + + /** Endpoint that's a single circle with cutoff(s). */ private static class CutoffSingleCircleSegmentEndpoint extends CircleSegmentEndpoint { - + /** Pertinent cutoff plane from adjoining segments */ protected final Membership[] cutoffPlanes; /** Notable points for this segment endpoint */ private final GeoPoint[] notablePoints; - /** Constructor for case (2). - * Generate an endpoint, given a single cutoff plane plus upper and lower edge points. - *@param point is the center point.
- *@param cutoffPlane is the plane from the adjoining path segment marking the boundary between this endpoint and that segment. - *@param topEdgePoint is a point on the cutoffPlane that should be also on the circle plane. - *@param bottomEdgePoint is another point on the cutoffPlane that should be also on the circle plane. + /** + * Constructor for case (2). Generate an endpoint, given a single cutoff plane plus upper and + * lower edge points. + * + * @param point is the center point. + * @param cutoffPlane is the plane from the adjoining path segment marking the boundary between + * this endpoint and that segment. + * @param topEdgePoint is a point on the cutoffPlane that should also be on the circle plane. + * @param bottomEdgePoint is another point on the cutoffPlane that should also be on the circle + * plane. */ - public CutoffSingleCircleSegmentEndpoint(final GeoPoint point, - final SidedPlane cutoffPlane, final GeoPoint topEdgePoint, final GeoPoint bottomEdgePoint) { + public CutoffSingleCircleSegmentEndpoint( + final GeoPoint point, + final SidedPlane cutoffPlane, + final GeoPoint topEdgePoint, + final GeoPoint bottomEdgePoint) { super(point, cutoffPlane, topEdgePoint, bottomEdgePoint); - this.cutoffPlanes = new Membership[]{new SidedPlane(cutoffPlane)}; - this.notablePoints = new GeoPoint[]{topEdgePoint, bottomEdgePoint}; + this.cutoffPlanes = new Membership[] {new SidedPlane(cutoffPlane)}; + this.notablePoints = new GeoPoint[] {topEdgePoint, bottomEdgePoint}; } - /** Constructor for case (2.5). - * Generate an endpoint, given two cutoff planes plus upper and lower edge points. - *@param point is the center. - *@param cutoffPlane1 is one adjoining path segment cutoff plane. - *@param cutoffPlane2 is another adjoining path segment cutoff plane. - *@param topEdgePoint is a point on the cutoffPlane that should be also on the circle plane. - *@param bottomEdgePoint is another point on the cutoffPlane that should be also on the circle plane. + /** + * Constructor for case (2.5). Generate an endpoint, given two cutoff planes plus upper and + * lower edge points. + * + * @param point is the center. + * @param cutoffPlane1 is one adjoining path segment cutoff plane. + * @param cutoffPlane2 is another adjoining path segment cutoff plane. + * @param topEdgePoint is a point on the cutoffPlane that should also be on the circle plane. + * @param bottomEdgePoint is another point on the cutoffPlane that should also be on the circle + * plane.
*/ - public CutoffSingleCircleSegmentEndpoint(final GeoPoint point, - final SidedPlane cutoffPlane1, final SidedPlane cutoffPlane2, final GeoPoint topEdgePoint, final GeoPoint bottomEdgePoint) { + public CutoffSingleCircleSegmentEndpoint( + final GeoPoint point, + final SidedPlane cutoffPlane1, + final SidedPlane cutoffPlane2, + final GeoPoint topEdgePoint, + final GeoPoint bottomEdgePoint) { super(point, cutoffPlane1, topEdgePoint, bottomEdgePoint); - this.cutoffPlanes = new Membership[]{new SidedPlane(cutoffPlane1), new SidedPlane(cutoffPlane2)}; - this.notablePoints = new GeoPoint[]{topEdgePoint, bottomEdgePoint}; + this.cutoffPlanes = + new Membership[] {new SidedPlane(cutoffPlane1), new SidedPlane(cutoffPlane2)}; + this.notablePoints = new GeoPoint[] {topEdgePoint, bottomEdgePoint}; } @Override @@ -762,7 +893,7 @@ class GeoStandardPath extends GeoBasePath { return false; } for (final Membership m : cutoffPlanes) { - if (!m.isWithin(x,y,z)) { + if (!m.isWithin(x, y, z)) { return false; } } @@ -770,9 +901,10 @@ class GeoStandardPath extends GeoBasePath { } @Override - public double nearestPathDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { + public double nearestPathDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { for (final Membership m : cutoffPlanes) { - if (!m.isWithin(x,y,z)) { + if (!m.isWithin(x, y, z)) { return Double.POSITIVE_INFINITY; } } @@ -780,9 +912,10 @@ class GeoStandardPath extends GeoBasePath { } @Override - public double pathCenterDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { + public double pathCenterDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { for (final Membership m : cutoffPlanes) { - if (!m.isWithin(x,y,z)) { + if (!m.isWithin(x, y, z)) { return Double.POSITIVE_INFINITY; } } @@ -790,29 +923,32 @@ class GeoStandardPath extends GeoBasePath { } @Override - public boolean intersects(final PlanetModel planetModel, final Plane p, final GeoPoint[] notablePoints, final Membership[] bounds) { - return circlePlane.intersects(planetModel, p, notablePoints, this.notablePoints, bounds, this.cutoffPlanes); + public boolean intersects( + final PlanetModel planetModel, + final Plane p, + final GeoPoint[] notablePoints, + final Membership[] bounds) { + return circlePlane.intersects( + planetModel, p, notablePoints, this.notablePoints, bounds, this.cutoffPlanes); } @Override public boolean intersects(final GeoShape geoShape) { return geoShape.intersects(circlePlane, this.notablePoints, this.cutoffPlanes); } - } - + /** - * Endpoint that's a dual circle with cutoff(s). - * This SegmentEndpoint is used when we have two adjoining segments that are not colinear, and when we are on a non-spherical world. - * (1) We construct two circles. Each circle uses the two segment endpoints for one of the two segments, plus the one segment endpoint - * that is on the other side of the segment's cutoff plane. - * (2) isWithin() is computed using both circles, using just the portion that is within both segments' cutoff planes. If either matches, the point is included. - * (3) intersects() is computed using both circles, with similar cutoffs. - * (4) bounds() uses both circles too. - * + * Endpoint that's a dual circle with cutoff(s). This SegmentEndpoint is used when we have two + * adjoining segments that are not colinear, and when we are on a non-spherical world. (1) We + * construct two circles. 
Each circle uses the two segment endpoints for one of the two segments, + * plus the one segment endpoint that is on the other side of the segment's cutoff plane. (2) + * isWithin() is computed using both circles, using just the portion that is within both segments' + * cutoff planes. If either matches, the point is included. (3) intersects() is computed using + * both circles, with similar cutoffs. (4) bounds() uses both circles too. */ private static class CutoffDualCircleSegmentEndpoint extends BaseSegmentEndpoint { - + /** First circle */ protected final SidedPlane circlePlane1; /** Second circle */ @@ -823,34 +959,51 @@ class GeoStandardPath extends GeoBasePath { protected final GeoPoint[] notablePoints2; /** Both cutoff planes are included here */ protected final Membership[] cutoffPlanes; - - public CutoffDualCircleSegmentEndpoint(final GeoPoint point, - final SidedPlane prevCutoffPlane, final SidedPlane nextCutoffPlane, - final GeoPoint prevURHC, final GeoPoint prevLRHC, - final GeoPoint currentULHC, final GeoPoint currentLLHC) { + + public CutoffDualCircleSegmentEndpoint( + final GeoPoint point, + final SidedPlane prevCutoffPlane, + final SidedPlane nextCutoffPlane, + final GeoPoint prevURHC, + final GeoPoint prevLRHC, + final GeoPoint currentULHC, + final GeoPoint currentLLHC) { // Initialize superclass super(point); - // First plane consists of prev endpoints plus one of the current endpoints (the one past the end of the prev segment) + // First plane consists of prev endpoints plus one of the current endpoints (the one past the + // end of the prev segment) if (!prevCutoffPlane.isWithin(currentULHC)) { - circlePlane1 = SidedPlane.constructNormalizedThreePointSidedPlane(point, prevURHC, prevLRHC, currentULHC); - notablePoints1 = new GeoPoint[]{prevURHC, prevLRHC, currentULHC}; + circlePlane1 = + SidedPlane.constructNormalizedThreePointSidedPlane( + point, prevURHC, prevLRHC, currentULHC); + notablePoints1 = new GeoPoint[] {prevURHC, prevLRHC, currentULHC}; } else if (!prevCutoffPlane.isWithin(currentLLHC)) { - circlePlane1 = SidedPlane.constructNormalizedThreePointSidedPlane(point, prevURHC, prevLRHC, currentLLHC); - notablePoints1 = new GeoPoint[]{prevURHC, prevLRHC, currentLLHC}; + circlePlane1 = + SidedPlane.constructNormalizedThreePointSidedPlane( + point, prevURHC, prevLRHC, currentLLHC); + notablePoints1 = new GeoPoint[] {prevURHC, prevLRHC, currentLLHC}; } else { - throw new IllegalArgumentException("Constructing CutoffDualCircleSegmentEndpoint with colinear segments"); + throw new IllegalArgumentException( + "Constructing CutoffDualCircleSegmentEndpoint with colinear segments"); } - // Second plane consists of current endpoints plus one of the prev endpoints (the one past the end of the current segment) + // Second plane consists of current endpoints plus one of the prev endpoints (the one past the + // end of the current segment) if (!nextCutoffPlane.isWithin(prevURHC)) { - circlePlane2 = SidedPlane.constructNormalizedThreePointSidedPlane(point, currentULHC, currentLLHC, prevURHC); - notablePoints2 = new GeoPoint[]{currentULHC, currentLLHC, prevURHC}; + circlePlane2 = + SidedPlane.constructNormalizedThreePointSidedPlane( + point, currentULHC, currentLLHC, prevURHC); + notablePoints2 = new GeoPoint[] {currentULHC, currentLLHC, prevURHC}; } else if (!nextCutoffPlane.isWithin(prevLRHC)) { - circlePlane2 = SidedPlane.constructNormalizedThreePointSidedPlane(point, currentULHC, currentLLHC, prevLRHC); - notablePoints2 = new GeoPoint[]{currentULHC, currentLLHC, prevLRHC}; + 
circlePlane2 = + SidedPlane.constructNormalizedThreePointSidedPlane( + point, currentULHC, currentLLHC, prevLRHC); + notablePoints2 = new GeoPoint[] {currentULHC, currentLLHC, prevLRHC}; } else { - throw new IllegalArgumentException("Constructing CutoffDualCircleSegmentEndpoint with colinear segments"); - } - this.cutoffPlanes = new Membership[]{new SidedPlane(prevCutoffPlane), new SidedPlane(nextCutoffPlane)}; + throw new IllegalArgumentException( + "Constructing CutoffDualCircleSegmentEndpoint with colinear segments"); + } + this.cutoffPlanes = + new Membership[] {new SidedPlane(prevCutoffPlane), new SidedPlane(nextCutoffPlane)}; } @Override @@ -866,7 +1019,7 @@ class GeoStandardPath extends GeoBasePath { @Override public boolean isWithin(final double x, final double y, final double z) { for (final Membership m : cutoffPlanes) { - if (!m.isWithin(x,y,z)) { + if (!m.isWithin(x, y, z)) { return false; } } @@ -874,9 +1027,10 @@ class GeoStandardPath extends GeoBasePath { } @Override - public double nearestPathDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { + public double nearestPathDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { for (final Membership m : cutoffPlanes) { - if (!m.isWithin(x,y,z)) { + if (!m.isWithin(x, y, z)) { return Double.POSITIVE_INFINITY; } } @@ -884,9 +1038,10 @@ class GeoStandardPath extends GeoBasePath { } @Override - public double pathCenterDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { + public double pathCenterDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { for (final Membership m : cutoffPlanes) { - if (!m.isWithin(x,y,z)) { + if (!m.isWithin(x, y, z)) { return Double.POSITIVE_INFINITY; } } @@ -894,15 +1049,21 @@ class GeoStandardPath extends GeoBasePath { } @Override - public boolean intersects(final PlanetModel planetModel, final Plane p, final GeoPoint[] notablePoints, final Membership[] bounds) { - return circlePlane1.intersects(planetModel, p, notablePoints, this.notablePoints1, bounds, this.cutoffPlanes) || - circlePlane2.intersects(planetModel, p, notablePoints, this.notablePoints2, bounds, this.cutoffPlanes); + public boolean intersects( + final PlanetModel planetModel, + final Plane p, + final GeoPoint[] notablePoints, + final Membership[] bounds) { + return circlePlane1.intersects( + planetModel, p, notablePoints, this.notablePoints1, bounds, this.cutoffPlanes) + || circlePlane2.intersects( + planetModel, p, notablePoints, this.notablePoints2, bounds, this.cutoffPlanes); } @Override public boolean intersects(final GeoShape geoShape) { - return geoShape.intersects(circlePlane1, this.notablePoints1, this.cutoffPlanes) || - geoShape.intersects(circlePlane2, this.notablePoints2, this.cutoffPlanes); + return geoShape.intersects(circlePlane1, this.notablePoints1, this.cutoffPlanes) + || geoShape.intersects(circlePlane2, this.notablePoints2, this.cutoffPlanes); } @Override @@ -911,19 +1072,16 @@ class GeoStandardPath extends GeoBasePath { bounds.addPlane(planetModel, circlePlane1); bounds.addPlane(planetModel, circlePlane2); } - } - - /** - * This is the pre-calculated data for a path segment. - */ + + /** This is the pre-calculated data for a path segment. 
*/ private static class PathSegment { /** Starting point of the segment */ public final GeoPoint start; /** End point of the segment */ public final GeoPoint end; /** Place to keep any complete segment distances we've calculated so far */ - public final Map fullDistanceCache = new HashMap(); + public final Map fullDistanceCache = new HashMap<>(); /** Normalized plane connecting the two points and going through world center */ public final Plane normalizedConnectingPlane; /** Cutoff plane parallel to connecting plane representing one side of the path segment */ @@ -951,359 +1109,574 @@ class GeoStandardPath extends GeoBasePath { /** Notable points for the end cutoff plane */ public final GeoPoint[] endCutoffPlanePoints; - /** Construct a path segment. - *@param planetModel is the planet model. - *@param start is the starting point. - *@param end is the ending point. - *@param normalizedConnectingPlane is the connecting plane. - *@param planeBoundingOffset is the linear offset from the connecting plane to either side. + /** + * Construct a path segment. + * + * @param planetModel is the planet model. + * @param start is the starting point. + * @param end is the ending point. + * @param normalizedConnectingPlane is the connecting plane. + * @param planeBoundingOffset is the linear offset from the connecting plane to either side. */ - public PathSegment(final PlanetModel planetModel, final GeoPoint start, final GeoPoint end, - final Plane normalizedConnectingPlane, final double planeBoundingOffset) { + public PathSegment( + final PlanetModel planetModel, + final GeoPoint start, + final GeoPoint end, + final Plane normalizedConnectingPlane, + final double planeBoundingOffset) { this.start = start; this.end = end; this.normalizedConnectingPlane = normalizedConnectingPlane; - + // Either start or end should be on the correct side upperConnectingPlane = new SidedPlane(start, normalizedConnectingPlane, -planeBoundingOffset); lowerConnectingPlane = new SidedPlane(start, normalizedConnectingPlane, planeBoundingOffset); // Cutoff planes use opposite endpoints as correct side examples startCutoffPlane = new SidedPlane(end, normalizedConnectingPlane, start); endCutoffPlane = new SidedPlane(start, normalizedConnectingPlane, end); - final Membership[] upperSide = new Membership[]{upperConnectingPlane}; - final Membership[] lowerSide = new Membership[]{lowerConnectingPlane}; - final Membership[] startSide = new Membership[]{startCutoffPlane}; - final Membership[] endSide = new Membership[]{endCutoffPlane}; + final Membership[] upperSide = new Membership[] {upperConnectingPlane}; + final Membership[] lowerSide = new Membership[] {lowerConnectingPlane}; + final Membership[] startSide = new Membership[] {startCutoffPlane}; + final Membership[] endSide = new Membership[] {endCutoffPlane}; GeoPoint[] points; - points = upperConnectingPlane.findIntersections(planetModel, startCutoffPlane, lowerSide, endSide); + points = + upperConnectingPlane.findIntersections(planetModel, startCutoffPlane, lowerSide, endSide); if (points.length == 0) { - throw new IllegalArgumentException("Some segment boundary points are off the ellipsoid; path too wide"); + throw new IllegalArgumentException( + "Some segment boundary points are off the ellipsoid; path too wide"); } if (points.length > 1) { throw new IllegalArgumentException("Ambiguous boundary points; path too short"); } this.ULHC = points[0]; - points = upperConnectingPlane.findIntersections(planetModel, endCutoffPlane, lowerSide, startSide); + points = + 
upperConnectingPlane.findIntersections(planetModel, endCutoffPlane, lowerSide, startSide); if (points.length == 0) { - throw new IllegalArgumentException("Some segment boundary points are off the ellipsoid; path too wide"); + throw new IllegalArgumentException( + "Some segment boundary points are off the ellipsoid; path too wide"); } if (points.length > 1) { throw new IllegalArgumentException("Ambiguous boundary points; path too short"); } this.URHC = points[0]; - points = lowerConnectingPlane.findIntersections(planetModel, startCutoffPlane, upperSide, endSide); + points = + lowerConnectingPlane.findIntersections(planetModel, startCutoffPlane, upperSide, endSide); if (points.length == 0) { - throw new IllegalArgumentException("Some segment boundary points are off the ellipsoid; path too wide"); + throw new IllegalArgumentException( + "Some segment boundary points are off the ellipsoid; path too wide"); } if (points.length > 1) { throw new IllegalArgumentException("Ambiguous boundary points; path too short"); } this.LLHC = points[0]; - points = lowerConnectingPlane.findIntersections(planetModel, endCutoffPlane, upperSide, startSide); + points = + lowerConnectingPlane.findIntersections(planetModel, endCutoffPlane, upperSide, startSide); if (points.length == 0) { - throw new IllegalArgumentException("Some segment boundary points are off the ellipsoid; path too wide"); + throw new IllegalArgumentException( + "Some segment boundary points are off the ellipsoid; path too wide"); } if (points.length > 1) { throw new IllegalArgumentException("Ambiguous boundary points; path too short"); } this.LRHC = points[0]; - upperConnectingPlanePoints = new GeoPoint[]{ULHC, URHC}; - lowerConnectingPlanePoints = new GeoPoint[]{LLHC, LRHC}; - startCutoffPlanePoints = new GeoPoint[]{ULHC, LLHC}; - endCutoffPlanePoints = new GeoPoint[]{URHC, LRHC}; + upperConnectingPlanePoints = new GeoPoint[] {ULHC, URHC}; + lowerConnectingPlanePoints = new GeoPoint[] {LLHC, LRHC}; + startCutoffPlanePoints = new GeoPoint[] {ULHC, LLHC}; + endCutoffPlanePoints = new GeoPoint[] {URHC, LRHC}; } - /** Compute the full distance along this path segment. - *@param distanceStyle is the distance style. - *@return the distance metric, in aggregation form. + /** + * Compute the full distance along this path segment. + * + * @param distanceStyle is the distance style. + * @return the distance metric, in aggregation form. */ public double fullPathDistance(final DistanceStyle distanceStyle) { synchronized (fullDistanceCache) { Double dist = fullDistanceCache.get(distanceStyle); if (dist == null) { - dist = distanceStyle.toAggregationForm(distanceStyle.computeDistance(start, end.x, end.y, end.z)); + dist = + distanceStyle.toAggregationForm( + distanceStyle.computeDistance(start, end.x, end.y, end.z)); fullDistanceCache.put(distanceStyle, dist); } return dist.doubleValue(); } } - - /** Check if point is within this segment. - *@param point is the point. - *@return true of within. + + /** + * Check if point is within this segment. + * + * @param point is the point. + * @return true if within. */ public boolean isWithin(final Vector point) { - return startCutoffPlane.isWithin(point) && - endCutoffPlane.isWithin(point) && - upperConnectingPlane.isWithin(point) && - lowerConnectingPlane.isWithin(point); + return startCutoffPlane.isWithin(point) + && endCutoffPlane.isWithin(point) + && upperConnectingPlane.isWithin(point) + && lowerConnectingPlane.isWithin(point); } - /** Check if point is within this segment. - *@param x is the point x.
- *@param y is the point y. - *@param z is the point z. - *@return true of within. + /** + * Check if point is within this segment. + * + * @param x is the point x. + * @param y is the point y. + * @param z is the point z. + * @return true if within. */ public boolean isWithin(final double x, final double y, final double z) { - return startCutoffPlane.isWithin(x, y, z) && - endCutoffPlane.isWithin(x, y, z) && - upperConnectingPlane.isWithin(x, y, z) && - lowerConnectingPlane.isWithin(x, y, z); + return startCutoffPlane.isWithin(x, y, z) + && endCutoffPlane.isWithin(x, y, z) + && upperConnectingPlane.isWithin(x, y, z) + && lowerConnectingPlane.isWithin(x, y, z); } - /** Compute path center distance. - *@param planetModel is the planet model. - *@param distanceStyle is the distance style. - *@param x is the point x. - *@param y is the point y. - *@param z is the point z. - *@return the distance metric, or Double.POSITIVE_INFINITY if outside this segment + /** + * Compute path center distance. + * + * @param planetModel is the planet model. + * @param distanceStyle is the distance style. + * @param x is the point x. + * @param y is the point y. + * @param z is the point z. + * @return the distance metric, or Double.POSITIVE_INFINITY if outside this segment */ - public double pathCenterDistance(final PlanetModel planetModel, final DistanceStyle distanceStyle, final double x, final double y, final double z) { + public double pathCenterDistance( + final PlanetModel planetModel, + final DistanceStyle distanceStyle, + final double x, + final double y, + final double z) { // First, if this point is outside the endplanes of the segment, return POSITIVE_INFINITY. if (!startCutoffPlane.isWithin(x, y, z) || !endCutoffPlane.isWithin(x, y, z)) { return Double.POSITIVE_INFINITY; } - // (1) Compute normalizedPerpPlane. If degenerate, then there is no such plane, which means that the point given - // is insufficient to distinguish between a family of such planes. This can happen only if the point is one of the - // "poles", imagining the normalized plane to be the "equator". In that case, the distance returned should be zero. - // Want no allocations or expensive operations! 
so we do this the hard way - final double perpX = normalizedConnectingPlane.y * z - normalizedConnectingPlane.z * y; - final double perpY = normalizedConnectingPlane.z * x - normalizedConnectingPlane.x * z; - final double perpZ = normalizedConnectingPlane.x * y - normalizedConnectingPlane.y * x; - final double magnitude = Math.sqrt(perpX * perpX + perpY * perpY + perpZ * perpZ); - if (Math.abs(magnitude) < Vector.MINIMUM_RESOLUTION) - return distanceStyle.computeDistance(start, x, y, z); - final double normFactor = 1.0/magnitude; - final Plane normalizedPerpPlane = new Plane(perpX * normFactor, perpY * normFactor, perpZ * normFactor, 0.0); - - final GeoPoint[] intersectionPoints = normalizedConnectingPlane.findIntersections(planetModel, normalizedPerpPlane); - GeoPoint thePoint; - if (intersectionPoints.length == 0) - throw new RuntimeException("Can't find world intersection for point x="+x+" y="+y+" z="+z); - else if (intersectionPoints.length == 1) - thePoint = intersectionPoints[0]; - else { - if (startCutoffPlane.isWithin(intersectionPoints[0]) && endCutoffPlane.isWithin(intersectionPoints[0])) - thePoint = intersectionPoints[0]; - else if (startCutoffPlane.isWithin(intersectionPoints[1]) && endCutoffPlane.isWithin(intersectionPoints[1])) - thePoint = intersectionPoints[1]; - else - throw new RuntimeException("Can't find world intersection for point x="+x+" y="+y+" z="+z); - } - return distanceStyle.computeDistance(thePoint, x, y, z); - } - - /** Compute nearest path distance. - *@param planetModel is the planet model. - *@param distanceStyle is the distance style. - *@param x is the point x. - *@param y is the point y. - *@param z is the point z. - *@return the distance metric, in aggregation form, or Double.POSITIVE_INFINITY if outside this segment - */ - public double nearestPathDistance(final PlanetModel planetModel, final DistanceStyle distanceStyle, final double x, final double y, final double z) { - // First, if this point is outside the endplanes of the segment, return POSITIVE_INFINITY. - if (!startCutoffPlane.isWithin(x, y, z) || !endCutoffPlane.isWithin(x, y, z)) { - return Double.POSITIVE_INFINITY; - } - // (1) Compute normalizedPerpPlane. If degenerate, then there is no such plane, which means that the point given - // is insufficient to distinguish between a family of such planes. This can happen only if the point is one of the - // "poles", imagining the normalized plane to be the "equator". In that case, the distance returned should be zero. - // Want no allocations or expensive operations! 
so we do this the hard way - final double perpX = normalizedConnectingPlane.y * z - normalizedConnectingPlane.z * y; - final double perpY = normalizedConnectingPlane.z * x - normalizedConnectingPlane.x * z; - final double perpZ = normalizedConnectingPlane.x * y - normalizedConnectingPlane.y * x; - final double magnitude = Math.sqrt(perpX * perpX + perpY * perpY + perpZ * perpZ); - if (Math.abs(magnitude) < Vector.MINIMUM_RESOLUTION) - return distanceStyle.toAggregationForm(0.0); - final double normFactor = 1.0/magnitude; - final Plane normalizedPerpPlane = new Plane(perpX * normFactor, perpY * normFactor, perpZ * normFactor, 0.0); - - final GeoPoint[] intersectionPoints = normalizedConnectingPlane.findIntersections(planetModel, normalizedPerpPlane); - GeoPoint thePoint; - if (intersectionPoints.length == 0) - throw new RuntimeException("Can't find world intersection for point x="+x+" y="+y+" z="+z); - else if (intersectionPoints.length == 1) - thePoint = intersectionPoints[0]; - else { - if (startCutoffPlane.isWithin(intersectionPoints[0]) && endCutoffPlane.isWithin(intersectionPoints[0])) - thePoint = intersectionPoints[0]; - else if (startCutoffPlane.isWithin(intersectionPoints[1]) && endCutoffPlane.isWithin(intersectionPoints[1])) - thePoint = intersectionPoints[1]; - else - throw new RuntimeException("Can't find world intersection for point x="+x+" y="+y+" z="+z); - } - return distanceStyle.toAggregationForm(distanceStyle.computeDistance(start, thePoint.x, thePoint.y, thePoint.z)); - } - - /** Compute delta path distance. - *@param planetModel is the planet model. - *@param distanceStyle is the distance style. - *@param x is the point x. - *@param y is the point y. - *@param z is the point z. - *@return the distance metric, in aggregation form, or Double.POSITIVE_INFINITY if outside the segment. - */ - public double pathDeltaDistance(final PlanetModel planetModel, final DistanceStyle distanceStyle, final double x, final double y, final double z) { - if (!isWithin(x,y,z)) - return Double.POSITIVE_INFINITY; - // (1) Compute normalizedPerpPlane. If degenerate, then return point distance from start to point. + // (1) Compute normalizedPerpPlane. If degenerate, then there is no such plane, which means + // that the point given is insufficient to distinguish between a family of such planes. + // This can happen only if the point is one of the "poles", imagining the normalized plane + // to be the "equator". In that case, the distance returned should be zero. // Want no allocations or expensive operations! so we do this the hard way final double perpX = normalizedConnectingPlane.y * z - normalizedConnectingPlane.z * y; final double perpY = normalizedConnectingPlane.z * x - normalizedConnectingPlane.x * z; final double perpZ = normalizedConnectingPlane.x * y - normalizedConnectingPlane.y * x; final double magnitude = Math.sqrt(perpX * perpX + perpY * perpY + perpZ * perpZ); if (Math.abs(magnitude) < Vector.MINIMUM_RESOLUTION) { - final double theDistance = distanceStyle.computeDistance(start, x,y,z); - return distanceStyle.aggregateDistances(theDistance, theDistance); + return distanceStyle.computeDistance(start, x, y, z); } - final double normFactor = 1.0/magnitude; - final Plane normalizedPerpPlane = new Plane(perpX * normFactor, perpY * normFactor, perpZ * normFactor, 0.0); - - // Old computation: too expensive, because it calculates the intersection point twice. 
- //return distanceStyle.computeDistance(planetModel, normalizedConnectingPlane, x, y, z, startCutoffPlane, endCutoffPlane) + - // distanceStyle.computeDistance(planetModel, normalizedPerpPlane, start.x, start.y, start.z, upperConnectingPlane, lowerConnectingPlane); + final double normFactor = 1.0 / magnitude; + final Plane normalizedPerpPlane = + new Plane(perpX * normFactor, perpY * normFactor, perpZ * normFactor, 0.0); - final GeoPoint[] intersectionPoints = normalizedConnectingPlane.findIntersections(planetModel, normalizedPerpPlane); + final GeoPoint[] intersectionPoints = + normalizedConnectingPlane.findIntersections(planetModel, normalizedPerpPlane); GeoPoint thePoint; - if (intersectionPoints.length == 0) - throw new RuntimeException("Can't find world intersection for point x="+x+" y="+y+" z="+z); - else if (intersectionPoints.length == 1) + if (intersectionPoints.length == 0) { + throw new RuntimeException( + "Can't find world intersection for point x=" + x + " y=" + y + " z=" + z); + } else if (intersectionPoints.length == 1) { thePoint = intersectionPoints[0]; - else { - if (startCutoffPlane.isWithin(intersectionPoints[0]) && endCutoffPlane.isWithin(intersectionPoints[0])) + } else { + if (startCutoffPlane.isWithin(intersectionPoints[0]) + && endCutoffPlane.isWithin(intersectionPoints[0])) { thePoint = intersectionPoints[0]; - else if (startCutoffPlane.isWithin(intersectionPoints[1]) && endCutoffPlane.isWithin(intersectionPoints[1])) + } else if (startCutoffPlane.isWithin(intersectionPoints[1]) + && endCutoffPlane.isWithin(intersectionPoints[1])) { thePoint = intersectionPoints[1]; - else - throw new RuntimeException("Can't find world intersection for point x="+x+" y="+y+" z="+z); + } else { + throw new RuntimeException( + "Can't find world intersection for point x=" + x + " y=" + y + " z=" + z); + } } - final double theDistance = distanceStyle.toAggregationForm(distanceStyle.computeDistance(thePoint, x, y, z)); - return distanceStyle.aggregateDistances(theDistance, theDistance); + return distanceStyle.computeDistance(thePoint, x, y, z); } - - /** Compute interior path distance. - *@param planetModel is the planet model. - *@param distanceStyle is the distance style. - *@param x is the point x. - *@param y is the point y. - *@param z is the point z. - *@return the distance metric, in aggregation form. - */ - public double pathDistance(final PlanetModel planetModel, final DistanceStyle distanceStyle, final double x, final double y, final double z) { - if (!isWithin(x,y,z)) - return Double.POSITIVE_INFINITY; - // (1) Compute normalizedPerpPlane. If degenerate, then return point distance from start to point. + /** + * Compute nearest path distance. + * + * @param planetModel is the planet model. + * @param distanceStyle is the distance style. + * @param x is the point x. + * @param y is the point y. + * @param z is the point z. + * @return the distance metric, in aggregation form, or Double.POSITIVE_INFINITY if outside this + * segment + */ + public double nearestPathDistance( + final PlanetModel planetModel, + final DistanceStyle distanceStyle, + final double x, + final double y, + final double z) { + // First, if this point is outside the endplanes of the segment, return POSITIVE_INFINITY. + if (!startCutoffPlane.isWithin(x, y, z) || !endCutoffPlane.isWithin(x, y, z)) { + return Double.POSITIVE_INFINITY; + } + // (1) Compute normalizedPerpPlane. 
If degenerate, then there is no such plane, which means + // that the point given is insufficient to distinguish between a family of such planes. + // This can happen only if the point is one of the "poles", imagining the normalized plane + // to be the "equator". In that case, the distance returned should be zero. // Want no allocations or expensive operations! so we do this the hard way final double perpX = normalizedConnectingPlane.y * z - normalizedConnectingPlane.z * y; final double perpY = normalizedConnectingPlane.z * x - normalizedConnectingPlane.x * z; final double perpZ = normalizedConnectingPlane.x * y - normalizedConnectingPlane.y * x; final double magnitude = Math.sqrt(perpX * perpX + perpY * perpY + perpZ * perpZ); - if (Math.abs(magnitude) < Vector.MINIMUM_RESOLUTION) - return distanceStyle.toAggregationForm(distanceStyle.computeDistance(start, x,y,z)); - final double normFactor = 1.0/magnitude; - final Plane normalizedPerpPlane = new Plane(perpX * normFactor, perpY * normFactor, perpZ * normFactor, 0.0); - - // Old computation: too expensive, because it calculates the intersection point twice. - //return distanceStyle.computeDistance(planetModel, normalizedConnectingPlane, x, y, z, startCutoffPlane, endCutoffPlane) + - // distanceStyle.computeDistance(planetModel, normalizedPerpPlane, start.x, start.y, start.z, upperConnectingPlane, lowerConnectingPlane); - - final GeoPoint[] intersectionPoints = normalizedConnectingPlane.findIntersections(planetModel, normalizedPerpPlane); - GeoPoint thePoint; - if (intersectionPoints.length == 0) - throw new RuntimeException("Can't find world intersection for point x="+x+" y="+y+" z="+z); - else if (intersectionPoints.length == 1) - thePoint = intersectionPoints[0]; - else { - if (startCutoffPlane.isWithin(intersectionPoints[0]) && endCutoffPlane.isWithin(intersectionPoints[0])) - thePoint = intersectionPoints[0]; - else if (startCutoffPlane.isWithin(intersectionPoints[1]) && endCutoffPlane.isWithin(intersectionPoints[1])) - thePoint = intersectionPoints[1]; - else - throw new RuntimeException("Can't find world intersection for point x="+x+" y="+y+" z="+z); + if (Math.abs(magnitude) < Vector.MINIMUM_RESOLUTION) { + return distanceStyle.toAggregationForm(0.0); } - return distanceStyle.aggregateDistances(distanceStyle.toAggregationForm(distanceStyle.computeDistance(thePoint, x, y, z)), - distanceStyle.toAggregationForm(distanceStyle.computeDistance(start, thePoint.x, thePoint.y, thePoint.z))); + final double normFactor = 1.0 / magnitude; + final Plane normalizedPerpPlane = + new Plane(perpX * normFactor, perpY * normFactor, perpZ * normFactor, 0.0); + + final GeoPoint[] intersectionPoints = + normalizedConnectingPlane.findIntersections(planetModel, normalizedPerpPlane); + GeoPoint thePoint; + if (intersectionPoints.length == 0) { + throw new RuntimeException( + "Can't find world intersection for point x=" + x + " y=" + y + " z=" + z); + } else if (intersectionPoints.length == 1) { + thePoint = intersectionPoints[0]; + } else { + if (startCutoffPlane.isWithin(intersectionPoints[0]) + && endCutoffPlane.isWithin(intersectionPoints[0])) { + thePoint = intersectionPoints[0]; + } else if (startCutoffPlane.isWithin(intersectionPoints[1]) + && endCutoffPlane.isWithin(intersectionPoints[1])) { + thePoint = intersectionPoints[1]; + } else { + throw new RuntimeException( + "Can't find world intersection for point x=" + x + " y=" + y + " z=" + z); + } + } + return distanceStyle.toAggregationForm( + distanceStyle.computeDistance(start, thePoint.x, 
thePoint.y, thePoint.z)); } - /** Compute external distance. - *@param planetModel is the planet model. - *@param distanceStyle is the distance style. - *@param x is the point x. - *@param y is the point y. - *@param z is the point z. - *@return the distance metric. + /** + * Compute delta path distance. + * + * @param planetModel is the planet model. + * @param distanceStyle is the distance style. + * @param x is the point x. + * @param y is the point y. + * @param z is the point z. + * @return the distance metric, in aggregation form, or Double.POSITIVE_INFINITY if outside the + * segment. */ - public double outsideDistance(final PlanetModel planetModel, final DistanceStyle distanceStyle, final double x, final double y, final double z) { - final double upperDistance = distanceStyle.computeDistance(planetModel, upperConnectingPlane, x,y,z, lowerConnectingPlane, startCutoffPlane, endCutoffPlane); - final double lowerDistance = distanceStyle.computeDistance(planetModel, lowerConnectingPlane, x,y,z, upperConnectingPlane, startCutoffPlane, endCutoffPlane); - final double startDistance = distanceStyle.computeDistance(planetModel, startCutoffPlane, x,y,z, endCutoffPlane, lowerConnectingPlane, upperConnectingPlane); - final double endDistance = distanceStyle.computeDistance(planetModel, endCutoffPlane, x,y,z, startCutoffPlane, lowerConnectingPlane, upperConnectingPlane); - final double ULHCDistance = distanceStyle.computeDistance(ULHC, x,y,z); - final double URHCDistance = distanceStyle.computeDistance(URHC, x,y,z); - final double LLHCDistance = distanceStyle.computeDistance(LLHC, x,y,z); - final double LRHCDistance = distanceStyle.computeDistance(LRHC, x,y,z); + public double pathDeltaDistance( + final PlanetModel planetModel, + final DistanceStyle distanceStyle, + final double x, + final double y, + final double z) { + if (!isWithin(x, y, z)) { + return Double.POSITIVE_INFINITY; + } + // (1) Compute normalizedPerpPlane. If degenerate, then return point distance from start to + // point. + // Want no allocations or expensive operations! so we do this the hard way + final double perpX = normalizedConnectingPlane.y * z - normalizedConnectingPlane.z * y; + final double perpY = normalizedConnectingPlane.z * x - normalizedConnectingPlane.x * z; + final double perpZ = normalizedConnectingPlane.x * y - normalizedConnectingPlane.y * x; + final double magnitude = Math.sqrt(perpX * perpX + perpY * perpY + perpZ * perpZ); + if (Math.abs(magnitude) < Vector.MINIMUM_RESOLUTION) { + final double theDistance = distanceStyle.computeDistance(start, x, y, z); + return distanceStyle.aggregateDistances(theDistance, theDistance); + } + final double normFactor = 1.0 / magnitude; + final Plane normalizedPerpPlane = + new Plane(perpX * normFactor, perpY * normFactor, perpZ * normFactor, 0.0); + + // Old computation: too expensive, because it calculates the intersection point twice. 
+ // return distanceStyle.computeDistance(planetModel, normalizedConnectingPlane, x, y, z, + // startCutoffPlane, endCutoffPlane) + + // distanceStyle.computeDistance(planetModel, normalizedPerpPlane, start.x, start.y, start.z, + // upperConnectingPlane, lowerConnectingPlane); + + final GeoPoint[] intersectionPoints = + normalizedConnectingPlane.findIntersections(planetModel, normalizedPerpPlane); + GeoPoint thePoint; + if (intersectionPoints.length == 0) { + throw new RuntimeException( + "Can't find world intersection for point x=" + x + " y=" + y + " z=" + z); + } else if (intersectionPoints.length == 1) { + thePoint = intersectionPoints[0]; + } else { + if (startCutoffPlane.isWithin(intersectionPoints[0]) + && endCutoffPlane.isWithin(intersectionPoints[0])) { + thePoint = intersectionPoints[0]; + } else if (startCutoffPlane.isWithin(intersectionPoints[1]) + && endCutoffPlane.isWithin(intersectionPoints[1])) { + thePoint = intersectionPoints[1]; + } else { + throw new RuntimeException( + "Can't find world intersection for point x=" + x + " y=" + y + " z=" + z); + } + } + final double theDistance = + distanceStyle.toAggregationForm(distanceStyle.computeDistance(thePoint, x, y, z)); + return distanceStyle.aggregateDistances(theDistance, theDistance); + } + + /** + * Compute interior path distance. + * + * @param planetModel is the planet model. + * @param distanceStyle is the distance style. + * @param x is the point x. + * @param y is the point y. + * @param z is the point z. + * @return the distance metric, in aggregation form. + */ + public double pathDistance( + final PlanetModel planetModel, + final DistanceStyle distanceStyle, + final double x, + final double y, + final double z) { + if (!isWithin(x, y, z)) { + return Double.POSITIVE_INFINITY; + } + + // (1) Compute normalizedPerpPlane. If degenerate, then return point distance from start to + // point. + // Want no allocations or expensive operations! so we do this the hard way + final double perpX = normalizedConnectingPlane.y * z - normalizedConnectingPlane.z * y; + final double perpY = normalizedConnectingPlane.z * x - normalizedConnectingPlane.x * z; + final double perpZ = normalizedConnectingPlane.x * y - normalizedConnectingPlane.y * x; + final double magnitude = Math.sqrt(perpX * perpX + perpY * perpY + perpZ * perpZ); + if (Math.abs(magnitude) < Vector.MINIMUM_RESOLUTION) { + return distanceStyle.toAggregationForm(distanceStyle.computeDistance(start, x, y, z)); + } + final double normFactor = 1.0 / magnitude; + final Plane normalizedPerpPlane = + new Plane(perpX * normFactor, perpY * normFactor, perpZ * normFactor, 0.0); + + // Old computation: too expensive, because it calculates the intersection point twice. 
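For reference, pathCenterDistance, nearestPathDistance, pathDeltaDistance, and pathDistance all share the step that the "(1) Compute normalizedPerpPlane" comments describe: a cross product of the connecting plane's normal with the query point, normalized, with a degenerate case when the point sits at a "pole" of that plane. A minimal standalone sketch of just that step, with plain doubles; epsilon stands in for Vector.MINIMUM_RESOLUTION, and the method name is illustrative, not part of this patch:

    /** Sketch of the shared perpendicular-plane step; returns null in the degenerate case. */
    static double[] perpendicularPlaneNormal(
        final double nx, final double ny, final double nz, // connecting plane normal
        final double x, final double y, final double z) { // query point
      final double epsilon = 1e-12; // stand-in for Vector.MINIMUM_RESOLUTION
      // Cross product of the plane's normal with the point vector.
      final double perpX = ny * z - nz * y;
      final double perpY = nz * x - nx * z;
      final double perpZ = nx * y - ny * x;
      final double magnitude = Math.sqrt(perpX * perpX + perpY * perpY + perpZ * perpZ);
      if (magnitude < epsilon) {
        // The point is (anti)parallel to the normal, i.e. at a "pole" of the plane, so no
        // single perpendicular plane is distinguished; the methods above fall back to a
        // direct start-to-point distance (or zero) in this case.
        return null;
      }
      final double normFactor = 1.0 / magnitude;
      return new double[] {perpX * normFactor, perpY * normFactor, perpZ * normFactor};
    }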
+ // return distanceStyle.computeDistance(planetModel, normalizedConnectingPlane, x, y, z, + // startCutoffPlane, endCutoffPlane) + + // distanceStyle.computeDistance(planetModel, normalizedPerpPlane, start.x, start.y, start.z, + // upperConnectingPlane, lowerConnectingPlane); + + final GeoPoint[] intersectionPoints = + normalizedConnectingPlane.findIntersections(planetModel, normalizedPerpPlane); + GeoPoint thePoint; + if (intersectionPoints.length == 0) { + throw new RuntimeException( + "Can't find world intersection for point x=" + x + " y=" + y + " z=" + z); + } else if (intersectionPoints.length == 1) { + thePoint = intersectionPoints[0]; + } else { + if (startCutoffPlane.isWithin(intersectionPoints[0]) + && endCutoffPlane.isWithin(intersectionPoints[0])) { + thePoint = intersectionPoints[0]; + } else if (startCutoffPlane.isWithin(intersectionPoints[1]) + && endCutoffPlane.isWithin(intersectionPoints[1])) { + thePoint = intersectionPoints[1]; + } else { + throw new RuntimeException( + "Can't find world intersection for point x=" + x + " y=" + y + " z=" + z); + } + } + return distanceStyle.aggregateDistances( + distanceStyle.toAggregationForm(distanceStyle.computeDistance(thePoint, x, y, z)), + distanceStyle.toAggregationForm( + distanceStyle.computeDistance(start, thePoint.x, thePoint.y, thePoint.z))); + } + + /** + * Compute external distance. + * + * @param planetModel is the planet model. + * @param distanceStyle is the distance style. + * @param x is the point x. + * @param y is the point y. + * @param z is the point z. + * @return the distance metric. + */ + public double outsideDistance( + final PlanetModel planetModel, + final DistanceStyle distanceStyle, + final double x, + final double y, + final double z) { + final double upperDistance = + distanceStyle.computeDistance( + planetModel, + upperConnectingPlane, + x, + y, + z, + lowerConnectingPlane, + startCutoffPlane, + endCutoffPlane); + final double lowerDistance = + distanceStyle.computeDistance( + planetModel, + lowerConnectingPlane, + x, + y, + z, + upperConnectingPlane, + startCutoffPlane, + endCutoffPlane); + final double startDistance = + distanceStyle.computeDistance( + planetModel, + startCutoffPlane, + x, + y, + z, + endCutoffPlane, + lowerConnectingPlane, + upperConnectingPlane); + final double endDistance = + distanceStyle.computeDistance( + planetModel, + endCutoffPlane, + x, + y, + z, + startCutoffPlane, + lowerConnectingPlane, + upperConnectingPlane); + final double ULHCDistance = distanceStyle.computeDistance(ULHC, x, y, z); + final double URHCDistance = distanceStyle.computeDistance(URHC, x, y, z); + final double LLHCDistance = distanceStyle.computeDistance(LLHC, x, y, z); + final double LRHCDistance = distanceStyle.computeDistance(LRHC, x, y, z); return Math.min( - Math.min( - Math.min(upperDistance,lowerDistance), - Math.min(startDistance,endDistance)), - Math.min( - Math.min(ULHCDistance, URHCDistance), - Math.min(LLHCDistance, LRHCDistance))); + Math.min(Math.min(upperDistance, lowerDistance), Math.min(startDistance, endDistance)), + Math.min(Math.min(ULHCDistance, URHCDistance), Math.min(LLHCDistance, LRHCDistance))); } - /** Determine if this endpoint intersects a specified plane. - *@param planetModel is the planet model. - *@param p is the plane. - *@param notablePoints are the points associated with the plane. - *@param bounds are any bounds which the intersection must lie within. - *@return true if there is a matching intersection. + /** + * Determine if this endpoint intersects a specified plane. 
+ * + * @param planetModel is the planet model. + * @param p is the plane. + * @param notablePoints are the points associated with the plane. + * @param bounds are any bounds which the intersection must lie within. + * @return true if there is a matching intersection. */ - public boolean intersects(final PlanetModel planetModel, final Plane p, final GeoPoint[] notablePoints, final Membership[] bounds) { - return upperConnectingPlane.intersects(planetModel, p, notablePoints, upperConnectingPlanePoints, bounds, lowerConnectingPlane, startCutoffPlane, endCutoffPlane) || - lowerConnectingPlane.intersects(planetModel, p, notablePoints, lowerConnectingPlanePoints, bounds, upperConnectingPlane, startCutoffPlane, endCutoffPlane); - /* || - // These two are necessary because our segment endpoints are not necessarily good fits to their adjoining segments. The checks should really be part of the segment endpoint, however - startCutoffPlane.intersects(planetModel, p, notablePoints, startCutoffPlanePoints, bounds, endCutoffPlane, upperConnectingPlane, lowerConnectingPlane) || - endCutoffPlane.intersects(planetModel, p, notablePoints, endCutoffPlanePoints, bounds, startCutoffPlane, upperConnectingPlane, lowerConnectingPlane); - */ + public boolean intersects( + final PlanetModel planetModel, + final Plane p, + final GeoPoint[] notablePoints, + final Membership[] bounds) { + return upperConnectingPlane.intersects( + planetModel, + p, + notablePoints, + upperConnectingPlanePoints, + bounds, + lowerConnectingPlane, + startCutoffPlane, + endCutoffPlane) + || lowerConnectingPlane.intersects( + planetModel, + p, + notablePoints, + lowerConnectingPlanePoints, + bounds, + upperConnectingPlane, + startCutoffPlane, + endCutoffPlane); + /* || + // These two are necessary because our segment endpoints are not necessarily good fits to their adjoining segments. The checks should really be part of the segment endpoint, however + startCutoffPlane.intersects(planetModel, p, notablePoints, startCutoffPlanePoints, bounds, endCutoffPlane, upperConnectingPlane, lowerConnectingPlane) || + endCutoffPlane.intersects(planetModel, p, notablePoints, endCutoffPlanePoints, bounds, startCutoffPlane, upperConnectingPlane, lowerConnectingPlane); + */ } - /** Determine if this endpoint intersects a specified GeoShape. - *@param geoShape is the GeoShape. - *@return true if there GeoShape intersects this endpoint. + /** + * Determine if this endpoint intersects a specified GeoShape. + * + * @param geoShape is the GeoShape. + * @return true if the GeoShape intersects this endpoint. */ public boolean intersects(final GeoShape geoShape) { - return geoShape.intersects(upperConnectingPlane, upperConnectingPlanePoints, lowerConnectingPlane, startCutoffPlane, endCutoffPlane) || - geoShape.intersects(lowerConnectingPlane, lowerConnectingPlanePoints, upperConnectingPlane, startCutoffPlane, endCutoffPlane); - /*|| - // These two are necessary because our segment endpoints are not necessarily good fits to their adjoining segments. 
The checks should really be part of the segment endpoint, however - geoShape.intersects(startCutoffPlane, startCutoffPlanePoints, endCutoffPlane, upperConnectingPlane, lowerConnectingPlane) || - geoShape.intersects(endCutoffPlane, endCutoffPlanePoints, startCutoffPlane, upperConnectingPlane, lowerConnectingPlane); - */ + return geoShape.intersects( + upperConnectingPlane, + upperConnectingPlanePoints, + lowerConnectingPlane, + startCutoffPlane, + endCutoffPlane) + || geoShape.intersects( + lowerConnectingPlane, + lowerConnectingPlanePoints, + upperConnectingPlane, + startCutoffPlane, + endCutoffPlane); + /*|| + // These two are necessary because our segment endpoints are not necessarily good fits to their adjoining segments. The checks should really be part of the segment endpoint, however + geoShape.intersects(startCutoffPlane, startCutoffPlanePoints, endCutoffPlane, upperConnectingPlane, lowerConnectingPlane) || + geoShape.intersects(endCutoffPlane, endCutoffPlanePoints, startCutoffPlane, upperConnectingPlane, lowerConnectingPlane); + */ } - /** Get the bounds for a segment endpoint. - *@param planetModel is the planet model. - *@param bounds are the bounds to be modified. + /** + * Get the bounds for a segment endpoint. + * + * @param planetModel is the planet model. + * @param bounds are the bounds to be modified. */ public void getBounds(final PlanetModel planetModel, Bounds bounds) { // We need to do all bounding planes as well as corner points - bounds.addPoint(start).addPoint(end) - .addPoint(ULHC).addPoint(URHC).addPoint(LRHC).addPoint(LLHC) - .addPlane(planetModel, upperConnectingPlane, lowerConnectingPlane, startCutoffPlane, endCutoffPlane) - .addPlane(planetModel, lowerConnectingPlane, upperConnectingPlane, startCutoffPlane, endCutoffPlane) - .addPlane(planetModel, startCutoffPlane, endCutoffPlane, upperConnectingPlane, lowerConnectingPlane) - .addPlane(planetModel, endCutoffPlane, startCutoffPlane, upperConnectingPlane, lowerConnectingPlane) - .addIntersection(planetModel, upperConnectingPlane, startCutoffPlane, lowerConnectingPlane, endCutoffPlane) - .addIntersection(planetModel, startCutoffPlane, lowerConnectingPlane, endCutoffPlane, upperConnectingPlane) - .addIntersection(planetModel, lowerConnectingPlane, endCutoffPlane, upperConnectingPlane, startCutoffPlane) - .addIntersection(planetModel, endCutoffPlane, upperConnectingPlane, startCutoffPlane, lowerConnectingPlane); + bounds + .addPoint(start) + .addPoint(end) + .addPoint(ULHC) + .addPoint(URHC) + .addPoint(LRHC) + .addPoint(LLHC) + .addPlane( + planetModel, + upperConnectingPlane, + lowerConnectingPlane, + startCutoffPlane, + endCutoffPlane) + .addPlane( + planetModel, + lowerConnectingPlane, + upperConnectingPlane, + startCutoffPlane, + endCutoffPlane) + .addPlane( + planetModel, + startCutoffPlane, + endCutoffPlane, + upperConnectingPlane, + lowerConnectingPlane) + .addPlane( + planetModel, + endCutoffPlane, + startCutoffPlane, + upperConnectingPlane, + lowerConnectingPlane) + .addIntersection( + planetModel, + upperConnectingPlane, + startCutoffPlane, + lowerConnectingPlane, + endCutoffPlane) + .addIntersection( + planetModel, + startCutoffPlane, + lowerConnectingPlane, + endCutoffPlane, + upperConnectingPlane) + .addIntersection( + planetModel, + lowerConnectingPlane, + endCutoffPlane, + upperConnectingPlane, + startCutoffPlane) + .addIntersection( + planetModel, + endCutoffPlane, + upperConnectingPlane, + startCutoffPlane, + lowerConnectingPlane); } - } - } diff --git 
a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoWideDegenerateHorizontalLine.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoWideDegenerateHorizontalLine.java index b22ce0f40a1..c1e8bd300c2 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoWideDegenerateHorizontalLine.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoWideDegenerateHorizontalLine.java @@ -16,9 +16,9 @@ */ package org.apache.lucene.spatial3d.geom; +import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.io.IOException; /** * Degenerate bounding box wider than PI and limited on two sides (left lon, right lon). @@ -58,28 +58,34 @@ class GeoWideDegenerateHorizontalLine extends GeoBaseBBox { protected final GeoPoint[] edgePoints; /** - * Accepts only values in the following ranges: lat: {@code -PI/2 -> PI/2}, lon: {@code -PI -> PI}. - * Horizontal angle must be greater than or equal to PI. - *@param planetModel is the planet model. - *@param latitude is the line latitude. - *@param leftLon is the left cutoff longitude. - *@param rightLon is the right cutoff longitude. + * Accepts only values in the following ranges: lat: {@code -PI/2 -> PI/2}, lon: {@code -PI -> + * PI}. Horizontal angle must be greater than or equal to PI. + * + * @param planetModel is the planet model. + * @param latitude is the line latitude. + * @param leftLon is the left cutoff longitude. + * @param rightLon is the right cutoff longitude. */ - public GeoWideDegenerateHorizontalLine(final PlanetModel planetModel, final double latitude, final double leftLon, double rightLon) { + public GeoWideDegenerateHorizontalLine( + final PlanetModel planetModel, final double latitude, final double leftLon, double rightLon) { super(planetModel); // Argument checking - if (latitude > Math.PI * 0.5 || latitude < -Math.PI * 0.5) + if (latitude > Math.PI * 0.5 || latitude < -Math.PI * 0.5) { throw new IllegalArgumentException("Latitude out of range"); - if (leftLon < -Math.PI || leftLon > Math.PI) + } + if (leftLon < -Math.PI || leftLon > Math.PI) { throw new IllegalArgumentException("Left longitude out of range"); - if (rightLon < -Math.PI || rightLon > Math.PI) + } + if (rightLon < -Math.PI || rightLon > Math.PI) { throw new IllegalArgumentException("Right longitude out of range"); + } double extent = rightLon - leftLon; if (extent < 0.0) { extent += 2.0 * Math.PI; } - if (extent < Math.PI) + if (extent < Math.PI) { throw new IllegalArgumentException("Width of rectangle too small"); + } this.latitude = latitude; this.leftLon = leftLon; @@ -93,8 +99,12 @@ class GeoWideDegenerateHorizontalLine extends GeoBaseBBox { final double cosRightLon = Math.cos(rightLon); // Now build the two points - this.LHC = new GeoPoint(planetModel, sinLatitude, sinLeftLon, cosLatitude, cosLeftLon, latitude, leftLon); - this.RHC = new GeoPoint(planetModel, sinLatitude, sinRightLon, cosLatitude, cosRightLon, latitude, rightLon); + this.LHC = + new GeoPoint( + planetModel, sinLatitude, sinLeftLon, cosLatitude, cosLeftLon, latitude, leftLon); + this.RHC = + new GeoPoint( + planetModel, sinLatitude, sinRightLon, cosLatitude, cosRightLon, latitude, rightLon); this.plane = new Plane(planetModel, sinLatitude); @@ -106,25 +116,32 @@ class GeoWideDegenerateHorizontalLine extends GeoBaseBBox { double sinMiddleLon = Math.sin(middleLon); double cosMiddleLon = Math.cos(middleLon); - this.centerPoint = new GeoPoint(planetModel, sinLatitude, sinMiddleLon, cosLatitude, cosMiddleLon); + 
this.centerPoint = + new GeoPoint(planetModel, sinLatitude, sinMiddleLon, cosLatitude, cosMiddleLon); this.leftPlane = new SidedPlane(centerPoint, cosLeftLon, sinLeftLon); this.rightPlane = new SidedPlane(centerPoint, cosRightLon, sinRightLon); - this.planePoints = new GeoPoint[]{LHC, RHC}; + this.planePoints = new GeoPoint[] {LHC, RHC}; this.eitherBound = new EitherBound(); - this.edgePoints = new GeoPoint[]{centerPoint}; + this.edgePoints = new GeoPoint[] {centerPoint}; } /** * Constructor for deserialization. + * * @param planetModel is the planet model. * @param inputStream is the input stream. */ - public GeoWideDegenerateHorizontalLine(final PlanetModel planetModel, final InputStream inputStream) throws IOException { - this(planetModel, SerializableObject.readDouble(inputStream), SerializableObject.readDouble(inputStream), SerializableObject.readDouble(inputStream)); + public GeoWideDegenerateHorizontalLine( + final PlanetModel planetModel, final InputStream inputStream) throws IOException { + this( + planetModel, + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream)); } @Override @@ -140,29 +157,31 @@ class GeoWideDegenerateHorizontalLine extends GeoBaseBBox { final double newBottomLat = latitude - angle; // Figuring out when we escalate to a special case requires some prefiguring double currentLonSpan = rightLon - leftLon; - if (currentLonSpan < 0.0) + if (currentLonSpan < 0.0) { currentLonSpan += Math.PI * 2.0; + } double newLeftLon = leftLon - angle; double newRightLon = rightLon + angle; if (currentLonSpan + 2.0 * angle >= Math.PI * 2.0) { newLeftLon = -Math.PI; newRightLon = Math.PI; } - return GeoBBoxFactory.makeGeoBBox(planetModel, newTopLat, newBottomLat, newLeftLon, newRightLon); + return GeoBBoxFactory.makeGeoBBox( + planetModel, newTopLat, newBottomLat, newLeftLon, newRightLon); } @Override public boolean isWithin(final double x, final double y, final double z) { - return plane.evaluateIsZero(x, y, z) && - (leftPlane.isWithin(x, y, z) || - rightPlane.isWithin(x, y, z)); + return plane.evaluateIsZero(x, y, z) + && (leftPlane.isWithin(x, y, z) || rightPlane.isWithin(x, y, z)); } @Override public double getRadius() { - // Here we compute the distance from the middle point to one of the corners. However, we need to be careful - // to use the longest of three distances: the distance to a corner on the top; the distnace to a corner on the bottom, and - // the distance to the right or left edge from the center. + // Here we compute the distance from the middle point to one of the corners. However, we need + // to be careful to use the longest of three distances: the distance to a corner on the top; + // the distance to a corner on the bottom, and the distance to the right or left edge from the + // center. final double topAngle = centerPoint.arcDistance(RHC); final double bottomAngle = centerPoint.arcDistance(LHC); return Math.max(topAngle, bottomAngle); @@ -179,26 +198,30 @@ class GeoWideDegenerateHorizontalLine extends GeoBaseBBox { } @Override - public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) { - // Right and left bounds are essentially independent hemispheres; crossing into the wrong part of one - // requires crossing into the right part of the other. So intersection can ignore the left/right bounds. + public boolean intersects( + final Plane p, final GeoPoint[] notablePoints, final Membership... 
bounds) { + // Right and left bounds are essentially independent hemispheres; crossing into the wrong part + // of one requires crossing into the right part of the other. So intersection can ignore the + // left/right bounds. return p.intersects(planetModel, plane, notablePoints, planePoints, bounds, eitherBound); } @Override public boolean intersects(final GeoShape geoShape) { - // Right and left bounds are essentially independent hemispheres; crossing into the wrong part of one - // requires crossing into the right part of the other. So intersection can ignore the left/right bounds. + // Right and left bounds are essentially independent hemispheres; crossing into the wrong part + // of one requires crossing into the right part of the other. So intersection can ignore the + // left/right bounds. return geoShape.intersects(plane, planePoints, eitherBound); } @Override public void getBounds(Bounds bounds) { super.getBounds(bounds); - bounds.isWide() - .addHorizontalPlane(planetModel, latitude, plane, eitherBound) - .addPoint(LHC) - .addPoint(RHC); + bounds + .isWide() + .addHorizontalPlane(planetModel, latitude, plane, eitherBound) + .addPoint(LHC) + .addPoint(RHC); } @Override @@ -215,21 +238,21 @@ class GeoWideDegenerateHorizontalLine extends GeoBaseBBox { } @Override - protected double outsideDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { - final double distance = distanceStyle.computeDistance(planetModel, plane, x,y,z, eitherBound); - - final double LHCDistance = distanceStyle.computeDistance(LHC, x,y,z); - final double RHCDistance = distanceStyle.computeDistance(RHC, x,y,z); - - return Math.min( - distance, - Math.min(LHCDistance, RHCDistance)); + protected double outsideDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { + final double distance = distanceStyle.computeDistance(planetModel, plane, x, y, z, eitherBound); + + final double LHCDistance = distanceStyle.computeDistance(LHC, x, y, z); + final double RHCDistance = distanceStyle.computeDistance(RHC, x, y, z); + + return Math.min(distance, Math.min(LHCDistance, RHCDistance)); } @Override public boolean equals(Object o) { - if (!(o instanceof GeoWideDegenerateHorizontalLine)) + if (!(o instanceof GeoWideDegenerateHorizontalLine)) { return false; + } GeoWideDegenerateHorizontalLine other = (GeoWideDegenerateHorizontalLine) o; return super.equals(other) && other.LHC.equals(LHC) && other.RHC.equals(RHC); } @@ -244,16 +267,27 @@ class GeoWideDegenerateHorizontalLine extends GeoBaseBBox { @Override public String toString() { - return "GeoWideDegenerateHorizontalLine: {planetmodel="+planetModel+", latitude=" + latitude + "(" + latitude * 180.0 / Math.PI + "), leftlon=" + leftLon + "(" + leftLon * 180.0 / Math.PI + "), rightLon=" + rightLon + "(" + rightLon * 180.0 / Math.PI + ")}"; + return "GeoWideDegenerateHorizontalLine: {planetmodel=" + + planetModel + + ", latitude=" + + latitude + + "(" + + latitude * 180.0 / Math.PI + + "), leftlon=" + + leftLon + + "(" + + leftLon * 180.0 / Math.PI + + "), rightLon=" + + rightLon + + "(" + + rightLon * 180.0 / Math.PI + + ")}"; } - /** Membership implementation representing a wide cutoff (more than 180 degrees). - */ + /** Membership implementation representing a wide cutoff (more than 180 degrees). */ protected class EitherBound implements Membership { - /** Constructor. - */ - public EitherBound() { - } + /** Constructor. 
*/ + public EitherBound() {} @Override public boolean isWithin(final double x, final double y, final double z) { @@ -261,5 +295,3 @@ class GeoWideDegenerateHorizontalLine extends GeoBaseBBox { } } } - - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoWideLongitudeSlice.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoWideLongitudeSlice.java index adf03d889ee..3e9b0b758d0 100755 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoWideLongitudeSlice.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoWideLongitudeSlice.java @@ -16,13 +16,12 @@ */ package org.apache.lucene.spatial3d.geom; +import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.io.IOException; /** - * Bounding box wider than PI but limited on left and right sides ( - * left lon, right lon). + * Bounding box wider than PI but limited on left and right sides ( left lon, right lon). * * @lucene.internal */ @@ -44,28 +43,33 @@ class GeoWideLongitudeSlice extends GeoBaseBBox { protected final GeoPoint centerPoint; /** A point on the edge of the shape */ - protected final GeoPoint[] edgePoints; + protected final GeoPoint[] edgePoints; /** - * Accepts only values in the following ranges: lon: {@code -PI -> PI}. - * Horizantal angle must be greater than or equal to PI. - *@param planetModel is the planet model. - *@param leftLon is the left longitude. - *@param rightLon is the right longitude. + * Accepts only values in the following ranges: lon: {@code -PI -> PI}. Horizontal angle must be + * greater than or equal to PI. + * + * @param planetModel is the planet model. + * @param leftLon is the left longitude. + * @param rightLon is the right longitude. */ - public GeoWideLongitudeSlice(final PlanetModel planetModel, final double leftLon, double rightLon) { + public GeoWideLongitudeSlice( + final PlanetModel planetModel, final double leftLon, double rightLon) { super(planetModel); // Argument checking - if (leftLon < -Math.PI || leftLon > Math.PI) + if (leftLon < -Math.PI || leftLon > Math.PI) { throw new IllegalArgumentException("Left longitude out of range"); - if (rightLon < -Math.PI || rightLon > Math.PI) + } + if (rightLon < -Math.PI || rightLon > Math.PI) { throw new IllegalArgumentException("Right longitude out of range"); + } double extent = rightLon - leftLon; if (extent < 0.0) { extent += 2.0 * Math.PI; } - if (extent < Math.PI) + if (extent < Math.PI) { throw new IllegalArgumentException("Width of rectangle too small"); + } this.leftLon = leftLon; this.rightLon = rightLon; @@ -81,27 +85,32@@ class GeoWideLongitudeSlice extends GeoBaseBBox { } double middleLon = (leftLon + rightLon) * 0.5; while (middleLon > Math.PI) { - middleLon -= Math.PI * 2.0; + middleLon -= Math.PI * 2.0; } while (middleLon < -Math.PI) { - middleLon += Math.PI * 2.0; + middleLon += Math.PI * 2.0; } this.centerPoint = new GeoPoint(planetModel, 0.0, middleLon); this.leftPlane = new SidedPlane(centerPoint, cosLeftLon, sinLeftLon); this.rightPlane = new SidedPlane(centerPoint, cosRightLon, sinRightLon); - - this.planePoints = new GeoPoint[]{planetModel.NORTH_POLE, planetModel.SOUTH_POLE}; - this.edgePoints = new GeoPoint[]{planetModel.NORTH_POLE}; + + this.planePoints = new GeoPoint[] {planetModel.NORTH_POLE, planetModel.SOUTH_POLE}; + this.edgePoints = new GeoPoint[] {planetModel.NORTH_POLE}; } /** * Constructor for deserialization. + * * @param planetModel is the planet model. * @param inputStream is the input stream. 
*/ - public GeoWideLongitudeSlice(final PlanetModel planetModel, final InputStream inputStream) throws IOException { - this(planetModel, SerializableObject.readDouble(inputStream), SerializableObject.readDouble(inputStream)); + public GeoWideLongitudeSlice(final PlanetModel planetModel, final InputStream inputStream) + throws IOException { + this( + planetModel, + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream)); } @Override @@ -114,29 +123,31 @@ class GeoWideLongitudeSlice extends GeoBaseBBox { public GeoBBox expand(final double angle) { // Figuring out when we escalate to a special case requires some prefiguring double currentLonSpan = rightLon - leftLon; - if (currentLonSpan < 0.0) + if (currentLonSpan < 0.0) { currentLonSpan += Math.PI * 2.0; + } double newLeftLon = leftLon - angle; double newRightLon = rightLon + angle; if (currentLonSpan + 2.0 * angle >= Math.PI * 2.0) { newLeftLon = -Math.PI; newRightLon = Math.PI; } - return GeoBBoxFactory.makeGeoBBox(planetModel, Math.PI * 0.5, -Math.PI * 0.5, newLeftLon, newRightLon); + return GeoBBoxFactory.makeGeoBBox( + planetModel, Math.PI * 0.5, -Math.PI * 0.5, newLeftLon, newRightLon); } @Override public boolean isWithin(final double x, final double y, final double z) { - return leftPlane.isWithin(x, y, z) || - rightPlane.isWithin(x, y, z); + return leftPlane.isWithin(x, y, z) || rightPlane.isWithin(x, y, z); } @Override public double getRadius() { // Compute the extent and divide by two double extent = rightLon - leftLon; - if (extent < 0.0) + if (extent < 0.0) { extent += Math.PI * 2.0; + } return Math.max(Math.PI * 0.5, extent * 0.5); } @@ -151,51 +162,55 @@ class GeoWideLongitudeSlice extends GeoBaseBBox { } @Override - public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) { - // Right and left bounds are essentially independent hemispheres; crossing into the wrong part of one - // requires crossing into the right part of the other. So intersection can ignore the left/right bounds. - return p.intersects(planetModel, leftPlane, notablePoints, planePoints, bounds) || - p.intersects(planetModel, rightPlane, notablePoints, planePoints, bounds); + public boolean intersects( + final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) { + // Right and left bounds are essentially independent hemispheres; crossing into the wrong part + // of one requires crossing into the right part of the other. So intersection can ignore the + // left/right bounds. + return p.intersects(planetModel, leftPlane, notablePoints, planePoints, bounds) + || p.intersects(planetModel, rightPlane, notablePoints, planePoints, bounds); } @Override public boolean intersects(final GeoShape geoShape) { - // Right and left bounds are essentially independent hemispheres; crossing into the wrong part of one - // requires crossing into the right part of the other. So intersection can ignore the left/right bounds. - return geoShape.intersects(leftPlane, planePoints) || - geoShape.intersects(rightPlane, planePoints); + // Right and left bounds are essentially independent hemispheres; crossing into the wrong part + // of one requires crossing into the right part of the other. So intersection can ignore the + // left/right bounds. 
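To make the "independent hemispheres" remark concrete: membership in a slice wider than PI is the OR of the two sided planes, so any point outside the excluded gap falls within at least one of them. A hypothetical usage sketch, assuming construction goes through GeoBBoxFactory (which is expected to select the wide implementation once the longitude extent reaches PI) and the stock PlanetModel.SPHERE; the expected outputs follow from the isWithin logic above:

    import org.apache.lucene.spatial3d.geom.GeoBBox;
    import org.apache.lucene.spatial3d.geom.GeoBBoxFactory;
    import org.apache.lucene.spatial3d.geom.GeoPoint;
    import org.apache.lucene.spatial3d.geom.PlanetModel;

    public class WideSliceSketch {
      public static void main(String[] args) {
        // A 270-degree longitude slice; the extent exceeds PI, so the factory should
        // choose a wide shape such as GeoWideLongitudeSlice over a plain slice.
        final GeoBBox slice =
            GeoBBoxFactory.makeGeoBBox(
                PlanetModel.SPHERE,
                Math.PI * 0.5, // top latitude
                -Math.PI * 0.5, // bottom latitude
                -Math.PI * 0.75, // left longitude
                Math.PI * 0.75); // right longitude
        final GeoPoint inside = new GeoPoint(PlanetModel.SPHERE, 0.0, 0.0); // lat, lon
        final GeoPoint outside = new GeoPoint(PlanetModel.SPHERE, 0.0, Math.PI);
        System.out.println(slice.isWithin(inside)); // expected: true
        System.out.println(slice.isWithin(outside)); // expected: false
      }
    }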
+ return geoShape.intersects(leftPlane, planePoints) + || geoShape.intersects(rightPlane, planePoints); } @Override public void getBounds(Bounds bounds) { super.getBounds(bounds); - bounds.isWide() - .addVerticalPlane(planetModel, leftLon, leftPlane) - .addVerticalPlane(planetModel, rightLon, rightPlane) - .addIntersection(planetModel, leftPlane, rightPlane) - .addPoint(planetModel.NORTH_POLE) - .addPoint(planetModel.SOUTH_POLE); + bounds + .isWide() + .addVerticalPlane(planetModel, leftLon, leftPlane) + .addVerticalPlane(planetModel, rightLon, rightPlane) + .addIntersection(planetModel, leftPlane, rightPlane) + .addPoint(planetModel.NORTH_POLE) + .addPoint(planetModel.SOUTH_POLE); } @Override - protected double outsideDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { - // Because the rectangle exceeds 180 degrees, it is safe to compute the horizontally + protected double outsideDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { + // Because the rectangle exceeds 180 degrees, it is safe to compute the horizontally // unbounded distance to both the left and the right and only take the minimum of the two. - final double leftDistance = distanceStyle.computeDistance(planetModel, leftPlane, x,y,z); - final double rightDistance = distanceStyle.computeDistance(planetModel, rightPlane, x,y,z); - - final double northDistance = distanceStyle.computeDistance(planetModel.NORTH_POLE, x,y,z); - final double southDistance = distanceStyle.computeDistance(planetModel.SOUTH_POLE, x,y,z); - - return Math.min( - Math.min(leftDistance, rightDistance), - Math.min(northDistance, southDistance)); + final double leftDistance = distanceStyle.computeDistance(planetModel, leftPlane, x, y, z); + final double rightDistance = distanceStyle.computeDistance(planetModel, rightPlane, x, y, z); + + final double northDistance = distanceStyle.computeDistance(planetModel.NORTH_POLE, x, y, z); + final double southDistance = distanceStyle.computeDistance(planetModel.SOUTH_POLE, x, y, z); + + return Math.min(Math.min(leftDistance, rightDistance), Math.min(northDistance, southDistance)); } @Override public boolean equals(Object o) { - if (!(o instanceof GeoWideLongitudeSlice)) + if (!(o instanceof GeoWideLongitudeSlice)) { return false; + } GeoWideLongitudeSlice other = (GeoWideLongitudeSlice) o; return super.equals(other) && other.leftLon == leftLon && other.rightLon == rightLon; } @@ -212,7 +227,16 @@ class GeoWideLongitudeSlice extends GeoBaseBBox { @Override public String toString() { - return "GeoWideLongitudeSlice: {planetmodel="+planetModel+", leftlon=" + leftLon + "(" + leftLon * 180.0 / Math.PI + "), rightlon=" + rightLon + "(" + rightLon * 180.0 / Math.PI + ")}"; + return "GeoWideLongitudeSlice: {planetmodel=" + + planetModel + + ", leftlon=" + + leftLon + + "(" + + leftLon * 180.0 / Math.PI + + "), rightlon=" + + rightLon + + "(" + + rightLon * 180.0 / Math.PI + + ")}"; } } - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoWideNorthRectangle.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoWideNorthRectangle.java index 7d0ccad11b6..8fefca336ed 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoWideNorthRectangle.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoWideNorthRectangle.java @@ -16,13 +16,12 @@ */ package org.apache.lucene.spatial3d.geom; +import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import 
java.io.IOException; /** - * Bounding box wider than PI but limited on three sides ( - * bottom lat, left lon, right lon). + * Bounding box wider than PI but limited on three sides ( bottom lat, left lon, right lon). * * @lucene.internal */ @@ -66,28 +65,37 @@ class GeoWideNorthRectangle extends GeoBaseBBox { protected final GeoPoint[] edgePoints; /** - * Accepts only values in the following ranges: lat: {@code -PI/2 -> PI/2}, lon: {@code -PI -> PI}. - * Horizontal angle must be greater than or equal to PI. + * Accepts only values in the following ranges: lat: {@code -PI/2 -> PI/2}, lon: {@code -PI -> + * PI}. Horizontal angle must be greater than or equal to PI. + * * @param planetModel is the planet model. * @param bottomLat is the bottom latitude. * @param leftLon is the left longitude. * @param rightLon is the right longitude. */ - public GeoWideNorthRectangle(final PlanetModel planetModel, final double bottomLat, final double leftLon, double rightLon) { + public GeoWideNorthRectangle( + final PlanetModel planetModel, + final double bottomLat, + final double leftLon, + double rightLon) { super(planetModel); // Argument checking - if (bottomLat > Math.PI * 0.5 || bottomLat < -Math.PI * 0.5) + if (bottomLat > Math.PI * 0.5 || bottomLat < -Math.PI * 0.5) { throw new IllegalArgumentException("Bottom latitude out of range"); - if (leftLon < -Math.PI || leftLon > Math.PI) + } + if (leftLon < -Math.PI || leftLon > Math.PI) { throw new IllegalArgumentException("Left longitude out of range"); - if (rightLon < -Math.PI || rightLon > Math.PI) + } + if (rightLon < -Math.PI || rightLon > Math.PI) { throw new IllegalArgumentException("Right longitude out of range"); + } double extent = rightLon - leftLon; if (extent < 0.0) { extent += 2.0 * Math.PI; } - if (extent < Math.PI) + if (extent < Math.PI) { throw new IllegalArgumentException("Width of rectangle too small"); + } this.bottomLat = bottomLat; this.leftLon = leftLon; @@ -101,8 +109,12 @@ class GeoWideNorthRectangle extends GeoBaseBBox { final double cosRightLon = Math.cos(rightLon); // Now build the four points - this.LRHC = new GeoPoint(planetModel, sinBottomLat, sinRightLon, cosBottomLat, cosRightLon, bottomLat, rightLon); - this.LLHC = new GeoPoint(planetModel, sinBottomLat, sinLeftLon, cosBottomLat, cosLeftLon, bottomLat, leftLon); + this.LRHC = + new GeoPoint( + planetModel, sinBottomLat, sinRightLon, cosBottomLat, cosRightLon, bottomLat, rightLon); + this.LLHC = + new GeoPoint( + planetModel, sinBottomLat, sinLeftLon, cosBottomLat, cosLeftLon, bottomLat, leftLon); final double middleLat = (Math.PI * 0.5 + bottomLat) * 0.5; final double sinMiddleLat = Math.sin(middleLat); @@ -115,27 +127,34 @@ class GeoWideNorthRectangle extends GeoBaseBBox { final double sinMiddleLon = Math.sin(middleLon); final double cosMiddleLon = Math.cos(middleLon); - this.centerPoint = new GeoPoint(planetModel, sinMiddleLat, sinMiddleLon, cosMiddleLat, cosMiddleLon); + this.centerPoint = + new GeoPoint(planetModel, sinMiddleLat, sinMiddleLon, cosMiddleLat, cosMiddleLon); this.bottomPlane = new SidedPlane(centerPoint, planetModel, sinBottomLat); this.leftPlane = new SidedPlane(centerPoint, cosLeftLon, sinLeftLon); this.rightPlane = new SidedPlane(centerPoint, cosRightLon, sinRightLon); - this.bottomPlanePoints = new GeoPoint[]{LLHC, LRHC}; - this.leftPlanePoints = new GeoPoint[]{planetModel.NORTH_POLE, LLHC}; - this.rightPlanePoints = new GeoPoint[]{planetModel.NORTH_POLE, LRHC}; + this.bottomPlanePoints = new GeoPoint[] {LLHC, LRHC}; + this.leftPlanePoints = new 
GeoPoint[] {planetModel.NORTH_POLE, LLHC}; + this.rightPlanePoints = new GeoPoint[] {planetModel.NORTH_POLE, LRHC}; this.eitherBound = new EitherBound(); - this.edgePoints = new GeoPoint[]{planetModel.NORTH_POLE}; + this.edgePoints = new GeoPoint[] {planetModel.NORTH_POLE}; } /** * Constructor for deserialization. + * * @param planetModel is the planet model. * @param inputStream is the input stream. */ - public GeoWideNorthRectangle(final PlanetModel planetModel, final InputStream inputStream) throws IOException { - this(planetModel, SerializableObject.readDouble(inputStream), SerializableObject.readDouble(inputStream), SerializableObject.readDouble(inputStream)); + public GeoWideNorthRectangle(final PlanetModel planetModel, final InputStream inputStream) + throws IOException { + this( + planetModel, + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream)); } @Override @@ -151,30 +170,31 @@ class GeoWideNorthRectangle extends GeoBaseBBox { final double newBottomLat = bottomLat - angle; // Figuring out when we escalate to a special case requires some prefiguring double currentLonSpan = rightLon - leftLon; - if (currentLonSpan < 0.0) + if (currentLonSpan < 0.0) { currentLonSpan += Math.PI * 2.0; + } double newLeftLon = leftLon - angle; double newRightLon = rightLon + angle; if (currentLonSpan + 2.0 * angle >= Math.PI * 2.0) { newLeftLon = -Math.PI; newRightLon = Math.PI; } - return GeoBBoxFactory.makeGeoBBox(planetModel, newTopLat, newBottomLat, newLeftLon, newRightLon); + return GeoBBoxFactory.makeGeoBBox( + planetModel, newTopLat, newBottomLat, newLeftLon, newRightLon); } @Override public boolean isWithin(final double x, final double y, final double z) { - return - bottomPlane.isWithin(x, y, z) && - (leftPlane.isWithin(x, y, z) || - rightPlane.isWithin(x, y, z)); + return bottomPlane.isWithin(x, y, z) + && (leftPlane.isWithin(x, y, z) || rightPlane.isWithin(x, y, z)); } @Override public double getRadius() { - // Here we compute the distance from the middle point to one of the corners. However, we need to be careful - // to use the longest of three distances: the distance to a corner on the top; the distnace to a corner on the bottom, and - // the distance to the right or left edge from the center. + // Here we compute the distance from the middle point to one of the corners. However, we need + // to be careful to use the longest of three distances: the distance to a corner on the top; + // the distance to a corner on the bottom, and the distance to the right or left edge from the + // center. final double centerAngle = (rightLon - (rightLon + leftLon) * 0.5) * cosMiddleLat; final double bottomAngle = centerPoint.arcDistance(LLHC); return Math.max(centerAngle, bottomAngle); @@ -191,58 +211,67 @@ class GeoWideNorthRectangle extends GeoBaseBBox { } @Override - public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) { - // Right and left bounds are essentially independent hemispheres; crossing into the wrong part of one - // requires crossing into the right part of the other. So intersection can ignore the left/right bounds. 
- return - p.intersects(planetModel, bottomPlane, notablePoints, bottomPlanePoints, bounds, eitherBound) || - p.intersects(planetModel, leftPlane, notablePoints, leftPlanePoints, bounds, bottomPlane) || - p.intersects(planetModel, rightPlane, notablePoints, rightPlanePoints, bounds, bottomPlane); + public boolean intersects( + final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) { + // Right and left bounds are essentially independent hemispheres; crossing into the wrong part + // of one requires crossing into the right part of the other. So intersection can ignore the + // left/right bounds. + return p.intersects( + planetModel, bottomPlane, notablePoints, bottomPlanePoints, bounds, eitherBound) + || p.intersects(planetModel, leftPlane, notablePoints, leftPlanePoints, bounds, bottomPlane) + || p.intersects( + planetModel, rightPlane, notablePoints, rightPlanePoints, bounds, bottomPlane); } @Override public boolean intersects(final GeoShape geoShape) { - // Right and left bounds are essentially independent hemispheres; crossing into the wrong part of one - // requires crossing into the right part of the other. So intersection can ignore the left/right bounds. - return - geoShape.intersects(bottomPlane, bottomPlanePoints, eitherBound) || - geoShape.intersects(leftPlane, leftPlanePoints, bottomPlane) || - geoShape.intersects(rightPlane, rightPlanePoints, bottomPlane); + // Right and left bounds are essentially independent hemispheres; crossing into the wrong part + // of one requires crossing into the right part of the other. So intersection can ignore the + // left/right bounds. + return geoShape.intersects(bottomPlane, bottomPlanePoints, eitherBound) + || geoShape.intersects(leftPlane, leftPlanePoints, bottomPlane) + || geoShape.intersects(rightPlane, rightPlanePoints, bottomPlane); } @Override public void getBounds(Bounds bounds) { super.getBounds(bounds); - bounds.isWide() - .addHorizontalPlane(planetModel, bottomLat, bottomPlane, eitherBound) - .addVerticalPlane(planetModel, leftLon, leftPlane, bottomPlane) - .addVerticalPlane(planetModel, rightLon, rightPlane, bottomPlane) - .addIntersection(planetModel, leftPlane, rightPlane, bottomPlane) - .addPoint(LLHC).addPoint(LRHC).addPoint(planetModel.NORTH_POLE); + bounds + .isWide() + .addHorizontalPlane(planetModel, bottomLat, bottomPlane, eitherBound) + .addVerticalPlane(planetModel, leftLon, leftPlane, bottomPlane) + .addVerticalPlane(planetModel, rightLon, rightPlane, bottomPlane) + .addIntersection(planetModel, leftPlane, rightPlane, bottomPlane) + .addPoint(LLHC) + .addPoint(LRHC) + .addPoint(planetModel.NORTH_POLE); } @Override - protected double outsideDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { - final double bottomDistance = distanceStyle.computeDistance(planetModel, bottomPlane, x,y,z, eitherBound); - // Because the rectangle exceeds 180 degrees, it is safe to compute the horizontally + protected double outsideDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { + final double bottomDistance = + distanceStyle.computeDistance(planetModel, bottomPlane, x, y, z, eitherBound); + // Because the rectangle exceeds 180 degrees, it is safe to compute the horizontally // unbounded distance to both the left and the right and only take the minimum of the two. 
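The outsideDistance body that continues below is a pure min-composition: one candidate distance per bounding plane (each clipped by the remaining bounds) plus one per corner point, with the smallest winning. A compact sketch of that composition, with the candidates abstracted into plain doubles (an illustrative signature, not the Lucene one):

    // Sketch: combine precomputed candidate distances for a wide north rectangle.
    // bottom: distance to the bottom plane, within either longitude bound;
    // left/right: distances to the side planes, within the bottom bound;
    // lrhc/llhc: distances to the two bottom corner points.
    static double combineOutsideDistances(
        double bottom, double left, double right, double lrhc, double llhc) {
      // A width over PI means min(left, right) needs no extra cross-bound check.
      return Math.min(Math.min(bottom, Math.min(left, right)), Math.min(lrhc, llhc));
    }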
- final double leftDistance = distanceStyle.computeDistance(planetModel, leftPlane, x,y,z, bottomPlane); - final double rightDistance = distanceStyle.computeDistance(planetModel, rightPlane, x,y,z, bottomPlane); - - final double LRHCDistance = distanceStyle.computeDistance(LRHC, x,y,z); - final double LLHCDistance = distanceStyle.computeDistance(LLHC, x,y,z); - + final double leftDistance = + distanceStyle.computeDistance(planetModel, leftPlane, x, y, z, bottomPlane); + final double rightDistance = + distanceStyle.computeDistance(planetModel, rightPlane, x, y, z, bottomPlane); + + final double LRHCDistance = distanceStyle.computeDistance(LRHC, x, y, z); + final double LLHCDistance = distanceStyle.computeDistance(LLHC, x, y, z); + return Math.min( - Math.min( - bottomDistance, - Math.min(leftDistance, rightDistance)), - Math.min(LRHCDistance, LLHCDistance)); + Math.min(bottomDistance, Math.min(leftDistance, rightDistance)), + Math.min(LRHCDistance, LLHCDistance)); } @Override public boolean equals(Object o) { - if (!(o instanceof GeoWideNorthRectangle)) + if (!(o instanceof GeoWideNorthRectangle)) { return false; + } GeoWideNorthRectangle other = (GeoWideNorthRectangle) o; return super.equals(other) && other.LLHC.equals(LLHC) && other.LRHC.equals(LRHC); } @@ -257,16 +286,27 @@ class GeoWideNorthRectangle extends GeoBaseBBox { @Override public String toString() { - return "GeoWideNorthRectangle: {planetmodel="+planetModel+", bottomlat=" + bottomLat + "(" + bottomLat * 180.0 / Math.PI + "), leftlon=" + leftLon + "(" + leftLon * 180.0 / Math.PI + "), rightlon=" + rightLon + "(" + rightLon * 180.0 / Math.PI + ")}"; + return "GeoWideNorthRectangle: {planetmodel=" + + planetModel + + ", bottomlat=" + + bottomLat + + "(" + + bottomLat * 180.0 / Math.PI + + "), leftlon=" + + leftLon + + "(" + + leftLon * 180.0 / Math.PI + + "), rightlon=" + + rightLon + + "(" + + rightLon * 180.0 / Math.PI + + ")}"; } - /** Membership implementation representing a wide (more than 180 degree) bound. - */ + /** Membership implementation representing a wide (more than 180 degree) bound. */ protected class EitherBound implements Membership { - /** Constructor. - */ - public EitherBound() { - } + /** Constructor. */ + public EitherBound() {} @Override public boolean isWithin(final Vector v) { @@ -279,5 +319,3 @@ class GeoWideNorthRectangle extends GeoBaseBBox { } } } - - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoWideRectangle.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoWideRectangle.java index b200e0cade8..227a9d0bb14 100755 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoWideRectangle.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoWideRectangle.java @@ -16,13 +16,12 @@ */ package org.apache.lucene.spatial3d.geom; +import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.io.IOException; /** - * Bounding box wider than PI but limited on four sides (top lat, - * bottom lat, left lon, right lon). + * Bounding box wider than PI but limited on four sides (top lat, bottom lat, left lon, right lon). * * @lucene.internal */ @@ -76,33 +75,45 @@ class GeoWideRectangle extends GeoBaseBBox { protected final GeoPoint[] edgePoints; /** - * Accepts only values in the following ranges: lat: {@code -PI/2 -> PI/2}, lon: {@code -PI -> PI}. - * Horizontal angle must be greater than or equal to PI. 
+ * Accepts only values in the following ranges: lat: {@code -PI/2 -> PI/2}, lon: {@code -PI -> + * PI}. Horizontal angle must be greater than or equal to PI. + * * @param planetModel is the planet model. * @param topLat is the top latitude. * @param bottomLat is the bottom latitude. * @param leftLon is the left longitude. * @param rightLon is the right longitude. */ - public GeoWideRectangle(final PlanetModel planetModel, final double topLat, final double bottomLat, final double leftLon, double rightLon) { + public GeoWideRectangle( + final PlanetModel planetModel, + final double topLat, + final double bottomLat, + final double leftLon, + double rightLon) { super(planetModel); // Argument checking - if (topLat > Math.PI * 0.5 || topLat < -Math.PI * 0.5) + if (topLat > Math.PI * 0.5 || topLat < -Math.PI * 0.5) { throw new IllegalArgumentException("Top latitude out of range"); - if (bottomLat > Math.PI * 0.5 || bottomLat < -Math.PI * 0.5) + } + if (bottomLat > Math.PI * 0.5 || bottomLat < -Math.PI * 0.5) { throw new IllegalArgumentException("Bottom latitude out of range"); - if (topLat < bottomLat) + } + if (topLat < bottomLat) { throw new IllegalArgumentException("Top latitude less than bottom latitude"); - if (leftLon < -Math.PI || leftLon > Math.PI) + } + if (leftLon < -Math.PI || leftLon > Math.PI) { throw new IllegalArgumentException("Left longitude out of range"); - if (rightLon < -Math.PI || rightLon > Math.PI) + } + if (rightLon < -Math.PI || rightLon > Math.PI) { throw new IllegalArgumentException("Right longitude out of range"); + } double extent = rightLon - leftLon; if (extent < 0.0) { extent += 2.0 * Math.PI; } - if (extent < Math.PI) + if (extent < Math.PI) { throw new IllegalArgumentException("Width of rectangle too small"); + } this.topLat = topLat; this.bottomLat = bottomLat; @@ -119,10 +130,16 @@ class GeoWideRectangle extends GeoBaseBBox { final double cosRightLon = Math.cos(rightLon); // Now build the four points - this.ULHC = new GeoPoint(planetModel, sinTopLat, sinLeftLon, cosTopLat, cosLeftLon, topLat, leftLon); - this.URHC = new GeoPoint(planetModel, sinTopLat, sinRightLon, cosTopLat, cosRightLon, topLat, rightLon); - this.LRHC = new GeoPoint(planetModel, sinBottomLat, sinRightLon, cosBottomLat, cosRightLon, bottomLat, rightLon); - this.LLHC = new GeoPoint(planetModel, sinBottomLat, sinLeftLon, cosBottomLat, cosLeftLon, bottomLat, leftLon); + this.ULHC = + new GeoPoint(planetModel, sinTopLat, sinLeftLon, cosTopLat, cosLeftLon, topLat, leftLon); + this.URHC = + new GeoPoint(planetModel, sinTopLat, sinRightLon, cosTopLat, cosRightLon, topLat, rightLon); + this.LRHC = + new GeoPoint( + planetModel, sinBottomLat, sinRightLon, cosBottomLat, cosRightLon, bottomLat, rightLon); + this.LLHC = + new GeoPoint( + planetModel, sinBottomLat, sinLeftLon, cosBottomLat, cosLeftLon, bottomLat, leftLon); final double middleLat = (topLat + bottomLat) * 0.5; final double sinMiddleLat = Math.sin(middleLat); @@ -135,30 +152,38 @@ class GeoWideRectangle extends GeoBaseBBox { final double sinMiddleLon = Math.sin(middleLon); final double cosMiddleLon = Math.cos(middleLon); - this.centerPoint = new GeoPoint(planetModel, sinMiddleLat, sinMiddleLon, cosMiddleLat, cosMiddleLon); + this.centerPoint = + new GeoPoint(planetModel, sinMiddleLat, sinMiddleLon, cosMiddleLat, cosMiddleLon); this.topPlane = new SidedPlane(centerPoint, planetModel, sinTopLat); this.bottomPlane = new SidedPlane(centerPoint, planetModel, sinBottomLat); this.leftPlane = new SidedPlane(centerPoint, cosLeftLon, sinLeftLon); 
this.rightPlane = new SidedPlane(centerPoint, cosRightLon, sinRightLon); - this.topPlanePoints = new GeoPoint[]{ULHC, URHC}; - this.bottomPlanePoints = new GeoPoint[]{LLHC, LRHC}; - this.leftPlanePoints = new GeoPoint[]{ULHC, LLHC}; - this.rightPlanePoints = new GeoPoint[]{URHC, LRHC}; + this.topPlanePoints = new GeoPoint[] {ULHC, URHC}; + this.bottomPlanePoints = new GeoPoint[] {LLHC, LRHC}; + this.leftPlanePoints = new GeoPoint[] {ULHC, LLHC}; + this.rightPlanePoints = new GeoPoint[] {URHC, LRHC}; this.eitherBound = new EitherBound(); - this.edgePoints = new GeoPoint[]{ULHC}; + this.edgePoints = new GeoPoint[] {ULHC}; } /** * Constructor for deserialization. + * * @param planetModel is the planet model. * @param inputStream is the input stream. */ - public GeoWideRectangle(final PlanetModel planetModel, final InputStream inputStream) throws IOException { - this(planetModel, SerializableObject.readDouble(inputStream), SerializableObject.readDouble(inputStream), SerializableObject.readDouble(inputStream), SerializableObject.readDouble(inputStream)); + public GeoWideRectangle(final PlanetModel planetModel, final InputStream inputStream) + throws IOException { + this( + planetModel, + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream)); } @Override @@ -175,30 +200,32 @@ class GeoWideRectangle extends GeoBaseBBox { final double newBottomLat = bottomLat - angle; // Figuring out when we escalate to a special case requires some prefiguring double currentLonSpan = rightLon - leftLon; - if (currentLonSpan < 0.0) + if (currentLonSpan < 0.0) { currentLonSpan += Math.PI * 2.0; + } double newLeftLon = leftLon - angle; double newRightLon = rightLon + angle; if (currentLonSpan + 2.0 * angle >= Math.PI * 2.0) { newLeftLon = -Math.PI; newRightLon = Math.PI; } - return GeoBBoxFactory.makeGeoBBox(planetModel, newTopLat, newBottomLat, newLeftLon, newRightLon); + return GeoBBoxFactory.makeGeoBBox( + planetModel, newTopLat, newBottomLat, newLeftLon, newRightLon); } @Override public boolean isWithin(final double x, final double y, final double z) { - return topPlane.isWithin(x, y, z) && - bottomPlane.isWithin(x, y, z) && - (leftPlane.isWithin(x, y, z) || - rightPlane.isWithin(x, y, z)); + return topPlane.isWithin(x, y, z) + && bottomPlane.isWithin(x, y, z) + && (leftPlane.isWithin(x, y, z) || rightPlane.isWithin(x, y, z)); } @Override public double getRadius() { - // Here we compute the distance from the middle point to one of the corners. However, we need to be careful - // to use the longest of three distances: the distance to a corner on the top; the distnace to a corner on the bottom, and - // the distance to the right or left edge from the center. + // Here we compute the distance from the middle point to one of the corners. However, we need + // to be careful to use the longest of three distances: the distance to a corner on the top; + // the distance to a corner on the bottom, and the distance to the right or left edge from the + // center. final double centerAngle = (rightLon - (rightLon + leftLon) * 0.5) * cosMiddleLat; final double topAngle = centerPoint.arcDistance(URHC); final double bottomAngle = centerPoint.arcDistance(LLHC); @@ -221,62 +248,86 @@ class GeoWideRectangle extends GeoBaseBBox { } @Override - public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... 
bounds) { - // Right and left bounds are essentially independent hemispheres; crossing into the wrong part of one - // requires crossing into the right part of the other. So intersection can ignore the left/right bounds. - return p.intersects(planetModel, topPlane, notablePoints, topPlanePoints, bounds, bottomPlane, eitherBound) || - p.intersects(planetModel, bottomPlane, notablePoints, bottomPlanePoints, bounds, topPlane, eitherBound) || - p.intersects(planetModel, leftPlane, notablePoints, leftPlanePoints, bounds, topPlane, bottomPlane) || - p.intersects(planetModel, rightPlane, notablePoints, rightPlanePoints, bounds, topPlane, bottomPlane); + public boolean intersects( + final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) { + // Right and left bounds are essentially independent hemispheres; crossing into the wrong part + // of one requires crossing into the right part of the other. So intersection can ignore the + // left/right bounds. + return p.intersects( + planetModel, topPlane, notablePoints, topPlanePoints, bounds, bottomPlane, eitherBound) + || p.intersects( + planetModel, + bottomPlane, + notablePoints, + bottomPlanePoints, + bounds, + topPlane, + eitherBound) + || p.intersects( + planetModel, leftPlane, notablePoints, leftPlanePoints, bounds, topPlane, bottomPlane) + || p.intersects( + planetModel, + rightPlane, + notablePoints, + rightPlanePoints, + bounds, + topPlane, + bottomPlane); } @Override public boolean intersects(final GeoShape geoShape) { - return geoShape.intersects(topPlane, topPlanePoints, bottomPlane, eitherBound) || - geoShape.intersects(bottomPlane, bottomPlanePoints, topPlane, eitherBound) || - geoShape.intersects(leftPlane, leftPlanePoints, topPlane, bottomPlane) || - geoShape.intersects(rightPlane, rightPlanePoints, topPlane, bottomPlane); + return geoShape.intersects(topPlane, topPlanePoints, bottomPlane, eitherBound) + || geoShape.intersects(bottomPlane, bottomPlanePoints, topPlane, eitherBound) + || geoShape.intersects(leftPlane, leftPlanePoints, topPlane, bottomPlane) + || geoShape.intersects(rightPlane, rightPlanePoints, topPlane, bottomPlane); } @Override public void getBounds(Bounds bounds) { super.getBounds(bounds); - bounds.isWide() - .addHorizontalPlane(planetModel, topLat, topPlane, bottomPlane, eitherBound) - .addVerticalPlane(planetModel, rightLon, rightPlane, topPlane, bottomPlane) - .addHorizontalPlane(planetModel, bottomLat, bottomPlane, topPlane, eitherBound) - .addVerticalPlane(planetModel, leftLon, leftPlane, topPlane, bottomPlane) - .addIntersection(planetModel, leftPlane, rightPlane, topPlane, bottomPlane) - .addPoint(ULHC).addPoint(URHC).addPoint(LRHC).addPoint(LLHC); + bounds + .isWide() + .addHorizontalPlane(planetModel, topLat, topPlane, bottomPlane, eitherBound) + .addVerticalPlane(planetModel, rightLon, rightPlane, topPlane, bottomPlane) + .addHorizontalPlane(planetModel, bottomLat, bottomPlane, topPlane, eitherBound) + .addVerticalPlane(planetModel, leftLon, leftPlane, topPlane, bottomPlane) + .addIntersection(planetModel, leftPlane, rightPlane, topPlane, bottomPlane) + .addPoint(ULHC) + .addPoint(URHC) + .addPoint(LRHC) + .addPoint(LLHC); } @Override - protected double outsideDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { - final double topDistance = distanceStyle.computeDistance(planetModel, topPlane, x,y,z, bottomPlane, eitherBound); - final double bottomDistance = distanceStyle.computeDistance(planetModel, bottomPlane, x,y,z, topPlane, eitherBound); - // 
Because the rectangle exceeds 180 degrees, it is safe to compute the horizontally + protected double outsideDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { + final double topDistance = + distanceStyle.computeDistance(planetModel, topPlane, x, y, z, bottomPlane, eitherBound); + final double bottomDistance = + distanceStyle.computeDistance(planetModel, bottomPlane, x, y, z, topPlane, eitherBound); + // Because the rectangle exceeds 180 degrees, it is safe to compute the horizontally // unbounded distance to both the left and the right and only take the minimum of the two. - final double leftDistance = distanceStyle.computeDistance(planetModel, leftPlane, x,y,z, topPlane, bottomPlane); - final double rightDistance = distanceStyle.computeDistance(planetModel, rightPlane, x,y,z, topPlane, bottomPlane); - - final double ULHCDistance = distanceStyle.computeDistance(ULHC, x,y,z); - final double URHCDistance = distanceStyle.computeDistance(URHC, x,y,z); - final double LRHCDistance = distanceStyle.computeDistance(LRHC, x,y,z); - final double LLHCDistance = distanceStyle.computeDistance(LLHC, x,y,z); - + final double leftDistance = + distanceStyle.computeDistance(planetModel, leftPlane, x, y, z, topPlane, bottomPlane); + final double rightDistance = + distanceStyle.computeDistance(planetModel, rightPlane, x, y, z, topPlane, bottomPlane); + + final double ULHCDistance = distanceStyle.computeDistance(ULHC, x, y, z); + final double URHCDistance = distanceStyle.computeDistance(URHC, x, y, z); + final double LRHCDistance = distanceStyle.computeDistance(LRHC, x, y, z); + final double LLHCDistance = distanceStyle.computeDistance(LLHC, x, y, z); + return Math.min( - Math.min( - Math.min(topDistance, bottomDistance), - Math.min(leftDistance, rightDistance)), - Math.min( - Math.min(ULHCDistance, URHCDistance), - Math.min(LRHCDistance, LLHCDistance))); + Math.min(Math.min(topDistance, bottomDistance), Math.min(leftDistance, rightDistance)), + Math.min(Math.min(ULHCDistance, URHCDistance), Math.min(LRHCDistance, LLHCDistance))); } @Override public boolean equals(Object o) { - if (!(o instanceof GeoWideRectangle)) + if (!(o instanceof GeoWideRectangle)) { return false; + } GeoWideRectangle other = (GeoWideRectangle) o; return super.equals(other) && other.ULHC.equals(ULHC) && other.LRHC.equals(LRHC); } @@ -291,16 +342,31 @@ class GeoWideRectangle extends GeoBaseBBox { @Override public String toString() { - return "GeoWideRectangle: {planetmodel=" + planetModel + ", toplat=" + topLat + "(" + topLat * 180.0 / Math.PI + "), bottomlat=" + bottomLat + "(" + bottomLat * 180.0 / Math.PI + "), leftlon=" + leftLon + "(" + leftLon * 180.0 / Math.PI + "), rightlon=" + rightLon + "(" + rightLon * 180.0 / Math.PI + ")}"; + return "GeoWideRectangle: {planetmodel=" + + planetModel + + ", toplat=" + + topLat + + "(" + + topLat * 180.0 / Math.PI + + "), bottomlat=" + + bottomLat + + "(" + + bottomLat * 180.0 / Math.PI + + "), leftlon=" + + leftLon + + "(" + + leftLon * 180.0 / Math.PI + + "), rightlon=" + + rightLon + + "(" + + rightLon * 180.0 / Math.PI + + ")}"; } - /** A membership implementation representing a wide (more than 180) left/right bound. - */ + /** A membership implementation representing a wide (more than 180) left/right bound. */ protected class EitherBound implements Membership { - /** Constructor. - */ - public EitherBound() { - } + /** Constructor. 
*/ + public EitherBound() {} @Override public boolean isWithin(final Vector v) { @@ -313,4 +379,3 @@ class GeoWideRectangle extends GeoBaseBBox { } } } - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoWideSouthRectangle.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoWideSouthRectangle.java index eca4f6284c6..5320e2221fb 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoWideSouthRectangle.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoWideSouthRectangle.java @@ -16,13 +16,12 @@ */ package org.apache.lucene.spatial3d.geom; +import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.io.IOException; /** - * Bounding box wider than PI but limited on three sides (top lat, - * left lon, right lon). + * Bounding box wider than PI but limited on three sides (top lat, left lon, right lon). * * @lucene.internal */ @@ -66,14 +65,16 @@ class GeoWideSouthRectangle extends GeoBaseBBox { protected final GeoPoint[] edgePoints; /** - * Accepts only values in the following ranges: lat: {@code -PI/2 -> PI/2}, lon: {@code -PI -> PI}. - * Horizontal angle must be greater than or equal to PI. + * Accepts only values in the following ranges: lat: {@code -PI/2 -> PI/2}, lon: {@code -PI -> + * PI}. Horizontal angle must be greater than or equal to PI. + * * @param planetModel is the planet model. * @param topLat is the top latitude. * @param leftLon is the left longitude. * @param rightLon is the right longitude. */ - public GeoWideSouthRectangle(final PlanetModel planetModel, final double topLat, final double leftLon, double rightLon) { + public GeoWideSouthRectangle( + final PlanetModel planetModel, final double topLat, final double leftLon, double rightLon) { super(planetModel); // Argument checking if (topLat > Math.PI * 0.5 || topLat < -Math.PI * 0.5) @@ -86,8 +87,9 @@ class GeoWideSouthRectangle extends GeoBaseBBox { if (extent < 0.0) { extent += 2.0 * Math.PI; } - if (extent < Math.PI) + if (extent < Math.PI) { throw new IllegalArgumentException("Width of rectangle too small"); + } this.topLat = topLat; this.leftLon = leftLon; @@ -101,8 +103,10 @@ class GeoWideSouthRectangle extends GeoBaseBBox { final double cosRightLon = Math.cos(rightLon); // Now build the four points - this.ULHC = new GeoPoint(planetModel, sinTopLat, sinLeftLon, cosTopLat, cosLeftLon, topLat, leftLon); - this.URHC = new GeoPoint(planetModel, sinTopLat, sinRightLon, cosTopLat, cosRightLon, topLat, rightLon); + this.ULHC = + new GeoPoint(planetModel, sinTopLat, sinLeftLon, cosTopLat, cosLeftLon, topLat, leftLon); + this.URHC = + new GeoPoint(planetModel, sinTopLat, sinRightLon, cosTopLat, cosRightLon, topLat, rightLon); final double middleLat = (topLat - Math.PI * 0.5) * 0.5; final double sinMiddleLat = Math.sin(middleLat); @@ -115,28 +119,35 @@ class GeoWideSouthRectangle extends GeoBaseBBox { final double sinMiddleLon = Math.sin(middleLon); final double cosMiddleLon = Math.cos(middleLon); - this.centerPoint = new GeoPoint(planetModel, sinMiddleLat, sinMiddleLon, cosMiddleLat, cosMiddleLon); + this.centerPoint = + new GeoPoint(planetModel, sinMiddleLat, sinMiddleLon, cosMiddleLat, cosMiddleLon); this.topPlane = new SidedPlane(centerPoint, planetModel, sinTopLat); this.leftPlane = new SidedPlane(centerPoint, cosLeftLon, sinLeftLon); this.rightPlane = new SidedPlane(centerPoint, cosRightLon, sinRightLon); - this.topPlanePoints = new GeoPoint[]{ULHC, URHC}; - this.leftPlanePoints = new GeoPoint[]{ULHC, 
planetModel.SOUTH_POLE}; - this.rightPlanePoints = new GeoPoint[]{URHC, planetModel.SOUTH_POLE}; + this.topPlanePoints = new GeoPoint[] {ULHC, URHC}; + this.leftPlanePoints = new GeoPoint[] {ULHC, planetModel.SOUTH_POLE}; + this.rightPlanePoints = new GeoPoint[] {URHC, planetModel.SOUTH_POLE}; this.eitherBound = new EitherBound(); - - this.edgePoints = new GeoPoint[]{planetModel.SOUTH_POLE}; + + this.edgePoints = new GeoPoint[] {planetModel.SOUTH_POLE}; } /** * Constructor for deserialization. + * * @param planetModel is the planet model. * @param inputStream is the input stream. */ - public GeoWideSouthRectangle(final PlanetModel planetModel, final InputStream inputStream) throws IOException { - this(planetModel, SerializableObject.readDouble(inputStream), SerializableObject.readDouble(inputStream), SerializableObject.readDouble(inputStream)); + public GeoWideSouthRectangle(final PlanetModel planetModel, final InputStream inputStream) + throws IOException { + this( + planetModel, + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream)); } @Override @@ -152,29 +163,31 @@ class GeoWideSouthRectangle extends GeoBaseBBox { final double newBottomLat = -Math.PI * 0.5; // Figuring out when we escalate to a special case requires some prefiguring double currentLonSpan = rightLon - leftLon; - if (currentLonSpan < 0.0) + if (currentLonSpan < 0.0) { currentLonSpan += Math.PI * 2.0; + } double newLeftLon = leftLon - angle; double newRightLon = rightLon + angle; if (currentLonSpan + 2.0 * angle >= Math.PI * 2.0) { newLeftLon = -Math.PI; newRightLon = Math.PI; } - return GeoBBoxFactory.makeGeoBBox(planetModel, newTopLat, newBottomLat, newLeftLon, newRightLon); + return GeoBBoxFactory.makeGeoBBox( + planetModel, newTopLat, newBottomLat, newLeftLon, newRightLon); } @Override public boolean isWithin(final double x, final double y, final double z) { - return topPlane.isWithin(x, y, z) && - (leftPlane.isWithin(x, y, z) || - rightPlane.isWithin(x, y, z)); + return topPlane.isWithin(x, y, z) + && (leftPlane.isWithin(x, y, z) || rightPlane.isWithin(x, y, z)); } @Override public double getRadius() { - // Here we compute the distance from the middle point to one of the corners. However, we need to be careful - // to use the longest of three distances: the distance to a corner on the top; the distnace to a corner on the bottom, and - // the distance to the right or left edge from the center. + // Here we compute the distance from the middle point to one of the corners. However, we need + // to be careful to use the longest of three distances: the distance to a corner on the top; + // the distance to a corner on the bottom, and the distance to the right or left edge from the + // center. final double centerAngle = (rightLon - (rightLon + leftLon) * 0.5) * cosMiddleLat; final double topAngle = centerPoint.arcDistance(URHC); return Math.max(centerAngle, topAngle); @@ -191,54 +204,62 @@ class GeoWideSouthRectangle extends GeoBaseBBox { } @Override - public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) { - // Right and left bounds are essentially independent hemispheres; crossing into the wrong part of one - // requires crossing into the right part of the other. So intersection can ignore the left/right bounds. 
- return p.intersects(planetModel, topPlane, notablePoints, topPlanePoints, bounds, eitherBound) || - p.intersects(planetModel, leftPlane, notablePoints, leftPlanePoints, bounds, topPlane) || - p.intersects(planetModel, rightPlane, notablePoints, rightPlanePoints, bounds, topPlane); + public boolean intersects( + final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) { + // Right and left bounds are essentially independent hemispheres; crossing into the wrong part + // of one requires crossing into the right part of the other. So intersection can ignore the + // left/right bounds. + return p.intersects(planetModel, topPlane, notablePoints, topPlanePoints, bounds, eitherBound) + || p.intersects(planetModel, leftPlane, notablePoints, leftPlanePoints, bounds, topPlane) + || p.intersects(planetModel, rightPlane, notablePoints, rightPlanePoints, bounds, topPlane); } @Override public boolean intersects(final GeoShape geoShape) { - return geoShape.intersects(topPlane, topPlanePoints, eitherBound) || - geoShape.intersects(leftPlane, leftPlanePoints, topPlane) || - geoShape.intersects(rightPlane, rightPlanePoints, topPlane); + return geoShape.intersects(topPlane, topPlanePoints, eitherBound) + || geoShape.intersects(leftPlane, leftPlanePoints, topPlane) + || geoShape.intersects(rightPlane, rightPlanePoints, topPlane); } @Override public void getBounds(Bounds bounds) { super.getBounds(bounds); - bounds.isWide() - .addHorizontalPlane(planetModel, topLat, topPlane, eitherBound) - .addVerticalPlane(planetModel, rightLon, rightPlane, topPlane) - .addVerticalPlane(planetModel, leftLon, leftPlane, topPlane) - .addIntersection(planetModel, leftPlane, rightPlane, topPlane) - .addPoint(ULHC).addPoint(URHC).addPoint(planetModel.SOUTH_POLE); + bounds + .isWide() + .addHorizontalPlane(planetModel, topLat, topPlane, eitherBound) + .addVerticalPlane(planetModel, rightLon, rightPlane, topPlane) + .addVerticalPlane(planetModel, leftLon, leftPlane, topPlane) + .addIntersection(planetModel, leftPlane, rightPlane, topPlane) + .addPoint(ULHC) + .addPoint(URHC) + .addPoint(planetModel.SOUTH_POLE); } @Override - protected double outsideDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { - final double topDistance = distanceStyle.computeDistance(planetModel, topPlane, x,y,z, eitherBound); - // Because the rectangle exceeds 180 degrees, it is safe to compute the horizontally + protected double outsideDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { + final double topDistance = + distanceStyle.computeDistance(planetModel, topPlane, x, y, z, eitherBound); + // Because the rectangle exceeds 180 degrees, it is safe to compute the horizontally // unbounded distance to both the left and the right and only take the minimum of the two. 
- final double leftDistance = distanceStyle.computeDistance(planetModel, leftPlane, x,y,z, topPlane); - final double rightDistance = distanceStyle.computeDistance(planetModel, rightPlane, x,y,z, topPlane); - - final double ULHCDistance = distanceStyle.computeDistance(ULHC, x,y,z); - final double URHCDistance = distanceStyle.computeDistance(URHC, x,y,z); - + final double leftDistance = + distanceStyle.computeDistance(planetModel, leftPlane, x, y, z, topPlane); + final double rightDistance = + distanceStyle.computeDistance(planetModel, rightPlane, x, y, z, topPlane); + + final double ULHCDistance = distanceStyle.computeDistance(ULHC, x, y, z); + final double URHCDistance = distanceStyle.computeDistance(URHC, x, y, z); + return Math.min( - Math.min( - topDistance, - Math.min(leftDistance, rightDistance)), - Math.min(ULHCDistance, URHCDistance)); + Math.min(topDistance, Math.min(leftDistance, rightDistance)), + Math.min(ULHCDistance, URHCDistance)); } @Override public boolean equals(Object o) { - if (!(o instanceof GeoWideSouthRectangle)) + if (!(o instanceof GeoWideSouthRectangle)) { return false; + } GeoWideSouthRectangle other = (GeoWideSouthRectangle) o; return super.equals(o) && other.ULHC.equals(ULHC) && other.URHC.equals(URHC); } @@ -253,16 +274,27 @@ class GeoWideSouthRectangle extends GeoBaseBBox { @Override public String toString() { - return "GeoWideSouthRectangle: {planetmodel="+planetModel+", toplat=" + topLat + "(" + topLat * 180.0 / Math.PI + "), leftlon=" + leftLon + "(" + leftLon * 180.0 / Math.PI + "), rightlon=" + rightLon + "(" + rightLon * 180.0 / Math.PI + ")}"; + return "GeoWideSouthRectangle: {planetmodel=" + + planetModel + + ", toplat=" + + topLat + + "(" + + topLat * 180.0 / Math.PI + + "), leftlon=" + + leftLon + + "(" + + leftLon * 180.0 / Math.PI + + "), rightlon=" + + rightLon + + "(" + + rightLon * 180.0 / Math.PI + + ")}"; } - /** Membership implementation representing width more than 180. - */ + /** Membership implementation representing width more than 180. */ protected class EitherBound implements Membership { - /** Constructor. - */ - public EitherBound() { - } + /** Constructor. */ + public EitherBound() {} @Override public boolean isWithin(final Vector v) { @@ -275,5 +307,3 @@ class GeoWideSouthRectangle extends GeoBaseBBox { } } } - - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoWorld.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoWorld.java index 4eef12869b4..b1dd4646019 100755 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoWorld.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoWorld.java @@ -16,9 +16,9 @@ */ package org.apache.lucene.spatial3d.geom; +import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.io.IOException; /** * Bounding box including the entire world. @@ -27,21 +27,25 @@ import java.io.IOException; */ class GeoWorld extends GeoBaseBBox { /** No points on the edge of the shape */ - protected final static GeoPoint[] edgePoints = new GeoPoint[0]; + protected static final GeoPoint[] edgePoints = new GeoPoint[0]; /** Point in the middle of the world */ protected final GeoPoint originPoint; - - /** Constructor. - *@param planetModel is the planet model. + + /** + * Constructor. + * + * @param planetModel is the planet model. */ public GeoWorld(final PlanetModel planetModel) { super(planetModel); originPoint = new GeoPoint(planetModel.xyScaling, 1.0, 0.0, 0.0); } - /** Constructor. 
- *@param planetModel is the planet model. - *@param inputStream is the input stream. + /** + * Constructor. + * + * @param planetModel is the planet model. + * @param inputStream is the input stream. */ public GeoWorld(final PlanetModel planetModel, final InputStream inputStream) throws IOException { this(planetModel); @@ -79,7 +83,8 @@ class GeoWorld extends GeoBaseBBox { } @Override - public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) { + public boolean intersects( + final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) { return false; } @@ -92,7 +97,7 @@ class GeoWorld extends GeoBaseBBox { public void getBounds(Bounds bounds) { super.getBounds(bounds); // Unnecessary - //bounds.noLongitudeBound().noTopLatitudeBound().noBottomLatitudeBound(); + // bounds.noLongitudeBound().noTopLatitudeBound().noBottomLatitudeBound(); } @Override @@ -105,14 +110,16 @@ class GeoWorld extends GeoBaseBBox { } @Override - protected double outsideDistance(final DistanceStyle distanceStyle, final double x, final double y, final double z) { + protected double outsideDistance( + final DistanceStyle distanceStyle, final double x, final double y, final double z) { return 0.0; } @Override public boolean equals(Object o) { - if (!(o instanceof GeoWorld)) + if (!(o instanceof GeoWorld)) { return false; + } return super.equals(o); } @@ -123,6 +130,6 @@ class GeoWorld extends GeoBaseBBox { @Override public String toString() { - return "GeoWorld: {planetmodel="+planetModel+"}"; + return "GeoWorld: {planetmodel=" + planetModel + "}"; } } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/LatLonBounds.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/LatLonBounds.java index ef43dccd452..5da1ed43e57 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/LatLonBounds.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/LatLonBounds.java @@ -43,8 +43,8 @@ public class LatLonBounds implements Bounds { // // The next problem is how to compare two of the same kind of bound, e.g. two left bounds. // We need to keep track of the leftmost longitude of the shape, but since this is a circle, - // this is arbitrary. What we could try to do instead would be to find a pair of (left,right) bounds such - // that: + // this is arbitrary. What we could try to do instead would be to find a pair of (left,right) + // bounds such that: // (1) all other bounds are within, and // (2) the left minus right distance is minimized // Unfortunately, there are still shapes that cannot be summarized in this way correctly. @@ -52,40 +52,45 @@ public class LatLonBounds implements Bounds { // lat/lon bounds that do not in fact circle the globe. // // One way to handle the longitude issue correctly is therefore to stipulate that we - // walk the bounds of the shape in some kind of connected order. Each point or circle is therefore - // added in a sequence. We also need an interior point to make sure we have the right - // choice of longitude bounds. But even with this, we still can't always choose whether the actual shape - // goes right or left. + // walk the bounds of the shape in some kind of connected order. Each point or circle is + // therefore added in a sequence. We also need an interior point to make sure we have the + // right choice of longitude bounds. But even with this, we still can't always choose whether + // the actual shape goes right or left. 
// // We can make the specification truly general by submitting the following in order: // addSide(PlaneSide side, Membership... constraints) // ... - // This is unambiguous, but I still can't see yet how this would help compute the bounds. The plane - // solution would in general seem to boil down to the same logic that relies on points along the path - // to define the shape boundaries. I guess the one thing that you do know for a bounded edge is that - // the endpoints are actually connected. But it is not clear whether relationship helps in any way. + // This is unambiguous, but I still can't see yet how this would help compute the bounds. The + // plane solution would in general seem to boil down to the same logic that relies on points + // along the path to define the shape boundaries. I guess the one thing that you do know for + // a bounded edge is that the endpoints are actually connected. But it is not clear whether this + // relationship helps in any way. // - // In any case, if we specify shapes by a sequence of planes, we should stipulate that multiple sequences - // are allowed, provided they progressively tile an area of the sphere that is connected and sequential. - // For example, paths do alternating rectangles and circles, in sequence. Each sequence member is - // described by a sequence of planes. I think it would also be reasonable to insist that the first segment - // of a shape overlap or adjoin the previous shape. + // In any case, if we specify shapes by a sequence of planes, we should stipulate that multiple + // sequences are allowed, provided they progressively tile an area of the sphere that is + // connected and sequential. + // For example, paths do alternating rectangles and circles, in sequence. Each sequence member + // is described by a sequence of planes. I think it would also be reasonable to insist that + // the first segment of a shape overlap or adjoin the previous shape. + // - // Here's a way to think about it that might help: Traversing every edge should grow the longitude bounds - // in the direction of the traversal. So if the traversal is always known to be less than PI in total longitude - // angle, then it is possible to use the endpoints to determine the unambiguous extension of the envelope. - // For example, say you are currently at longitude -0.5. The next point is at longitude PI-0.1. You could say - // that the difference in longitude going one way around would be beter than the distance the other way - // around, and therefore the longitude envelope should be extended accordingly. But in practice, when an - // edge goes near a pole and may be inclined as well, the longer longitude change might be the right path, even - // if the arc length is short. So this too doesn't work. + // Here's a way to think about it that might help: Traversing every edge should grow the longitude + // bounds in the direction of the traversal. So if the traversal is always known to be less than + // PI in total longitude angle, then it is possible to use the endpoints to determine the + // unambiguous extension of the envelope. + // For example, say you are currently at longitude -0.5. The next point is at longitude PI-0.1. + // You could say that the difference in longitude going one way around would be better than the + // distance the other way around, and therefore the longitude envelope should be extended + // accordingly. 
But in practice, when an edge goes near a pole and may be inclined as well, + // the longer longitude change might be the right path, even if the arc length is short. So this + // too doesn't work. // - // Given we have a hard time making an exact match, here's the current proposal. The proposal is a - // heuristic, based on the idea that most areas are small compared to the circumference of the globe. - // We keep track of the last point we saw, and take each point as it arrives, and compute its longitude. - // Then, we have a choice as to which way to expand the envelope: we can expand by going to the left or - // to the right. We choose the direction with the least longitude difference. (If we aren't sure, - // and can recognize that, we can set "unconstrained in longitude".) + // Given we have a hard time making an exact match, here's the current proposal. The proposal is + // a heuristic, based on the idea that most areas are small compared to the circumference of the + // globe. We keep track of the last point we saw, and take each point as it arrives, and compute + // its longitude. + // Then, we have a choice as to which way to expand the envelope: we can expand by going to the + // left or to the right. We choose the direction with the least longitude difference. (If we + // aren't sure, and can recognize that, we can set "unconstrained in longitude".) /** If non-null, the left longitude bound */ private Double leftLongitude = null; @@ -93,86 +98,102 @@ public class LatLonBounds implements Bounds { private Double rightLongitude = null; /** Construct an empty bounds object */ - public LatLonBounds() { - } + public LatLonBounds() {} // Accessor methods - - /** Get maximum latitude, if any. - *@return maximum latitude or null. + + /** + * Get maximum latitude, if any. + * + * @return maximum latitude or null. */ public Double getMaxLatitude() { return maxLatitude; } - /** Get minimum latitude, if any. - *@return minimum latitude or null. + /** + * Get minimum latitude, if any. + * + * @return minimum latitude or null. */ public Double getMinLatitude() { return minLatitude; } - /** Get left longitude, if any. - *@return left longitude, or null. + /** + * Get left longitude, if any. + * + * @return left longitude, or null. */ public Double getLeftLongitude() { return leftLongitude; } - /** Get right longitude, if any. - *@return right longitude, or null. + /** + * Get right longitude, if any. + * + * @return right longitude, or null. */ public Double getRightLongitude() { return rightLongitude; } // Degenerate case check - - /** Check if there's no longitude bound. - *@return true if no longitude bound. + + /** + * Check if there's no longitude bound. + * + * @return true if no longitude bound. */ public boolean checkNoLongitudeBound() { return noLongitudeBound; } - /** Check if there's no top latitude bound. - *@return true if no top latitude bound. + /** + * Check if there's no top latitude bound. + * + * @return true if no top latitude bound. */ public boolean checkNoTopLatitudeBound() { return noTopLatitudeBound; } - /** Check if there's no bottom latitude bound. - *@return true if no bottom latitude bound. + /** + * Check if there's no bottom latitude bound. + * + * @return true if no bottom latitude bound. */ public boolean checkNoBottomLatitudeBound() { return noBottomLatitudeBound; } // Modification methods - + @Override - public Bounds addPlane(final PlanetModel planetModel, final Plane plane, final Membership... 
bounds) { + public Bounds addPlane( + final PlanetModel planetModel, final Plane plane, final Membership... bounds) { plane.recordBounds(planetModel, this, bounds); return this; } @Override - public Bounds addHorizontalPlane(final PlanetModel planetModel, - final double latitude, - final Plane horizontalPlane, - final Membership... bounds) { + public Bounds addHorizontalPlane( + final PlanetModel planetModel, + final double latitude, + final Plane horizontalPlane, + final Membership... bounds) { if (!noTopLatitudeBound || !noBottomLatitudeBound) { addLatitudeBound(latitude); } return this; } - + @Override - public Bounds addVerticalPlane(final PlanetModel planetModel, - final double longitude, - final Plane verticalPlane, - final Membership... bounds) { + public Bounds addVerticalPlane( + final PlanetModel planetModel, + final double longitude, + final Plane verticalPlane, + final Membership... bounds) { if (!noLongitudeBound) { addLongitudeBound(longitude); } @@ -213,7 +234,11 @@ public class LatLonBounds implements Bounds { } @Override - public Bounds addIntersection(final PlanetModel planetModel, final Plane plane1, final Plane plane2, final Membership... bounds) { + public Bounds addIntersection( + final PlanetModel planetModel, + final Plane plane1, + final Plane plane2, + final Membership... bounds) { plane1.recordBounds(planetModel, this, plane2, bounds); return this; } @@ -230,7 +255,7 @@ public class LatLonBounds implements Bounds { } return this; } - + @Override public Bounds noLongitudeBound() { noLongitudeBound = true; @@ -257,21 +282,27 @@ public class LatLonBounds implements Bounds { public Bounds noBound(final PlanetModel planetModel) { return noLongitudeBound().noTopLatitudeBound().noBottomLatitudeBound(); } - + // Protected methods - - /** Update latitude bound. - *@param latitude is the latitude. + + /** + * Update latitude bound. + * + * @param latitude is the latitude. */ private void addLatitudeBound(double latitude) { - if (!noTopLatitudeBound && (maxLatitude == null || latitude > maxLatitude)) + if (!noTopLatitudeBound && (maxLatitude == null || latitude > maxLatitude)) { maxLatitude = latitude; - if (!noBottomLatitudeBound && (minLatitude == null || latitude < minLatitude)) + } + if (!noBottomLatitudeBound && (minLatitude == null || latitude < minLatitude)) { minLatitude = latitude; + } } - /** Update longitude bound. - *@param longitude is the new longitude value. + /** + * Update longitude bound. + * + * @param longitude is the new longitude value. */ private void addLongitudeBound(double longitude) { // If this point is within the current bounds, we're done; otherwise @@ -280,16 +311,18 @@ public class LatLonBounds implements Bounds { leftLongitude = longitude; rightLongitude = longitude; } else { - // Compute whether we're to the right of the left value. But the left value may be greater than - // the right value. + // Compute whether we're to the right of the left value. But the left value may be greater + // than the right value. double currentLeftLongitude = leftLongitude; double currentRightLongitude = rightLongitude; - if (currentRightLongitude < currentLeftLongitude) + if (currentRightLongitude < currentLeftLongitude) { currentRightLongitude += 2.0 * Math.PI; + } // We have a range to look at that's going in the right way. // Now, do the same trick with the computed longitude. 
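The "trick" these comments refer to is the usual unwrap-then-compare idiom: whenever the right bound, or the candidate longitude, sits west of the left bound, push it one full turn eastward; after that, ordinary comparisons decide containment. A self-contained sketch of the idiom (the helper name and return-value style are mine; the real method instead mutates the leftLongitude and rightLongitude fields, and can fall back to the unconstrained-longitude state):

    // Sketch: does 'lon' already lie inside the envelope [left, right]?
    // The envelope runs eastward from left; all values are radians in (-PI, PI].
    static boolean insideEnvelope(double lon, double left, double right) {
      if (right < left) {
        right += 2.0 * Math.PI; // unwrap the right bound past the left one
      }
      if (lon < left) {
        lon += 2.0 * Math.PI; // the same trick for the candidate longitude
      }
      return lon >= left && lon <= right;
    }

When this test fails, the code below extends whichever side of the envelope reaches the new longitude with the smaller angular change, per the heuristic described in the class comment.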
- if (longitude < currentLeftLongitude) + if (longitude < currentLeftLongitude) { longitude += 2.0 * Math.PI; + } if (longitude < currentLeftLongitude || longitude > currentRightLongitude) { // Outside of current bounds. Consider carefully how we'll expand. @@ -321,13 +354,13 @@ public class LatLonBounds implements Bounds { } } double testRightLongitude = rightLongitude; - if (testRightLongitude < leftLongitude) + if (testRightLongitude < leftLongitude) { testRightLongitude += Math.PI * 2.0; + } if (testRightLongitude - leftLongitude >= Math.PI) { noLongitudeBound = true; leftLongitude = null; rightLongitude = null; } } - } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/LinearDistance.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/LinearDistance.java index f301f49442d..b204548fdce 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/LinearDistance.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/LinearDistance.java @@ -22,50 +22,61 @@ package org.apache.lucene.spatial3d.geom; * @lucene.experimental */ public class LinearDistance implements DistanceStyle { - + /** A convenient instance */ - public final static LinearDistance INSTANCE = new LinearDistance(); - - /** Constructor. - */ - public LinearDistance() { - } - + public static final LinearDistance INSTANCE = new LinearDistance(); + + /** Constructor. */ + public LinearDistance() {} + @Override public double computeDistance(final GeoPoint point1, final GeoPoint point2) { return point1.linearDistance(point2); } - + @Override - public double computeDistance(final GeoPoint point1, final double x2, final double y2, final double z2) { - return point1.linearDistance(x2,y2,z2); + public double computeDistance( + final GeoPoint point1, final double x2, final double y2, final double z2) { + return point1.linearDistance(x2, y2, z2); } @Override - public double computeDistance(final PlanetModel planetModel, final Plane plane, final GeoPoint point, final Membership... bounds) { + public double computeDistance( + final PlanetModel planetModel, + final Plane plane, + final GeoPoint point, + final Membership... bounds) { return plane.linearDistance(planetModel, point, bounds); } - + @Override - public double computeDistance(final PlanetModel planetModel, final Plane plane, final double x, final double y, final double z, final Membership... bounds) { - return plane.linearDistance(planetModel, x,y,z, bounds); + public double computeDistance( + final PlanetModel planetModel, + final Plane plane, + final double x, + final double y, + final double z, + final Membership... bounds) { + return plane.linearDistance(planetModel, x, y, z, bounds); } @Override - public GeoPoint[] findDistancePoints(final PlanetModel planetModel, final double distanceValue, final GeoPoint startPoint, final Plane plane, final Membership... bounds) { + public GeoPoint[] findDistancePoints( + final PlanetModel planetModel, + final double distanceValue, + final GeoPoint startPoint, + final Plane plane, + final Membership... 
bounds) { throw new IllegalStateException("Reverse mapping not implemented for this distance metric"); } - + @Override public double findMinimumArcDistance(final PlanetModel planetModel, final double distanceValue) { throw new IllegalStateException("Reverse mapping not implemented for this distance metric"); } - + @Override public double findMaximumArcDistance(final PlanetModel planetModel, final double distanceValue) { throw new IllegalStateException("Reverse mapping not implemented for this distance metric"); } - } - - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/LinearSquaredDistance.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/LinearSquaredDistance.java index 12ef926c60d..3996fb6e2c6 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/LinearSquaredDistance.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/LinearSquaredDistance.java @@ -22,33 +22,42 @@ package org.apache.lucene.spatial3d.geom; * @lucene.experimental */ public class LinearSquaredDistance implements DistanceStyle { - + /** A convenient instance */ - public final static LinearSquaredDistance INSTANCE = new LinearSquaredDistance(); - - /** Constructor. - */ - public LinearSquaredDistance() { - } - + public static final LinearSquaredDistance INSTANCE = new LinearSquaredDistance(); + + /** Constructor. */ + public LinearSquaredDistance() {} + @Override public double computeDistance(final GeoPoint point1, final GeoPoint point2) { return point1.linearDistanceSquared(point2); } - + @Override - public double computeDistance(final GeoPoint point1, final double x2, final double y2, final double z2) { - return point1.linearDistanceSquared(x2,y2,z2); + public double computeDistance( + final GeoPoint point1, final double x2, final double y2, final double z2) { + return point1.linearDistanceSquared(x2, y2, z2); } @Override - public double computeDistance(final PlanetModel planetModel, final Plane plane, final GeoPoint point, final Membership... bounds) { + public double computeDistance( + final PlanetModel planetModel, + final Plane plane, + final GeoPoint point, + final Membership... bounds) { return plane.linearDistanceSquared(planetModel, point, bounds); } - + @Override - public double computeDistance(final PlanetModel planetModel, final Plane plane, final double x, final double y, final double z, final Membership... bounds) { - return plane.linearDistanceSquared(planetModel, x,y,z, bounds); + public double computeDistance( + final PlanetModel planetModel, + final Plane plane, + final double x, + final double y, + final double z, + final Membership... bounds) { + return plane.linearDistanceSquared(planetModel, x, y, z, bounds); } @Override @@ -62,20 +71,22 @@ public class LinearSquaredDistance implements DistanceStyle { } @Override - public GeoPoint[] findDistancePoints(final PlanetModel planetModel, final double distanceValue, final GeoPoint startPoint, final Plane plane, final Membership... bounds) { + public GeoPoint[] findDistancePoints( + final PlanetModel planetModel, + final double distanceValue, + final GeoPoint startPoint, + final Plane plane, + final Membership... 
bounds) { throw new IllegalStateException("Reverse mapping not implemented for this distance metric"); } - + @Override public double findMinimumArcDistance(final PlanetModel planetModel, final double distanceValue) { throw new IllegalStateException("Reverse mapping not implemented for this distance metric"); } - + @Override public double findMaximumArcDistance(final PlanetModel planetModel, final double distanceValue) { throw new IllegalStateException("Reverse mapping not implemented for this distance metric"); } - } - - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/Membership.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/Membership.java index 0cf6ff0edd7..5d88950ec59 100755 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/Membership.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/Membership.java @@ -42,5 +42,4 @@ public interface Membership { * @return true if the point is within this shape */ public boolean isWithin(final double x, final double y, final double z); - } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/NormalDistance.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/NormalDistance.java index 64fd20bfa5f..7f38c07f005 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/NormalDistance.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/NormalDistance.java @@ -22,50 +22,61 @@ package org.apache.lucene.spatial3d.geom; * @lucene.experimental */ public class NormalDistance implements DistanceStyle { - + /** A convenient instance */ - public final static NormalDistance INSTANCE = new NormalDistance(); - - /** Constructor. - */ - public NormalDistance() { - } - + public static final NormalDistance INSTANCE = new NormalDistance(); + + /** Constructor. */ + public NormalDistance() {} + @Override public double computeDistance(final GeoPoint point1, final GeoPoint point2) { return point1.normalDistance(point2); } - + @Override - public double computeDistance(final GeoPoint point1, final double x2, final double y2, final double z2) { - return point1.normalDistance(x2,y2,z2); + public double computeDistance( + final GeoPoint point1, final double x2, final double y2, final double z2) { + return point1.normalDistance(x2, y2, z2); } @Override - public double computeDistance(final PlanetModel planetModel, final Plane plane, final GeoPoint point, final Membership... bounds) { + public double computeDistance( + final PlanetModel planetModel, + final Plane plane, + final GeoPoint point, + final Membership... bounds) { return plane.normalDistance(point, bounds); } - + @Override - public double computeDistance(final PlanetModel planetModel, final Plane plane, final double x, final double y, final double z, final Membership... bounds) { - return plane.normalDistance(x,y,z, bounds); + public double computeDistance( + final PlanetModel planetModel, + final Plane plane, + final double x, + final double y, + final double z, + final Membership... bounds) { + return plane.normalDistance(x, y, z, bounds); } @Override - public GeoPoint[] findDistancePoints(final PlanetModel planetModel, final double distanceValue, final GeoPoint startPoint, final Plane plane, final Membership... bounds) { + public GeoPoint[] findDistancePoints( + final PlanetModel planetModel, + final double distanceValue, + final GeoPoint startPoint, + final Plane plane, + final Membership... 
bounds) { throw new IllegalStateException("Reverse mapping not implemented for this distance metric"); } - + @Override public double findMinimumArcDistance(final PlanetModel planetModel, final double distanceValue) { throw new IllegalStateException("Reverse mapping not implemented for this distance metric"); } - + @Override public double findMaximumArcDistance(final PlanetModel planetModel, final double distanceValue) { throw new IllegalStateException("Reverse mapping not implemented for this distance metric"); } - } - - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/NormalSquaredDistance.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/NormalSquaredDistance.java index 25b467c87db..aebdcf045f4 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/NormalSquaredDistance.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/NormalSquaredDistance.java @@ -22,33 +22,42 @@ package org.apache.lucene.spatial3d.geom; * @lucene.experimental */ public class NormalSquaredDistance implements DistanceStyle { - + /** A convenient instance */ - public final static NormalSquaredDistance INSTANCE = new NormalSquaredDistance(); - - /** Constructor. - */ - public NormalSquaredDistance() { - } - + public static final NormalSquaredDistance INSTANCE = new NormalSquaredDistance(); + + /** Constructor. */ + public NormalSquaredDistance() {} + @Override public double computeDistance(final GeoPoint point1, final GeoPoint point2) { return point1.normalDistanceSquared(point2); } - + @Override - public double computeDistance(final GeoPoint point1, final double x2, final double y2, final double z2) { - return point1.normalDistanceSquared(x2,y2,z2); + public double computeDistance( + final GeoPoint point1, final double x2, final double y2, final double z2) { + return point1.normalDistanceSquared(x2, y2, z2); } @Override - public double computeDistance(final PlanetModel planetModel, final Plane plane, final GeoPoint point, final Membership... bounds) { + public double computeDistance( + final PlanetModel planetModel, + final Plane plane, + final GeoPoint point, + final Membership... bounds) { return plane.normalDistanceSquared(point, bounds); } - + @Override - public double computeDistance(final PlanetModel planetModel, final Plane plane, final double x, final double y, final double z, final Membership... bounds) { - return plane.normalDistanceSquared(x,y,z, bounds); + public double computeDistance( + final PlanetModel planetModel, + final Plane plane, + final double x, + final double y, + final double z, + final Membership... bounds) { + return plane.normalDistanceSquared(x, y, z, bounds); } @Override @@ -62,20 +71,22 @@ public class NormalSquaredDistance implements DistanceStyle { } @Override - public GeoPoint[] findDistancePoints(final PlanetModel planetModel, final double distanceValue, final GeoPoint startPoint, final Plane plane, final Membership... bounds) { + public GeoPoint[] findDistancePoints( + final PlanetModel planetModel, + final double distanceValue, + final GeoPoint startPoint, + final Plane plane, + final Membership... 
bounds) { throw new IllegalStateException("Reverse mapping not implemented for this distance metric"); } - + @Override public double findMinimumArcDistance(final PlanetModel planetModel, final double distanceValue) { throw new IllegalStateException("Reverse mapping not implemented for this distance metric"); } - + @Override public double findMaximumArcDistance(final PlanetModel planetModel, final double distanceValue) { throw new IllegalStateException("Reverse mapping not implemented for this distance metric"); } - } - - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/Plane.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/Plane.java index 836feca92c8..2c4aa0c520c 100755 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/Plane.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/Plane.java @@ -17,32 +17,34 @@ package org.apache.lucene.spatial3d.geom; /** - * We know about three kinds of planes. First kind: general plain through two points and origin - * Second kind: horizontal plane at specified height. Third kind: vertical plane with specified x and y value, through origin. + * We know about three kinds of planes. First kind: general plane through two points and origin. + * Second kind: horizontal plane at specified height. Third kind: vertical plane with specified x + * and y value, through origin. * * @lucene.experimental */ public class Plane extends Vector { /** An array with no points in it */ - public final static GeoPoint[] NO_POINTS = new GeoPoint[0]; + public static final GeoPoint[] NO_POINTS = new GeoPoint[0]; /** An array with no bounds in it */ - public final static Membership[] NO_BOUNDS = new Membership[0]; + public static final Membership[] NO_BOUNDS = new Membership[0]; /** A vertical plane normal to the Y axis */ - public final static Plane normalYPlane = new Plane(0.0,1.0,0.0,0.0); + public static final Plane normalYPlane = new Plane(0.0, 1.0, 0.0, 0.0); /** A vertical plane normal to the X axis */ - public final static Plane normalXPlane = new Plane(1.0,0.0,0.0,0.0); + public static final Plane normalXPlane = new Plane(1.0, 0.0, 0.0, 0.0); /** A vertical plane normal to the Z axis */ - public final static Plane normalZPlane = new Plane(0.0,0.0,1.0,0.0); + public static final Plane normalZPlane = new Plane(0.0, 0.0, 1.0, 0.0); /** Ax + By + Cz + D = 0 */ public final double D; /** * Construct a plane with all four coefficients defined. - *@param A is A - *@param B is B - *@param C is C - *@param D is D + * + * @param A is A + * @param B is B + * @param C is C + * @param D is D */ public Plane(final double A, final double B, final double C, final double D) { super(A, B, C); @@ -85,8 +87,7 @@ public class Plane extends Vector { } /** - * Construct a vertical plane through a specified - * x, y and origin. + * Construct a vertical plane through a specified x, y and origin. * * @param x is the specified x value. * @param y is the specified y value. @@ -97,8 +98,8 @@ public class Plane extends Vector { } /** - * Construct a plane with a specific vector, and D offset - * from origin. + * Construct a plane with a specific vector, and D offset from origin. + * * @param v is the normal vector. * @param D is the D offset from the origin.
*/ @@ -107,19 +108,30 @@ public class Plane extends Vector { this.D = D; } - /** Construct a plane that is parallel to the one provided, but which is just barely numerically + /** + * Construct a plane that is parallel to the one provided, but which is just barely numerically * distinguishable from it, in the direction desired. + * * @param basePlane is the starting plane. - * @param above is set to true if the desired plane is in the positive direction from the base plane, - * or false in the negative direction. + * @param above is set to true if the desired plane is in the positive direction from the base + * plane, or false in the negative direction. */ public Plane(final Plane basePlane, final boolean above) { - this(basePlane.x, basePlane.y, basePlane.z, above?Math.nextUp(basePlane.D + MINIMUM_RESOLUTION):Math.nextDown(basePlane.D - MINIMUM_RESOLUTION)); + this( + basePlane.x, + basePlane.y, + basePlane.z, + above + ? Math.nextUp(basePlane.D + MINIMUM_RESOLUTION) + : Math.nextDown(basePlane.D - MINIMUM_RESOLUTION)); } - - /** Construct the most accurate normalized plane through an x-y point and including the Z axis. - * If none of the points can determine the plane, return null. - * @param planePoints is a set of points to choose from. The best one for constructing the most precise plane is picked. + + /** + * Construct the most accurate normalized plane through an x-y point and including the Z axis. If + * none of the points can determine the plane, return null. + * + * @param planePoints is a set of points to choose from. The best one for constructing the most + * precise plane is picked. * @return the plane */ public static Plane constructNormalizedZPlane(final Vector... planePoints) { @@ -136,9 +148,12 @@ public class Plane extends Vector { return constructNormalizedZPlane(bestPoint.x, bestPoint.y); } - /** Construct the most accurate normalized plane through an x-z point and including the Y axis. - * If none of the points can determine the plane, return null. - * @param planePoints is a set of points to choose from. The best one for constructing the most precise plane is picked. + /** + * Construct the most accurate normalized plane through an x-z point and including the Y axis. If + * none of the points can determine the plane, return null. + * + * @param planePoints is a set of points to choose from. The best one for constructing the most + * precise plane is picked. * @return the plane */ public static Plane constructNormalizedYPlane(final Vector... planePoints) { @@ -155,9 +170,12 @@ public class Plane extends Vector { return constructNormalizedYPlane(bestPoint.x, bestPoint.z, 0.0); } - /** Construct the most accurate normalized plane through an y-z point and including the X axis. - * If none of the points can determine the plane, return null. - * @param planePoints is a set of points to choose from. The best one for constructing the most precise plane is picked. + /** + * Construct the most accurate normalized plane through a y-z point and including the X axis. If + * none of the points can determine the plane, return null. + * + * @param planePoints is a set of points to choose from. The best one for constructing the most + * precise plane is picked. * @return the plane */ public static Plane constructNormalizedXPlane(final Vector... planePoints) { @@ -174,50 +192,54 @@ public class Plane extends Vector { return constructNormalizedXPlane(bestPoint.y, bestPoint.z, 0.0); } - /** Construct a normalized plane through an x-y point and including the Z axis.
- * If the x-y point is at (0,0), return null. + /** + * Construct a normalized plane through an x-y point and including the Z axis. If the x-y point is + * at (0,0), return null. + * * @param x is the x value. * @param y is the y value. * @return a plane passing through the Z axis and (x,y,0). */ public static Plane constructNormalizedZPlane(final double x, final double y) { - if (Math.abs(x) < MINIMUM_RESOLUTION && Math.abs(y) < MINIMUM_RESOLUTION) - return null; - final double denom = 1.0 / Math.sqrt(x*x + y*y); + if (Math.abs(x) < MINIMUM_RESOLUTION && Math.abs(y) < MINIMUM_RESOLUTION) return null; + final double denom = 1.0 / Math.sqrt(x * x + y * y); return new Plane(y * denom, -x * denom, 0.0, 0.0); } - /** Construct a normalized plane through an x-z point and parallel to the Y axis. - * If the x-z point is at (0,0), return null. + /** + * Construct a normalized plane through an x-z point and parallel to the Y axis. If the x-z point + * is at (0,0), return null. + * * @param x is the x value. * @param z is the z value. * @param DValue is the offset from the origin for the plane. * @return a plane parallel to the Y axis and perpendicular to the x and z values given. */ - public static Plane constructNormalizedYPlane(final double x, final double z, final double DValue) { - if (Math.abs(x) < MINIMUM_RESOLUTION && Math.abs(z) < MINIMUM_RESOLUTION) - return null; - final double denom = 1.0 / Math.sqrt(x*x + z*z); + public static Plane constructNormalizedYPlane( + final double x, final double z, final double DValue) { + if (Math.abs(x) < MINIMUM_RESOLUTION && Math.abs(z) < MINIMUM_RESOLUTION) return null; + final double denom = 1.0 / Math.sqrt(x * x + z * z); return new Plane(z * denom, 0.0, -x * denom, DValue); } - /** Construct a normalized plane through a y-z point and parallel to the X axis. - * If the y-z point is at (0,0), return null. + /** + * Construct a normalized plane through a y-z point and parallel to the X axis. If the y-z point + * is at (0,0), return null. + * * @param y is the y value. * @param z is the z value. * @param DValue is the offset from the origin for the plane. * @return a plane parallel to the X axis and perpendicular to the y and z values given. */ - public static Plane constructNormalizedXPlane(final double y, final double z, final double DValue) { - if (Math.abs(y) < MINIMUM_RESOLUTION && Math.abs(z) < MINIMUM_RESOLUTION) - return null; - final double denom = 1.0 / Math.sqrt(y*y + z*z); + public static Plane constructNormalizedXPlane( + final double y, final double z, final double DValue) { + if (Math.abs(y) < MINIMUM_RESOLUTION && Math.abs(z) < MINIMUM_RESOLUTION) return null; + final double denom = 1.0 / Math.sqrt(y * y + z * z); return new Plane(0.0, z * denom, -y * denom, DValue); } /** - * Evaluate the plane equation for a given point, as represented - * by a vector. + * Evaluate the plane equation for a given point, as represented by a vector. * * @param v is the vector. * @return the result of the evaluation. @@ -227,8 +249,8 @@ public class Plane extends Vector { } /** - * Evaluate the plane equation for a given point, as represented - * by a vector. + * Evaluate the plane equation for a given point, as represented by a vector. + * * @param x is the x value. * @param y is the y value. * @param z is the z value. @@ -239,8 +261,7 @@ public class Plane extends Vector { } /** - * Evaluate the plane equation for a given point, as represented - * by a vector. + * Evaluate the plane equation for a given point, as represented by a vector. 
* * @param v is the vector. * @return true if the result is on the plane. @@ -250,8 +271,7 @@ public class Plane extends Vector { } /** - * Evaluate the plane equation for a given point, as represented - * by a vector. + * Evaluate the plane equation for a given point, as represented by a vector. * * @param x is the x value. * @param y is the y value. @@ -269,19 +289,23 @@ public class Plane extends Vector { */ public Plane normalize() { Vector normVect = super.normalize(); - if (normVect == null) - return null; + if (normVect == null) return null; return new Plane(normVect, this.D); } - /** Compute arc distance from plane to a vector expressed with a {@link GeoPoint}. - * @see #arcDistance(PlanetModel, double, double, double, Membership...) */ - public double arcDistance(final PlanetModel planetModel, final GeoPoint v, final Membership... bounds) { + /** + * Compute arc distance from plane to a vector expressed with a {@link GeoPoint}. + * + * @see #arcDistance(PlanetModel, double, double, double, Membership...) + */ + public double arcDistance( + final PlanetModel planetModel, final GeoPoint v, final Membership... bounds) { return arcDistance(planetModel, v.x, v.y, v.z, bounds); } - + /** * Compute arc distance from plane to a vector. + * * @param planetModel is the planet model. * @param x is the x vector value. * @param y is the y vector value. @@ -289,40 +313,47 @@ public class Plane extends Vector { * @param bounds are the bounds which constrain the intersection point. * @return the arc distance. */ - public double arcDistance(final PlanetModel planetModel, final double x, final double y, final double z, final Membership... bounds) { + public double arcDistance( + final PlanetModel planetModel, + final double x, + final double y, + final double z, + final Membership... bounds) { - if (evaluateIsZero(x,y,z)) { - if (meetsAllBounds(x,y,z, bounds)) - return 0.0; + if (evaluateIsZero(x, y, z)) { + if (meetsAllBounds(x, y, z, bounds)) return 0.0; return Double.POSITIVE_INFINITY; } - - // First, compute the perpendicular plane. - final Plane perpPlane = new Plane(this.y * z - this.z * y, this.z * x - this.x * z, this.x * y - this.y * x, 0.0); - // We need to compute the intersection of two planes on the geo surface: this one, and its perpendicular. - // Then, we need to choose which of the two points we want to compute the distance to. We pick the + // First, compute the perpendicular plane. + final Plane perpPlane = + new Plane(this.y * z - this.z * y, this.z * x - this.x * z, this.x * y - this.y * x, 0.0); + + // We need to compute the intersection of two planes on the geo surface: this one, and its + // perpendicular. + // Then, we need to choose which of the two points we want to compute the distance to. We pick + // the // shorter distance always. - + final GeoPoint[] intersectionPoints = findIntersections(planetModel, perpPlane); - + // For each point, compute a linear distance, and take the minimum of them double minDistance = Double.POSITIVE_INFINITY; - + for (final GeoPoint intersectionPoint : intersectionPoints) { if (meetsAllBounds(intersectionPoint, bounds)) { - final double theDistance = intersectionPoint.arcDistance(x,y,z); + final double theDistance = intersectionPoint.arcDistance(x, y, z); if (theDistance < minDistance) { minDistance = theDistance; } } } return minDistance; - } /** * Compute normal distance from plane to a vector. + * * @param v is the vector. * @param bounds are the bounds which constrain the intersection point. * @return the normal distance. 
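For reference, the arc-distance contract reformatted just above can be exercised directly: the method intersects this plane's great circle with the perpendicular plane through the query point, then returns the shortest surface arc to any intersection point that satisfies the bounds (or Double.POSITIVE_INFINITY when none does). A minimal sketch, not part of this patch, assuming the GeoPoint(PlanetModel, lat, lon) constructor and the PlanetModel.SPHERE constant from this same package:

    import org.apache.lucene.spatial3d.geom.GeoPoint;
    import org.apache.lucene.spatial3d.geom.Plane;
    import org.apache.lucene.spatial3d.geom.PlanetModel;

    public class ArcDistanceSketch {
      public static void main(String[] args) {
        // A point at latitude 0.3 radians, longitude 0.0, on the spherical model.
        final GeoPoint point = new GeoPoint(PlanetModel.SPHERE, 0.3, 0.0);
        // normalZPlane is 0x + 0y + 1z + 0 = 0: the plane of the equator.
        // With no Membership bounds supplied, both candidate intersection points
        // are legal, so the result is the angular distance down to the equator.
        final double arc = Plane.normalZPlane.arcDistance(PlanetModel.SPHERE, point);
        System.out.println(arc); // ~0.3 on the unit sphere
      }
    }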
@@ -330,18 +361,20 @@ public class Plane extends Vector { public double normalDistance(final Vector v, final Membership... bounds) { return normalDistance(v.x, v.y, v.z, bounds); } - + /** * Compute normal distance from plane to a vector. + * * @param x is the vector x. * @param y is the vector y. * @param z is the vector z. * @param bounds are the bounds which constrain the intersection point. * @return the normal distance. */ - public double normalDistance(final double x, final double y, final double z, final Membership... bounds) { + public double normalDistance( + final double x, final double y, final double z, final Membership... bounds) { - final double dist = evaluate(x,y,z); + final double dist = evaluate(x, y, z); final double perpX = x - dist * this.x; final double perpY = y - dist * this.y; final double perpZ = z - dist * this.z; @@ -349,12 +382,13 @@ public class Plane extends Vector { if (!meetsAllBounds(perpX, perpY, perpZ, bounds)) { return Double.POSITIVE_INFINITY; } - + return Math.abs(dist); } - + /** * Compute normal distance squared from plane to a vector. + * * @param v is the vector. * @param bounds are the bounds which constrain the intersection point. * @return the normal distance squared. @@ -362,39 +396,43 @@ public class Plane extends Vector { public double normalDistanceSquared(final Vector v, final Membership... bounds) { return normalDistanceSquared(v.x, v.y, v.z, bounds); } - + /** * Compute normal distance squared from plane to a vector. + * * @param x is the vector x. * @param y is the vector y. * @param z is the vector z. * @param bounds are the bounds which constrain the intersection point. * @return the normal distance squared. */ - public double normalDistanceSquared(final double x, final double y, final double z, final Membership... bounds) { - final double normal = normalDistance(x,y,z,bounds); - if (normal == Double.POSITIVE_INFINITY) + public double normalDistanceSquared( + final double x, final double y, final double z, final Membership... bounds) { + final double normal = normalDistance(x, y, z, bounds); + if (normal == Double.POSITIVE_INFINITY) { return normal; + } return normal * normal; } /** - * Compute linear distance from plane to a vector. This is defined - * as the distance from the given point to the nearest intersection of - * this plane with the planet surface. + * Compute linear distance from plane to a vector. This is defined as the distance from the given + * point to the nearest intersection of this plane with the planet surface. + * * @param planetModel is the planet model. * @param v is the point. * @param bounds are the bounds which constrain the intersection point. * @return the linear distance. */ - public double linearDistance(final PlanetModel planetModel, final GeoPoint v, final Membership... bounds) { + public double linearDistance( + final PlanetModel planetModel, final GeoPoint v, final Membership... bounds) { return linearDistance(planetModel, v.x, v.y, v.z, bounds); } - + /** - * Compute linear distance from plane to a vector. This is defined - * as the distance from the given point to the nearest intersection of - * this plane with the planet surface. + * Compute linear distance from plane to a vector. This is defined as the distance from the given + * point to the nearest intersection of this plane with the planet surface. + * * @param planetModel is the planet model. * @param x is the vector x. * @param y is the vector y. 
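For a plane with a unit normal, normalDistance is just |Ax + By + Cz + D|, the straight-line distance to the plane; the bounds are tested at the foot of the perpendicular, and normalDistanceSquared returns the square, propagating POSITIVE_INFINITY unchanged per the guard above. A short sketch, not part of this patch; it assumes Membership's only abstract method is isWithin(x, y, z) (its Vector overload being a default method), so a lambda is usable:

    import org.apache.lucene.spatial3d.geom.Membership;
    import org.apache.lucene.spatial3d.geom.Plane;

    public class NormalDistanceSketch {
      public static void main(String[] args) {
        // normalXPlane is 1x + 0y + 0z + 0 = 0 (the y-z plane), a unit normal.
        System.out.println(Plane.normalXPlane.normalDistance(0.5, 0.0, 0.5)); // 0.5
        System.out.println(Plane.normalXPlane.normalDistanceSquared(0.5, 0.0, 0.5)); // 0.25

        // A bound that rejects the foot of the perpendicular, here (0.0, 0.0, 0.5),
        // turns the distance into POSITIVE_INFINITY.
        final Membership rejectAll = (x, y, z) -> false;
        System.out.println(Plane.normalXPlane.normalDistance(0.5, 0.0, 0.5, rejectAll)); // Infinity
      }
    }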
@@ -402,28 +440,35 @@ public class Plane extends Vector { * @param bounds are the bounds which constrain the intersection point. * @return the linear distance. */ - public double linearDistance(final PlanetModel planetModel, final double x, final double y, final double z, final Membership... bounds) { - if (evaluateIsZero(x,y,z)) { - if (meetsAllBounds(x,y,z, bounds)) + public double linearDistance( + final PlanetModel planetModel, + final double x, + final double y, + final double z, + final Membership... bounds) { + if (evaluateIsZero(x, y, z)) { + if (meetsAllBounds(x, y, z, bounds)) { return 0.0; + } return Double.POSITIVE_INFINITY; } - - // First, compute the perpendicular plane. - final Plane perpPlane = new Plane(this.y * z - this.z * y, this.z * x - this.x * z, this.x * y - this.y * x, 0.0); - // We need to compute the intersection of two planes on the geo surface: this one, and its perpendicular. - // Then, we need to choose which of the two points we want to compute the distance to. We pick the - // shorter distance always. - + // First, compute the perpendicular plane. + final Plane perpPlane = + new Plane(this.y * z - this.z * y, this.z * x - this.x * z, this.x * y - this.y * x, 0.0); + + // We need to compute the intersection of two planes on the geo surface: this one, and its + // perpendicular. Then, we need to choose which of the two points we want to compute the + // distance to. We pick the shorter distance always. + final GeoPoint[] intersectionPoints = findIntersections(planetModel, perpPlane); - + // For each point, compute a linear distance, and take the minimum of them double minDistance = Double.POSITIVE_INFINITY; - + for (final GeoPoint intersectionPoint : intersectionPoints) { if (meetsAllBounds(intersectionPoint, bounds)) { - final double theDistance = intersectionPoint.linearDistance(x,y,z); + final double theDistance = intersectionPoint.linearDistance(x, y, z); if (theDistance < minDistance) { minDistance = theDistance; } @@ -431,24 +476,25 @@ public class Plane extends Vector { } return minDistance; } - + /** - * Compute linear distance squared from plane to a vector. This is defined - * as the distance from the given point to the nearest intersection of - * this plane with the planet surface. + * Compute linear distance squared from plane to a vector. This is defined as the distance from + * the given point to the nearest intersection of this plane with the planet surface. + * * @param planetModel is the planet model. * @param v is the point. * @param bounds are the bounds which constrain the intersection point. * @return the linear distance squared. */ - public double linearDistanceSquared(final PlanetModel planetModel, final GeoPoint v, final Membership... bounds) { + public double linearDistanceSquared( + final PlanetModel planetModel, final GeoPoint v, final Membership... bounds) { return linearDistanceSquared(planetModel, v.x, v.y, v.z, bounds); } - + /** - * Compute linear distance squared from plane to a vector. This is defined - * as the distance from the given point to the nearest intersection of - * this plane with the planet surface. + * Compute linear distance squared from plane to a vector. This is defined as the distance from + * the given point to the nearest intersection of this plane with the planet surface. + * * @param planetModel is the planet model. * @param x is the vector x. * @param y is the vector y. @@ -456,22 +502,32 @@ public class Plane extends Vector { * @param bounds are the bounds which constrain the intersection point. 
* @return the linear distance squared. */ - public double linearDistanceSquared(final PlanetModel planetModel, final double x, final double y, final double z, final Membership... bounds) { + public double linearDistanceSquared( + final PlanetModel planetModel, + final double x, + final double y, + final double z, + final Membership... bounds) { final double linearDistance = linearDistance(planetModel, x, y, z, bounds); return linearDistance * linearDistance; } /** - * Find points on the boundary of the intersection of a plane and the unit sphere, - * given a starting point, and ending point, and a list of proportions of the arc (e.g. 0.25, 0.5, 0.75). + * Find points on the boundary of the intersection of a plane and the unit sphere, given a + * starting point, and ending point, and a list of proportions of the arc (e.g. 0.25, 0.5, 0.75). * The angle between the starting point and ending point is assumed to be less than pi. + * * @param planetModel is the planet model. * @param start is the start point. * @param end is the end point. * @param proportions is an array of fractional proportions measured between start and end. * @return an array of points corresponding to the proportions passed in. */ - public GeoPoint[] interpolate(final PlanetModel planetModel, final GeoPoint start, final GeoPoint end, final double[] proportions) { + public GeoPoint[] interpolate( + final PlanetModel planetModel, + final GeoPoint start, + final GeoPoint end, + final double[] proportions) { // Steps: // (1) Translate (x0,y0,z0) of endpoints into origin-centered place: // x1 = x0 + D*A @@ -494,7 +550,8 @@ public class Plane extends Vector { // z3 = x2 sin ha + z2 cos ha // At this point, z3 should be zero. // Faster: - // sin(ha) = cos(asin(C/sqrt(A^2+B^2+C^2))) = sqrt(1 - C^2/(A^2+B^2+C^2)) = sqrt(A^2+B^2)/sqrt(A^2+B^2+C^2) + // sin(ha) = cos(asin(C/sqrt(A^2+B^2+C^2))) = sqrt(1 - C^2/(A^2+B^2+C^2)) = + // sqrt(A^2+B^2)/sqrt(A^2+B^2+C^2) // cos(ha) = sin(asin(C/sqrt(A^2+B^2+C^2))) = C/sqrt(A^2+B^2+C^2) // (4) Compute interpolations by getting longitudes of original points // la = atan2(y3,x3) @@ -549,7 +606,8 @@ public class Plane extends Vector { sinRA = 0.0; } - // sin(ha) = cos(asin(C/sqrt(A^2+B^2+C^2))) = sqrt(1 - C^2/(A^2+B^2+C^2)) = sqrt(A^2+B^2)/sqrt(A^2+B^2+C^2) + // sin(ha) = cos(asin(C/sqrt(A^2+B^2+C^2))) = sqrt(1 - C^2/(A^2+B^2+C^2)) = + // sqrt(A^2+B^2)/sqrt(A^2+B^2+C^2) // cos(ha) = sin(asin(C/sqrt(A^2+B^2+C^2))) = C/sqrt(A^2+B^2+C^2) sinHA = xyMagnitude; cosHA = C; @@ -572,7 +630,8 @@ public class Plane extends Vector { final double startAngle = Math.atan2(modifiedStart.y, modifiedStart.x); final double endAngle = Math.atan2(modifiedEnd.y, modifiedEnd.x); - final double startMagnitude = Math.sqrt(modifiedStart.x * modifiedStart.x + modifiedStart.y * modifiedStart.y); + final double startMagnitude = + Math.sqrt(modifiedStart.x * modifiedStart.x + modifiedStart.y * modifiedStart.y); double delta; double newEndAngle = endAngle; @@ -595,8 +654,10 @@ public class Plane extends Vector { final double newAngle = startAngle + proportions[i] * delta; final double sinNewAngle = Math.sin(newAngle); final double cosNewAngle = Math.cos(newAngle); - final Vector newVector = new Vector(cosNewAngle * startMagnitude, sinNewAngle * startMagnitude, 0.0); - returnValues[i] = reverseModify(planetModel, newVector, transX, transY, transZ, sinRA, cosRA, sinHA, cosHA); + final Vector newVector = + new Vector(cosNewAngle * startMagnitude, sinNewAngle * startMagnitude, 0.0); + returnValues[i] = + reverseModify(planetModel, 
newVector, transX, transY, transZ, sinRA, cosRA, sinHA, cosHA); } return returnValues; @@ -604,6 +665,7 @@ public class Plane extends Vector { /** * Modify a point to produce a vector in translated/rotated space. + * * @param start is the start point. * @param transX is the translation x value. * @param transY is the translation y value. @@ -614,13 +676,21 @@ public class Plane extends Vector { * @param cosHA is the cosine of the height angle. * @return the modified point. */ - protected static Vector modify(final GeoPoint start, final double transX, final double transY, final double transZ, - final double sinRA, final double cosRA, final double sinHA, final double cosHA) { + protected static Vector modify( + final GeoPoint start, + final double transX, + final double transY, + final double transZ, + final double sinRA, + final double cosRA, + final double sinHA, + final double cosHA) { return start.translate(transX, transY, transZ).rotateXY(sinRA, cosRA).rotateXZ(sinHA, cosHA); } /** * Reverse modify a point to produce a GeoPoint in normal space. + * * @param planetModel is the planet model. * @param point is the translated point. * @param transX is the translation x value. @@ -632,21 +702,31 @@ public class Plane extends Vector { * @param cosHA is the cosine of the height angle. * @return the original point. */ - protected static GeoPoint reverseModify(final PlanetModel planetModel, - final Vector point, final double transX, final double transY, final double transZ, - final double sinRA, final double cosRA, final double sinHA, final double cosHA) { - final Vector result = point.rotateXZ(-sinHA, cosHA).rotateXY(-sinRA, cosRA).translate(-transX, -transY, -transZ); + protected static GeoPoint reverseModify( + final PlanetModel planetModel, + final Vector point, + final double transX, + final double transY, + final double transZ, + final double sinRA, + final double cosRA, + final double sinHA, + final double cosHA) { + final Vector result = + point.rotateXZ(-sinHA, cosHA).rotateXY(-sinRA, cosRA).translate(-transX, -transY, -transZ); return planetModel.createSurfacePoint(result.x, result.y, result.z); } /** * Find the intersection points between two planes, given a set of bounds. + * * @param planetModel is the planet model. * @param q is the plane to intersect with. * @param bounds are the bounds to consider to determine legal intersection points. * @return the set of legal intersection points, or null if the planes are numerically identical. */ - public GeoPoint[] findIntersections(final PlanetModel planetModel, final Plane q, final Membership... bounds) { + public GeoPoint[] findIntersections( + final PlanetModel planetModel, final Plane q, final Membership... bounds) { if (isNumericallyIdentical(q)) { return null; } @@ -663,7 +743,8 @@ public class Plane extends Vector { * @param bounds are the bounds to consider to determine legal intersection points. * @return the set of legal crossing points, or null if the planes are numerically identical. */ - public GeoPoint[] findCrossings(final PlanetModel planetModel, final Plane q, final Membership... bounds) { + public GeoPoint[] findCrossings( + final PlanetModel planetModel, final Plane q, final Membership... bounds) { if (isNumericallyIdentical(q)) { return null; } @@ -671,8 +752,8 @@ public class Plane extends Vector { } /** - * Checks if three points are coplanar in any of the three planes they can describe. - * The planes are all assumed to go through the origin. 
+ * Checks if three points are coplanar in any of the three planes they can describe. The planes + * are all assumed to go through the origin. * * @param A The first point. * @param B The second point. @@ -680,36 +761,44 @@ public class Plane extends Vector { * @return true if provided points are coplanar in any of the three planes they can describe. */ public static boolean arePointsCoplanar(final GeoPoint A, final GeoPoint B, final GeoPoint C) { - return Vector.crossProductEvaluateIsZero(A, B, C) || - Vector.crossProductEvaluateIsZero(A, C, B) || - Vector.crossProductEvaluateIsZero(B, C, A); + return Vector.crossProductEvaluateIsZero(A, B, C) + || Vector.crossProductEvaluateIsZero(A, C, B) + || Vector.crossProductEvaluateIsZero(B, C, A); } - + /** * Find the intersection points between two planes, given a set of bounds. * * @param planetModel is the planet model to use in finding points. - * @param q is the plane to intersect with. - * @param bounds is the set of bounds. + * @param q is the plane to intersect with. + * @param bounds is the set of bounds. * @param moreBounds is another set of bounds. * @return the intersection point(s) on the unit sphere, if there are any. */ - protected GeoPoint[] findIntersections(final PlanetModel planetModel, final Plane q, final Membership[] bounds, final Membership[] moreBounds) { - //System.err.println("Looking for intersection between plane "+this+" and plane "+q+" within bounds"); + protected GeoPoint[] findIntersections( + final PlanetModel planetModel, + final Plane q, + final Membership[] bounds, + final Membership[] moreBounds) { + // System.err.println("Looking for intersection between plane "+this+" and plane "+q+" within + // bounds"); // Unnormalized, unchecked... final double lineVectorX = y * q.z - z * q.y; final double lineVectorY = z * q.x - x * q.z; final double lineVectorZ = x * q.y - y * q.x; - if (Math.abs(lineVectorX) < MINIMUM_RESOLUTION && Math.abs(lineVectorY) < MINIMUM_RESOLUTION && Math.abs(lineVectorZ) < MINIMUM_RESOLUTION) { + if (Math.abs(lineVectorX) < MINIMUM_RESOLUTION + && Math.abs(lineVectorY) < MINIMUM_RESOLUTION + && Math.abs(lineVectorZ) < MINIMUM_RESOLUTION) { // Degenerate case: parallel planes - //System.err.println(" planes are parallel - no intersection"); + // System.err.println(" planes are parallel - no intersection"); return NO_POINTS; } // The line will have the equation: A t + A0 = x, B t + B0 = y, C t + C0 = z. - // We have A, B, and C. In order to come up with A0, B0, and C0, we need to find a point that is on both planes. - // To do this, we find the largest vector value (either x, y, or z), and look for a point that solves both plane equations - // simultaneous. For example, let's say that the vector is (0.5,0.5,1), and the two plane equations are: + // We have A, B, and C. In order to come up with A0, B0, and C0, we need to find a point that + // is on both planes. To do this, we find the largest vector value (either x, y, or z), and + // look for a point that solves both plane equations simultaneously.
For example, let's say + // that the vector is (0.5,0.5,1), and the two plane equations are: // 0.7 x + 0.3 y + 0.1 z + 0.0 = 0 // and // 0.9 x - 0.1 y + 0.2 z + 4.0 = 0 @@ -735,7 +824,7 @@ public class Plane extends Vector { if (Math.abs(denomYZ) >= Math.abs(denomXZ) && Math.abs(denomYZ) >= Math.abs(denomXY)) { // X is the biggest, so our point will have x0 = 0.0 if (Math.abs(denomYZ) < MINIMUM_RESOLUTION_SQUARED) { - //System.err.println(" Denominator is zero: no intersection"); + // System.err.println(" Denominator is zero: no intersection"); return NO_POINTS; } final double denom = 1.0 / denomYZ; @@ -745,7 +834,7 @@ public class Plane extends Vector { } else if (Math.abs(denomXZ) >= Math.abs(denomXY) && Math.abs(denomXZ) >= Math.abs(denomYZ)) { // Y is the biggest, so y0 = 0.0 if (Math.abs(denomXZ) < MINIMUM_RESOLUTION_SQUARED) { - //System.err.println(" Denominator is zero: no intersection"); + // System.err.println(" Denominator is zero: no intersection"); return NO_POINTS; } final double denom = 1.0 / denomXZ; @@ -755,7 +844,7 @@ public class Plane extends Vector { } else { // Z is the biggest, so Z0 = 0.0 if (Math.abs(denomXY) < MINIMUM_RESOLUTION_SQUARED) { - //System.err.println(" Denominator is zero: no intersection"); + // System.err.println(" Denominator is zero: no intersection"); return NO_POINTS; } final double denom = 1.0 / denomXY; @@ -764,22 +853,33 @@ public class Plane extends Vector { z0 = 0.0; } - // Once an intersecting line is determined, the next step is to intersect that line with the ellipsoid, which - // will yield zero, one, or two points. + // Once an intersecting line is determined, the next step is to intersect that line with the + // ellipsoid, which will yield zero, one, or two points. // The ellipsoid equation: 1,0 = x^2/a^2 + y^2/b^2 + z^2/zScaling^2 // 1.0 = (At+A0)^2/a^2 + (Bt+B0)^2/b^2 + (Ct+C0)^2/zScaling^2 - // A^2 t^2 / a^2 + 2AA0t / a^2 + A0^2 / a^2 + B^2 t^2 / b^2 + 2BB0t / b^2 + B0^2 / b^2 + C^2 t^2 / zScaling^2 + 2CC0t / zScaling^2 + C0^2 / zScaling^2 - 1,0 = 0.0 - // [A^2 / a^2 + B^2 / b^2 + C^2 / zScaling^2] t^2 + [2AA0 / a^2 + 2BB0 / b^2 + 2CC0 / zScaling^2] t + [A0^2 / a^2 + B0^2 / b^2 + C0^2 / zScaling^2 - 1,0] = 0.0 + // A^2 t^2 / a^2 + 2AA0t / a^2 + A0^2 / a^2 + B^2 t^2 / b^2 + 2BB0t / b^2 + B0^2 / b^2 + C^2 t^2 + // / zScaling^2 + 2CC0t / zScaling^2 + C0^2 / zScaling^2 - 1,0 = 0.0 + // [A^2 / a^2 + B^2 / b^2 + C^2 / zScaling^2] t^2 + [2AA0 / a^2 + 2BB0 / b^2 + 2CC0 / + // zScaling^2] t + [A0^2 / a^2 + B0^2 / b^2 + C0^2 / zScaling^2 - 1,0] = 0.0 // Use the quadratic formula to determine t values and candidate point(s) - final double A = lineVectorX * lineVectorX * planetModel.inverseXYScalingSquared + - lineVectorY * lineVectorY * planetModel.inverseXYScalingSquared + - lineVectorZ * lineVectorZ * planetModel.inverseZScalingSquared; - final double B = 2.0 * (lineVectorX * x0 * planetModel.inverseXYScalingSquared + lineVectorY * y0 * planetModel.inverseXYScalingSquared + lineVectorZ * z0 * planetModel.inverseZScalingSquared); - final double C = x0 * x0 * planetModel.inverseXYScalingSquared + y0 * y0 * planetModel.inverseXYScalingSquared + z0 * z0 * planetModel.inverseZScalingSquared - 1.0; + final double A = + lineVectorX * lineVectorX * planetModel.inverseXYScalingSquared + + lineVectorY * lineVectorY * planetModel.inverseXYScalingSquared + + lineVectorZ * lineVectorZ * planetModel.inverseZScalingSquared; + final double B = + 2.0 + * (lineVectorX * x0 * planetModel.inverseXYScalingSquared + + lineVectorY * y0 * 
planetModel.inverseXYScalingSquared + + lineVectorZ * z0 * planetModel.inverseZScalingSquared); + final double C = + x0 * x0 * planetModel.inverseXYScalingSquared + + y0 * y0 * planetModel.inverseXYScalingSquared + + z0 * z0 * planetModel.inverseZScalingSquared + - 1.0; final double BsquaredMinus = B * B - 4.0 * A * C; if (Math.abs(BsquaredMinus) < MINIMUM_RESOLUTION_SQUARED) { - //System.err.println(" One point of intersection"); + // System.err.println(" One point of intersection"); final double inverse2A = 1.0 / (2.0 * A); // One solution only final double t = -B * inverse2A; @@ -797,9 +897,9 @@ public class Plane extends Vector { return NO_POINTS; } } - return new GeoPoint[]{new GeoPoint(pointX, pointY, pointZ)}; + return new GeoPoint[] {new GeoPoint(pointX, pointY, pointZ)}; } else if (BsquaredMinus > 0.0) { - //System.err.println(" Two points of intersection"); + // System.err.println(" Two points of intersection"); final double inverse2A = 1.0 / (2.0 * A); // Two solutions final double sqrtTerm = Math.sqrt(BsquaredMinus); @@ -844,17 +944,19 @@ public class Plane extends Vector { } if (point1Valid && point2Valid) { - return new GeoPoint[]{new GeoPoint(point1X, point1Y, point1Z), new GeoPoint(point2X, point2Y, point2Z)}; + return new GeoPoint[] { + new GeoPoint(point1X, point1Y, point1Z), new GeoPoint(point2X, point2Y, point2Z) + }; } if (point1Valid) { - return new GeoPoint[]{new GeoPoint(point1X, point1Y, point1Z)}; + return new GeoPoint[] {new GeoPoint(point1X, point1Y, point1Z)}; } if (point2Valid) { - return new GeoPoint[]{new GeoPoint(point2X, point2Y, point2Z)}; + return new GeoPoint[] {new GeoPoint(point2X, point2Y, point2Z)}; } return NO_POINTS; } else { - //System.err.println(" no solutions - no intersection"); + // System.err.println(" no solutions - no intersection"); return NO_POINTS; } } @@ -865,27 +967,35 @@ public class Plane extends Vector { * but must cross at two. * * @param planetModel is the planet model to use in finding points. - * @param q is the plane to intersect with. - * @param bounds is the set of bounds. + * @param q is the plane to intersect with. + * @param bounds is the set of bounds. * @param moreBounds is another set of bounds. * @return the intersection point(s) on the ellipsoid, if there are any. */ - protected GeoPoint[] findCrossings(final PlanetModel planetModel, final Plane q, final Membership[] bounds, final Membership[] moreBounds) { - // This code in this method is very similar to findIntersections(), but eliminates the cases where + protected GeoPoint[] findCrossings( + final PlanetModel planetModel, + final Plane q, + final Membership[] bounds, + final Membership[] moreBounds) { + // This code in this method is very similar to findIntersections(), but eliminates the cases + // where // crossings are detected. // Unnormalized, unchecked... final double lineVectorX = y * q.z - z * q.y; final double lineVectorY = z * q.x - x * q.z; final double lineVectorZ = x * q.y - y * q.x; - if (Math.abs(lineVectorX) < MINIMUM_RESOLUTION && Math.abs(lineVectorY) < MINIMUM_RESOLUTION && Math.abs(lineVectorZ) < MINIMUM_RESOLUTION) { + if (Math.abs(lineVectorX) < MINIMUM_RESOLUTION + && Math.abs(lineVectorY) < MINIMUM_RESOLUTION + && Math.abs(lineVectorZ) < MINIMUM_RESOLUTION) { // Degenerate case: parallel planes return NO_POINTS; } // The line will have the equation: A t + A0 = x, B t + B0 = y, C t + C0 = z. - // We have A, B, and C. In order to come up with A0, B0, and C0, we need to find a point that is on both planes. 
- // To do this, we find the largest vector value (either x, y, or z), and look for a point that solves both plane equations - // simultaneous. For example, let's say + // We have A, B, and C. In order to come up with A0, B0, and C0, we need to find a point that + // is on both planes. To do this, we find the largest vector value (either x, y, or z), and + // look for a point that solves both plane equations simultaneously. For example, let's say + // that the vector is (0.5,0.5,1), and the two plane equations are: // 0.7 x + 0.3 y + 0.1 z + 0.0 = 0 // and // 0.9 x - 0.1 y + 0.2 z + 4.0 = 0 @@ -937,18 +1047,29 @@ z0 = 0.0; } - // Once an intersecting line is determined, the next step is to intersect that line with the ellipsoid, which - // will yield zero, one, or two points. + // Once an intersecting line is determined, the next step is to intersect that line with the + // ellipsoid, which will yield zero, one, or two points. // The ellipsoid equation: 1,0 = x^2/a^2 + y^2/b^2 + z^2/zScaling^2 // 1.0 = (At+A0)^2/a^2 + (Bt+B0)^2/b^2 + (Ct+C0)^2/zScaling^2 - // A^2 t^2 / a^2 + 2AA0t / a^2 + A0^2 / a^2 + B^2 t^2 / b^2 + 2BB0t / b^2 + B0^2 / b^2 + C^2 t^2 / zScaling^2 + 2CC0t / zScaling^2 + C0^2 / zScaling^2 - 1,0 = 0.0 - // [A^2 / a^2 + B^2 / b^2 + C^2 / zScaling^2] t^2 + [2AA0 / a^2 + 2BB0 / b^2 + 2CC0 / zScaling^2] t + [A0^2 / a^2 + B0^2 / b^2 + C0^2 / zScaling^2 - 1,0] = 0.0 + // A^2 t^2 / a^2 + 2AA0t / a^2 + A0^2 / a^2 + B^2 t^2 / b^2 + 2BB0t / b^2 + B0^2 / b^2 + C^2 t^2 + // / zScaling^2 + 2CC0t / zScaling^2 + C0^2 / zScaling^2 - 1,0 = 0.0 + // [A^2 / a^2 + B^2 / b^2 + C^2 / zScaling^2] t^2 + [2AA0 / a^2 + 2BB0 / b^2 + 2CC0 / + // zScaling^2] t + [A0^2 / a^2 + B0^2 / b^2 + C0^2 / zScaling^2 - 1,0] = 0.0 // Use the quadratic formula to determine t values and candidate point(s) - final double A = lineVectorX * lineVectorX * planetModel.inverseXYScalingSquared + - lineVectorY * lineVectorY * planetModel.inverseXYScalingSquared + - lineVectorZ * lineVectorZ * planetModel.inverseZScalingSquared; - final double B = 2.0 * (lineVectorX * x0 * planetModel.inverseXYScalingSquared + lineVectorY * y0 * planetModel.inverseXYScalingSquared + lineVectorZ * z0 * planetModel.inverseZScalingSquared); - final double C = x0 * x0 * planetModel.inverseXYScalingSquared + y0 * y0 * planetModel.inverseXYScalingSquared + z0 * z0 * planetModel.inverseZScalingSquared - 1.0; + final double A = + lineVectorX * lineVectorX * planetModel.inverseXYScalingSquared + + lineVectorY * lineVectorY * planetModel.inverseXYScalingSquared + + lineVectorZ * lineVectorZ * planetModel.inverseZScalingSquared; + final double B = + 2.0 + * (lineVectorX * x0 * planetModel.inverseXYScalingSquared + + lineVectorY * y0 * planetModel.inverseXYScalingSquared + + lineVectorZ * z0 * planetModel.inverseZScalingSquared); + final double C = + x0 * x0 * planetModel.inverseXYScalingSquared + + y0 * y0 * planetModel.inverseXYScalingSquared + + z0 * z0 * planetModel.inverseZScalingSquared + - 1.0; final double BsquaredMinus = B * B - 4.0 * A * C; if (Math.abs(BsquaredMinus) < MINIMUM_RESOLUTION_SQUARED) { @@ -999,13 +1120,15 @@ } if (point1Valid && point2Valid) { - return new GeoPoint[]{new GeoPoint(point1X, point1Y, point1Z), new GeoPoint(point2X, point2Y, point2Z)}; + return new GeoPoint[] { + new GeoPoint(point1X, point1Y, point1Z), new GeoPoint(point2X, point2Y, point2Z) + }; } if (point1Valid) { - return new
GeoPoint[]{new GeoPoint(point1X, point1Y, point1Z)}; + return new GeoPoint[] {new GeoPoint(point1X, point1Y, point1Z)}; } if (point2Valid) { - return new GeoPoint[]{new GeoPoint(point2X, point2Y, point2Z)}; + return new GeoPoint[] {new GeoPoint(point2X, point2Y, point2Z)}; } return NO_POINTS; } else { @@ -1015,27 +1138,34 @@ } /** - * Record intersection points for planes with error bounds. - * This method calls the Bounds object with every intersection point it can find that matches the criteria. - * Each plane is considered to have two sides, one that is D + MINIMUM_RESOLUTION, and one that is - * D - MINIMUM_RESOLUTION. Both are examined and intersection points determined. + * Record intersection points for planes with error bounds. This method calls the Bounds object + * with every intersection point it can find that matches the criteria. Each plane is considered + * to have two sides, one that is D + MINIMUM_RESOLUTION, and one that is D - MINIMUM_RESOLUTION. + * Both are examined and intersection points determined. */ - protected void findIntersectionBounds(final PlanetModel planetModel, final Bounds boundsInfo, final Plane q, final Membership... bounds) { - //System.out.println("Finding intersection bounds"); + protected void findIntersectionBounds( + final PlanetModel planetModel, + final Bounds boundsInfo, + final Plane q, + final Membership... bounds) { + // System.out.println("Finding intersection bounds"); // Unnormalized, unchecked... final double lineVectorX = y * q.z - z * q.y; final double lineVectorY = z * q.x - x * q.z; final double lineVectorZ = x * q.y - y * q.x; - if (Math.abs(lineVectorX) < MINIMUM_RESOLUTION && Math.abs(lineVectorY) < MINIMUM_RESOLUTION && Math.abs(lineVectorZ) < MINIMUM_RESOLUTION) { + if (Math.abs(lineVectorX) < MINIMUM_RESOLUTION + && Math.abs(lineVectorY) < MINIMUM_RESOLUTION + && Math.abs(lineVectorZ) < MINIMUM_RESOLUTION) { // Degenerate case: parallel planes - //System.out.println(" planes are parallel - no intersection"); + // System.out.println(" planes are parallel - no intersection"); return; } // The line will have the equation: A t + A0 = x, B t + B0 = y, C t + C0 = z. - // We have A, B, and C. In order to come up with A0, B0, and C0, we need to find a point that is on both planes. - // To do this, we find the largest vector value (either x, y, or z), and look for a point that solves both plane equations - // simultaneous. For example, let's say that the vector is (0.5,0.5,1), and the two plane equations are: + // We have A, B, and C. In order to come up with A0, B0, and C0, we need to find a point that + // is on both planes. To do this, we find the largest vector value (either x, y, or z), and + // look for a point that solves both plane equations simultaneously.
For example, let's say + // that the vector is (0.5,0.5,1), and the two plane equations are: // 0.7 x + 0.3 y + 0.1 z + 0.0 = 0 // and // 0.9 x - 0.1 y + 0.2 z + 4.0 = 0 @@ -1056,102 +1186,190 @@ public class Plane extends Vector { final double denomXZ = this.x * q.z - this.z * q.x; final double denomXY = this.x * q.y - this.y * q.x; if (Math.abs(denomYZ) >= Math.abs(denomXZ) && Math.abs(denomYZ) >= Math.abs(denomXY)) { - //System.out.println("X biggest"); + // System.out.println("X biggest"); // X is the biggest, so our point will have x0 = 0.0 if (Math.abs(denomYZ) < MINIMUM_RESOLUTION_SQUARED) { - //System.out.println(" Denominator is zero: no intersection"); + // System.out.println(" Denominator is zero: no intersection"); return; } final double denom = 1.0 / denomYZ; // Each value of D really is two values of D. That makes 4 combinations. - recordLineBounds(planetModel, boundsInfo, - lineVectorX, lineVectorY, lineVectorZ, - 0.0, (-(this.D+MINIMUM_RESOLUTION) * q.z - this.z * -(q.D+MINIMUM_RESOLUTION)) * denom, (this.y * -(q.D+MINIMUM_RESOLUTION) + (this.D+MINIMUM_RESOLUTION) * q.y) * denom, - bounds); - recordLineBounds(planetModel, boundsInfo, - lineVectorX, lineVectorY, lineVectorZ, - 0.0, (-(this.D-MINIMUM_RESOLUTION) * q.z - this.z * -(q.D+MINIMUM_RESOLUTION)) * denom, (this.y * -(q.D+MINIMUM_RESOLUTION) + (this.D-MINIMUM_RESOLUTION) * q.y) * denom, - bounds); - recordLineBounds(planetModel, boundsInfo, - lineVectorX, lineVectorY, lineVectorZ, - 0.0, (-(this.D+MINIMUM_RESOLUTION) * q.z - this.z * -(q.D-MINIMUM_RESOLUTION)) * denom, (this.y * -(q.D-MINIMUM_RESOLUTION) + (this.D+MINIMUM_RESOLUTION) * q.y) * denom, - bounds); - recordLineBounds(planetModel, boundsInfo, - lineVectorX, lineVectorY, lineVectorZ, - 0.0, (-(this.D-MINIMUM_RESOLUTION) * q.z - this.z * -(q.D-MINIMUM_RESOLUTION)) * denom, (this.y * -(q.D-MINIMUM_RESOLUTION) + (this.D-MINIMUM_RESOLUTION) * q.y) * denom, - bounds); + recordLineBounds( + planetModel, + boundsInfo, + lineVectorX, + lineVectorY, + lineVectorZ, + 0.0, + (-(this.D + MINIMUM_RESOLUTION) * q.z - this.z * -(q.D + MINIMUM_RESOLUTION)) * denom, + (this.y * -(q.D + MINIMUM_RESOLUTION) + (this.D + MINIMUM_RESOLUTION) * q.y) * denom, + bounds); + recordLineBounds( + planetModel, + boundsInfo, + lineVectorX, + lineVectorY, + lineVectorZ, + 0.0, + (-(this.D - MINIMUM_RESOLUTION) * q.z - this.z * -(q.D + MINIMUM_RESOLUTION)) * denom, + (this.y * -(q.D + MINIMUM_RESOLUTION) + (this.D - MINIMUM_RESOLUTION) * q.y) * denom, + bounds); + recordLineBounds( + planetModel, + boundsInfo, + lineVectorX, + lineVectorY, + lineVectorZ, + 0.0, + (-(this.D + MINIMUM_RESOLUTION) * q.z - this.z * -(q.D - MINIMUM_RESOLUTION)) * denom, + (this.y * -(q.D - MINIMUM_RESOLUTION) + (this.D + MINIMUM_RESOLUTION) * q.y) * denom, + bounds); + recordLineBounds( + planetModel, + boundsInfo, + lineVectorX, + lineVectorY, + lineVectorZ, + 0.0, + (-(this.D - MINIMUM_RESOLUTION) * q.z - this.z * -(q.D - MINIMUM_RESOLUTION)) * denom, + (this.y * -(q.D - MINIMUM_RESOLUTION) + (this.D - MINIMUM_RESOLUTION) * q.y) * denom, + bounds); } else if (Math.abs(denomXZ) >= Math.abs(denomXY) && Math.abs(denomXZ) >= Math.abs(denomYZ)) { - //System.out.println("Y biggest"); + // System.out.println("Y biggest"); // Y is the biggest, so y0 = 0.0 if (Math.abs(denomXZ) < MINIMUM_RESOLUTION_SQUARED) { - //System.out.println(" Denominator is zero: no intersection"); + // System.out.println(" Denominator is zero: no intersection"); return; } final double denom = 1.0 / denomXZ; - recordLineBounds(planetModel, 
boundsInfo, - lineVectorX, lineVectorY, lineVectorZ, - (-(this.D+MINIMUM_RESOLUTION) * q.z - this.z * -(q.D+MINIMUM_RESOLUTION)) * denom, 0.0, (this.x * -(q.D+MINIMUM_RESOLUTION) + (this.D+MINIMUM_RESOLUTION) * q.x) * denom, - bounds); - recordLineBounds(planetModel, boundsInfo, - lineVectorX, lineVectorY, lineVectorZ, - (-(this.D-MINIMUM_RESOLUTION) * q.z - this.z * -(q.D+MINIMUM_RESOLUTION)) * denom, 0.0, (this.x * -(q.D+MINIMUM_RESOLUTION) + (this.D-MINIMUM_RESOLUTION) * q.x) * denom, - bounds); - recordLineBounds(planetModel, boundsInfo, - lineVectorX, lineVectorY, lineVectorZ, - (-(this.D+MINIMUM_RESOLUTION) * q.z - this.z * -(q.D-MINIMUM_RESOLUTION)) * denom, 0.0, (this.x * -(q.D-MINIMUM_RESOLUTION) + (this.D+MINIMUM_RESOLUTION) * q.x) * denom, - bounds); - recordLineBounds(planetModel, boundsInfo, - lineVectorX, lineVectorY, lineVectorZ, - (-(this.D-MINIMUM_RESOLUTION) * q.z - this.z * -(q.D-MINIMUM_RESOLUTION)) * denom, 0.0, (this.x * -(q.D-MINIMUM_RESOLUTION) + (this.D-MINIMUM_RESOLUTION) * q.x) * denom, - bounds); + recordLineBounds( + planetModel, + boundsInfo, + lineVectorX, + lineVectorY, + lineVectorZ, + (-(this.D + MINIMUM_RESOLUTION) * q.z - this.z * -(q.D + MINIMUM_RESOLUTION)) * denom, + 0.0, + (this.x * -(q.D + MINIMUM_RESOLUTION) + (this.D + MINIMUM_RESOLUTION) * q.x) * denom, + bounds); + recordLineBounds( + planetModel, + boundsInfo, + lineVectorX, + lineVectorY, + lineVectorZ, + (-(this.D - MINIMUM_RESOLUTION) * q.z - this.z * -(q.D + MINIMUM_RESOLUTION)) * denom, + 0.0, + (this.x * -(q.D + MINIMUM_RESOLUTION) + (this.D - MINIMUM_RESOLUTION) * q.x) * denom, + bounds); + recordLineBounds( + planetModel, + boundsInfo, + lineVectorX, + lineVectorY, + lineVectorZ, + (-(this.D + MINIMUM_RESOLUTION) * q.z - this.z * -(q.D - MINIMUM_RESOLUTION)) * denom, + 0.0, + (this.x * -(q.D - MINIMUM_RESOLUTION) + (this.D + MINIMUM_RESOLUTION) * q.x) * denom, + bounds); + recordLineBounds( + planetModel, + boundsInfo, + lineVectorX, + lineVectorY, + lineVectorZ, + (-(this.D - MINIMUM_RESOLUTION) * q.z - this.z * -(q.D - MINIMUM_RESOLUTION)) * denom, + 0.0, + (this.x * -(q.D - MINIMUM_RESOLUTION) + (this.D - MINIMUM_RESOLUTION) * q.x) * denom, + bounds); } else { - //System.out.println("Z biggest"); + // System.out.println("Z biggest"); // Z is the biggest, so Z0 = 0.0 if (Math.abs(denomXY) < MINIMUM_RESOLUTION_SQUARED) { - //System.out.println(" Denominator is zero: no intersection"); + // System.out.println(" Denominator is zero: no intersection"); return; } final double denom = 1.0 / denomXY; - recordLineBounds(planetModel, boundsInfo, - lineVectorX, lineVectorY, lineVectorZ, - (-(this.D+MINIMUM_RESOLUTION) * q.y - this.y * -(q.D+MINIMUM_RESOLUTION)) * denom, (this.x * -(q.D+MINIMUM_RESOLUTION) + (this.D+MINIMUM_RESOLUTION) * q.x) * denom, 0.0, - bounds); - recordLineBounds(planetModel, boundsInfo, - lineVectorX, lineVectorY, lineVectorZ, - (-(this.D-MINIMUM_RESOLUTION) * q.y - this.y * -(q.D+MINIMUM_RESOLUTION)) * denom, (this.x * -(q.D+MINIMUM_RESOLUTION) + (this.D-MINIMUM_RESOLUTION) * q.x) * denom, 0.0, - bounds); - recordLineBounds(planetModel, boundsInfo, - lineVectorX, lineVectorY, lineVectorZ, - (-(this.D+MINIMUM_RESOLUTION) * q.y - this.y * -(q.D-MINIMUM_RESOLUTION)) * denom, (this.x * -(q.D-MINIMUM_RESOLUTION) + (this.D+MINIMUM_RESOLUTION) * q.x) * denom, 0.0, - bounds); - recordLineBounds(planetModel, boundsInfo, - lineVectorX, lineVectorY, lineVectorZ, - (-(this.D-MINIMUM_RESOLUTION) * q.y - this.y * -(q.D-MINIMUM_RESOLUTION)) * denom, (this.x * -(q.D-MINIMUM_RESOLUTION) 
+ (this.D-MINIMUM_RESOLUTION) * q.x) * denom, 0.0, - bounds); + recordLineBounds( + planetModel, + boundsInfo, + lineVectorX, + lineVectorY, + lineVectorZ, + (-(this.D + MINIMUM_RESOLUTION) * q.y - this.y * -(q.D + MINIMUM_RESOLUTION)) * denom, + (this.x * -(q.D + MINIMUM_RESOLUTION) + (this.D + MINIMUM_RESOLUTION) * q.x) * denom, + 0.0, + bounds); + recordLineBounds( + planetModel, + boundsInfo, + lineVectorX, + lineVectorY, + lineVectorZ, + (-(this.D - MINIMUM_RESOLUTION) * q.y - this.y * -(q.D + MINIMUM_RESOLUTION)) * denom, + (this.x * -(q.D + MINIMUM_RESOLUTION) + (this.D - MINIMUM_RESOLUTION) * q.x) * denom, + 0.0, + bounds); + recordLineBounds( + planetModel, + boundsInfo, + lineVectorX, + lineVectorY, + lineVectorZ, + (-(this.D + MINIMUM_RESOLUTION) * q.y - this.y * -(q.D - MINIMUM_RESOLUTION)) * denom, + (this.x * -(q.D - MINIMUM_RESOLUTION) + (this.D + MINIMUM_RESOLUTION) * q.x) * denom, + 0.0, + bounds); + recordLineBounds( + planetModel, + boundsInfo, + lineVectorX, + lineVectorY, + lineVectorZ, + (-(this.D - MINIMUM_RESOLUTION) * q.y - this.y * -(q.D - MINIMUM_RESOLUTION)) * denom, + (this.x * -(q.D - MINIMUM_RESOLUTION) + (this.D - MINIMUM_RESOLUTION) * q.x) * denom, + 0.0, + bounds); } } - - private static void recordLineBounds(final PlanetModel planetModel, - final Bounds boundsInfo, - final double lineVectorX, final double lineVectorY, final double lineVectorZ, - final double x0, final double y0, final double z0, - final Membership... bounds) { - // Once an intersecting line is determined, the next step is to intersect that line with the ellipsoid, which - // will yield zero, one, or two points. + + private static void recordLineBounds( + final PlanetModel planetModel, + final Bounds boundsInfo, + final double lineVectorX, + final double lineVectorY, + final double lineVectorZ, + final double x0, + final double y0, + final double z0, + final Membership... bounds) { + // Once an intersecting line is determined, the next step is to intersect that line with the + // ellipsoid, which will yield zero, one, or two points. 
// The ellipsoid equation: 1,0 = x^2/a^2 + y^2/b^2 + z^2/zScaling^2 // 1.0 = (At+A0)^2/a^2 + (Bt+B0)^2/b^2 + (Ct+C0)^2/zScaling^2 - // A^2 t^2 / a^2 + 2AA0t / a^2 + A0^2 / a^2 + B^2 t^2 / b^2 + 2BB0t / b^2 + B0^2 / b^2 + C^2 t^2 / zScaling^2 + 2CC0t / zScaling^2 + C0^2 / zScaling^2 - 1,0 = 0.0 - // [A^2 / a^2 + B^2 / b^2 + C^2 / zScaling^2] t^2 + [2AA0 / a^2 + 2BB0 / b^2 + 2CC0 / zScaling^2] t + [A0^2 / a^2 + B0^2 / b^2 + C0^2 / zScaling^2 - 1,0] = 0.0 + // A^2 t^2 / a^2 + 2AA0t / a^2 + A0^2 / a^2 + B^2 t^2 / b^2 + 2BB0t / b^2 + B0^2 / b^2 + C^2 t^2 + // / zScaling^2 + 2CC0t / zScaling^2 + C0^2 / zScaling^2 - 1,0 = 0.0 + // [A^2 / a^2 + B^2 / b^2 + C^2 / zScaling^2] t^2 + [2AA0 / a^2 + 2BB0 / b^2 + 2CC0 / + // zScaling^2] t + [A0^2 / a^2 + B0^2 / b^2 + C0^2 / zScaling^2 - 1,0] = 0.0 // Use the quadratic formula to determine t values and candidate point(s) - final double A = lineVectorX * lineVectorX * planetModel.inverseXYScalingSquared + - lineVectorY * lineVectorY * planetModel.inverseXYScalingSquared + - lineVectorZ * lineVectorZ * planetModel.inverseZScalingSquared; - final double B = 2.0 * (lineVectorX * x0 * planetModel.inverseXYScalingSquared + lineVectorY * y0 * planetModel.inverseXYScalingSquared + lineVectorZ * z0 * planetModel.inverseZScalingSquared); - final double C = x0 * x0 * planetModel.inverseXYScalingSquared + y0 * y0 * planetModel.inverseXYScalingSquared + z0 * z0 * planetModel.inverseZScalingSquared - 1.0; + final double A = + lineVectorX * lineVectorX * planetModel.inverseXYScalingSquared + + lineVectorY * lineVectorY * planetModel.inverseXYScalingSquared + + lineVectorZ * lineVectorZ * planetModel.inverseZScalingSquared; + final double B = + 2.0 + * (lineVectorX * x0 * planetModel.inverseXYScalingSquared + + lineVectorY * y0 * planetModel.inverseXYScalingSquared + + lineVectorZ * z0 * planetModel.inverseZScalingSquared); + final double C = + x0 * x0 * planetModel.inverseXYScalingSquared + + y0 * y0 * planetModel.inverseXYScalingSquared + + z0 * z0 * planetModel.inverseZScalingSquared + - 1.0; final double BsquaredMinus = B * B - 4.0 * A * C; if (Math.abs(BsquaredMinus) < MINIMUM_RESOLUTION_SQUARED) { - //System.err.println(" One point of intersection"); + // System.err.println(" One point of intersection"); final double inverse2A = 1.0 / (2.0 * A); // One solution only final double t = -B * inverse2A; @@ -1166,7 +1384,7 @@ public class Plane extends Vector { } boundsInfo.addPoint(new GeoPoint(pointX, pointY, pointZ)); } else if (BsquaredMinus > 0.0) { - //System.err.println(" Two points of intersection"); + // System.err.println(" Two points of intersection"); final double inverse2A = 1.0 / (2.0 * A); // Two solutions final double sqrtTerm = Math.sqrt(BsquaredMinus); @@ -1219,63 +1437,70 @@ public class Plane extends Vector { */ /** - * Accumulate (x,y,z) bounds information for this plane, intersected with another and the - * world. - * Updates min/max information using intersection points found. These include the error - * envelope for the planes (D +/- MINIMUM_RESOLUTION). - * @param planetModel is the planet model to use in determining bounds. - * @param boundsInfo is the xyz info to update with additional bounding information. - * @param p is the other plane. - * @param bounds are the surfaces delineating what's inside the shape. - */ - public void recordBounds(final PlanetModel planetModel, final XYZBounds boundsInfo, final Plane p, final Membership... 
bounds) { - findIntersectionBounds(planetModel, boundsInfo, p, bounds); - } - - - /** - * Accumulate (x,y,z) bounds information for this plane, intersected with the unit sphere. - * Updates min/max information, using max/min points found - * within the specified bounds. + * Accumulate (x,y,z) bounds information for this plane, intersected with another and the world. + * Updates min/max information using intersection points found. These include the error envelope + * for the planes (D +/- MINIMUM_RESOLUTION). * * @param planetModel is the planet model to use in determining bounds. * @param boundsInfo is the xyz info to update with additional bounding information. - * @param bounds are the surfaces delineating what's inside the shape. + * @param p is the other plane. + * @param bounds are the surfaces delineating what's inside the shape. */ - public void recordBounds(final PlanetModel planetModel, final XYZBounds boundsInfo, final Membership... bounds) { + public void recordBounds( + final PlanetModel planetModel, + final XYZBounds boundsInfo, + final Plane p, + final Membership... bounds) { + findIntersectionBounds(planetModel, boundsInfo, p, bounds); + } + + /** + * Accumulate (x,y,z) bounds information for this plane, intersected with the unit sphere. Updates + * min/max information, using max/min points found within the specified bounds. + * + * @param planetModel is the planet model to use in determining bounds. + * @param boundsInfo is the xyz info to update with additional bounding information. + * @param bounds are the surfaces delineating what's inside the shape. + */ + public void recordBounds( + final PlanetModel planetModel, final XYZBounds boundsInfo, final Membership... bounds) { // Basic plan is to do three intersections of the plane and the planet. // For min/max x, we intersect a vertical plane such that y = 0. // For min/max y, we intersect a vertical plane such that x = 0. - // For min/max z, we intersect a vertical plane that is chosen to go through the high point of the arc. - // For clarity, load local variables with good names + // For min/max z, we intersect a vertical plane that is chosen to go through the high point of + // the arc. For clarity, load local variables with good names final double A = this.x; final double B = this.y; final double C = this.z; // Do Z. This can be done simply because it is symmetrical. if (!boundsInfo.isSmallestMinZ(planetModel) || !boundsInfo.isLargestMaxZ(planetModel)) { - //System.err.println(" computing Z bound"); + // System.err.println(" computing Z bound"); // Compute Z bounds for this arc // With ellipsoids, we really have only one viable way to do this computation. - // Specifically, we compute an appropriate vertical plane, based on the current plane's x-y orientation, and - // then intersect it with this one and with the ellipsoid. This gives us zero, one, or two points to use - // as bounds. - // There is one special case: horizontal circles. These require TWO vertical planes: one for the x, and one for - // the y, and we use all four resulting points in the bounds computation. + // Specifically, we compute an appropriate vertical plane, based on the current plane's x-y + // orientation, and then intersect it with this one and with the ellipsoid. This gives us + // zero, one, or two points to use as bounds. + // There is one special case: horizontal circles. These require TWO vertical planes: one for + // the x, and one for the y, and we use all four resulting points in the bounds computation. 
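As a standalone illustration of the vertical-plane step just described: the z extremes of the plane/ellipsoid circle lie in the vertical plane that contains the Z axis and the horizontal direction (A, B, 0) of this plane's normal, so that vertical plane's own normal is the horizontal perpendicular (B, -A, 0), normalized. A hypothetical sketch under that reading; the signs and orientation in Lucene's actual constructNormalizedZPlane may differ:

    // Sketch: vertical plane through the Z axis containing direction (A, B, 0).
    // Returns {x, y, z, D} coefficients of the plane equation x*X + y*Y + z*Z + D = 0.
    static double[] verticalZPlane(double A, double B) {
      final double magnitude = Math.sqrt(A * A + B * B); // nonzero in the non-degenerate branch
      return new double[] {B / magnitude, -A / magnitude, 0.0, 0.0};
    }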
if ((Math.abs(A) >= MINIMUM_RESOLUTION || Math.abs(B) >= MINIMUM_RESOLUTION)) { // NOT a degenerate case - //System.err.println(" not degenerate"); - final Plane normalizedZPlane = constructNormalizedZPlane(A,B); - final GeoPoint[] points = findIntersections(planetModel, normalizedZPlane, bounds, NO_BOUNDS); + // System.err.println(" not degenerate"); + final Plane normalizedZPlane = constructNormalizedZPlane(A, B); + final GeoPoint[] points = + findIntersections(planetModel, normalizedZPlane, bounds, NO_BOUNDS); for (final GeoPoint point : points) { assert planetModel.pointOnSurface(point); - //System.err.println(" Point = "+point+"; this.evaluate(point)="+this.evaluate(point)+"; normalizedZPlane.evaluate(point)="+normalizedZPlane.evaluate(point)); + // System.err.println(" Point = "+point+"; + // this.evaluate(point)="+this.evaluate(point)+"; + // normalizedZPlane.evaluate(point)="+normalizedZPlane.evaluate(point)); addPoint(boundsInfo, bounds, point); } } else { // Since a==b==0, any plane including the Z axis suffices. - //System.err.println(" Perpendicular to z"); - final GeoPoint[] points = findIntersections(planetModel, normalYPlane, NO_BOUNDS, NO_BOUNDS); + // System.err.println(" Perpendicular to z"); + final GeoPoint[] points = + findIntersections(planetModel, normalYPlane, NO_BOUNDS, NO_BOUNDS); if (points.length > 0) { boundsInfo.addZValue(points[0]); } @@ -1283,71 +1508,83 @@ public class Plane extends Vector { } // First, compute common subexpressions - final double k = 1.0 / ((x*x + y*y)*planetModel.xyScaling *planetModel.xyScaling + z*z*planetModel.zScaling *planetModel.zScaling); + final double k = + 1.0 + / ((x * x + y * y) * planetModel.xyScaling * planetModel.xyScaling + + z * z * planetModel.zScaling * planetModel.zScaling); final double abSquared = planetModel.xyScaling * planetModel.xyScaling; final double cSquared = planetModel.zScaling * planetModel.zScaling; final double ASquared = A * A; final double BSquared = B * B; final double CSquared = C * C; - - final double r = 2.0*D*k; + + final double r = 2.0 * D * k; final double rSquared = r * r; - + if (!boundsInfo.isSmallestMinX(planetModel) || !boundsInfo.isLargestMaxX(planetModel)) { // For min/max x, we need to use lagrange multipliers. // // For this, we need grad(F(x,y,z)) = (dF/dx, dF/dy, dF/dz). 
// - // Minimize and maximize f(x,y,z) = x, with respect to g(x,y,z) = Ax + By + Cz - D and h(x,y,z) = x^2/xyScaling^2 + y^2/xyScaling^2 + z^2/zScaling^2 - 1 + // Minimize and maximize f(x,y,z) = x, with respect to g(x,y,z) = Ax + By + Cz - D and + // h(x,y,z) = x^2/xyScaling^2 + y^2/xyScaling^2 + z^2/zScaling^2 - 1 // // grad(f(x,y,z)) = (1,0,0) // grad(g(x,y,z)) = (A,B,C) // grad(h(x,y,z)) = (2x/xyScaling^2,2y/xyScaling^2,2z/zScaling^2) // // Equations we need to simultaneously solve: - // + // // grad(f(x,y,z)) = l * grad(g(x,y,z)) + m * grad(h(x,y,z)) // g(x,y,z) = 0 // h(x,y,z) = 0 - // + // // Equations: // 1 = l*A + m*2x/xyScaling^2 // 0 = l*B + m*2y/xyScaling^2 // 0 = l*C + m*2z/zScaling^2 // Ax + By + Cz + D = 0 // x^2/xyScaling^2 + y^2/xyScaling^2 + z^2/zScaling^2 - 1 = 0 - // + // // Solve for x,y,z in terms of (l, m): - // + // // x = ((1 - l*A) * xyScaling^2 ) / (2 * m) // y = (-l*B * xyScaling^2) / ( 2 * m) // z = (-l*C * zScaling^2)/ (2 * m) - // + // // Two equations, two unknowns: - // - // A * (((1 - l*A) * xyScaling^2 ) / (2 * m)) + B * ((-l*B * xyScaling^2) / ( 2 * m)) + C * ((-l*C * zScaling^2)/ (2 * m)) + D = 0 - // + // + // A * (((1 - l*A) * xyScaling^2 ) / (2 * m)) + B * ((-l*B * xyScaling^2) / ( 2 * m)) + C * + // ((-l*C * zScaling^2)/ (2 * m)) + D = 0 + // // and - // - // (((1 - l*A) * xyScaling^2 ) / (2 * m))^2/xyScaling^2 + ((-l*B * xyScaling^2) / ( 2 * m))^2/xyScaling^2 + ((-l*C * zScaling^2)/ (2 * m))^2/zScaling^2 - 1 = 0 - // + // + // (((1 - l*A) * xyScaling^2 ) / (2 * m))^2/xyScaling^2 + ((-l*B * xyScaling^2) / ( 2 * + // m))^2/xyScaling^2 + ((-l*C * zScaling^2)/ (2 * m))^2/zScaling^2 - 1 = 0 + // // Simple: solve for l and m, then find x from it. - // + // // (a) Use first equation to find l in terms of m. - // - // A * (((1 - l*A) * xyScaling^2 ) / (2 * m)) + B * ((-l*B * xyScaling^2) / ( 2 * m)) + C * ((-l*C * zScaling^2)/ (2 * m)) + D = 0 - // A * ((1 - l*A) * xyScaling^2 ) + B * (-l*B * xyScaling^2) + C * (-l*C * zScaling^2) + D * 2 * m = 0 - // A * xyScaling^2 - l*A^2* xyScaling^2 - B^2 * l * xyScaling^2 - C^2 * l * zScaling^2 + D * 2 * m = 0 - // - l *(A^2* xyScaling^2 + B^2 * xyScaling^2 + C^2 * zScaling^2) + (A * xyScaling^2 + D * 2 * m) = 0 - // l = (A * xyScaling^2 + D * 2 * m) / (A^2* xyScaling^2 + B^2 * xyScaling^2 + C^2 * zScaling^2) - // l = A * xyScaling^2 / (A^2* xyScaling^2 + B^2 * xyScaling^2 + C^2 * zScaling^2) + m * 2 * D / (A^2* xyScaling^2 + B^2 * xyScaling^2 + C^2 * zScaling^2) - // + // + // A * (((1 - l*A) * xyScaling^2 ) / (2 * m)) + B * ((-l*B * xyScaling^2) / ( 2 * m)) + C * + // ((-l*C * zScaling^2)/ (2 * m)) + D = 0 + // A * ((1 - l*A) * xyScaling^2 ) + B * (-l*B * xyScaling^2) + C * (-l*C * zScaling^2) + D * 2 + // * m = 0 + // A * xyScaling^2 - l*A^2* xyScaling^2 - B^2 * l * xyScaling^2 - C^2 * l * zScaling^2 + D * 2 + // * m = 0 + // - l *(A^2* xyScaling^2 + B^2 * xyScaling^2 + C^2 * zScaling^2) + (A * xyScaling^2 + D * 2 * + // m) = 0 + // l = (A * xyScaling^2 + D * 2 * m) / (A^2* xyScaling^2 + B^2 * xyScaling^2 + C^2 * + // zScaling^2) + // l = A * xyScaling^2 / (A^2* xyScaling^2 + B^2 * xyScaling^2 + C^2 * zScaling^2) + m * 2 * D + // / (A^2* xyScaling^2 + B^2 * xyScaling^2 + C^2 * zScaling^2) + // // For convenience: - // + // // k = 1.0 / (A^2* xyScaling^2 + B^2 * xyScaling^2 + C^2 * zScaling^2) - // + // // Then: - // + // // l = A * xyScaling^2 * k + m * 2 * D * k // l = k * (A*xyScaling^2 + m*2*D) // @@ -1358,38 +1595,62 @@ public class Plane extends Vector { // // l = (r*m + q) // l^2 = (r^2 * m^2 + 2*r*m*q + 
q^2) - // + // // (b) Simplify the second equation before substitution - // - // (((1 - l*A) * xyScaling^2 ) / (2 * m))^2/xyScaling^2 + ((-l*B * xyScaling^2) / ( 2 * m))^2/xyScaling^2 + ((-l*C * zScaling^2)/ (2 * m))^2/zScaling^2 - 1 = 0 - // ((1 - l*A) * xyScaling^2 )^2/xyScaling^2 + (-l*B * xyScaling^2)^2/xyScaling^2 + (-l*C * zScaling^2)^2/zScaling^2 = 4 * m^2 + // + // (((1 - l*A) * xyScaling^2 ) / (2 * m))^2/xyScaling^2 + ((-l*B * xyScaling^2) / ( 2 * + // m))^2/xyScaling^2 + ((-l*C * zScaling^2)/ (2 * m))^2/zScaling^2 - 1 = 0 + // ((1 - l*A) * xyScaling^2 )^2/xyScaling^2 + (-l*B * xyScaling^2)^2/xyScaling^2 + (-l*C * + // zScaling^2)^2/zScaling^2 = 4 * m^2 // (1 - l*A)^2 * xyScaling^2 + (-l*B)^2 * xyScaling^2 + (-l*C)^2 * zScaling^2 = 4 * m^2 - // (1 - 2*l*A + l^2*A^2) * xyScaling^2 + l^2*B^2 * xyScaling^2 + l^2*C^2 * zScaling^2 = 4 * m^2 - // xyScaling^2 - 2*A*xyScaling^2*l + A^2*xyScaling^2*l^2 + B^2*xyScaling^2*l^2 + C^2*zScaling^2*l^2 - 4*m^2 = 0 - // + // (1 - 2*l*A + l^2*A^2) * xyScaling^2 + l^2*B^2 * xyScaling^2 + l^2*C^2 * zScaling^2 = 4 * + // m^2 + // xyScaling^2 - 2*A*xyScaling^2*l + A^2*xyScaling^2*l^2 + B^2*xyScaling^2*l^2 + + // C^2*zScaling^2*l^2 - 4*m^2 = 0 + // // (zScaling) Substitute for l, l^2 // - // xyScaling^2 - 2*A*xyScaling^2*(r*m + q) + A^2*xyScaling^2*(r^2 * m^2 + 2*r*m*q + q^2) + B^2*xyScaling^2*(r^2 * m^2 + 2*r*m*q + q^2) + C^2*zScaling^2*(r^2 * m^2 + 2*r*m*q + q^2) - 4*m^2 = 0 - // xyScaling^2 - 2*A*xyScaling^2*r*m - 2*A*xyScaling^2*q + A^2*xyScaling^2*r^2*m^2 + 2*A^2*xyScaling^2*r*q*m + - // A^2*xyScaling^2*q^2 + B^2*xyScaling^2*r^2*m^2 + 2*B^2*xyScaling^2*r*q*m + B^2*xyScaling^2*q^2 + C^2*zScaling^2*r^2*m^2 + 2*C^2*zScaling^2*r*q*m + C^2*zScaling^2*q^2 - 4*m^2 = 0 + // xyScaling^2 - 2*A*xyScaling^2*(r*m + q) + A^2*xyScaling^2*(r^2 * m^2 + 2*r*m*q + q^2) + + // B^2*xyScaling^2*(r^2 * m^2 + 2*r*m*q + q^2) + C^2*zScaling^2*(r^2 * m^2 + 2*r*m*q + q^2) - + // 4*m^2 = 0 + // xyScaling^2 - 2*A*xyScaling^2*r*m - 2*A*xyScaling^2*q + A^2*xyScaling^2*r^2*m^2 + + // 2*A^2*xyScaling^2*r*q*m + + // A^2*xyScaling^2*q^2 + B^2*xyScaling^2*r^2*m^2 + 2*B^2*xyScaling^2*r*q*m + + // B^2*xyScaling^2*q^2 + C^2*zScaling^2*r^2*m^2 + 2*C^2*zScaling^2*r*q*m + C^2*zScaling^2*q^2 + // - 4*m^2 = 0 // // (d) Group // // m^2 * [A^2*xyScaling^2*r^2 + B^2*xyScaling^2*r^2 + C^2*zScaling^2*r^2 - 4] + - // m * [- 2*A*xyScaling^2*r + 2*A^2*xyScaling^2*r*q + 2*B^2*xyScaling^2*r*q + 2*C^2*zScaling^2*r*q] + - // [xyScaling^2 - 2*A*xyScaling^2*q + A^2*xyScaling^2*q^2 + B^2*xyScaling^2*q^2 + C^2*zScaling^2*q^2] = 0 - + // m * [- 2*A*xyScaling^2*r + 2*A^2*xyScaling^2*r*q + 2*B^2*xyScaling^2*r*q + + // 2*C^2*zScaling^2*r*q] + + // [xyScaling^2 - 2*A*xyScaling^2*q + A^2*xyScaling^2*q^2 + B^2*xyScaling^2*q^2 + + // C^2*zScaling^2*q^2] = 0 + // Useful subexpressions for this bound - final double q = A*abSquared*k; + final double q = A * abSquared * k; final double qSquared = q * q; // Quadratic equation - final double a = ASquared*abSquared*rSquared + BSquared*abSquared*rSquared + CSquared*cSquared*rSquared - 4.0; - final double b = - 2.0*A*abSquared*r + 2.0*ASquared*abSquared*r*q + 2.0*BSquared*abSquared*r*q + 2.0*CSquared*cSquared*r*q; - final double c = abSquared - 2.0*A*abSquared*q + ASquared*abSquared*qSquared + BSquared*abSquared*qSquared + CSquared*cSquared*qSquared; - + final double a = + ASquared * abSquared * rSquared + + BSquared * abSquared * rSquared + + CSquared * cSquared * rSquared + - 4.0; + final double b = + -2.0 * A * abSquared * r + + 2.0 * ASquared * abSquared * r * q + + 2.0 * 
BSquared * abSquared * r * q + + 2.0 * CSquared * cSquared * r * q; + final double c = + abSquared + - 2.0 * A * abSquared * q + + ASquared * abSquared * qSquared + + BSquared * abSquared * qSquared + + CSquared * cSquared * qSquared; + if (Math.abs(a) >= MINIMUM_RESOLUTION_SQUARED) { - final double sqrtTerm = b*b - 4.0*a*c; + final double sqrtTerm = b * b - 4.0 * a * c; if (Math.abs(sqrtTerm) < MINIMUM_RESOLUTION_SQUARED) { // One solution final double m = -b / (2.0 * a); @@ -1400,20 +1661,28 @@ public class Plane extends Vector { // y = (-l*B * xyScaling^2) / ( 2 * m) // z = (-l*C * zScaling^2)/ (2 * m) final double denom0 = 0.5 / m; - final GeoPoint thePoint = new GeoPoint((1.0-l*A) * abSquared * denom0, -l*B * abSquared * denom0, -l*C * cSquared * denom0); - //Math is not quite accurate enough for this - //assert planetModel.pointOnSurface(thePoint): "Point: "+thePoint+"; Planetmodel="+planetModel+"; A="+A+" B="+B+" C="+C+" D="+D+" planetfcn="+ - // (thePoint.x*thePoint.x*planetModel.inverseXYScaling*planetModel.inverseXYScaling + thePoint.y*thePoint.y*planetModel.inverseXYScaling*planetModel.inverseXYScaling + thePoint.z*thePoint.z*planetModel.inverseZScaling*planetModel.inverseZScaling); - //assert evaluateIsZero(thePoint): "Evaluation of point: "+evaluate(thePoint); + final GeoPoint thePoint = + new GeoPoint( + (1.0 - l * A) * abSquared * denom0, + -l * B * abSquared * denom0, + -l * C * cSquared * denom0); + // Math is not quite accurate enough for this + // assert planetModel.pointOnSurface(thePoint): "Point: "+thePoint+"; + // Planetmodel="+planetModel+"; A="+A+" B="+B+" C="+C+" D="+D+" planetfcn="+ + // (thePoint.x*thePoint.x*planetModel.inverseXYScaling*planetModel.inverseXYScaling + + // thePoint.y*thePoint.y*planetModel.inverseXYScaling*planetModel.inverseXYScaling + + // thePoint.z*thePoint.z*planetModel.inverseZScaling*planetModel.inverseZScaling); + // assert evaluateIsZero(thePoint): "Evaluation of point: "+evaluate(thePoint); addPoint(boundsInfo, bounds, thePoint); } else { - // This is a plane of the form A=n B=0 C=0. We can set a bound only by noting the D value. - boundsInfo.addXValue(-D/A); + // This is a plane of the form A=n B=0 C=0. We can set a bound only by noting the D + // value. 
+ boundsInfo.addXValue(-D / A); } } else if (sqrtTerm > 0.0) { // Two solutions final double sqrtResult = Math.sqrt(sqrtTerm); - final double commonDenom = 0.5/a; + final double commonDenom = 0.5 / a; final double m1 = (-b + sqrtResult) * commonDenom; assert Math.abs(a * m1 * m1 + b * m1 + c) < MINIMUM_RESOLUTION; final double m2 = (-b - sqrtResult) * commonDenom; @@ -1426,20 +1695,35 @@ public class Plane extends Vector { // z = (-l*C * zScaling^2)/ (2 * m) final double denom1 = 0.5 / m1; final double denom2 = 0.5 / m2; - final GeoPoint thePoint1 = new GeoPoint((1.0-l1*A) * abSquared * denom1, -l1*B * abSquared * denom1, -l1*C * cSquared * denom1); - final GeoPoint thePoint2 = new GeoPoint((1.0-l2*A) * abSquared * denom2, -l2*B * abSquared * denom2, -l2*C * cSquared * denom2); - //Math is not quite accurate enough for this - //assert planetModel.pointOnSurface(thePoint1): "Point1: "+thePoint1+"; Planetmodel="+planetModel+"; A="+A+" B="+B+" C="+C+" D="+D+" planetfcn="+ - // (thePoint1.x*thePoint1.x*planetModel.inverseXYScaling*planetModel.inverseXYScaling + thePoint1.y*thePoint1.y*planetModel.inverseXYScaling*planetModel.inverseXYScaling + thePoint1.z*thePoint1.z*planetModel.inverseZScaling*planetModel.inverseZScaling); - //assert planetModel.pointOnSurface(thePoint2): "Point1: "+thePoint2+"; Planetmodel="+planetModel+"; A="+A+" B="+B+" C="+C+" D="+D+" planetfcn="+ - // (thePoint2.x*thePoint2.x*planetModel.inverseXYScaling*planetModel.inverseXYScaling + thePoint2.y*thePoint2.y*planetModel.inverseXYScaling*planetModel.inverseXYScaling + thePoint2.z*thePoint2.z*planetModel.inverseZScaling*planetModel.inverseZScaling); - //assert evaluateIsZero(thePoint1): "Evaluation of point1: "+evaluate(thePoint1); - //assert evaluateIsZero(thePoint2): "Evaluation of point2: "+evaluate(thePoint2); + final GeoPoint thePoint1 = + new GeoPoint( + (1.0 - l1 * A) * abSquared * denom1, + -l1 * B * abSquared * denom1, + -l1 * C * cSquared * denom1); + final GeoPoint thePoint2 = + new GeoPoint( + (1.0 - l2 * A) * abSquared * denom2, + -l2 * B * abSquared * denom2, + -l2 * C * cSquared * denom2); + // Math is not quite accurate enough for this + // assert planetModel.pointOnSurface(thePoint1): "Point1: "+thePoint1+"; + // Planetmodel="+planetModel+"; A="+A+" B="+B+" C="+C+" D="+D+" planetfcn="+ + // (thePoint1.x*thePoint1.x*planetModel.inverseXYScaling*planetModel.inverseXYScaling + + // thePoint1.y*thePoint1.y*planetModel.inverseXYScaling*planetModel.inverseXYScaling + + // thePoint1.z*thePoint1.z*planetModel.inverseZScaling*planetModel.inverseZScaling); + // assert planetModel.pointOnSurface(thePoint2): "Point1: "+thePoint2+"; + // Planetmodel="+planetModel+"; A="+A+" B="+B+" C="+C+" D="+D+" planetfcn="+ + // (thePoint2.x*thePoint2.x*planetModel.inverseXYScaling*planetModel.inverseXYScaling + + // thePoint2.y*thePoint2.y*planetModel.inverseXYScaling*planetModel.inverseXYScaling + + // thePoint2.z*thePoint2.z*planetModel.inverseZScaling*planetModel.inverseZScaling); + // assert evaluateIsZero(thePoint1): "Evaluation of point1: "+evaluate(thePoint1); + // assert evaluateIsZero(thePoint2): "Evaluation of point2: "+evaluate(thePoint2); addPoint(boundsInfo, bounds, thePoint1); addPoint(boundsInfo, bounds, thePoint2); } else { - // This is a plane of the form A=n B=0 C=0. We can set a bound only by noting the D value. - boundsInfo.addXValue(-D/A); + // This is a plane of the form A=n B=0 C=0. We can set a bound only by noting the D + // value. 
+ boundsInfo.addXValue(-D / A); } } else { // No solutions @@ -1452,73 +1736,88 @@ public class Plane extends Vector { // y = (-l*B * xyScaling^2) / ( 2 * m) // z = (-l*C * zScaling^2)/ (2 * m) final double denom0 = 0.5 / m; - final GeoPoint thePoint = new GeoPoint((1.0-l*A) * abSquared * denom0, -l*B * abSquared * denom0, -l*C * cSquared * denom0); - //Math is not quite accurate enough for this - //assert planetModel.pointOnSurface(thePoint): "Point: "+thePoint+"; Planetmodel="+planetModel+"; A="+A+" B="+B+" C="+C+" D="+D+" planetfcn="+ - // (thePoint.x*thePoint.x*planetModel.inverseXYScaling*planetModel.inverseXYScaling + thePoint.y*thePoint.y*planetModel.inverseXYScaling*planetModel.inverseXYScaling + thePoint.z*thePoint.z*planetModel.inverseZScaling*planetModel.inverseZScaling); - //assert evaluateIsZero(thePoint): "Evaluation of point: "+evaluate(thePoint); + final GeoPoint thePoint = + new GeoPoint( + (1.0 - l * A) * abSquared * denom0, + -l * B * abSquared * denom0, + -l * C * cSquared * denom0); + // Math is not quite accurate enough for this + // assert planetModel.pointOnSurface(thePoint): "Point: "+thePoint+"; + // Planetmodel="+planetModel+"; A="+A+" B="+B+" C="+C+" D="+D+" planetfcn="+ + // (thePoint.x*thePoint.x*planetModel.inverseXYScaling*planetModel.inverseXYScaling + + // thePoint.y*thePoint.y*planetModel.inverseXYScaling*planetModel.inverseXYScaling + + // thePoint.z*thePoint.z*planetModel.inverseZScaling*planetModel.inverseZScaling); + // assert evaluateIsZero(thePoint): "Evaluation of point: "+evaluate(thePoint); addPoint(boundsInfo, bounds, thePoint); } else { // Something went very wrong; a = b = 0 } } - + // Do Y if (!boundsInfo.isSmallestMinY(planetModel) || !boundsInfo.isLargestMaxY(planetModel)) { // For min/max x, we need to use lagrange multipliers. // // For this, we need grad(F(x,y,z)) = (dF/dx, dF/dy, dF/dz). 
// - // Minimize and maximize f(x,y,z) = y, with respect to g(x,y,z) = Ax + By + Cz - D and h(x,y,z) = x^2/xyScaling^2 + y^2/xyScaling^2 + z^2/zScaling^2 - 1 + // Minimize and maximize f(x,y,z) = y, with respect to g(x,y,z) = Ax + By + Cz - D and + // h(x,y,z) = x^2/xyScaling^2 + y^2/xyScaling^2 + z^2/zScaling^2 - 1 // // grad(f(x,y,z)) = (0,1,0) // grad(g(x,y,z)) = (A,B,C) // grad(h(x,y,z)) = (2x/xyScaling^2,2y/xyScaling^2,2z/zScaling^2) // // Equations we need to simultaneously solve: - // + // // grad(f(x,y,z)) = l * grad(g(x,y,z)) + m * grad(h(x,y,z)) // g(x,y,z) = 0 // h(x,y,z) = 0 - // + // // Equations: // 0 = l*A + m*2x/xyScaling^2 // 1 = l*B + m*2y/xyScaling^2 // 0 = l*C + m*2z/zScaling^2 // Ax + By + Cz + D = 0 // x^2/xyScaling^2 + y^2/xyScaling^2 + z^2/zScaling^2 - 1 = 0 - // + // // Solve for x,y,z in terms of (l, m): - // + // // x = (-l*A * xyScaling^2 ) / (2 * m) // y = ((1 - l*B) * xyScaling^2) / ( 2 * m) // z = (-l*C * zScaling^2)/ (2 * m) - // + // // Two equations, two unknowns: - // - // A * ((-l*A * xyScaling^2 ) / (2 * m)) + B * (((1 - l*B) * xyScaling^2) / ( 2 * m)) + C * ((-l*C * zScaling^2)/ (2 * m)) + D = 0 - // + // + // A * ((-l*A * xyScaling^2 ) / (2 * m)) + B * (((1 - l*B) * xyScaling^2) / ( 2 * m)) + C * + // ((-l*C * zScaling^2)/ (2 * m)) + D = 0 + // // and - // - // ((-l*A * xyScaling^2 ) / (2 * m))^2/xyScaling^2 + (((1 - l*B) * xyScaling^2) / ( 2 * m))^2/xyScaling^2 + ((-l*C * zScaling^2)/ (2 * m))^2/zScaling^2 - 1 = 0 - // + // + // ((-l*A * xyScaling^2 ) / (2 * m))^2/xyScaling^2 + (((1 - l*B) * xyScaling^2) / ( 2 * + // m))^2/xyScaling^2 + ((-l*C * zScaling^2)/ (2 * m))^2/zScaling^2 - 1 = 0 + // // Simple: solve for l and m, then find y from it. - // + // // (a) Use first equation to find l in terms of m. - // - // A * ((-l*A * xyScaling^2 ) / (2 * m)) + B * (((1 - l*B) * xyScaling^2) / ( 2 * m)) + C * ((-l*C * zScaling^2)/ (2 * m)) + D = 0 - // A * (-l*A * xyScaling^2 ) + B * ((1-l*B) * xyScaling^2) + C * (-l*C * zScaling^2) + D * 2 * m = 0 + // + // A * ((-l*A * xyScaling^2 ) / (2 * m)) + B * (((1 - l*B) * xyScaling^2) / ( 2 * m)) + C * + // ((-l*C * zScaling^2)/ (2 * m)) + D = 0 + // A * (-l*A * xyScaling^2 ) + B * ((1-l*B) * xyScaling^2) + C * (-l*C * zScaling^2) + D * 2 * + // m = 0 // -A^2*l*xyScaling^2 + B*xyScaling^2 - l*B^2*xyScaling^2 - C^2*l*zScaling^2 + D*2*m = 0 - // - l *(A^2* xyScaling^2 + B^2 * xyScaling^2 + C^2 * zScaling^2) + (B * xyScaling^2 + D * 2 * m) = 0 - // l = (B * xyScaling^2 + D * 2 * m) / (A^2* xyScaling^2 + B^2 * xyScaling^2 + C^2 * zScaling^2) - // l = B * xyScaling^2 / (A^2* xyScaling^2 + B^2 * xyScaling^2 + C^2 * zScaling^2) + m * 2 * D / (A^2* xyScaling^2 + B^2 * xyScaling^2 + C^2 * zScaling^2) - // + // - l *(A^2* xyScaling^2 + B^2 * xyScaling^2 + C^2 * zScaling^2) + (B * xyScaling^2 + D * 2 * + // m) = 0 + // l = (B * xyScaling^2 + D * 2 * m) / (A^2* xyScaling^2 + B^2 * xyScaling^2 + C^2 * + // zScaling^2) + // l = B * xyScaling^2 / (A^2* xyScaling^2 + B^2 * xyScaling^2 + C^2 * zScaling^2) + m * 2 * D + // / (A^2* xyScaling^2 + B^2 * xyScaling^2 + C^2 * zScaling^2) + // // For convenience: - // + // // k = 1.0 / (A^2* xyScaling^2 + B^2 * xyScaling^2 + C^2 * zScaling^2) - // + // // Then: - // + // // l = B * xyScaling^2 * k + m * 2 * D * k // l = k * (B*xyScaling^2 + m*2*D) // @@ -1529,40 +1828,63 @@ public class Plane extends Vector { // // l = (r*m + q) // l^2 = (r^2 * m^2 + 2*r*m*q + q^2) - // + // // (b) Simplify the second equation before substitution - // - // ((-l*A * xyScaling^2 ) / (2 * m))^2/xyScaling^2 + 
(((1 - l*B) * xyScaling^2) / ( 2 * m))^2/xyScaling^2 + ((-l*C * zScaling^2)/ (2 * m))^2/zScaling^2 - 1 = 0 - // (-l*A * xyScaling^2 )^2/xyScaling^2 + ((1 - l*B) * xyScaling^2)^2/xyScaling^2 + (-l*C * zScaling^2)^2/zScaling^2 = 4 * m^2 + // + // ((-l*A * xyScaling^2 ) / (2 * m))^2/xyScaling^2 + (((1 - l*B) * xyScaling^2) / ( 2 * + // m))^2/xyScaling^2 + ((-l*C * zScaling^2)/ (2 * m))^2/zScaling^2 - 1 = 0 + // (-l*A * xyScaling^2 )^2/xyScaling^2 + ((1 - l*B) * xyScaling^2)^2/xyScaling^2 + (-l*C * + // zScaling^2)^2/zScaling^2 = 4 * m^2 // (-l*A)^2 * xyScaling^2 + (1 - l*B)^2 * xyScaling^2 + (-l*C)^2 * zScaling^2 = 4 * m^2 - // l^2*A^2 * xyScaling^2 + (1 - 2*l*B + l^2*B^2) * xyScaling^2 + l^2*C^2 * zScaling^2 = 4 * m^2 - // A^2*xyScaling^2*l^2 + xyScaling^2 - 2*B*xyScaling^2*l + B^2*xyScaling^2*l^2 + C^2*zScaling^2*l^2 - 4*m^2 = 0 - // + // l^2*A^2 * xyScaling^2 + (1 - 2*l*B + l^2*B^2) * xyScaling^2 + l^2*C^2 * zScaling^2 = 4 * + // m^2 + // A^2*xyScaling^2*l^2 + xyScaling^2 - 2*B*xyScaling^2*l + B^2*xyScaling^2*l^2 + + // C^2*zScaling^2*l^2 - 4*m^2 = 0 + // // (zScaling) Substitute for l, l^2 // - // A^2*xyScaling^2*(r^2 * m^2 + 2*r*m*q + q^2) + xyScaling^2 - 2*B*xyScaling^2*(r*m + q) + B^2*xyScaling^2*(r^2 * m^2 + 2*r*m*q + q^2) + C^2*zScaling^2*(r^2 * m^2 + 2*r*m*q + q^2) - 4*m^2 = 0 - // A^2*xyScaling^2*r^2*m^2 + 2*A^2*xyScaling^2*r*q*m + A^2*xyScaling^2*q^2 + xyScaling^2 - 2*B*xyScaling^2*r*m - 2*B*xyScaling^2*q + B^2*xyScaling^2*r^2*m^2 + - // 2*B^2*xyScaling^2*r*q*m + B^2*xyScaling^2*q^2 + C^2*zScaling^2*r^2*m^2 + 2*C^2*zScaling^2*r*q*m + C^2*zScaling^2*q^2 - 4*m^2 = 0 + // A^2*xyScaling^2*(r^2 * m^2 + 2*r*m*q + q^2) + xyScaling^2 - 2*B*xyScaling^2*(r*m + q) + + // B^2*xyScaling^2*(r^2 * m^2 + 2*r*m*q + q^2) + C^2*zScaling^2*(r^2 * m^2 + 2*r*m*q + q^2) - + // 4*m^2 = 0 + // A^2*xyScaling^2*r^2*m^2 + 2*A^2*xyScaling^2*r*q*m + A^2*xyScaling^2*q^2 + xyScaling^2 - + // 2*B*xyScaling^2*r*m - 2*B*xyScaling^2*q + B^2*xyScaling^2*r^2*m^2 + + // 2*B^2*xyScaling^2*r*q*m + B^2*xyScaling^2*q^2 + C^2*zScaling^2*r^2*m^2 + + // 2*C^2*zScaling^2*r*q*m + C^2*zScaling^2*q^2 - 4*m^2 = 0 // // (d) Group // // m^2 * [A^2*xyScaling^2*r^2 + B^2*xyScaling^2*r^2 + C^2*zScaling^2*r^2 - 4] + - // m * [2*A^2*xyScaling^2*r*q - 2*B*xyScaling^2*r + 2*B^2*xyScaling^2*r*q + 2*C^2*zScaling^2*r*q] + - // [A^2*xyScaling^2*q^2 + xyScaling^2 - 2*B*xyScaling^2*q + B^2*xyScaling^2*q^2 + C^2*zScaling^2*q^2] = 0 + // m * [2*A^2*xyScaling^2*r*q - 2*B*xyScaling^2*r + 2*B^2*xyScaling^2*r*q + + // 2*C^2*zScaling^2*r*q] + + // [A^2*xyScaling^2*q^2 + xyScaling^2 - 2*B*xyScaling^2*q + B^2*xyScaling^2*q^2 + + // C^2*zScaling^2*q^2] = 0 + + // System.err.println(" computing Y bound"); - //System.err.println(" computing Y bound"); - // Useful subexpressions for this bound - final double q = B*abSquared*k; + final double q = B * abSquared * k; final double qSquared = q * q; // Quadratic equation - final double a = ASquared*abSquared*rSquared + BSquared*abSquared*rSquared + CSquared*cSquared*rSquared - 4.0; - final double b = 2.0*ASquared*abSquared*r*q - 2.0*B*abSquared*r + 2.0*BSquared*abSquared*r*q + 2.0*CSquared*cSquared*r*q; - final double c = ASquared*abSquared*qSquared + abSquared - 2.0*B*abSquared*q + BSquared*abSquared*qSquared + CSquared*cSquared*qSquared; + final double a = + ASquared * abSquared * rSquared + + BSquared * abSquared * rSquared + + CSquared * cSquared * rSquared + - 4.0; + final double b = + 2.0 * ASquared * abSquared * r * q + - 2.0 * B * abSquared * r + + 2.0 * BSquared * abSquared * r * q + + 2.0 * CSquared * 
cSquared * r * q; + final double c = + ASquared * abSquared * qSquared + + abSquared + - 2.0 * B * abSquared * q + + BSquared * abSquared * qSquared + + CSquared * cSquared * qSquared; if (Math.abs(a) >= MINIMUM_RESOLUTION_SQUARED) { - final double sqrtTerm = b*b - 4.0*a*c; + final double sqrtTerm = b * b - 4.0 * a * c; if (Math.abs(sqrtTerm) < MINIMUM_RESOLUTION_SQUARED) { // One solution final double m = -b / (2.0 * a); @@ -1573,20 +1895,28 @@ public class Plane extends Vector { // y = ((1.0-l*B) * xyScaling^2) / ( 2 * m) // z = (-l*C * zScaling^2)/ (2 * m) final double denom0 = 0.5 / m; - final GeoPoint thePoint = new GeoPoint(-l*A * abSquared * denom0, (1.0-l*B) * abSquared * denom0, -l*C * cSquared * denom0); - //Math is not quite accurate enough for this - //assert planetModel.pointOnSurface(thePoint): "Point: "+thePoint+"; Planetmodel="+planetModel+"; A="+A+" B="+B+" C="+C+" D="+D+" planetfcn="+ - // (thePoint1.x*thePoint.x*planetModel.inverseXYScaling*planetModel.inverseXYScaling + thePoint.y*thePoint.y*planetModel.inverseXYScaling*planetModel.inverseXYScaling + thePoint.z*thePoint.z*planetModel.inverseZScaling*planetModel.inverseZScaling); - //assert evaluateIsZero(thePoint): "Evaluation of point: "+evaluate(thePoint); + final GeoPoint thePoint = + new GeoPoint( + -l * A * abSquared * denom0, + (1.0 - l * B) * abSquared * denom0, + -l * C * cSquared * denom0); + // Math is not quite accurate enough for this + // assert planetModel.pointOnSurface(thePoint): "Point: "+thePoint+"; + // Planetmodel="+planetModel+"; A="+A+" B="+B+" C="+C+" D="+D+" planetfcn="+ + // (thePoint1.x*thePoint.x*planetModel.inverseXYScaling*planetModel.inverseXYScaling + + // thePoint.y*thePoint.y*planetModel.inverseXYScaling*planetModel.inverseXYScaling + + // thePoint.z*thePoint.z*planetModel.inverseZScaling*planetModel.inverseZScaling); + // assert evaluateIsZero(thePoint): "Evaluation of point: "+evaluate(thePoint); addPoint(boundsInfo, bounds, thePoint); } else { - // This is a plane of the form A=0 B=n C=0. We can set a bound only by noting the D value. - boundsInfo.addYValue(-D/B); + // This is a plane of the form A=0 B=n C=0. We can set a bound only by noting the D + // value. 
+ boundsInfo.addYValue(-D / B); } } else if (sqrtTerm > 0.0) { // Two solutions final double sqrtResult = Math.sqrt(sqrtTerm); - final double commonDenom = 0.5/a; + final double commonDenom = 0.5 / a; final double m1 = (-b + sqrtResult) * commonDenom; assert Math.abs(a * m1 * m1 + b * m1 + c) < MINIMUM_RESOLUTION; final double m2 = (-b - sqrtResult) * commonDenom; @@ -1599,20 +1929,35 @@ public class Plane extends Vector { // z = (-l*C * zScaling^2)/ (2 * m) final double denom1 = 0.5 / m1; final double denom2 = 0.5 / m2; - final GeoPoint thePoint1 = new GeoPoint(-l1*A * abSquared * denom1, (1.0-l1*B) * abSquared * denom1, -l1*C * cSquared * denom1); - final GeoPoint thePoint2 = new GeoPoint(-l2*A * abSquared * denom2, (1.0-l2*B) * abSquared * denom2, -l2*C * cSquared * denom2); - //Math is not quite accurate enough for this - //assert planetModel.pointOnSurface(thePoint1): "Point1: "+thePoint1+"; Planetmodel="+planetModel+"; A="+A+" B="+B+" C="+C+" D="+D+" planetfcn="+ - // (thePoint1.x*thePoint1.x*planetModel.inverseXYScaling*planetModel.inverseXYScaling + thePoint1.y*thePoint1.y*planetModel.inverseXYScaling*planetModel.inverseXYScaling + thePoint1.z*thePoint1.z*planetModel.inverseZScaling*planetModel.inverseZScaling); - //assert planetModel.pointOnSurface(thePoint2): "Point2: "+thePoint2+"; Planetmodel="+planetModel+"; A="+A+" B="+B+" C="+C+" D="+D+" planetfcn="+ - // (thePoint2.x*thePoint2.x*planetModel.inverseXYScaling*planetModel.inverseXYScaling + thePoint2.y*thePoint2.y*planetModel.inverseXYScaling*planetModel.inverseXYScaling + thePoint2.z*thePoint2.z*planetModel.inverseZScaling*planetModel.inverseZScaling); - //assert evaluateIsZero(thePoint1): "Evaluation of point1: "+evaluate(thePoint1); - //assert evaluateIsZero(thePoint2): "Evaluation of point2: "+evaluate(thePoint2); + final GeoPoint thePoint1 = + new GeoPoint( + -l1 * A * abSquared * denom1, + (1.0 - l1 * B) * abSquared * denom1, + -l1 * C * cSquared * denom1); + final GeoPoint thePoint2 = + new GeoPoint( + -l2 * A * abSquared * denom2, + (1.0 - l2 * B) * abSquared * denom2, + -l2 * C * cSquared * denom2); + // Math is not quite accurate enough for this + // assert planetModel.pointOnSurface(thePoint1): "Point1: "+thePoint1+"; + // Planetmodel="+planetModel+"; A="+A+" B="+B+" C="+C+" D="+D+" planetfcn="+ + // (thePoint1.x*thePoint1.x*planetModel.inverseXYScaling*planetModel.inverseXYScaling + + // thePoint1.y*thePoint1.y*planetModel.inverseXYScaling*planetModel.inverseXYScaling + + // thePoint1.z*thePoint1.z*planetModel.inverseZScaling*planetModel.inverseZScaling); + // assert planetModel.pointOnSurface(thePoint2): "Point2: "+thePoint2+"; + // Planetmodel="+planetModel+"; A="+A+" B="+B+" C="+C+" D="+D+" planetfcn="+ + // (thePoint2.x*thePoint2.x*planetModel.inverseXYScaling*planetModel.inverseXYScaling + + // thePoint2.y*thePoint2.y*planetModel.inverseXYScaling*planetModel.inverseXYScaling + + // thePoint2.z*thePoint2.z*planetModel.inverseZScaling*planetModel.inverseZScaling); + // assert evaluateIsZero(thePoint1): "Evaluation of point1: "+evaluate(thePoint1); + // assert evaluateIsZero(thePoint2): "Evaluation of point2: "+evaluate(thePoint2); addPoint(boundsInfo, bounds, thePoint1); addPoint(boundsInfo, bounds, thePoint2); } else { - // This is a plane of the form A=0 B=n C=0. We can set a bound only by noting the D value. - boundsInfo.addYValue(-D/B); + // This is a plane of the form A=0 B=n C=0. We can set a bound only by noting the D + // value. 
+ boundsInfo.addYValue(-D / B); } } else { // No solutions @@ -1625,44 +1970,53 @@ public class Plane extends Vector { // y = ((1-l*B) * xyScaling^2) / ( 2 * m) // z = (-l*C * zScaling^2)/ (2 * m) final double denom0 = 0.5 / m; - final GeoPoint thePoint = new GeoPoint(-l*A * abSquared * denom0, (1.0-l*B) * abSquared * denom0, -l*C * cSquared * denom0); - //Math is not quite accurate enough for this - //assert planetModel.pointOnSurface(thePoint): "Point: "+thePoint+"; Planetmodel="+planetModel+"; A="+A+" B="+B+" C="+C+" D="+D+" planetfcn="+ - // (thePoint.x*thePoint.x*planetModel.inverseXYScaling*planetModel.inverseXYScaling + thePoint.y*thePoint.y*planetModel.inverseXYScaling*planetModel.inverseXYScaling + thePoint.z*thePoint.z*planetModel.inverseZScaling*planetModel.inverseZScaling); - //assert evaluateIsZero(thePoint): "Evaluation of point: "+evaluate(thePoint); + final GeoPoint thePoint = + new GeoPoint( + -l * A * abSquared * denom0, + (1.0 - l * B) * abSquared * denom0, + -l * C * cSquared * denom0); + // Math is not quite accurate enough for this + // assert planetModel.pointOnSurface(thePoint): "Point: "+thePoint+"; + // Planetmodel="+planetModel+"; A="+A+" B="+B+" C="+C+" D="+D+" planetfcn="+ + // (thePoint.x*thePoint.x*planetModel.inverseXYScaling*planetModel.inverseXYScaling + + // thePoint.y*thePoint.y*planetModel.inverseXYScaling*planetModel.inverseXYScaling + + // thePoint.z*thePoint.z*planetModel.inverseZScaling*planetModel.inverseZScaling); + // assert evaluateIsZero(thePoint): "Evaluation of point: "+evaluate(thePoint); addPoint(boundsInfo, bounds, thePoint); } else { // Something went very wrong; a = b = 0 } } } - + /** - * Accumulate bounds information for this plane, intersected with another plane - * and the world. - * Updates both latitude and longitude information, using max/min points found - * within the specified bounds. Also takes into account the error envelope for all - * planes being intersected. + * Accumulate bounds information for this plane, intersected with another plane and the world. + * Updates both latitude and longitude information, using max/min points found within the + * specified bounds. Also takes into account the error envelope for all planes being intersected. * * @param planetModel is the planet model to use in determining bounds. * @param boundsInfo is the lat/lon info to update with additional bounding information. * @param p is the other plane. - * @param bounds are the surfaces delineating what's inside the shape. + * @param bounds are the surfaces delineating what's inside the shape. */ - public void recordBounds(final PlanetModel planetModel, final LatLonBounds boundsInfo, final Plane p, final Membership... bounds) { + public void recordBounds( + final PlanetModel planetModel, + final LatLonBounds boundsInfo, + final Plane p, + final Membership... bounds) { findIntersectionBounds(planetModel, boundsInfo, p, bounds); } /** - * Accumulate bounds information for this plane, intersected with the unit sphere. - * Updates both latitude and longitude information, using max/min points found - * within the specified bounds. + * Accumulate bounds information for this plane, intersected with the unit sphere. Updates both + * latitude and longitude information, using max/min points found within the specified bounds. * * @param planetModel is the planet model to use in determining bounds. * @param boundsInfo is the lat/lon info to update with additional bounding information. - * @param bounds are the surfaces delineating what's inside the shape. 
+ * @param bounds are the surfaces delineating what's inside the shape. */ - public void recordBounds(final PlanetModel planetModel, final LatLonBounds boundsInfo, final Membership... bounds) { + public void recordBounds( + final PlanetModel planetModel, final LatLonBounds boundsInfo, final Membership... bounds) { // For clarity, load local variables with good names final double A = this.x; final double B = this.y; @@ -1670,33 +2024,34 @@ public class Plane extends Vector { // Now compute latitude min/max points if (!boundsInfo.checkNoTopLatitudeBound() || !boundsInfo.checkNoBottomLatitudeBound()) { - //System.err.println("Looking at latitude for plane "+this); + // System.err.println("Looking at latitude for plane "+this); // With ellipsoids, we really have only one viable way to do this computation. - // Specifically, we compute an appropriate vertical plane, based on the current plane's x-y orientation, and - // then intersect it with this one and with the ellipsoid. This gives us zero, one, or two points to use - // as bounds. - // There is one special case: horizontal circles. These require TWO vertical planes: one for the x, and one for - // the y, and we use all four resulting points in the bounds computation. + // Specifically, we compute an appropriate vertical plane, based on the current plane's x-y + // orientation, and then intersect it with this one and with the ellipsoid. This gives us + // zero, one, or two points to use as bounds. + // There is one special case: horizontal circles. These require TWO vertical planes: one for + // the x, and one for the y, and we use all four resulting points in the bounds computation. if ((Math.abs(A) >= MINIMUM_RESOLUTION || Math.abs(B) >= MINIMUM_RESOLUTION)) { // NOT a horizontal circle! - //System.err.println(" Not a horizontal circle"); - final Plane verticalPlane = constructNormalizedZPlane(A,B); + // System.err.println(" Not a horizontal circle"); + final Plane verticalPlane = constructNormalizedZPlane(A, B); final GeoPoint[] points = findIntersections(planetModel, verticalPlane, bounds, NO_BOUNDS); for (final GeoPoint point : points) { addPoint(boundsInfo, bounds, point); } } else { // Horizontal circle. Since a==b, any vertical plane suffices. 
- final GeoPoint[] points = findIntersections(planetModel, normalXPlane, NO_BOUNDS, NO_BOUNDS); + final GeoPoint[] points = + findIntersections(planetModel, normalXPlane, NO_BOUNDS, NO_BOUNDS); boundsInfo.addZValue(points[0]); } - //System.err.println("Done latitude bounds"); + // System.err.println("Done latitude bounds"); } // First, figure out our longitude bounds, unless we no longer need to consider that if (!boundsInfo.checkNoLongitudeBound()) { - //System.err.println("Computing longitude bounds for "+this); - //System.out.println("A = "+A+" B = "+B+" C = "+C+" D = "+D); + // System.err.println("Computing longitude bounds for "+this); + // System.out.println("A = "+A+" B = "+B+" C = "+C+" D = "+D); // Compute longitude bounds double a; @@ -1705,7 +2060,7 @@ public class Plane extends Vector { if (Math.abs(C) < MINIMUM_RESOLUTION) { // Degenerate; the equation describes a line - //System.out.println("It's a zero-width ellipse"); + // System.out.println("It's a zero-width ellipse"); // Ax + By + D = 0 if (Math.abs(D) >= MINIMUM_RESOLUTION) { if (Math.abs(A) > Math.abs(B)) { @@ -1734,7 +2089,9 @@ public class Plane extends Vector { // Group: // y^2 * [B^2/a^2 + A^2/b^2] + y [2BD/a^2] + [D^2/a^2-A^2] = 0 - a = B * B * planetModel.inverseXYScalingSquared + A * A * planetModel.inverseXYScalingSquared; + a = + B * B * planetModel.inverseXYScalingSquared + + A * A * planetModel.inverseXYScalingSquared; b = 2.0 * B * D * planetModel.inverseXYScalingSquared; c = D * D * planetModel.inverseXYScalingSquared - A * A; @@ -1767,7 +2124,9 @@ public class Plane extends Vector { // Use equation suitable for B != 0 // Since I != 0, we rewrite: // y = (-Ax - D)/B - a = B * B * planetModel.inverseXYScalingSquared + A * A * planetModel.inverseXYScalingSquared; + a = + B * B * planetModel.inverseXYScalingSquared + + A * A * planetModel.inverseXYScalingSquared; b = 2.0 * A * D * planetModel.inverseXYScalingSquared; c = D * D * planetModel.inverseXYScalingSquared - B * B; @@ -1797,11 +2156,11 @@ public class Plane extends Vector { } } else { - //System.err.println("General longitude bounds..."); + // System.err.println("General longitude bounds..."); // NOTE WELL: The x,y,z values generated here are NOT on the unit sphere. - // They are for lat/lon calculation purposes only. x-y is meant to be used for longitude determination, - // and z for latitude, and that's all the values are good for. + // They are for lat/lon calculation purposes only. x-y is meant to be used for longitude + // determination, and z for latitude, and that's all the values are good for. 
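Since the note above says the computed (x, y, z) values are used only directionally, the eventual angle extraction is insensitive to their magnitude. A minimal sketch of that use, assuming plain geocentric angles; Lucene's GeoPoint conversion may apply model-specific scaling on top of this:

    // Sketch: direction-only angle extraction from a candidate (x, y, z).
    final double longitude = Math.atan2(y, x); // uses the x-y projection only
    final double latitude = Math.atan2(z, Math.sqrt(x * x + y * y)); // z vs. horizontal magnitude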
// (1) Intersect the plane and the ellipsoid, and project the results into the x-y plane: // From plane: @@ -1811,19 +2170,27 @@ public class Plane extends Vector { // Simplify/expand: // C^2*x^2/a^2 + C^2*y^2/b^2 + (-Ax - By - D)^2/zScaling^2 = C^2 // - // x^2 * C^2/a^2 + y^2 * C^2/b^2 + x^2 * A^2/zScaling^2 + ABxy/zScaling^2 + ADx/zScaling^2 + ABxy/zScaling^2 + y^2 * B^2/zScaling^2 + BDy/zScaling^2 + ADx/zScaling^2 + BDy/zScaling^2 + D^2/zScaling^2 = C^2 + // x^2 * C^2/a^2 + y^2 * C^2/b^2 + x^2 * A^2/zScaling^2 + ABxy/zScaling^2 + ADx/zScaling^2 + + // ABxy/zScaling^2 + y^2 * B^2/zScaling^2 + BDy/zScaling^2 + ADx/zScaling^2 + BDy/zScaling^2 + // + D^2/zScaling^2 = C^2 // Group: - // [A^2/zScaling^2 + C^2/a^2] x^2 + [B^2/zScaling^2 + C^2/b^2] y^2 + [2AB/zScaling^2]xy + [2AD/zScaling^2]x + [2BD/zScaling^2]y + [D^2/zScaling^2-C^2] = 0 + // [A^2/zScaling^2 + C^2/a^2] x^2 + [B^2/zScaling^2 + C^2/b^2] y^2 + [2AB/zScaling^2]xy + + // [2AD/zScaling^2]x + [2BD/zScaling^2]y + [D^2/zScaling^2-C^2] = 0 // For convenience, introduce post-projection coefficient variables to make life easier. // E x^2 + F y^2 + G xy + H x + I y + J = 0 - double E = A * A * planetModel.inverseZScalingSquared + C * C * planetModel.inverseXYScalingSquared; - double F = B * B * planetModel.inverseZScalingSquared + C * C * planetModel.inverseXYScalingSquared; + double E = + A * A * planetModel.inverseZScalingSquared + + C * C * planetModel.inverseXYScalingSquared; + double F = + B * B * planetModel.inverseZScalingSquared + + C * C * planetModel.inverseXYScalingSquared; double G = 2.0 * A * B * planetModel.inverseZScalingSquared; double H = 2.0 * A * D * planetModel.inverseZScalingSquared; double I = 2.0 * B * D * planetModel.inverseZScalingSquared; double J = D * D * planetModel.inverseZScalingSquared - C * C; - //System.err.println("E = " + E + " F = " + F + " G = " + G + " H = "+ H + " I = " + I + " J = " + J); + // System.err.println("E = " + E + " F = " + F + " G = " + G + " H = "+ H + " I = " + I + // + " J = " + J); // Check if the origin is within, by substituting x = 0, y = 0 and seeing if less than zero if (Math.abs(J) >= MINIMUM_RESOLUTION && J > 0.0) { @@ -1844,17 +2211,18 @@ public class Plane extends Vector { // 2E x^2 + 2F y^2 + 2G xy + 2H x + 2I y + 2J = 0 // Subtract one from the other, to remove the high-order terms: // Hx + Iy + 2J = 0 - // Now, we can substitute either x = or y = into the derivative equation, or into the original equation. - // But we will need to base this on which coefficient is non-zero + // Now, we can substitute either x = or y = into the derivative equation, or into the + // original equation. But we will need to base this on which coefficient is non-zero if (Math.abs(H) > Math.abs(I)) { - //System.err.println(" Using the y quadratic"); + // System.err.println(" Using the y quadratic"); // x = (-2J - Iy)/H // Plug into the original equation: // E [(-2J - Iy)/H]^2 + F y^2 + G [(-2J - Iy)/H]y + H [(-2J - Iy)/H] + I y + J = 0 // E [(-2J - Iy)/H]^2 + F y^2 + G [(-2J - Iy)/H]y - J = 0 - // Same equation as derivative equation, except for a factor of 2! So it doesn't matter which we pick. + // Same equation as derivative equation, except for a factor of 2! So it doesn't matter + // which we pick. 
// Plug into derivative equation: // 2E[(-2J - Iy)/H]^2 + 2Fy^2 + 2G[(-2J - Iy)/H]y + H[(-2J - Iy)/H] + Iy = 0 @@ -1875,19 +2243,19 @@ public class Plane extends Vector { b = 4.0 * E * I * J - 2.0 * G * H * J; c = 4.0 * E * J * J - J * H * H; - //System.out.println("a="+a+" b="+b+" zScaling="+zScaling); + // System.out.println("a=" + a + " b=" + b + " zScaling=" + zScaling); double sqrtClause = b * b - 4.0 * a * c; - //System.out.println("sqrtClause="+sqrtClause); + // System.out.println("sqrtClause=" + sqrtClause); if (Math.abs(sqrtClause) < MINIMUM_RESOLUTION_CUBED) { - //System.err.println(" One solution"); + // System.err.println(" One solution"); double y0 = -b / (2.0 * a); double x0 = (-2.0 * J - I * y0) / H; double z0 = (-A * x0 - B * y0 - D) / C; addPoint(boundsInfo, bounds, new GeoPoint(x0, y0, z0)); } else if (sqrtClause > 0.0) { - //System.err.println(" Two solutions"); + // System.err.println(" Two solutions"); double sqrtResult = Math.sqrt(sqrtClause); double denom = 1.0 / (2.0 * a); double Hdenom = 1.0 / H; @@ -1905,7 +2273,7 @@ public class Plane extends Vector { } } else { - //System.err.println(" Using the x quadratic"); + // System.err.println(" Using the x quadratic"); // y = (-2J - Hx)/I // Plug into the original equation: @@ -1927,11 +2295,11 @@ public class Plane extends Vector { b = 4.0 * F * H * J - 2.0 * G * I * J; c = 4.0 * F * J * J - J * I * I; - //System.out.println("a="+a+" b="+b+" zScaling="+zScaling); + // System.out.println("a=" + a + " b=" + b + " zScaling=" + zScaling); double sqrtClause = b * b - 4.0 * a * c; - //System.out.println("sqrtClause="+sqrtClause); + // System.out.println("sqrtClause=" + sqrtClause); if (Math.abs(sqrtClause) < MINIMUM_RESOLUTION_CUBED) { - //System.err.println(" One solution; sqrt clause was "+sqrtClause); + // System.err.println(" One solution; sqrt clause was " + sqrtClause); double x0 = -b / (2.0 * a); double y0 = (-2.0 * J - H * x0) / I; double z0 = (-A * x0 - B * y0 - D) / C; @@ -1939,7 +2307,7 @@ public class Plane extends Vector { // 2Ex^2 + 2Fy^2 + 2Gxy + Hx + Iy = 0 addPoint(boundsInfo, bounds, new GeoPoint(x0, y0, z0)); } else if (sqrtClause > 0.0) { - //System.err.println(" Two solutions"); + // System.err.println(" Two solutions"); double sqrtResult = Math.sqrt(sqrtClause); double denom = 1.0 / (2.0 * a); double Idenom = 1.0 / I; @@ -1959,79 +2327,97 @@ public class Plane extends Vector { } } } - } - /** Add a point to boundsInfo if within a specifically bounded area. + /** + * Add a point to boundsInfo if within a specifically bounded area. + * * @param boundsInfo is the object to be modified. * @param bounds is the area that the point must be within. * @param point is the point. */ - private static void addPoint(final Bounds boundsInfo, final Membership[] bounds, final GeoPoint point) { + private static void addPoint( + final Bounds boundsInfo, final Membership[] bounds, final GeoPoint point) { // Make sure the discovered point is within the bounds for (Membership bound : bounds) { - if (!bound.isWithin(point)) + if (!bound.isWithin(point)) { return; + } } // Add the point boundsInfo.addPoint(point); } /** - * Determine whether the plane intersects another plane within the - * bounds provided. + * Determine whether the plane intersects another plane within the bounds provided. * * @param planetModel is the planet model to use in determining intersection. - * @param q is the other plane. - * @param notablePoints are points to look at to disambiguate cases when the two planes are identical. 
- * @param moreNotablePoints are additional points to look at to disambiguate cases when the two planes are identical. - * @param bounds is one part of the bounds. - * @param moreBounds are more bounds. + * @param q is the other plane. + * @param notablePoints are points to look at to disambiguate cases when the two planes are + * identical. + * @param moreNotablePoints are additional points to look at to disambiguate cases when the two + * planes are identical. + * @param bounds is one part of the bounds. + * @param moreBounds are more bounds. * @return true if there's an intersection. */ - public boolean intersects(final PlanetModel planetModel, final Plane q, final GeoPoint[] notablePoints, final GeoPoint[] moreNotablePoints, final Membership[] bounds, final Membership... moreBounds) { - //System.err.println("Does plane "+this+" intersect with plane "+q); + public boolean intersects( + final PlanetModel planetModel, + final Plane q, + final GeoPoint[] notablePoints, + final GeoPoint[] moreNotablePoints, + final Membership[] bounds, + final Membership... moreBounds) { + // System.err.println("Does plane "+this+" intersect with plane "+q); // If the two planes are identical, then the math will find no points of intersection. // So a special case of this is to check for plane equality. But that is not enough, because - // what we really need at that point is to determine whether overlap occurs between the two parts of the intersection - // of plane and circle. That is, are there *any* points on the plane that are within the bounds described? + // what we really need at that point is to determine whether overlap occurs between the two + // parts of the intersection + // of plane and circle. That is, are there *any* points on the plane that are within the bounds + // described? if (isNumericallyIdentical(q)) { - //System.err.println(" Identical plane"); - // The only way to efficiently figure this out will be to have a list of trial points available to evaluate. + // System.err.println(" Identical plane"); + // The only way to efficiently figure this out will be to have a list of trial points + // available to evaluate. // We look for any point that fulfills all the bounds. for (GeoPoint p : notablePoints) { if (meetsAllBounds(p, bounds, moreBounds)) { - //System.err.println(" found a notable point in bounds, so intersects"); + // System.err.println(" found a notable point in bounds, so intersects"); return true; } } for (GeoPoint p : moreNotablePoints) { if (meetsAllBounds(p, bounds, moreBounds)) { - //System.err.println(" found a notable point in bounds, so intersects"); + // System.err.println(" found a notable point in bounds, so intersects"); return true; } } - //System.err.println(" no notable points inside found; no intersection"); + // System.err.println(" no notable points inside found; no intersection"); return false; } - + // Save on allocations; do inline instead of calling findIntersections - //System.err.println("Looking for intersection between plane "+this+" and plane "+q+" within bounds"); + // System.err.println("Looking for intersection between plane " + this + " and plane " + // + q + " within bounds"); // Unnormalized, unchecked... 
final double lineVectorX = y * q.z - z * q.y; final double lineVectorY = z * q.x - x * q.z; final double lineVectorZ = x * q.y - y * q.x; - if (Math.abs(lineVectorX) < MINIMUM_RESOLUTION && Math.abs(lineVectorY) < MINIMUM_RESOLUTION && Math.abs(lineVectorZ) < MINIMUM_RESOLUTION) { + if (Math.abs(lineVectorX) < MINIMUM_RESOLUTION + && Math.abs(lineVectorY) < MINIMUM_RESOLUTION + && Math.abs(lineVectorZ) < MINIMUM_RESOLUTION) { // Degenerate case: parallel planes - //System.err.println(" planes are parallel - no intersection"); + // System.err.println(" planes are parallel - no intersection"); return false; } // The line will have the equation: A t + A0 = x, B t + B0 = y, C t + C0 = z. - // We have A, B, and C. In order to come up with A0, B0, and C0, we need to find a point that is on both planes. - // To do this, we find the largest vector value (either x, y, or z), and look for a point that solves both plane equations - // simultaneous. For example, let's say that the vector is (0.5,0.5,1), and the two plane equations are: + // We have A, B, and C. In order to come up with A0, B0, and C0, we need to find a point that + // is on both planes. + // To do this, we find the largest vector value (either x, y, or z), and look for a point that + // solves both plane equations simultaneous. For example, let's say that the vector is + // (0.5,0.5,1), and the two plane equations are: // 0.7 x + 0.3 y + 0.1 z + 0.0 = 0 // and // 0.9 x - 0.1 y + 0.2 z + 4.0 = 0 @@ -2057,7 +2443,7 @@ public class Plane extends Vector { if (Math.abs(denomYZ) >= Math.abs(denomXZ) && Math.abs(denomYZ) >= Math.abs(denomXY)) { // X is the biggest, so our point will have x0 = 0.0 if (Math.abs(denomYZ) < MINIMUM_RESOLUTION_SQUARED) { - //System.err.println(" Denominator is zero: no intersection"); + // System.err.println(" Denominator is zero: no intersection"); return false; } final double denom = 1.0 / denomYZ; @@ -2067,7 +2453,7 @@ public class Plane extends Vector { } else if (Math.abs(denomXZ) >= Math.abs(denomXY) && Math.abs(denomXZ) >= Math.abs(denomYZ)) { // Y is the biggest, so y0 = 0.0 if (Math.abs(denomXZ) < MINIMUM_RESOLUTION_SQUARED) { - //System.err.println(" Denominator is zero: no intersection"); + // System.err.println(" Denominator is zero: no intersection"); return false; } final double denom = 1.0 / denomXZ; @@ -2077,7 +2463,7 @@ public class Plane extends Vector { } else { // Z is the biggest, so Z0 = 0.0 if (Math.abs(denomXY) < MINIMUM_RESOLUTION_SQUARED) { - //System.err.println(" Denominator is zero: no intersection"); + // System.err.println(" Denominator is zero: no intersection"); return false; } final double denom = 1.0 / denomXY; @@ -2086,22 +2472,33 @@ public class Plane extends Vector { z0 = 0.0; } - // Once an intersecting line is determined, the next step is to intersect that line with the ellipsoid, which - // will yield zero, one, or two points. + // Once an intersecting line is determined, the next step is to intersect that line with the + // ellipsoid, which will yield zero, one, or two points. 
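A self-contained version of that substitution, outside the Plane class: parameterize the line as P(t) = P0 + t*V, plug it into x^2/a^2 + y^2/b^2 + z^2/c^2 = 1, and solve the resulting quadratic in t. The helper below is a hypothetical sketch (a, b, c are the ellipsoid semi-axes; the patched code uses the planet model's precomputed inverse squared scalings instead):

    // Sketch: t values where the line p0 + t*v meets x^2/a^2 + y^2/b^2 + z^2/c^2 = 1.
    static double[] lineEllipsoidT(double[] p0, double[] v, double a, double b, double c) {
      final double ia = 1.0 / (a * a), ib = 1.0 / (b * b), ic = 1.0 / (c * c);
      final double qa = v[0] * v[0] * ia + v[1] * v[1] * ib + v[2] * v[2] * ic;
      final double qb = 2.0 * (v[0] * p0[0] * ia + v[1] * p0[1] * ib + v[2] * p0[2] * ic);
      final double qc = p0[0] * p0[0] * ia + p0[1] * p0[1] * ib + p0[2] * p0[2] * ic - 1.0;
      final double disc = qb * qb - 4.0 * qa * qc;
      if (disc < 0.0) {
        return new double[0]; // the line misses the ellipsoid entirely
      }
      if (disc == 0.0) {
        return new double[] {-qb / (2.0 * qa)}; // tangent: a single point
      }
      final double s = Math.sqrt(disc);
      return new double[] {(-qb + s) / (2.0 * qa), (-qb - s) / (2.0 * qa)};
    }

A robust version would compare disc against a resolution epsilon, as the surrounding code does with MINIMUM_RESOLUTION_SQUARED, rather than against exact zero.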
// The ellipsoid equation: 1,0 = x^2/a^2 + y^2/b^2 + z^2/zScaling^2 // 1.0 = (At+A0)^2/a^2 + (Bt+B0)^2/b^2 + (Ct+C0)^2/zScaling^2 - // A^2 t^2 / a^2 + 2AA0t / a^2 + A0^2 / a^2 + B^2 t^2 / b^2 + 2BB0t / b^2 + B0^2 / b^2 + C^2 t^2 / zScaling^2 + 2CC0t / zScaling^2 + C0^2 / zScaling^2 - 1,0 = 0.0 - // [A^2 / a^2 + B^2 / b^2 + C^2 / zScaling^2] t^2 + [2AA0 / a^2 + 2BB0 / b^2 + 2CC0 / zScaling^2] t + [A0^2 / a^2 + B0^2 / b^2 + C0^2 / zScaling^2 - 1,0] = 0.0 + // A^2 t^2 / a^2 + 2AA0t / a^2 + A0^2 / a^2 + B^2 t^2 / b^2 + 2BB0t / b^2 + B0^2 / b^2 + C^2 t^2 + // / zScaling^2 + 2CC0t / zScaling^2 + C0^2 / zScaling^2 - 1,0 = 0.0 + // [A^2 / a^2 + B^2 / b^2 + C^2 / zScaling^2] t^2 + [2AA0 / a^2 + 2BB0 / b^2 + 2CC0 / + // zScaling^2] t + [A0^2 / a^2 + B0^2 / b^2 + C0^2 / zScaling^2 - 1,0] = 0.0 // Use the quadratic formula to determine t values and candidate point(s) - final double A = lineVectorX * lineVectorX * planetModel.inverseXYScalingSquared + - lineVectorY * lineVectorY * planetModel.inverseXYScalingSquared + - lineVectorZ * lineVectorZ * planetModel.inverseZScalingSquared; - final double B = 2.0 * (lineVectorX * x0 * planetModel.inverseXYScalingSquared + lineVectorY * y0 * planetModel.inverseXYScalingSquared + lineVectorZ * z0 * planetModel.inverseZScalingSquared); - final double C = x0 * x0 * planetModel.inverseXYScalingSquared + y0 * y0 * planetModel.inverseXYScalingSquared + z0 * z0 * planetModel.inverseZScalingSquared - 1.0; + final double A = + lineVectorX * lineVectorX * planetModel.inverseXYScalingSquared + + lineVectorY * lineVectorY * planetModel.inverseXYScalingSquared + + lineVectorZ * lineVectorZ * planetModel.inverseZScalingSquared; + final double B = + 2.0 + * (lineVectorX * x0 * planetModel.inverseXYScalingSquared + + lineVectorY * y0 * planetModel.inverseXYScalingSquared + + lineVectorZ * z0 * planetModel.inverseZScalingSquared); + final double C = + x0 * x0 * planetModel.inverseXYScalingSquared + + y0 * y0 * planetModel.inverseXYScalingSquared + + z0 * z0 * planetModel.inverseZScalingSquared + - 1.0; final double BsquaredMinus = B * B - 4.0 * A * C; if (Math.abs(BsquaredMinus) < MINIMUM_RESOLUTION_SQUARED) { - //System.err.println(" One point of intersection"); + // System.err.println(" One point of intersection"); final double inverse2A = 1.0 / (2.0 * A); // One solution only final double t = -B * inverse2A; @@ -2121,7 +2518,7 @@ public class Plane extends Vector { } return true; } else if (BsquaredMinus > 0.0) { - //System.err.println(" Two points of intersection"); + // System.err.println(" Two points of intersection"); final double inverse2A = 1.0 / (2.0 * A); // Two solutions final double sqrtTerm = Math.sqrt(BsquaredMinus); @@ -2164,66 +2561,80 @@ public class Plane extends Vector { } return true; } else { - //System.err.println(" no solutions - no intersection"); + // System.err.println(" no solutions - no intersection"); return false; } } /** - * Determine whether the plane crosses another plane within the - * bounds provided. Crossing is defined as intersecting with the geo surface at two points. + * Determine whether the plane crosses another plane within the bounds provided. Crossing is + * defined as intersecting with the geo surface at two points. * * @param planetModel is the planet model to use in determining intersection. - * @param q is the other plane. - * @param notablePoints are points to look at to disambiguate cases when the two planes are identical. 
- * @param moreNotablePoints are additional points to look at to disambiguate cases when the two planes are identical. - * @param bounds is one part of the bounds. - * @param moreBounds are more bounds. + * @param q is the other plane. + * @param notablePoints are points to look at to disambiguate cases when the two planes are + * identical. + * @param moreNotablePoints are additional points to look at to disambiguate cases when the two + * planes are identical. + * @param bounds is one part of the bounds. + * @param moreBounds are more bounds. * @return true if there's a crossing. */ - public boolean crosses(final PlanetModel planetModel, final Plane q, final GeoPoint[] notablePoints, final GeoPoint[] moreNotablePoints, final Membership[] bounds, final Membership... moreBounds) { - //System.err.println("Does plane "+this+" cross plane "+q); + public boolean crosses( + final PlanetModel planetModel, + final Plane q, + final GeoPoint[] notablePoints, + final GeoPoint[] moreNotablePoints, + final Membership[] bounds, + final Membership... moreBounds) { + // System.err.println("Does plane "+this+" cross plane "+q); // If the two planes are identical, then the math will find no points of intersection. // So a special case of this is to check for plane equality. But that is not enough, because - // what we really need at that point is to determine whether overlap occurs between the two parts of the intersection - // of plane and circle. That is, are there *any* points on the plane that are within the bounds described? + // what we really need at that point is to determine whether overlap occurs between the two + // parts of the intersection of plane and circle. That is, are there *any* points on the + // plane that are within the bounds described? if (isNumericallyIdentical(q)) { - //System.err.println(" Identical plane"); - // The only way to efficiently figure this out will be to have a list of trial points available to evaluate. - // We look for any point that fulfills all the bounds. + // System.err.println(" Identical plane"); + // The only way to efficiently figure this out will be to have a list of trial points + // available to evaluate. We look for any point that fulfills all the bounds. for (GeoPoint p : notablePoints) { if (meetsAllBounds(p, bounds, moreBounds)) { - //System.err.println(" found a notable point in bounds, so intersects"); + // System.err.println(" found a notable point in bounds, so intersects"); return true; } } for (GeoPoint p : moreNotablePoints) { if (meetsAllBounds(p, bounds, moreBounds)) { - //System.err.println(" found a notable point in bounds, so intersects"); + // System.err.println(" found a notable point in bounds, so intersects"); return true; } } - //System.err.println(" no notable points inside found; no intersection"); + // System.err.println(" no notable points inside found; no intersection"); return false; } - + // Save on allocations; do inline instead of calling findIntersections - //System.err.println("Looking for intersection between plane "+this+" and plane "+q+" within bounds"); + // System.err.println("Looking for intersection between plane " + this + " and plane " + // + q + " within bounds"); // Unnormalized, unchecked... 
final double lineVectorX = y * q.z - z * q.y; final double lineVectorY = z * q.x - x * q.z; final double lineVectorZ = x * q.y - y * q.x; - if (Math.abs(lineVectorX) < MINIMUM_RESOLUTION && Math.abs(lineVectorY) < MINIMUM_RESOLUTION && Math.abs(lineVectorZ) < MINIMUM_RESOLUTION) { + if (Math.abs(lineVectorX) < MINIMUM_RESOLUTION + && Math.abs(lineVectorY) < MINIMUM_RESOLUTION + && Math.abs(lineVectorZ) < MINIMUM_RESOLUTION) { // Degenerate case: parallel planes - //System.err.println(" planes are parallel - no intersection"); + // System.err.println(" planes are parallel - no intersection"); return false; } // The line will have the equation: A t + A0 = x, B t + B0 = y, C t + C0 = z. - // We have A, B, and C. In order to come up with A0, B0, and C0, we need to find a point that is on both planes. - // To do this, we find the largest vector value (either x, y, or z), and look for a point that solves both plane equations - // simultaneous. For example, let's say that the vector is (0.5,0.5,1), and the two plane equations are: + // We have A, B, and C. In order to come up with A0, B0, and C0, we need to find a point that + // is on both planes. + // To do this, we find the largest vector value (either x, y, or z), and look for a point that + // solves both plane equations simultaneously. For example, let's say that the vector is + // (0.5,0.5,1), and the two plane equations are: // 0.7 x + 0.3 y + 0.1 z + 0.0 = 0 // and // 0.9 x - 0.1 y + 0.2 z + 4.0 = 0 @@ -2249,7 +2660,7 @@ public class Plane extends Vector { if (Math.abs(denomYZ) >= Math.abs(denomXZ) && Math.abs(denomYZ) >= Math.abs(denomXY)) { // X is the biggest, so our point will have x0 = 0.0 if (Math.abs(denomYZ) < MINIMUM_RESOLUTION_SQUARED) { - //System.err.println(" Denominator is zero: no intersection"); + // System.err.println(" Denominator is zero: no intersection"); return false; } final double denom = 1.0 / denomYZ; @@ -2259,7 +2670,7 @@ public class Plane extends Vector { } else if (Math.abs(denomXZ) >= Math.abs(denomXY) && Math.abs(denomXZ) >= Math.abs(denomYZ)) { // Y is the biggest, so y0 = 0.0 if (Math.abs(denomXZ) < MINIMUM_RESOLUTION_SQUARED) { - //System.err.println(" Denominator is zero: no intersection"); + // System.err.println(" Denominator is zero: no intersection"); return false; } final double denom = 1.0 / denomXZ; @@ -2269,7 +2680,7 @@ public class Plane extends Vector { } else { // Z is the biggest, so Z0 = 0.0 if (Math.abs(denomXY) < MINIMUM_RESOLUTION_SQUARED) { - //System.err.println(" Denominator is zero: no intersection"); + // System.err.println(" Denominator is zero: no intersection"); return false; } final double denom = 1.0 / denomXY; @@ -2278,26 +2689,38 @@ public class Plane extends Vector { z0 = 0.0; } - // Once an intersecting line is determined, the next step is to intersect that line with the ellipsoid, which - // will yield zero, one, or two points. + // Once an intersecting line is determined, the next step is to intersect that line with the + // ellipsoid, which will yield zero, one, or two points.
// The ellipsoid equation: 1,0 = x^2/a^2 + y^2/b^2 + z^2/zScaling^2 // 1.0 = (At+A0)^2/a^2 + (Bt+B0)^2/b^2 + (Ct+C0)^2/zScaling^2 - // A^2 t^2 / a^2 + 2AA0t / a^2 + A0^2 / a^2 + B^2 t^2 / b^2 + 2BB0t / b^2 + B0^2 / b^2 + C^2 t^2 / zScaling^2 + 2CC0t / zScaling^2 + C0^2 / zScaling^2 - 1,0 = 0.0 - // [A^2 / a^2 + B^2 / b^2 + C^2 / zScaling^2] t^2 + [2AA0 / a^2 + 2BB0 / b^2 + 2CC0 / zScaling^2] t + [A0^2 / a^2 + B0^2 / b^2 + C0^2 / zScaling^2 - 1,0] = 0.0 + // A^2 t^2 / a^2 + 2AA0t / a^2 + A0^2 / a^2 + B^2 t^2 / b^2 + 2BB0t / b^2 + B0^2 / b^2 + C^2 t^2 + // / zScaling^2 + 2CC0t / zScaling^2 + C0^2 / zScaling^2 - 1.0 = 0.0 + // [A^2 / a^2 + B^2 / b^2 + C^2 / zScaling^2] t^2 + [2AA0 / a^2 + 2BB0 / b^2 + 2CC0 / + // zScaling^2] t + [A0^2 / a^2 + B0^2 / b^2 + C0^2 / zScaling^2 - 1.0] = 0.0 // Use the quadratic formula to determine t values and candidate point(s) - final double A = lineVectorX * lineVectorX * planetModel.inverseXYScalingSquared + - lineVectorY * lineVectorY * planetModel.inverseXYScalingSquared + - lineVectorZ * lineVectorZ * planetModel.inverseZScalingSquared; - final double B = 2.0 * (lineVectorX * x0 * planetModel.inverseXYScalingSquared + lineVectorY * y0 * planetModel.inverseXYScalingSquared + lineVectorZ * z0 * planetModel.inverseZScalingSquared); - final double C = x0 * x0 * planetModel.inverseXYScalingSquared + y0 * y0 * planetModel.inverseXYScalingSquared + z0 * z0 * planetModel.inverseZScalingSquared - 1.0; + final double A = + lineVectorX * lineVectorX * planetModel.inverseXYScalingSquared + + lineVectorY * lineVectorY * planetModel.inverseXYScalingSquared + + lineVectorZ * lineVectorZ * planetModel.inverseZScalingSquared; + final double B = + 2.0 + * (lineVectorX * x0 * planetModel.inverseXYScalingSquared + + lineVectorY * y0 * planetModel.inverseXYScalingSquared + + lineVectorZ * z0 * planetModel.inverseZScalingSquared); + final double C = + x0 * x0 * planetModel.inverseXYScalingSquared + + y0 * y0 * planetModel.inverseXYScalingSquared + + z0 * z0 * planetModel.inverseZScalingSquared + - 1.0; final double BsquaredMinus = B * B - 4.0 * A * C; if (Math.abs(BsquaredMinus) < MINIMUM_RESOLUTION_SQUARED) { - //System.err.println(" One point of intersection"); - // We're not interested in situations where there is only one solution; these are intersections but not crossings + // System.err.println(" One point of intersection"); + // We're not interested in situations where there is only one solution; these are + // intersections but not crossings return false; } else if (BsquaredMinus > 0.0) { - //System.err.println(" Two points of intersection"); + // System.err.println(" Two points of intersection"); final double inverse2A = 1.0 / (2.0 * A); // Two solutions final double sqrtTerm = Math.sqrt(BsquaredMinus); @@ -2340,15 +2763,16 @@ public class Plane extends Vector { } return true; } else { - //System.err.println(" no solutions - no intersection"); + // System.err.println(" no solutions - no intersection"); return false; } } /** - * Returns true if this plane and the other plane are functionally identical within the margin of error. - * Functionally identical means that the planes are so close to parallel that many aspects of planar math, - * like intersections, no longer have answers to within the required precision. + * Returns true if this plane and the other plane are functionally identical within the margin of + * error.
Functionally identical means that the planes are so close to parallel that many aspects + * of planar math, like intersections, no longer have answers to within the required precision. + * * @param p is the plane to compare against. * @return true if the planes are functionally identical. */ @@ -2358,13 +2782,15 @@ public class Plane extends Vector { final double cross1 = this.y * p.z - this.z * p.y; final double cross2 = this.z * p.x - this.x * p.z; final double cross3 = this.x * p.y - this.y * p.x; - //System.out.println("cross product magnitude = "+(cross1 * cross1 + cross2 * cross2 + cross3 * cross3)); - // Should be MINIMUM_RESOLUTION_SQUARED, but that gives us planes that are *almost* parallel, and those are problematic too, + // System.out.println("cross product magnitude = "+(cross1 * cross1 + cross2 * cross2 + cross3 * + // cross3)); + // Should be MINIMUM_RESOLUTION_SQUARED, but that gives us planes that are *almost* parallel, + // and those are problematic too, // so we have a tighter constraint on parallelism in this method. if (cross1 * cross1 + cross2 * cross2 + cross3 * cross3 >= 5 * MINIMUM_RESOLUTION) { return false; } - + // Now, see whether the parallel planes are in fact on top of one another. // The math: // We need a single point that fulfills: @@ -2375,15 +2801,17 @@ public class Plane extends Vector { // z0 = -(C * D) / (A^2 + B^2 + C^2) // Check: // A (x0) + B (y0) + C (z0) + D =? 0 - // A (-(A * D) / (A^2 + B^2 + C^2)) + B (-(B * D) / (A^2 + B^2 + C^2)) + C (-(C * D) / (A^2 + B^2 + C^2)) + D ?= 0 + // A (-(A * D) / (A^2 + B^2 + C^2)) + B (-(B * D) / (A^2 + B^2 + C^2)) + C (-(C * D) / (A^2 + + // B^2 + C^2)) + D ?= 0 // -D [ A^2 / (A^2 + B^2 + C^2) + B^2 / (A^2 + B^2 + C^2) + C^2 / (A^2 + B^2 + C^2)] + D ?= 0 // Yes. final double denom = 1.0 / (p.x * p.x + p.y * p.y + p.z * p.z); return evaluateIsZero(-p.x * p.D * denom, -p.y * p.D * denom, -p.z * p.D * denom); } - + /** * Returns true if this plane and the other plane are identical within the margin of error. + * * @param p is the plane to compare against. * @return true if the planes are numerically identical. */ @@ -2393,7 +2821,8 @@ public class Plane extends Vector { final double cross1 = this.y * p.z - this.z * p.y; final double cross2 = this.z * p.x - this.x * p.z; final double cross3 = this.x * p.y - this.y * p.x; - //System.out.println("cross product magnitude = "+(cross1 * cross1 + cross2 * cross2 + cross3 * cross3)); + // System.out.println("cross product magnitude = " + // + (cross1 * cross1 + cross2 * cross2 + cross3 * cross3)); if (cross1 * cross1 + cross2 * cross2 + cross3 * cross3 >= MINIMUM_RESOLUTION_SQUARED) { return false; } @@ -2405,7 +2834,7 @@ public class Plane extends Vector { if (Math.abs(this.x * p.y - this.y * p.x) >= MINIMUM_RESOLUTION) return false; */ - + // Now, see whether the parallel planes are in fact on top of one another. // The math: // We need a single point that fulfills: @@ -2416,7 +2845,8 @@ public class Plane extends Vector { // z0 = -(C * D) / (A^2 + B^2 + C^2) // Check: // A (x0) + B (y0) + C (z0) + D =? 0 - // A (-(A * D) / (A^2 + B^2 + C^2)) + B (-(B * D) / (A^2 + B^2 + C^2)) + C (-(C * D) / (A^2 + B^2 + C^2)) + D ?= 0 + // A (-(A * D) / (A^2 + B^2 + C^2)) + B (-(B * D) / (A^2 + B^2 + C^2)) + C (-(C * D) / (A^2 + + // B^2 + C^2)) + D ?= 0 // -D [ A^2 / (A^2 + B^2 + C^2) + B^2 / (A^2 + B^2 + C^2) + C^2 / (A^2 + B^2 + C^2)] + D ?= 0 // Yes. 
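// A standalone sketch of the two-stage identity test used above: the normals
// must be (nearly) parallel -- a tiny cross product -- and then q's closest
// point to the origin, -D * (A, B, C) / (A^2 + B^2 + C^2), must also satisfy
// this plane's equation. The single tolerance and the names are illustrative
// only; the real methods use the MINIMUM_RESOLUTION constants seen here.
static boolean planesCoincide(double[] p, double[] q, double eps) {
  final double cx = p[1] * q[2] - p[2] * q[1];
  final double cy = p[2] * q[0] - p[0] * q[2];
  final double cz = p[0] * q[1] - p[1] * q[0];
  if (cx * cx + cy * cy + cz * cz >= eps) {
    return false; // normals not parallel: the planes meet in a line, not everywhere
  }
  final double denom = 1.0 / (q[0] * q[0] + q[1] * q[1] + q[2] * q[2]);
  final double x0 = -q[0] * q[3] * denom;
  final double y0 = -q[1] * q[3] * denom;
  final double z0 = -q[2] * q[3] * denom;
  // Parallel planes coincide exactly when one contains the other's sample point.
  return Math.abs(p[0] * x0 + p[1] * y0 + p[2] * z0 + p[3]) < eps;
}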
final double denom = 1.0 / (p.x * p.x + p.y * p.y + p.z * p.z); @@ -2424,40 +2854,55 @@ } /** - * Locate a point that is within the specified bounds and on the specified plane, that has an arcDistance as - * specified from the startPoint. + * Locate a point that is within the specified bounds and on the specified plane, at the + * specified arcDistance from the startPoint. + * * @param planetModel is the planet model. * @param arcDistanceValue is the arc distance. * @param startPoint is the starting point. * @param bounds are the bounds. * @return zero, one, or two points. */ - public GeoPoint[] findArcDistancePoints(final PlanetModel planetModel, final double arcDistanceValue, final GeoPoint startPoint, final Membership... bounds) { + public GeoPoint[] findArcDistancePoints( + final PlanetModel planetModel, + final double arcDistanceValue, + final GeoPoint startPoint, + final Membership... bounds) { if (Math.abs(D) >= MINIMUM_RESOLUTION) { - throw new IllegalStateException("Can't find arc distance using plane that doesn't go through origin"); + throw new IllegalStateException( + "Can't find arc distance using plane that doesn't go through origin"); } if (!evaluateIsZero(startPoint)) { throw new IllegalArgumentException("Start point is not on plane"); } - - // The following assertion fails at times even for planes that were *explicitly* normalized, so I've disabled the check. - //assert Math.abs(x*x + y*y + z*z - 1.0) < MINIMUM_RESOLUTION_SQUARED : "Plane needs to be normalized"; - - // The first step is to rotate coordinates for the point so that the plane lies on the x-y plane. - // To acheive this, there will need to be three rotations: + + // The following assertion fails at times even for planes that were *explicitly* normalized, so + // I've disabled the check. + // assert Math.abs(x*x + y*y + z*z - 1.0) < MINIMUM_RESOLUTION_SQUARED : "Plane needs to be + // normalized"; + + // The first step is to rotate coordinates for the point so that the plane lies on the x-y + // plane. + // To achieve this, there will need to be three rotations: // (1) rotate the plane in x-y so that the y axis lies in it. // (2) rotate the plane in x-z so that the plane lies on the x-y plane. // (3) rotate in x-y so that the starting vector points to (1,0,0). - + // This presumes a normalized plane!!
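// A small sketch of the rotation plan above: one coordinate-plane rotation by
// angle T, undone by rotating with the sign of sin T flipped. Chaining the
// azimuth and altitude rotations built from this helper carries the plane onto
// x-y exactly as outlined. The helper is illustrative and not part of Plane.java.
static double[] rotate2D(double u, double v, double cosT, double sinT) {
  // uT = u cos T - v sin T; vT = u sin T + v cos T
  return new double[] {u * cosT - v * sinT, u * sinT + v * cosT};
}
// Round trip: applying rotate2D with (cosT, sinT) and then with (cosT, -sinT)
// recovers the original (u, v), which is how the code below undoes its rotations.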
final double azimuthMagnitude = Math.sqrt(this.x * this.x + this.y * this.y); final double cosPlaneAltitude = this.z; final double sinPlaneAltitude = azimuthMagnitude; final double cosPlaneAzimuth = this.x / azimuthMagnitude; final double sinPlaneAzimuth = this.y / azimuthMagnitude; - - assert Math.abs(sinPlaneAltitude * sinPlaneAltitude + cosPlaneAltitude * cosPlaneAltitude - 1.0) < MINIMUM_RESOLUTION : "Improper sin/cos of altitude: "+(sinPlaneAltitude * sinPlaneAltitude + cosPlaneAltitude * cosPlaneAltitude); - assert Math.abs(sinPlaneAzimuth * sinPlaneAzimuth + cosPlaneAzimuth * cosPlaneAzimuth - 1.0) < MINIMUM_RESOLUTION : "Improper sin/cos of azimuth: "+(sinPlaneAzimuth * sinPlaneAzimuth + cosPlaneAzimuth * cosPlaneAzimuth); + + assert Math.abs(sinPlaneAltitude * sinPlaneAltitude + cosPlaneAltitude * cosPlaneAltitude - 1.0) + < MINIMUM_RESOLUTION + : "Improper sin/cos of altitude: " + + (sinPlaneAltitude * sinPlaneAltitude + cosPlaneAltitude * cosPlaneAltitude); + assert Math.abs(sinPlaneAzimuth * sinPlaneAzimuth + cosPlaneAzimuth * cosPlaneAzimuth - 1.0) + < MINIMUM_RESOLUTION + : "Improper sin/cos of azimuth: " + + (sinPlaneAzimuth * sinPlaneAzimuth + cosPlaneAzimuth * cosPlaneAzimuth); // Coordinate rotation formula: // xT = xS cos T - yS sin T @@ -2465,27 +2910,28 @@ public class Plane extends Vector { // But we're rotating backwards, so use: // sin (-T) = -sin (T) // cos (-T) = cos (T) - + // Now, rotate startpoint in x-y final double x0 = startPoint.x; final double y0 = startPoint.y; final double z0 = startPoint.z; - + final double x1 = x0 * cosPlaneAzimuth + y0 * sinPlaneAzimuth; final double y1 = -x0 * sinPlaneAzimuth + y0 * cosPlaneAzimuth; final double z1 = z0; - + // Rotate now in x-z final double x2 = x1 * cosPlaneAltitude - z1 * sinPlaneAltitude; final double y2 = y1; final double z2 = +x1 * sinPlaneAltitude + z1 * cosPlaneAltitude; - - assert Math.abs(z2) < MINIMUM_RESOLUTION : "Rotation should have put startpoint on x-y plane, instead has value "+z2; - + + assert Math.abs(z2) < MINIMUM_RESOLUTION + : "Rotation should have put startpoint on x-y plane, instead has value " + z2; + // Ok, we have the start point on the x-y plane. To apply the arc distance, we // next need to convert to an angle (in radians). final double startAngle = Math.atan2(y2, x2); - + // To apply the arc distance, just add to startAngle. final double point1Angle = startAngle + arcDistanceValue; final double point2Angle = startAngle - arcDistanceValue; @@ -2493,17 +2939,17 @@ public class Plane extends Vector { final double point1x2 = Math.cos(point1Angle); final double point1y2 = Math.sin(point1Angle); final double point1z2 = 0.0; - + final double point2x2 = Math.cos(point2Angle); final double point2y2 = Math.sin(point2Angle); final double point2z2 = 0.0; - + // Now, do the reverse rotations for both points // Altitude... 
final double point1x1 = point1x2 * cosPlaneAltitude + point1z2 * sinPlaneAltitude; final double point1y1 = point1y2; final double point1z1 = -point1x2 * sinPlaneAltitude + point1z2 * cosPlaneAltitude; - + final double point2x1 = point2x2 * cosPlaneAltitude + point2z2 * sinPlaneAltitude; final double point2y1 = point2y2; final double point2z1 = -point2x2 * sinPlaneAltitude + point2z2 * cosPlaneAltitude; @@ -2519,28 +2965,29 @@ public class Plane extends Vector { final GeoPoint point1 = planetModel.createSurfacePoint(point1x0, point1y0, point1z0); final GeoPoint point2 = planetModel.createSurfacePoint(point2x0, point2y0, point2z0); - + // Figure out what to return boolean isPoint1Inside = meetsAllBounds(point1, bounds); boolean isPoint2Inside = meetsAllBounds(point2, bounds); - + if (isPoint1Inside) { if (isPoint2Inside) { - return new GeoPoint[]{point1, point2}; + return new GeoPoint[] {point1, point2}; } else { - return new GeoPoint[]{point1}; + return new GeoPoint[] {point1}; } } else { if (isPoint2Inside) { - return new GeoPoint[]{point2}; + return new GeoPoint[] {point2}; } else { return new GeoPoint[0]; } } } - + /** * Check if a vector meets the provided bounds. + * * @param p is the vector. * @param bounds are the bounds. * @return true if the vector describes a point within the bounds. @@ -2551,33 +2998,39 @@ public class Plane extends Vector { /** * Check if a vector meets the provided bounds. + * * @param x is the x value. * @param y is the y value. * @param z is the z value. * @param bounds are the bounds. * @return true if the vector describes a point within the bounds. */ - private static boolean meetsAllBounds(final double x, final double y, final double z, final Membership[] bounds) { + private static boolean meetsAllBounds( + final double x, final double y, final double z, final Membership[] bounds) { for (final Membership bound : bounds) { - if (!bound.isWithin(x,y,z)) + if (!bound.isWithin(x, y, z)) { return false; + } } return true; } /** * Check if a vector meets the provided bounds. + * * @param p is the vector. * @param bounds are the bounds. * @param moreBounds are an additional set of bounds. * @return true if the vector describes a point within the bounds. */ - private static boolean meetsAllBounds(final Vector p, final Membership[] bounds, final Membership[] moreBounds) { + private static boolean meetsAllBounds( + final Vector p, final Membership[] bounds, final Membership[] moreBounds) { return meetsAllBounds(p.x, p.y, p.z, bounds, moreBounds); } /** * Check if a vector meets the provided bounds. + * * @param x is the x value. * @param y is the y value. * @param z is the z value. @@ -2585,21 +3038,27 @@ public class Plane extends Vector { * @param moreBounds are an additional set of bounds. * @return true if the vector describes a point within the bounds. */ - private static boolean meetsAllBounds(final double x, final double y, final double z, final Membership[] bounds, - final Membership[] moreBounds) { - return meetsAllBounds(x,y,z, bounds) && meetsAllBounds(x,y,z, moreBounds); + private static boolean meetsAllBounds( + final double x, + final double y, + final double z, + final Membership[] bounds, + final Membership[] moreBounds) { + return meetsAllBounds(x, y, z, bounds) && meetsAllBounds(x, y, z, moreBounds); } /** * Find a sample point on the intersection between two planes and the world. + * * @param planetModel is the planet model. * @param q is the second plane to consider. 
* @return a sample point that is on the intersection between the two planes and the world. */ public GeoPoint getSampleIntersectionPoint(final PlanetModel planetModel, final Plane q) { final GeoPoint[] intersections = findIntersections(planetModel, q, NO_BOUNDS, NO_BOUNDS); - if (intersections.length == 0) + if (intersections.length == 0) { return null; + } return intersections[0]; } @@ -2610,10 +3069,12 @@ public class Plane extends Vector { @Override public boolean equals(Object o) { - if (!super.equals(o)) + if (!super.equals(o)) { return false; - if (!(o instanceof Plane)) + } + if (!(o instanceof Plane)) { return false; + } Plane other = (Plane) o; return other.D == D; } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/PlanetModel.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/PlanetModel.java index e5f931fda2e..eb66ae973fd 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/PlanetModel.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/PlanetModel.java @@ -16,30 +16,32 @@ */ package org.apache.lucene.spatial3d.geom; +import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.io.IOException; /** * Holds mathematical constants associated with the model of a planet. + * * @lucene.experimental */ public class PlanetModel implements SerializableObject { /** Planet model corresponding to sphere. */ - public static final PlanetModel SPHERE = new PlanetModel(1.0,1.0); + public static final PlanetModel SPHERE = new PlanetModel(1.0, 1.0); - /** Planet model corresponding to WGS84 ellipsoid*/ + /** Planet model corresponding to WGS84 ellipsoid */ // see http://earth-info.nga.mil/GandG/publications/tr8350.2/wgs84fin.pdf public static final PlanetModel WGS84 = new PlanetModel(6378137.0d, 6356752.314245d); - /** Planet model corresponding to Clarke 1866 ellipsoid*/ + /** Planet model corresponding to Clarke 1866 ellipsoid */ // see https://georepository.com/ellipsoid_7008/Clarke-1866.html public static final PlanetModel CLARKE_1866 = new PlanetModel(6378206.4d, 6356583.8d); // Surface of the planet: // x^2/a^2 + y^2/b^2 + z^2/zScaling^2 = 1.0 - // Scaling factors are a,b,zScaling. geo3d can only support models where a==b, so use xyScaling instead. + // Scaling factors are a,b,zScaling. geo3d can only support models where a==b, so use xyScaling + // instead. 
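// A tiny sketch of the surface equation just stated: a point counts as on the
// ellipsoid when |x^2/xyScaling^2 + y^2/xyScaling^2 + z^2/zScaling^2 - 1.0| is
// below a resolution epsilon. Parameter names are illustrative only; the class
// below precomputes the inverse squared scales instead of dividing.
static boolean onSurface(
    double x, double y, double z, double xyScaling, double zScaling, double eps) {
  final double v =
      (x * x + y * y) / (xyScaling * xyScaling) + (z * z) / (zScaling * zScaling);
  return Math.abs(v - 1.0) < eps;
}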
/** Semi-major axis */ public final double a; /** Semi-minor axis */ @@ -96,7 +98,7 @@ public class PlanetModel implements SerializableObject { public final double MAX_VALUE; /** numeric space (buckets) for mapping double values into integer range */ private final double MUL; - /** scalar value used to decode from integer back into double space*/ + /** scalar value used to decode from integer back into double space */ public final double DECODE; /** Max encoded value */ public final int MAX_ENCODED_VALUE; @@ -132,18 +134,21 @@ public class PlanetModel implements SerializableObject { this.MAX_Y_POLE = new GeoPoint(xyScaling, 0.0, 1.0, 0.0, 0.0, Math.PI * 0.5); this.inverseScale = 1.0 / scale; - this.minimumPoleDistance = Math.min(surfaceDistance(NORTH_POLE, SOUTH_POLE), surfaceDistance(MIN_X_POLE, MAX_X_POLE)); + this.minimumPoleDistance = + Math.min(surfaceDistance(NORTH_POLE, SOUTH_POLE), surfaceDistance(MIN_X_POLE, MAX_X_POLE)); this.MAX_VALUE = getMaximumMagnitude(); this.MUL = (0x1L << BITS) / (2 * this.MAX_VALUE); - this.DECODE = getNextSafeDouble(1/MUL); + this.DECODE = getNextSafeDouble(1 / MUL); this.MIN_ENCODED_VALUE = encodeValue(-MAX_VALUE); this.MAX_ENCODED_VALUE = encodeValue(MAX_VALUE); this.docValueEncoder = new DocValueEncoder(this); } - /** Deserialization constructor. + /** + * Deserialization constructor. + * * @param inputStream is the input stream. */ public PlanetModel(final InputStream inputStream) throws IOException { @@ -156,64 +161,82 @@ public class PlanetModel implements SerializableObject { SerializableObject.writeDouble(outputStream, b); } - /** Does this planet model describe a sphere? - *@return true if so. + /** + * Does this planet model describe a sphere? + * + * @return true if so. */ public boolean isSphere() { return this.xyScaling == this.zScaling; } - /** Find the minimum magnitude of all points on the ellipsoid. + /** + * Find the minimum magnitude of all points on the ellipsoid. + * * @return the minimum magnitude for the planet. */ public double getMinimumMagnitude() { return Math.min(this.xyScaling, this.zScaling); } - /** Find the maximum magnitude of all points on the ellipsoid. + /** + * Find the maximum magnitude of all points on the ellipsoid. + * * @return the maximum magnitude for the planet. */ public double getMaximumMagnitude() { return Math.max(this.xyScaling, this.zScaling); } - /** Find the minimum x value. - *@return the minimum X value. + /** + * Find the minimum x value. + * + * @return the minimum X value. */ public double getMinimumXValue() { return -this.xyScaling; } - /** Find the maximum x value. - *@return the maximum X value. + /** + * Find the maximum x value. + * + * @return the maximum X value. */ public double getMaximumXValue() { return this.xyScaling; } - /** Find the minimum y value. - *@return the minimum Y value. + /** + * Find the minimum y value. + * + * @return the minimum Y value. */ public double getMinimumYValue() { return -this.xyScaling; } - /** Find the maximum y value. - *@return the maximum Y value. + /** + * Find the maximum y value. + * + * @return the maximum Y value. */ public double getMaximumYValue() { return this.xyScaling; } - /** Find the minimum z value. - *@return the minimum Z value. + /** + * Find the minimum z value. + * + * @return the minimum Z value. */ public double getMinimumZValue() { return -this.zScaling; } - /** Find the maximum z value. - *@return the maximum Z value. + /** + * Find the maximum z value. + * + * @return the maximum Z value. 
*/ public double getMaximumZValue() { return this.zScaling; @@ -227,13 +250,23 @@ public class PlanetModel implements SerializableObject { /** encode the provided value from double to integer space */ public int encodeValue(double x) { if (x > getMaximumMagnitude()) { - throw new IllegalArgumentException("value=" + x + " is out-of-bounds (greater than planetMax=" + getMaximumMagnitude() + ")"); + throw new IllegalArgumentException( + "value=" + + x + + " is out-of-bounds (greater than planetMax=" + + getMaximumMagnitude() + + ")"); } if (x == getMaximumMagnitude()) { x = Math.nextDown(x); } if (x < -getMaximumMagnitude()) { - throw new IllegalArgumentException("value=" + x + " is out-of-bounds (less than than -planetMax=" + -getMaximumMagnitude() + ")"); + throw new IllegalArgumentException( + "value=" + + x + + " is out-of-bounds (less than -planetMax=" + + -getMaximumMagnitude() + + ")"); } long result = (long) Math.floor(x / DECODE); assert result >= Integer.MIN_VALUE; @@ -241,20 +274,19 @@ public class PlanetModel implements SerializableObject { return (int) result; } - /** - * Decodes a given integer back into the radian value according to the defined planet model - */ + /** Decodes a given integer back into the radian value according to the defined planet model */ public double decodeValue(int x) { double result; if (x == MIN_ENCODED_VALUE) { - // We must special case this, because -MAX_VALUE is not guaranteed to land precisely at a floor value, and we don't ever want to - // return a value outside of the planet's range (I think?). The max value is "safe" because we floor during encode: + // We must special case this, because -MAX_VALUE is not guaranteed to land precisely at a + // floor value, and we don't ever want to return a value outside of the planet's range + // (I think?). The max value is "safe" because we floor during encode: result = -MAX_VALUE; } else if (x == MAX_ENCODED_VALUE) { result = MAX_VALUE; } else { // We decode to the center value; this keeps the encoding stable - result = (x+0.5) * DECODE; + result = (x + 0.5) * DECODE; } assert result >= -MAX_VALUE && result <= MAX_VALUE; return result; @@ -265,8 +297,10 @@ public class PlanetModel implements SerializableObject { return this.docValueEncoder; } - /** Returns a double value >= x such that if you multiply that value by an int, and then - * divide it by that int again, you get precisely the same value back */ + /** + * Returns a double value >= x such that if you multiply that value by an int, and then divide it + * by that int again, you get precisely the same value back + */ private static double getNextSafeDouble(double x) { // Move to double space: @@ -284,15 +318,19 @@ public class PlanetModel implements SerializableObject { return result; } - /** Check if point is on surface. + /** + * Check if point is on surface. + * * @param v is the point to check. * @return true if the point is on the planet surface. */ public boolean pointOnSurface(final Vector v) { return pointOnSurface(v.x, v.y, v.z); } - - /** Check if point is on surface. + + /** + * Check if point is on surface. + * * @param x is the x coord. * @param y is the y coord. * @param z is the z coord.
@@ -300,18 +338,27 @@ public class PlanetModel implements SerializableObject { public boolean pointOnSurface(final double x, final double y, final double z) { // Equation of planet surface is: // x^2 / a^2 + y^2 / b^2 + z^2 / zScaling^2 - 1 = 0 - return Math.abs(x * x * inverseXYScaling * inverseXYScaling + y * y * inverseXYScaling * inverseXYScaling + z * z * inverseZScaling * inverseZScaling - 1.0) < Vector.MINIMUM_RESOLUTION; + return Math.abs( + x * x * inverseXYScaling * inverseXYScaling + + y * y * inverseXYScaling * inverseXYScaling + + z * z * inverseZScaling * inverseZScaling + - 1.0) + < Vector.MINIMUM_RESOLUTION; } - /** Check if point is outside surface. + /** + * Check if point is outside surface. + * * @param v is the point to check. * @return true if the point is outside the planet surface. */ public boolean pointOutside(final Vector v) { return pointOutside(v.x, v.y, v.z); } - - /** Check if point is outside surface. + + /** + * Check if point is outside surface. + * * @param x is the x coord. * @param y is the y coord. * @param z is the z coord. @@ -319,10 +366,15 @@ public class PlanetModel implements SerializableObject { public boolean pointOutside(final double x, final double y, final double z) { // Equation of planet surface is: // x^2 / a^2 + y^2 / b^2 + z^2 / zScaling^2 - 1 = 0 - return (x * x + y * y) * inverseXYScaling * inverseXYScaling + z * z * inverseZScaling * inverseZScaling - 1.0 > Vector.MINIMUM_RESOLUTION; + return (x * x + y * y) * inverseXYScaling * inverseXYScaling + + z * z * inverseZScaling * inverseZScaling + - 1.0 + > Vector.MINIMUM_RESOLUTION; } - - /** Compute a GeoPoint that's scaled to actually be on the planet surface. + + /** + * Compute a GeoPoint that's scaled to actually be on the planet surface. + * * @param vector is the vector. * @return the scaled point. */ @@ -330,7 +382,10 @@ public class PlanetModel implements SerializableObject { return createSurfacePoint(vector.x, vector.y, vector.z); } - /** Compute a GeoPoint that's based on (x,y,z) values, but is scaled to actually be on the planet surface. + /** + * Compute a GeoPoint that's based on (x,y,z) values, but is scaled to actually be on the planet + * surface. + * * @param x is the x value. * @param y is the y value. * @param z is the z value. @@ -343,11 +398,18 @@ public class PlanetModel implements SerializableObject { // ((tx)^2 / a^2 + (ty)^2 / b^2 + (tz)^2 / zScaling^2) = 1 // t^2 * (x^2 / a^2 + y^2 / b^2 + z^2 / zScaling^2) = 1 // t = sqrt ( 1 / (x^2 / a^2 + y^2 / b^2 + z^2 / zScaling^2)) - final double t = Math.sqrt(1.0 / (x*x* inverseXYScalingSquared + y*y* inverseXYScalingSquared + z*z* inverseZScalingSquared)); - return new GeoPoint(t*x, t*y, t*z); + final double t = + Math.sqrt( + 1.0 + / (x * x * inverseXYScalingSquared + + y * y * inverseXYScalingSquared + + z * z * inverseZScalingSquared)); + return new GeoPoint(t * x, t * y, t * z); } - - /** Compute a GeoPoint that's a bisection between two other GeoPoints. + + /** + * Compute a GeoPoint that's a bisection between two other GeoPoints. + * * @param pt1 is the first point. * @param pt2 is the second point. * @return the bisection point, or null if a unique one cannot be found. 
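// A standalone sketch of the scaling step above: any nonzero vector is pulled
// onto the surface by t = sqrt(1 / (x^2/a^2 + y^2/b^2 + z^2/zScaling^2)); on a
// unit sphere this reduces to ordinary vector normalization. The inverse
// squared scales are passed in directly, and the names are illustrative only.
static double[] scaleToSurface(
    double x, double y, double z, double invXYScaling2, double invZScaling2) {
  final double t =
      Math.sqrt(1.0 / (x * x * invXYScaling2 + y * y * invXYScaling2 + z * z * invZScaling2));
  return new double[] {t * x, t * y, t * z};
}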
@@ -356,31 +418,35 @@ public class PlanetModel implements SerializableObject { final double A0 = (pt1.x + pt2.x) * 0.5; final double B0 = (pt1.y + pt2.y) * 0.5; final double C0 = (pt1.z + pt2.z) * 0.5; - - final double denom = inverseXYScalingSquared * A0 * A0 + - inverseXYScalingSquared * B0 * B0 + - inverseZScalingSquared * C0 * C0; - - if(denom < Vector.MINIMUM_RESOLUTION) { + + final double denom = + inverseXYScalingSquared * A0 * A0 + + inverseXYScalingSquared * B0 * B0 + + inverseZScalingSquared * C0 * C0; + + if (denom < Vector.MINIMUM_RESOLUTION) { // Bisection is undefined return null; } - + final double t = Math.sqrt(1.0 / denom); - + return new GeoPoint(t * A0, t * B0, t * C0); } - - /** Compute surface distance between two points. + + /** + * Compute surface distance between two points. + * * @param pt1 is the first point. * @param pt2 is the second point. - * @return the adjusted angle, when multiplied by the mean earth radius, yields a surface distance. This will differ - * from GeoPoint.arcDistance() only when the planet model is not a sphere. @see {@link GeoPoint#arcDistance(Vector)} + * @return the adjusted angle that, when multiplied by the mean earth radius, yields a surface + * distance. This will differ from GeoPoint.arcDistance() only when the planet model is not a + * sphere. @see {@link GeoPoint#arcDistance(Vector)} */ public double surfaceDistance(final GeoPoint pt1, final GeoPoint pt2) { final double L = pt2.getLongitude() - pt1.getLongitude(); - final double U1 = Math.atan((1.0- scaledFlattening) * Math.tan(pt1.getLatitude())); - final double U2 = Math.atan((1.0- scaledFlattening) * Math.tan(pt2.getLatitude())); + final double U1 = Math.atan((1.0 - scaledFlattening) * Math.tan(pt1.getLatitude())); + final double U2 = Math.atan((1.0 - scaledFlattening) * Math.tan(pt2.getLatitude())); final double sinU1 = Math.sin(U1); final double cosU1 = Math.cos(U1); @@ -393,7 +459,6 @@ public class PlanetModel implements SerializableObject { final double dSinU1SinU2 = sinU1 * sinU2; final double dSinU1CosU2 = sinU1 * cosU2; - double lambda = L; double lambdaP = Math.PI * 2.0; int iterLimit = 0; @@ -409,10 +474,13 @@ public class PlanetModel implements SerializableObject { do { sinLambda = Math.sin(lambda); cosLambda = Math.cos(lambda); - sinSigma = Math.sqrt((cosU2*sinLambda) * (cosU2*sinLambda) + - (dCosU1SinU2 - dSinU1CosU2 * cosLambda) * (dCosU1SinU2 - dSinU1CosU2 * cosLambda)); + sinSigma = + Math.sqrt( + (cosU2 * sinLambda) * (cosU2 * sinLambda) + + (dCosU1SinU2 - dSinU1CosU2 * cosLambda) + * (dCosU1SinU2 - dSinU1CosU2 * cosLambda)); - if (sinSigma==0.0) { + if (sinSigma == 0.0) { return 0.0; } cosSigma = dSinU1SinU2 + dCosU1CosU2 * cosLambda; @@ -421,35 +489,60 @@ public class PlanetModel implements SerializableObject { cosSqAlpha = 1.0 - sinAlpha * sinAlpha; cos2SigmaM = cosSigma - 2.0 * dSinU1SinU2 / cosSqAlpha; - if (Double.isNaN(cos2SigmaM)) - cos2SigmaM = 0.0; // equatorial line: cosSqAlpha=0 - C = scaledFlattening / 16.0 * cosSqAlpha * (4.0 + scaledFlattening * (4.0 - 3.0 * cosSqAlpha)); + if (Double.isNaN(cos2SigmaM)) { + cos2SigmaM = 0.0; // equatorial line: cosSqAlpha=0 + } + C = + scaledFlattening + / 16.0 + * cosSqAlpha + * (4.0 + scaledFlattening * (4.0 - 3.0 * cosSqAlpha)); lambdaP = lambda; - lambda = L + (1.0 - C) * scaledFlattening * sinAlpha * - (sigma + C * sinSigma * (cos2SigmaM + C * cosSigma * (-1.0 + 2.0 * cos2SigmaM *cos2SigmaM))); - } while (Math.abs(lambda-lambdaP) >= Vector.MINIMUM_RESOLUTION && ++iterLimit < 100); + lambda = + L + + (1.0 - C) + * 
scaledFlattening * sinAlpha * (sigma + + C + * sinSigma + * (cos2SigmaM + C * cosSigma * (-1.0 + 2.0 * cos2SigmaM * cos2SigmaM))); + } while (Math.abs(lambda - lambdaP) >= Vector.MINIMUM_RESOLUTION && ++iterLimit < 100); final double uSq = cosSqAlpha * this.squareRatio; final double A = 1.0 + uSq / 16384.0 * (4096.0 + uSq * (-768.0 + uSq * (320.0 - 175.0 * uSq))); final double B = uSq / 1024.0 * (256.0 + uSq * (-128.0 + uSq * (74.0 - 47.0 * uSq))); - final double deltaSigma = B * sinSigma * (cos2SigmaM + B / 4.0 * (cosSigma * (-1.0 + 2.0 * cos2SigmaM * cos2SigmaM)- - B / 6.0 * cos2SigmaM * (-3.0 + 4.0 * sinSigma * sinSigma) * (-3.0 + 4.0 * cos2SigmaM * cos2SigmaM))); + final double deltaSigma = + B + * sinSigma + * (cos2SigmaM + + B + / 4.0 + * (cosSigma * (-1.0 + 2.0 * cos2SigmaM * cos2SigmaM) + - B + / 6.0 + * cos2SigmaM + * (-3.0 + 4.0 * sinSigma * sinSigma) + * (-3.0 + 4.0 * cos2SigmaM * cos2SigmaM))); return zScaling * inverseScale * A * (sigma - deltaSigma); } - /** Compute new point given original point, a bearing direction, and an adjusted angle (as would be computed by - * the surfaceDistance() method above). The original point can be anywhere on the globe. The bearing direction - * ranges from 0 (due east at the equator) to pi/2 (due north) to pi (due west at the equator) to 3 pi/4 (due south) - * to 2 pi. + /** + * Compute new point given original point, a bearing direction, and an adjusted angle (as would be + * computed by the surfaceDistance() method above). The original point can be anywhere on the + * globe. The bearing direction ranges from 0 (due east at the equator) to pi/2 (due north) to pi + * (due west at the equator) to 3 pi/2 (due south) to 2 pi. + * * @param from is the starting point. * @param dist is the adjusted angle. * @param bearing is the direction to proceed. * @return the new point, consistent with the bearing direction and distance. */ - public GeoPoint surfacePointOnBearing(final GeoPoint from, final double dist, final double bearing) { + public GeoPoint surfacePointOnBearing( + final GeoPoint from, final double dist, final double bearing) { // Algorithm using Vincenty's formulae (https://en.wikipedia.org/wiki/Vincenty%27s_formulae) // which takes into account that planets may not be spherical.
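// A minimal sketch of the bearing convention just described, on a plain sphere
// where the ellipsoidal iteration below collapses to the closed-form
// great-circle direct problem: bearing 0 = due east, pi/2 = due north,
// pi = due west, 3 pi/2 = due south. Illustrative only; geo3d itself runs the
// full Vincenty iteration that follows.
static double[] sphereSurfacePointOnBearing(
    double lat, double lon, double dist, double bearing) {
  // Convert the east-based bearing to the usual north-based azimuth.
  final double azimuth = Math.PI / 2.0 - bearing;
  final double lat2 =
      Math.asin(
          Math.sin(lat) * Math.cos(dist) + Math.cos(lat) * Math.sin(dist) * Math.cos(azimuth));
  final double lon2 =
      lon
          + Math.atan2(
              Math.sin(azimuth) * Math.sin(dist) * Math.cos(lat),
              Math.cos(dist) - Math.sin(lat) * Math.sin(lat2));
  return new double[] {lat2, lon2}; // radians, like the GeoPoint accessors above
}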
- //Code adaptation from http://www.movable-type.co.uk/scripts/latlong-vincenty.html + // Code adaptation from http://www.movable-type.co.uk/scripts/latlong-vincenty.html double lat = from.getLatitude(); double lon = from.getLongitude(); @@ -479,35 +572,56 @@ public class PlanetModel implements SerializableObject { cos2σM = Math.cos(2.0 * σ1 + σ); sinσ = Math.sin(σ); cosσ = Math.cos(σ); - Δσ = B * sinσ * (cos2σM + B / 4.0 * (cosσ * (-1.0 + 2.0 * cos2σM * cos2σM) - - B / 6.0 * cos2σM * (-3.0 + 4.0 * sinσ * sinσ) * (-3.0 + 4.0 * cos2σM * cos2σM))); + Δσ = + B + * sinσ + * (cos2σM + + B + / 4.0 + * (cosσ * (-1.0 + 2.0 * cos2σM * cos2σM) + - B + / 6.0 + * cos2σM + * (-3.0 + 4.0 * sinσ * sinσ) + * (-3.0 + 4.0 * cos2σM * cos2σM))); σʹ = σ; σ = dist / (zScaling * inverseScale * A) + Δσ; } while (Math.abs(σ - σʹ) >= Vector.MINIMUM_RESOLUTION && ++iterations < 100); double x = sinU1 * sinσ - cosU1 * cosσ * cosα1; - double φ2 = Math.atan2(sinU1 * cosσ + cosU1 * sinσ * cosα1, (1.0 - scaledFlattening) * Math.sqrt(sinα * sinα + x * x)); + double φ2 = + Math.atan2( + sinU1 * cosσ + cosU1 * sinσ * cosα1, + (1.0 - scaledFlattening) * Math.sqrt(sinα * sinα + x * x)); double λ = Math.atan2(sinσ * sinα1, cosU1 * cosσ - sinU1 * sinσ * cosα1); double C = scaledFlattening / 16.0 * cosSqα * (4.0 + scaledFlattening * (4.0 - 3.0 * cosSqα)); - double L = λ - (1.0 - C) * scaledFlattening * sinα * - (σ + C * sinσ * (cos2σM + C * cosσ * (-1.0 + 2.0 * cos2σM * cos2σM))); - double λ2 = (lon + L + 3.0 * Math.PI) % (2.0 * Math.PI) - Math.PI; // normalise to -180..+180 + double L = + λ + - (1.0 - C) + * scaledFlattening + * sinα + * (σ + C * sinσ * (cos2σM + C * cosσ * (-1.0 + 2.0 * cos2σM * cos2σM))); + double λ2 = (lon + L + 3.0 * Math.PI) % (2.0 * Math.PI) - Math.PI; // normalise to -180..+180 return new GeoPoint(this, φ2, λ2); } - /** Utility class for encoding / decoding from lat/lon (decimal degrees) into sortable doc value numerics (integers) */ + /** + * Utility class for encoding / decoding from lat/lon (decimal degrees) into sortable doc value + * numerics (integers) + */ public static class DocValueEncoder { private final PlanetModel planetModel; - // These are the multiplicative constants we need to use to arrive at values that fit in 21 bits. - // The formula we use to go from double to encoded value is: Math.floor((value - minimum) * factor + 0.5) + // These are the multiplicative constants we need to use to arrive at values that fit in 21 + // bits. The formula we use to go from double to encoded value is: + // Math.floor((value - minimum) * factor + 0.5) // If we plug in maximum for value, we should get 0x1FFFFF. // So, 0x1FFFFF = Math.floor((maximum - minimum) * factor + 0.5) // We factor out the 0.5 and Math.floor by stating instead: // 0x1FFFFF = (maximum - minimum) * factor // So, factor = 0x1FFFFF / (maximum - minimum) - private final static double inverseMaximumValue = 1.0 / (double)(0x1FFFFF); + private static final double inverseMaximumValue = 1.0 / (double) (0x1FFFFF); private final double inverseXFactor; private final double inverseYFactor; @@ -517,11 +631,11 @@ public class PlanetModel implements SerializableObject { private final double yFactor; private final double zFactor; - // Fudge factor for step adjustments. This is here solely to handle inaccuracies in bounding boxes - // that occur because of quantization. For unknown reasons, the fudge factor needs to be - // 10.0 rather than 1.0. See LUCENE-7430. + // Fudge factor for step adjustments. 
This is here solely to handle inaccuracies in bounding + // boxes that occur because of quantization. For unknown reasons, the fudge factor needs to + // be 10.0 rather than 1.0. See LUCENE-7430. - private final static double STEP_FUDGE = 10.0; + private static final double STEP_FUDGE = 10.0; // These values are the delta between a value and the next value in each specific dimension @@ -533,9 +647,12 @@ public class PlanetModel implements SerializableObject { private DocValueEncoder(final PlanetModel planetModel) { this.planetModel = planetModel; - this.inverseXFactor = (planetModel.getMaximumXValue() - planetModel.getMinimumXValue()) * inverseMaximumValue; - this.inverseYFactor = (planetModel.getMaximumYValue() - planetModel.getMinimumYValue()) * inverseMaximumValue; - this.inverseZFactor = (planetModel.getMaximumZValue() - planetModel.getMinimumZValue()) * inverseMaximumValue; + this.inverseXFactor = + (planetModel.getMaximumXValue() - planetModel.getMinimumXValue()) * inverseMaximumValue; + this.inverseYFactor = + (planetModel.getMaximumYValue() - planetModel.getMinimumYValue()) * inverseMaximumValue; + this.inverseZFactor = + (planetModel.getMaximumZValue() - planetModel.getMinimumZValue()) * inverseMaximumValue; this.xFactor = 1.0 / inverseXFactor; this.yFactor = 1.0 / inverseYFactor; @@ -546,7 +663,9 @@ public class PlanetModel implements SerializableObject { this.zStep = inverseZFactor * STEP_FUDGE; } - /** Encode a point. + /** + * Encode a point. + * * @param point is the point * @return the encoded long */ @@ -554,7 +673,9 @@ public class PlanetModel implements SerializableObject { return encodePoint(point.x, point.y, point.z); } - /** Encode a point. + /** + * Encode a point. + * * @param x is the x value * @param y is the y value * @param z is the z value @@ -564,47 +685,57 @@ public class PlanetModel implements SerializableObject { int XEncoded = encodeX(x); int YEncoded = encodeY(y); int ZEncoded = encodeZ(z); - return - (((long)(XEncoded & 0x1FFFFF)) << 42) | - (((long)(YEncoded & 0x1FFFFF)) << 21) | - ((long)(ZEncoded & 0x1FFFFF)); + return (((long) (XEncoded & 0x1FFFFF)) << 42) + | (((long) (YEncoded & 0x1FFFFF)) << 21) + | ((long) (ZEncoded & 0x1FFFFF)); } - /** Decode GeoPoint value from long docvalues value. + /** + * Decode GeoPoint value from long docvalues value. + * * @param docValue is the doc values value. * @return the GeoPoint. */ public GeoPoint decodePoint(final long docValue) { - return new GeoPoint(decodeX(((int)(docValue >> 42)) & 0x1FFFFF), - decodeY(((int)(docValue >> 21)) & 0x1FFFFF), - decodeZ(((int)(docValue)) & 0x1FFFFF)); + return new GeoPoint( + decodeX(((int) (docValue >> 42)) & 0x1FFFFF), + decodeY(((int) (docValue >> 21)) & 0x1FFFFF), + decodeZ(((int) (docValue)) & 0x1FFFFF)); } - /** Decode X value from long docvalues value. + /** + * Decode X value from long docvalues value. + * * @param docValue is the doc values value. * @return the x value. */ public double decodeXValue(final long docValue) { - return decodeX(((int)(docValue >> 42)) & 0x1FFFFF); + return decodeX(((int) (docValue >> 42)) & 0x1FFFFF); } - /** Decode Y value from long docvalues value. + /** + * Decode Y value from long docvalues value. + * * @param docValue is the doc values value. * @return the y value. */ public double decodeYValue(final long docValue) { - return decodeY(((int)(docValue >> 21)) & 0x1FFFFF); + return decodeY(((int) (docValue >> 21)) & 0x1FFFFF); } - /** Decode Z value from long docvalues value. + /** + * Decode Z value from long docvalues value. 
+ * * @param docValue is the doc values value. * @return the z value. */ public double decodeZValue(final long docValue) { - return decodeZ(((int)(docValue)) & 0x1FFFFF); + return decodeZ(((int) (docValue)) & 0x1FFFFF); } - /** Round the provided X value down, by encoding it, decrementing it, and unencoding it. + /** + * Round the provided X value down, by encoding it, decrementing it, and unencoding it. + * * @param startValue is the starting value. * @return the rounded value. */ @@ -612,7 +743,9 @@ public class PlanetModel implements SerializableObject { return startValue - xStep; } - /** Round the provided X value up, by encoding it, incrementing it, and unencoding it. + /** + * Round the provided X value up, by encoding it, incrementing it, and unencoding it. + * * @param startValue is the starting value. * @return the rounded value. */ @@ -620,7 +753,9 @@ public class PlanetModel implements SerializableObject { return startValue + xStep; } - /** Round the provided Y value down, by encoding it, decrementing it, and unencoding it. + /** + * Round the provided Y value down, by encoding it, decrementing it, and unencoding it. + * * @param startValue is the starting value. * @return the rounded value. */ @@ -628,7 +763,9 @@ public class PlanetModel implements SerializableObject { return startValue - yStep; } - /** Round the provided Y value up, by encoding it, incrementing it, and unencoding it. + /** + * Round the provided Y value up, by encoding it, incrementing it, and unencoding it. + * * @param startValue is the starting value. * @return the rounded value. */ @@ -636,7 +773,9 @@ public class PlanetModel implements SerializableObject { return startValue + yStep; } - /** Round the provided Z value down, by encoding it, decrementing it, and unencoding it. + /** + * Round the provided Z value down, by encoding it, decrementing it, and unencoding it. + * * @param startValue is the starting value. * @return the rounded value. */ @@ -644,7 +783,9 @@ public class PlanetModel implements SerializableObject { return startValue - zStep; } - /** Round the provided Z value up, by encoding it, incrementing it, and unencoding it. + /** + * Round the provided Z value up, by encoding it, incrementing it, and unencoding it. + * * @param startValue is the starting value. * @return the rounded value. */ @@ -654,9 +795,11 @@ public class PlanetModel implements SerializableObject { // For encoding/decoding, we generally want the following behavior: // (1) If you encode the maximum value or the minimum value, the resulting int fits in 21 bits. - // (2) If you decode an encoded value, you get back the original value for both the minimum and maximum planet model values. - // (3) Rounding occurs such that a small delta from the minimum and maximum planet model values still returns the same - // values -- that is, these are in the center of the range of input values that should return the minimum or maximum when decoded + // (2) If you decode an encoded value, you get back the original value for both the minimum and + // maximum planet model values. 
+ // (3) Rounding occurs such that a small delta from the minimum and maximum planet model values + // still returns the same values -- that is, these are in the center of the range of input + // values that should return the minimum or maximum when decoded private int encodeX(final double x) { if (x > planetModel.getMaximumXValue()) { @@ -664,7 +807,7 @@ public class PlanetModel implements SerializableObject { } else if (x < planetModel.getMinimumXValue()) { throw new IllegalArgumentException("x value less than planet model minimum"); } - return (int)Math.floor((x - planetModel.getMinimumXValue()) * xFactor + 0.5); + return (int) Math.floor((x - planetModel.getMinimumXValue()) * xFactor + 0.5); } private double decodeX(final int x) { @@ -677,7 +820,7 @@ public class PlanetModel implements SerializableObject { } else if (y < planetModel.getMinimumYValue()) { throw new IllegalArgumentException("y value less than planet model minimum"); } - return (int)Math.floor((y - planetModel.getMinimumYValue()) * yFactor + 0.5); + return (int) Math.floor((y - planetModel.getMinimumYValue()) * yFactor + 0.5); } private double decodeY(final int y) { @@ -690,7 +833,7 @@ public class PlanetModel implements SerializableObject { } else if (z < planetModel.getMinimumZValue()) { throw new IllegalArgumentException("z value less than planet model minimum"); } - return (int)Math.floor((z - planetModel.getMinimumZValue()) * zFactor + 0.5); + return (int) Math.floor((z - planetModel.getMinimumZValue()) * zFactor + 0.5); } private double decodeZ(final int z) { @@ -700,17 +843,18 @@ public class PlanetModel implements SerializableObject { @Override public boolean equals(final Object o) { - if (!(o instanceof PlanetModel)) + if (!(o instanceof PlanetModel)) { return false; - final PlanetModel other = (PlanetModel)o; + } + final PlanetModel other = (PlanetModel) o; return a == other.a && b == other.b; } - + @Override public int hashCode() { return Double.hashCode(a) + Double.hashCode(b); } - + @Override public String toString() { if (this.equals(SPHERE)) { @@ -720,9 +864,7 @@ public class PlanetModel implements SerializableObject { } else if (this.equals(CLARKE_1866)) { return "PlanetModel.CLARKE_1866"; } else { - return "PlanetModel(xyScaling="+ a +" zScaling="+ b +")"; + return "PlanetModel(xyScaling=" + a + " zScaling=" + b + ")"; } } } - - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/PlanetObject.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/PlanetObject.java index a0c175af68f..1539c84259b 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/PlanetObject.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/PlanetObject.java @@ -26,5 +26,4 @@ public interface PlanetObject extends SerializableObject { /** Returns the {@link PlanetModel} provided when this shape was created. */ PlanetModel getPlanetModel(); - } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/SerializableObject.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/SerializableObject.java index feaeb9a3d98..010c49f2710 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/SerializableObject.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/SerializableObject.java @@ -34,21 +34,28 @@ import java.util.List; */ public interface SerializableObject { - /** Serialize to output stream. + /** + * Serialize to output stream. + * * @param outputStream is the output stream to write to. 
*/ void write(OutputStream outputStream) throws IOException; - /** Write a PlanetObject to a stream. + /** + * Write a PlanetObject to a stream. + * * @param outputStream is the output stream. * @param object is the object to write. */ - public static void writePlanetObject(final OutputStream outputStream, final PlanetObject object) throws IOException { + public static void writePlanetObject(final OutputStream outputStream, final PlanetObject object) + throws IOException { object.getPlanetModel().write(outputStream); writeObject(outputStream, object); } - /** Read a PlanetObject from a stream. + /** + * Read a PlanetObject from a stream. + * * @param inputStream is the input stream. * @return the PlanetObject. */ @@ -56,36 +63,45 @@ public interface SerializableObject { final PlanetModel pm = new PlanetModel(inputStream); final SerializableObject so = readObject(pm, inputStream); if (!(so instanceof PlanetObject)) { - throw new IOException("Type of object is not expected PlanetObject: "+so.getClass().getName()); + throw new IOException( + "Type of object is not expected PlanetObject: " + so.getClass().getName()); } - return (PlanetObject)so; + return (PlanetObject) so; } - /** Write an object to a stream. + /** + * Write an object to a stream. + * * @param outputStream is the output stream. * @param object is the object to write. */ - public static void writeObject(final OutputStream outputStream, final SerializableObject object) throws IOException { + public static void writeObject(final OutputStream outputStream, final SerializableObject object) + throws IOException { writeClass(outputStream, object.getClass()); object.write(outputStream); } - /** Read an object from a stream (for objects that need a PlanetModel). + /** + * Read an object from a stream (for objects that need a PlanetModel). + * * @param planetModel is the planet model to use to deserialize the object. * @param inputStream is the input stream. * @return the deserialized object. */ - public static SerializableObject readObject(final PlanetModel planetModel, final InputStream inputStream) throws IOException { + public static SerializableObject readObject( + final PlanetModel planetModel, final InputStream inputStream) throws IOException { try { // Read the class final Class clazz = readClass(inputStream); return readObject(planetModel, inputStream, clazz); } catch (ClassNotFoundException e) { - throw new IOException("Can't find class for deserialization: "+e.getMessage(), e); + throw new IOException("Can't find class for deserialization: " + e.getMessage(), e); } } - /** Read an object from a stream (for objects that do not need a PlanetModel). + /** + * Read an object from a stream (for objects that do not need a PlanetModel). + * * @param inputStream is the input stream. * @return the deserialized object. */ @@ -95,16 +111,20 @@ public interface SerializableObject { final Class clazz = readClass(inputStream); return readObject(inputStream, clazz); } catch (ClassNotFoundException e) { - throw new IOException("Can't find class for deserialization: "+e.getMessage(), e); + throw new IOException("Can't find class for deserialization: " + e.getMessage(), e); } } - /** Instantiate a serializable object from a stream. + /** + * Instantiate a serializable object from a stream. + * * @param planetModel is the planet model. * @param inputStream is the input stream. * @param clazz is the class to instantiate. 
*/ - static SerializableObject readObject(final PlanetModel planetModel, final InputStream inputStream, final Class clazz) throws IOException { + static SerializableObject readObject( + final PlanetModel planetModel, final InputStream inputStream, final Class clazz) + throws IOException { try { // Look for the right constructor final Constructor c = clazz.getDeclaredConstructor(PlanetModel.class, InputStream.class); @@ -112,26 +132,33 @@ public interface SerializableObject { final Object object = c.newInstance(planetModel, inputStream); // check whether caste will work if (!(object instanceof SerializableObject)) { - throw new IOException("Object "+clazz.getName()+" does not implement SerializableObject"); + throw new IOException( + "Object " + clazz.getName() + " does not implement SerializableObject"); } - return (SerializableObject)object; + return (SerializableObject) object; } catch (InstantiationException e) { - throw new IOException("Instantiation exception for class "+clazz.getName()+": "+e.getMessage(), e); + throw new IOException( + "Instantiation exception for class " + clazz.getName() + ": " + e.getMessage(), e); } catch (IllegalAccessException e) { - throw new IOException("Illegal access creating class "+clazz.getName()+": "+e.getMessage(), e); + throw new IOException( + "Illegal access creating class " + clazz.getName() + ": " + e.getMessage(), e); } catch (NoSuchMethodException e) { - throw new IOException("No such method exception for class "+clazz.getName()+": "+e.getMessage(), e); + throw new IOException( + "No such method exception for class " + clazz.getName() + ": " + e.getMessage(), e); } catch (InvocationTargetException e) { - throw new IOException("Exception instantiating class "+clazz.getName()+": "+e.getMessage(), e); + throw new IOException( + "Exception instantiating class " + clazz.getName() + ": " + e.getMessage(), e); } - } - /** Instantiate a serializable object from a stream without a planet model. + /** + * Instantiate a serializable object from a stream without a planet model. + * * @param inputStream is the input stream. * @param clazz is the class to instantiate. 
*/ - static SerializableObject readObject(final InputStream inputStream, final Class clazz) throws IOException { + static SerializableObject readObject(final InputStream inputStream, final Class clazz) + throws IOException { try { // Look for the right constructor final Constructor c = clazz.getDeclaredConstructor(InputStream.class); @@ -139,32 +166,37 @@ public interface SerializableObject { final Object object = c.newInstance(inputStream); // check whether caste will work if (!(object instanceof SerializableObject)) { - throw new IOException("Object "+clazz.getName()+" does not implement SerializableObject"); + throw new IOException( + "Object " + clazz.getName() + " does not implement SerializableObject"); } - return (SerializableObject)object; + return (SerializableObject) object; } catch (InstantiationException e) { - throw new IOException("Instantiation exception for class "+clazz.getName()+": "+e.getMessage(), e); + throw new IOException( + "Instantiation exception for class " + clazz.getName() + ": " + e.getMessage(), e); } catch (IllegalAccessException e) { - throw new IOException("Illegal access creating class "+clazz.getName()+": "+e.getMessage(), e); + throw new IOException( + "Illegal access creating class " + clazz.getName() + ": " + e.getMessage(), e); } catch (NoSuchMethodException e) { - throw new IOException("No such method exception for class "+clazz.getName()+": "+e.getMessage(), e); + throw new IOException( + "No such method exception for class " + clazz.getName() + ": " + e.getMessage(), e); } catch (InvocationTargetException e) { - throw new IOException("Exception instantiating class "+clazz.getName()+": "+e.getMessage(), e); + throw new IOException( + "Exception instantiating class " + clazz.getName() + ": " + e.getMessage(), e); } - } - /** Write a class to a stream. + /** + * Write a class to a stream. + * * @param outputStream is the output stream. * @param clazz is the class to write. */ static void writeClass(final OutputStream outputStream, final Class clazz) throws IOException { Integer index = StandardObjects.classRegsitry.get(clazz); - if (index == null){ + if (index == null) { writeBoolean(outputStream, false); writeString(outputStream, clazz.getName()); - } - else { + } else { writeBoolean(outputStream, true); outputStream.write(index); } @@ -172,22 +204,25 @@ public interface SerializableObject { /** * Read the class from the stream + * * @param inputStream is the stream to read from. * @return is the class read */ - static Class readClass(final InputStream inputStream) throws IOException, ClassNotFoundException { + static Class readClass(final InputStream inputStream) + throws IOException, ClassNotFoundException { boolean standard = readBoolean(inputStream); if (standard) { int index = inputStream.read(); return StandardObjects.codeRegsitry.get(index); - } - else { + } else { String className = readString(inputStream); - return Class.forName(className); + return Class.forName(className); } } - /** Write a string to a stream. + /** + * Write a string to a stream. + * * @param outputStream is the output stream. * @param value is the string to write. */ @@ -195,7 +230,9 @@ public interface SerializableObject { writeByteArray(outputStream, value.getBytes(StandardCharsets.UTF_8)); } - /** Read a string from a stream. + /** + * Read a string from a stream. + * * @param inputStream is the stream to read from. * @return the string that was read. 
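A hedged sketch of the class-tag format that writeClass/readClass agree on: one boolean byte, then either a single registry-code byte or the class name as a length-prefixed UTF-8 string. The GeoPoint code of 0 matches the StandardObjects registry later in this patch:

    static void classTagDemo() throws IOException, ClassNotFoundException {
      ByteArrayOutputStream out = new ByteArrayOutputStream();
      SerializableObject.writeClass(out, GeoPoint.class);
      // On the wire: { 1, 0 } -- "standard" marker byte, then the registry code byte.
      Class<?> clazz = SerializableObject.readClass(new ByteArrayInputStream(out.toByteArray()));
      assert clazz == GeoPoint.class;
      // An unregistered class is written as { 0, <int length>, <UTF-8 name> } and is
      // read back through Class.forName instead.
    }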
*/ @@ -203,61 +240,82 @@ public interface SerializableObject { return new String(readByteArray(inputStream), StandardCharsets.UTF_8); } - /** Write a point array. + /** + * Write a point array. + * * @param outputStream is the output stream. * @param values is the array of points to write. */ - static void writePointArray(final OutputStream outputStream, final GeoPoint[] values) throws IOException { + static void writePointArray(final OutputStream outputStream, final GeoPoint[] values) + throws IOException { writeHomogeneousArray(outputStream, values); } - /** Write a point array. + /** + * Write a point array. + * * @param outputStream is the output stream. * @param values is the list of points to write. */ - static void writePointArray(final OutputStream outputStream, final List values) throws IOException { + static void writePointArray(final OutputStream outputStream, final List values) + throws IOException { writeHomogeneousArray(outputStream, values); } - /** Read a point array. + /** + * Read a point array. + * * @param planetModel is the planet model. * @param inputStream is the input stream. * @return the array of points that was read. */ - static GeoPoint[] readPointArray(final PlanetModel planetModel, final InputStream inputStream) throws IOException { + static GeoPoint[] readPointArray(final PlanetModel planetModel, final InputStream inputStream) + throws IOException { return readHomogeneousArray(planetModel, inputStream, GeoPoint.class); } - /** Write a polgon array. + /** + * Write a polygon array. + * * @param outputStream is the output stream. * @param values is the array of points to write. */ - static void writePolygonArray(final OutputStream outputStream, final GeoPolygon[] values) throws IOException { + static void writePolygonArray(final OutputStream outputStream, final GeoPolygon[] values) + throws IOException { writeHeterogeneousArray(outputStream, values); } - /** Write a polygon array. + /** + * Write a polygon array. + * * @param outputStream is the output stream. * @param values is the list of points to write. */ - static void writePolygonArray(final OutputStream outputStream, final List values) throws IOException { + static void writePolygonArray(final OutputStream outputStream, final List values) + throws IOException { writeHeterogeneousArray(outputStream, values); } - /** Read a polygon array. + /** + * Read a polygon array. + * * @param planetModel is the planet model. * @param inputStream is the input stream. * @return the array of polygons that was read. */ - static GeoPolygon[] readPolygonArray(final PlanetModel planetModel, final InputStream inputStream) throws IOException { + static GeoPolygon[] readPolygonArray(final PlanetModel planetModel, final InputStream inputStream) + throws IOException { return readHeterogeneousArray(planetModel, inputStream, GeoPolygon.class); } - /** Write an array. + /** + * Write an array. + * * @param outputStream is the output stream,. * @param values is the array. */ - static void writeHomogeneousArray(final OutputStream outputStream, final SerializableObject[] values) throws IOException { + static void writeHomogeneousArray( + final OutputStream outputStream, final SerializableObject[] values) throws IOException { if (values == null) { writeInt(outputStream, 0); } else { @@ -268,11 +326,15 @@ public interface SerializableObject { } } - /** Write an array. + /** + * Write an array. + * * @param outputStream is the output stream,. * @param values is the array.
*/ - static void writeHomogeneousArray(final OutputStream outputStream, final List values) throws IOException { + static void writeHomogeneousArray( + final OutputStream outputStream, final List values) + throws IOException { if (values == null) { writeInt(outputStream, 0); } else { @@ -283,27 +345,34 @@ public interface SerializableObject { } } - /** Read an array. + /** + * Read an array. + * * @param planetModel is the planet model. * @param inputStream is the input stream. * @param clazz is the class of the objects to read. * @return the array. */ - static T[] readHomogeneousArray(final PlanetModel planetModel, final InputStream inputStream, final Class clazz) throws IOException { + static T[] readHomogeneousArray( + final PlanetModel planetModel, final InputStream inputStream, final Class clazz) + throws IOException { final int count = readInt(inputStream); @SuppressWarnings("unchecked") - final T[] rval = (T[])Array.newInstance(clazz, count); + final T[] rval = (T[]) Array.newInstance(clazz, count); for (int i = 0; i < count; i++) { rval[i] = clazz.cast(readObject(planetModel, inputStream, clazz)); } return rval; } - /** Write an array. + /** + * Write an array. + * * @param outputStream is the output stream,. * @param values is the array. */ - static void writeHeterogeneousArray(final OutputStream outputStream, final SerializableObject[] values) throws IOException { + static void writeHeterogeneousArray( + final OutputStream outputStream, final SerializableObject[] values) throws IOException { if (values == null) { writeInt(outputStream, 0); } else { @@ -314,11 +383,15 @@ public interface SerializableObject { } } - /** Write an array. + /** + * Write an array. + * * @param outputStream is the output stream,. * @param values is the array. */ - static void writeHeterogeneousArray(final OutputStream outputStream, final List values) throws IOException { + static void writeHeterogeneousArray( + final OutputStream outputStream, final List values) + throws IOException { if (values == null) { writeInt(outputStream, 0); } else { @@ -329,22 +402,28 @@ public interface SerializableObject { } } - /** Read an array. + /** + * Read an array. + * * @param planetModel is the planet model. * @param inputStream is the input stream. * @return the array. */ - static T[] readHeterogeneousArray(final PlanetModel planetModel, final InputStream inputStream, final Class clazz) throws IOException { + static T[] readHeterogeneousArray( + final PlanetModel planetModel, final InputStream inputStream, final Class clazz) + throws IOException { final int count = readInt(inputStream); @SuppressWarnings("unchecked") - final T[] rval = (T[])Array.newInstance(clazz, count); + final T[] rval = (T[]) Array.newInstance(clazz, count); for (int i = 0; i < count; i++) { rval[i] = clazz.cast(readObject(planetModel, inputStream)); } return rval; } - /** Write a bitset to a stream. + /** + * Write a bitset to a stream. + * * @param outputStream is the output stream. * @param bitSet is the bit set to write. */ @@ -352,7 +431,9 @@ public interface SerializableObject { writeByteArray(outputStream, bitSet.toByteArray()); } - /** Read a bitset from a stream. + /** + * Read a bitset from a stream. + * * @param inputStream is the input stream. * @return the bitset read from the stream. */ @@ -360,16 +441,21 @@ public interface SerializableObject { return BitSet.valueOf(readByteArray(inputStream)); } - /** Write byte array. + /** + * Write byte array. + * * @param outputStream is the output stream. * @param bytes is the byte array. 
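A hedged summary of the two array families above: homogeneous arrays store only a count plus element payloads (the caller supplies the element class on read), while heterogeneous arrays store a class tag before every element so mixed GeoPolygon implementations survive the round trip. The streams here are assumed to be positioned at a serialized array:

    static void readArraysBack(
        PlanetModel planetModel, InputStream pointStream, InputStream polygonStream)
        throws IOException {
      // homogeneous:   [int count][payload 0][payload 1]...
      GeoPoint[] points = SerializableObject.readPointArray(planetModel, pointStream);
      // heterogeneous: [int count][class tag 0][payload 0][class tag 1][payload 1]...
      GeoPolygon[] polygons = SerializableObject.readPolygonArray(planetModel, polygonStream);
    }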
*/ - static void writeByteArray(final OutputStream outputStream, final byte[] bytes) throws IOException { + static void writeByteArray(final OutputStream outputStream, final byte[] bytes) + throws IOException { writeInt(outputStream, bytes.length); outputStream.write(bytes); } - /** Read byte array. + /** + * Read byte array. + * * @param inputStream is the input stream. * @return the byte array. */ @@ -388,7 +474,9 @@ public interface SerializableObject { return bytes; } - /** Write a double to a stream. + /** + * Write a double to a stream. + * * @param outputStream is the output stream. * @param value is the value to write. */ @@ -396,7 +484,9 @@ public interface SerializableObject { writeLong(outputStream, Double.doubleToLongBits(value)); } - /** Read a double from a stream. + /** + * Read a double from a stream. + * * @param inputStream is the input stream. * @return the double value read from the stream. */ @@ -404,26 +494,32 @@ public interface SerializableObject { return Double.longBitsToDouble(readLong(inputStream)); } - /** Write a long to a stream. + /** + * Write a long to a stream. + * * @param outputStream is the output stream. * @param value is the value to write. */ static void writeLong(final OutputStream outputStream, final long value) throws IOException { - writeInt(outputStream, (int)value); - writeInt(outputStream, (int)(value >> 32)); + writeInt(outputStream, (int) value); + writeInt(outputStream, (int) (value >> 32)); } - /** Read a long from a stream. + /** + * Read a long from a stream. + * * @param inputStream is the input stream. * @return the long value read from the stream. */ static long readLong(final InputStream inputStream) throws IOException { - final long lower = ((long)(readInt(inputStream))) & 0x00000000ffffffffL; - final long upper = (((long)(readInt(inputStream))) << 32) & 0xffffffff00000000L; + final long lower = ((long) (readInt(inputStream))) & 0x00000000ffffffffL; + final long upper = (((long) (readInt(inputStream))) << 32) & 0xffffffff00000000L; return lower + upper; } - /** Write an int to a stream. + /** + * Write an int to a stream. + * * @param outputStream is the output stream. * @param value is the value to write. */ @@ -434,7 +530,9 @@ public interface SerializableObject { outputStream.write(value >> 24); } - /** Read an int from a stream. + /** + * Read an int from a stream. + * * @param inputStream is the input stream. * @return the value read from the stream. */ @@ -446,15 +544,20 @@ public interface SerializableObject { return l1 + l2 + l3 + l4; } - /** Write a boolean to a stream. + /** + * Write a boolean to a stream. + * * @param outputStream is the output stream. * @param value is the value to write. */ - static void writeBoolean(final OutputStream outputStream, final boolean value) throws IOException { - outputStream.write(value?1:0); + static void writeBoolean(final OutputStream outputStream, final boolean value) + throws IOException { + outputStream.write(value ? 1 : 0); } - /** Read a boolean from a stream. + /** + * Read a boolean from a stream. + * * @param inputStream is the input stream. * @return the boolean value. */ @@ -463,8 +566,6 @@ public interface SerializableObject { if (valueRead == -1) { throw new IOException("Unexpected end of input stream"); } - return (valueRead == 0)?false:true; + return (valueRead == 0) ? 
false : true; } - } - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/SidedPlane.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/SidedPlane.java index 5b22fc4dbdc..e7fc7f01f3c 100755 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/SidedPlane.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/SidedPlane.java @@ -37,8 +37,8 @@ public class SidedPlane extends Plane implements Membership { } /** - * Construct a sided plane from a pair of vectors describing points, and including - * origin, plus a point p which describes the side. + * Construct a sided plane from a pair of vectors describing points, and including origin, plus a + * point p which describes the side. * * @param pX point X to evaluate * @param pY point Y to evaluate @@ -46,16 +46,19 @@ public class SidedPlane extends Plane implements Membership { * @param A is the first in-plane point * @param B is the second in-plane point */ - public SidedPlane(final double pX, final double pY, final double pZ, final Vector A, final Vector B) { + public SidedPlane( + final double pX, final double pY, final double pZ, final Vector A, final Vector B) { super(A, B); sigNum = Math.signum(evaluate(pX, pY, pZ)); - if (sigNum == 0.0) - throw new IllegalArgumentException("Cannot determine sidedness because check point is on plane."); + if (sigNum == 0.0) { + throw new IllegalArgumentException( + "Cannot determine sidedness because check point is on plane."); + } } /** - * Construct a sided plane from a pair of vectors describing points, and including - * origin, plus a point p which describes the side. + * Construct a sided plane from a pair of vectors describing points, and including origin, plus a + * point p which describes the side. * * @param p point to evaluate * @param A is the first in-plane point @@ -64,13 +67,15 @@ public class SidedPlane extends Plane implements Membership { public SidedPlane(final Vector p, final Vector A, final Vector B) { super(A, B); sigNum = Math.signum(evaluate(p)); - if (sigNum == 0.0) - throw new IllegalArgumentException("Cannot determine sidedness because check point is on plane."); + if (sigNum == 0.0) { + throw new IllegalArgumentException( + "Cannot determine sidedness because check point is on plane."); + } } /** - * Construct a sided plane from a pair of vectors describing points, and including - * origin. Choose the side arbitrarily. + * Construct a sided plane from a pair of vectors describing points, and including origin. Choose + * the side arbitrarily. * * @param A is the first in-plane point * @param B is the second in-plane point @@ -81,8 +86,8 @@ public class SidedPlane extends Plane implements Membership { } /** - * Construct a sided plane from a pair of vectors describing points, and including - * origin, plus a point p which describes the side. + * Construct a sided plane from a pair of vectors describing points, and including origin, plus a + * point p which describes the side. 
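Every constructor in this class enforces the same invariant: the sign of evaluate() at the check point fixes which half-space counts as inside, and membership later reduces to a sign comparison. A self-contained sketch on the z = 0 plane (values illustrative):

    Vector insidePoint = new Vector(0.0, 0.0, 1.0);
    Vector a = new Vector(1.0, 0.0, 0.0);
    Vector b = new Vector(0.0, 1.0, 0.0);
    // Plane through the origin containing a and b, i.e. the z = 0 plane;
    // sigNum becomes +1 because evaluate(insidePoint) = 1.0 > 0.
    SidedPlane plane = new SidedPlane(insidePoint, a, b);
    assert plane.isWithin(0.0, 0.0, 0.5);   // same side as the check point
    assert !plane.isWithin(0.0, 0.0, -0.5); // opposite side
    // Points within MINIMUM_RESOLUTION of the plane itself also count as within.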
* * @param p point to evaluate * @param A is the first in-plane point @@ -90,16 +95,19 @@ public class SidedPlane extends Plane implements Membership { * @param BY is the Y value of the second in-plane point * @param BZ is the Z value of the second in-plane point */ - public SidedPlane(final Vector p, final Vector A, final double BX, final double BY, final double BZ) { + public SidedPlane( + final Vector p, final Vector A, final double BX, final double BY, final double BZ) { super(A, BX, BY, BZ); sigNum = Math.signum(evaluate(p)); - if (sigNum == 0.0) - throw new IllegalArgumentException("Cannot determine sidedness because check point is on plane."); + if (sigNum == 0.0) { + throw new IllegalArgumentException( + "Cannot determine sidedness because check point is on plane."); + } } /** - * Construct a sided plane from a pair of vectors describing points, and including - * origin, plus a point p which describes the side. + * Construct a sided plane from a pair of vectors describing points, and including origin, plus a + * point p which describes the side. * * @param p point to evaluate * @param onSide is true if the point is on the correct side of the plane, false otherwise. @@ -108,23 +116,27 @@ public class SidedPlane extends Plane implements Membership { */ public SidedPlane(final Vector p, final boolean onSide, final Vector A, final Vector B) { super(A, B); - sigNum = onSide?Math.signum(evaluate(p)):-Math.signum(evaluate(p)); - if (sigNum == 0.0) - throw new IllegalArgumentException("Cannot determine sidedness because check point is on plane."); + sigNum = onSide ? Math.signum(evaluate(p)) : -Math.signum(evaluate(p)); + if (sigNum == 0.0) { + throw new IllegalArgumentException( + "Cannot determine sidedness because check point is on plane."); + } } /** * Construct a sided plane from a point and a Z coordinate. * - * @param p point to evaluate. + * @param p point to evaluate. * @param planetModel is the planet model. * @param sinLat is the sin of the latitude of the plane. 
*/ public SidedPlane(Vector p, final PlanetModel planetModel, double sinLat) { super(planetModel, sinLat); sigNum = Math.signum(evaluate(p)); - if (sigNum == 0.0) - throw new IllegalArgumentException("Cannot determine sidedness because check point is on plane."); + if (sigNum == 0.0) { + throw new IllegalArgumentException( + "Cannot determine sidedness because check point is on plane."); + } } /** @@ -137,8 +149,10 @@ public class SidedPlane extends Plane implements Membership { public SidedPlane(Vector p, double x, double y) { super(x, y); sigNum = Math.signum(evaluate(p)); - if (sigNum == 0.0) - throw new IllegalArgumentException("Cannot determine sidedness because check point is on plane."); + if (sigNum == 0.0) { + throw new IllegalArgumentException( + "Cannot determine sidedness because check point is on plane."); + } } /** @@ -153,8 +167,10 @@ public class SidedPlane extends Plane implements Membership { public SidedPlane(Vector p, double vX, double vY, double vZ, double D) { super(vX, vY, vZ, D); sigNum = Math.signum(evaluate(p)); - if (sigNum == 0.0) - throw new IllegalArgumentException("Cannot determine sidedness because check point is on plane."); + if (sigNum == 0.0) { + throw new IllegalArgumentException( + "Cannot determine sidedness because check point is on plane."); + } } /** @@ -167,8 +183,10 @@ public class SidedPlane extends Plane implements Membership { public SidedPlane(Vector p, Vector v, double D) { super(v, D); sigNum = Math.signum(evaluate(p)); - if (sigNum == 0.0) - throw new IllegalArgumentException("Cannot determine sidedness because check point is on plane."); + if (sigNum == 0.0) { + throw new IllegalArgumentException( + "Cannot determine sidedness because check point is on plane."); + } } /** @@ -182,46 +200,61 @@ public class SidedPlane extends Plane implements Membership { */ public SidedPlane(double pX, double pY, double pZ, Vector v, double D) { super(v, D); - sigNum = Math.signum(evaluate(pX,pY,pZ)); - if (sigNum == 0.0) - throw new IllegalArgumentException("Cannot determine sidedness because check point is on plane."); + sigNum = Math.signum(evaluate(pX, pY, pZ)); + if (sigNum == 0.0) { + throw new IllegalArgumentException( + "Cannot determine sidedness because check point is on plane."); + } } - /** Construct a sided plane from two points and a third normal vector. - */ - public static SidedPlane constructNormalizedPerpendicularSidedPlane(final Vector insidePoint, - final Vector normalVector, final Vector point1, final Vector point2) { - final Vector pointsVector = new Vector(point1.x - point2.x, point1.y - point2.y, point1.z - point2.z); + /** Construct a sided plane from two points and a third normal vector. */ + public static SidedPlane constructNormalizedPerpendicularSidedPlane( + final Vector insidePoint, + final Vector normalVector, + final Vector point1, + final Vector point2) { + final Vector pointsVector = + new Vector(point1.x - point2.x, point1.y - point2.y, point1.z - point2.z); final Vector newNormalVector = new Vector(normalVector, pointsVector); try { - // To construct the plane, we now just need D, which is simply the negative of the evaluation of the circle normal vector at one of the points. + // To construct the plane, we now just need D, which is simply the negative of the evaluation + // of the circle normal vector at one of the points. return new SidedPlane(insidePoint, newNormalVector, -newNormalVector.dotProduct(point1)); } catch (IllegalArgumentException e) { return null; } } - - /** Construct a sided plane from three points. 
- */ - public static SidedPlane constructNormalizedThreePointSidedPlane(final Vector insidePoint, - final Vector point1, final Vector point2, final Vector point3) { + + /** Construct a sided plane from three points. */ + public static SidedPlane constructNormalizedThreePointSidedPlane( + final Vector insidePoint, final Vector point1, final Vector point2, final Vector point3) { SidedPlane rval = null; - + if (rval == null) { try { - final Vector planeNormal = new Vector( - point1.x - point2.x, point1.y - point2.y, point1.z - point2.z, - point2.x - point3.x, point2.y - point3.y, point2.z - point3.z); + final Vector planeNormal = + new Vector( + point1.x - point2.x, + point1.y - point2.y, + point1.z - point2.z, + point2.x - point3.x, + point2.y - point3.y, + point2.z - point3.z); rval = new SidedPlane(insidePoint, planeNormal, -planeNormal.dotProduct(point2)); } catch (IllegalArgumentException e) { } } - + if (rval == null) { try { - final Vector planeNormal = new Vector( - point1.x - point3.x, point1.y - point3.y, point1.z - point3.z, - point3.x - point2.x, point3.y - point2.y, point3.z - point2.z); + final Vector planeNormal = + new Vector( + point1.x - point3.x, + point1.y - point3.y, + point1.z - point3.z, + point3.x - point2.x, + point3.y - point2.y, + point3.z - point2.z); rval = new SidedPlane(insidePoint, planeNormal, -planeNormal.dotProduct(point3)); } catch (IllegalArgumentException e) { } @@ -229,29 +262,36 @@ public class SidedPlane extends Plane implements Membership { if (rval == null) { try { - final Vector planeNormal = new Vector( - point3.x - point1.x, point3.y - point1.y, point3.z - point1.z, - point1.x - point2.x, point1.y - point2.y, point1.z - point2.z); + final Vector planeNormal = + new Vector( + point3.x - point1.x, + point3.y - point1.y, + point3.z - point1.z, + point1.x - point2.x, + point1.y - point2.y, + point1.z - point2.z); rval = new SidedPlane(insidePoint, planeNormal, -planeNormal.dotProduct(point1)); } catch (IllegalArgumentException e) { } } - + return rval; } @Override public boolean isWithin(double x, double y, double z) { double evalResult = evaluate(x, y, z); - //System.out.println(Math.abs(evalResult)); - if (Math.abs(evalResult) < MINIMUM_RESOLUTION) + // System.out.println(Math.abs(evalResult)); + if (Math.abs(evalResult) < MINIMUM_RESOLUTION) { return true; + } double sigNum = Math.signum(evalResult); return sigNum == this.sigNum; } /** * Check whether a point is strictly within a plane. + * * @param v is the point. * @return true if within. */ @@ -263,6 +303,7 @@ public class SidedPlane extends Plane implements Membership { /** * Check whether a point is strictly within a plane. + * * @param x is the point x value. * @param y is the point y value. * @param z is the point z value. 
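A hedged sketch of the fallback idea in constructNormalizedThreePointSidedPlane above: each attempt derives the plane normal from a different pairing of edge vectors, so a numerically degenerate pairing (a nearly collinear triple of differences) can still be rescued by another ordering, and only if all three attempts fail does the method return null:

    Vector inside = new Vector(0.0, 0.0, 1.0);
    Vector p1 = new Vector(1.0, 0.0, 0.0);
    Vector p2 = new Vector(0.0, 1.0, 0.0);
    Vector p3 = new Vector(-1.0, 0.0, 0.0);
    // All three points lie in the z = 0 plane; the first pairing already yields
    // the normal (0, 0, 1), so no fallback is needed in this case.
    SidedPlane plane = SidedPlane.constructNormalizedThreePointSidedPlane(inside, p1, p2, p3);
    assert plane != null && plane.isWithin(inside);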
@@ -276,14 +317,19 @@ public class SidedPlane extends Plane implements Membership { @Override public boolean equals(Object o) { - if (this == o) return true; - if (!(o instanceof SidedPlane)) return false; - if (!super.equals(o)) return false; + if (this == o) { + return true; + } + if (!(o instanceof SidedPlane)) { + return false; + } + if (!super.equals(o)) { + return false; + } SidedPlane that = (SidedPlane) o; return Double.compare(that.sigNum, sigNum) == 0; - } @Override @@ -300,4 +346,3 @@ public class SidedPlane extends Plane implements Membership { return "[A=" + x + ", B=" + y + ", C=" + z + ", D=" + D + ", side=" + sigNum + "]"; } } - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/StandardObjects.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/StandardObjects.java index 3283dbcb965..707845df8f5 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/StandardObjects.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/StandardObjects.java @@ -27,58 +27,54 @@ import java.util.Map; */ class StandardObjects { - /** - * Registry of standard classes to corresponding code - */ + /** Registry of standard classes to corresponding code */ static Map, Integer> classRegsitry = new HashMap<>(); - /** - * Registry of codes to corresponding classes - */ + /** Registry of codes to corresponding classes */ static Map> codeRegsitry = new HashMap<>(); static { - classRegsitry.put(GeoPoint.class, 0); - classRegsitry.put(GeoRectangle.class, 1); - classRegsitry.put(GeoStandardCircle.class, 2); - classRegsitry.put(GeoStandardPath.class, 3); - classRegsitry.put(GeoConvexPolygon.class, 4); - classRegsitry.put(GeoConcavePolygon.class, 5); - classRegsitry.put(GeoComplexPolygon.class, 6); - classRegsitry.put(GeoCompositePolygon.class, 7); - classRegsitry.put(GeoCompositeMembershipShape.class, 8); - classRegsitry.put(GeoCompositeAreaShape.class, 9); - classRegsitry.put(GeoDegeneratePoint.class, 10); - classRegsitry.put(GeoDegenerateHorizontalLine.class, 11); - classRegsitry.put(GeoDegenerateLatitudeZone.class, 12); - classRegsitry.put(GeoDegenerateLongitudeSlice.class, 13); - classRegsitry.put(GeoDegenerateVerticalLine.class, 14); - classRegsitry.put(GeoLatitudeZone.class, 15); - classRegsitry.put(GeoLongitudeSlice.class, 16); - classRegsitry.put(GeoNorthLatitudeZone.class, 17); - classRegsitry.put(GeoNorthRectangle.class, 18); - classRegsitry.put(GeoSouthLatitudeZone.class, 19); - classRegsitry.put(GeoSouthRectangle.class, 20); - classRegsitry.put(GeoWideDegenerateHorizontalLine.class, 21); - classRegsitry.put(GeoWideLongitudeSlice.class, 22); - classRegsitry.put(GeoWideNorthRectangle.class, 23); - classRegsitry.put(GeoWideRectangle.class, 24); - classRegsitry.put(GeoWideSouthRectangle.class, 25); - classRegsitry.put(GeoWorld.class, 26); - classRegsitry.put(dXdYdZSolid.class, 27); - classRegsitry.put(dXdYZSolid.class, 28); - classRegsitry.put(dXYdZSolid.class, 29); - classRegsitry.put(dXYZSolid.class, 30); - classRegsitry.put(XdYdZSolid.class, 31); - classRegsitry.put(XdYZSolid.class, 32); - classRegsitry.put(XYdZSolid.class, 33); - classRegsitry.put(StandardXYZSolid.class, 34); - classRegsitry.put(PlanetModel.class, 35); - classRegsitry.put(GeoDegeneratePath.class, 36); - classRegsitry.put(GeoExactCircle.class, 37); - classRegsitry.put(GeoS2Shape.class, 38); + classRegsitry.put(GeoPoint.class, 0); + classRegsitry.put(GeoRectangle.class, 1); + classRegsitry.put(GeoStandardCircle.class, 2); + classRegsitry.put(GeoStandardPath.class, 
3); + classRegsitry.put(GeoConvexPolygon.class, 4); + classRegsitry.put(GeoConcavePolygon.class, 5); + classRegsitry.put(GeoComplexPolygon.class, 6); + classRegsitry.put(GeoCompositePolygon.class, 7); + classRegsitry.put(GeoCompositeMembershipShape.class, 8); + classRegsitry.put(GeoCompositeAreaShape.class, 9); + classRegsitry.put(GeoDegeneratePoint.class, 10); + classRegsitry.put(GeoDegenerateHorizontalLine.class, 11); + classRegsitry.put(GeoDegenerateLatitudeZone.class, 12); + classRegsitry.put(GeoDegenerateLongitudeSlice.class, 13); + classRegsitry.put(GeoDegenerateVerticalLine.class, 14); + classRegsitry.put(GeoLatitudeZone.class, 15); + classRegsitry.put(GeoLongitudeSlice.class, 16); + classRegsitry.put(GeoNorthLatitudeZone.class, 17); + classRegsitry.put(GeoNorthRectangle.class, 18); + classRegsitry.put(GeoSouthLatitudeZone.class, 19); + classRegsitry.put(GeoSouthRectangle.class, 20); + classRegsitry.put(GeoWideDegenerateHorizontalLine.class, 21); + classRegsitry.put(GeoWideLongitudeSlice.class, 22); + classRegsitry.put(GeoWideNorthRectangle.class, 23); + classRegsitry.put(GeoWideRectangle.class, 24); + classRegsitry.put(GeoWideSouthRectangle.class, 25); + classRegsitry.put(GeoWorld.class, 26); + classRegsitry.put(dXdYdZSolid.class, 27); + classRegsitry.put(dXdYZSolid.class, 28); + classRegsitry.put(dXYdZSolid.class, 29); + classRegsitry.put(dXYZSolid.class, 30); + classRegsitry.put(XdYdZSolid.class, 31); + classRegsitry.put(XdYZSolid.class, 32); + classRegsitry.put(XYdZSolid.class, 33); + classRegsitry.put(StandardXYZSolid.class, 34); + classRegsitry.put(PlanetModel.class, 35); + classRegsitry.put(GeoDegeneratePath.class, 36); + classRegsitry.put(GeoExactCircle.class, 37); + classRegsitry.put(GeoS2Shape.class, 38); - for (Map.Entry, Integer> entry : classRegsitry.entrySet()){ - codeRegsitry.put(entry.getValue(), entry.getKey()); - } + for (Map.Entry, Integer> entry : classRegsitry.entrySet()) { + codeRegsitry.put(entry.getValue(), entry.getKey()); + } } } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/StandardXYZSolid.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/StandardXYZSolid.java index 8ad21e67132..c55c9e3e5cf 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/StandardXYZSolid.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/StandardXYZSolid.java @@ -16,9 +16,9 @@ */ package org.apache.lucene.spatial3d.geom; +import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.io.IOException; /** * 3D rectangle, bounded on six sides by X,Y,Z limits @@ -54,7 +54,7 @@ class StandardXYZSolid extends BaseXYZSolid { protected final SidedPlane minZPlane; /** Max-Z plane */ protected final SidedPlane maxZPlane; - + /** true if minXPlane intersects globe */ protected final boolean minXPlaneIntersects; /** true if maxXPlane intersects globe */ @@ -68,10 +68,11 @@ class StandardXYZSolid extends BaseXYZSolid { /** true if maxZPlane intersects globe */ protected final boolean maxZPlaneIntersects; - /** These are the edge points of the shape, which are defined to be at least one point on - * each surface area boundary. In the case of a solid, this includes points which represent - * the intersection of XYZ bounding planes and the planet, as well as points representing - * the intersection of single bounding planes with the planet itself. + /** + * These are the edge points of the shape, which are defined to be at least one point on each + * surface area boundary. 
In the case of a solid, this includes points which represent the + * intersection of XYZ bounding planes and the planet, as well as points representing the + * intersection of single bounding planes with the planet itself. */ protected final GeoPoint[] edgePoints; @@ -91,21 +92,22 @@ class StandardXYZSolid extends BaseXYZSolid { /** * Sole constructor * - *@param planetModel is the planet model. - *@param minX is the minimum X value. - *@param maxX is the maximum X value. - *@param minY is the minimum Y value. - *@param maxY is the maximum Y value. - *@param minZ is the minimum Z value. - *@param maxZ is the maximum Z value. + * @param planetModel is the planet model. + * @param minX is the minimum X value. + * @param maxX is the maximum X value. + * @param minY is the minimum Y value. + * @param maxY is the maximum Y value. + * @param minZ is the minimum Z value. + * @param maxZ is the maximum Z value. */ - public StandardXYZSolid(final PlanetModel planetModel, - final double minX, - final double maxX, - final double minY, - final double maxY, - final double minZ, - final double maxZ) { + public StandardXYZSolid( + final PlanetModel planetModel, + final double minX, + final double maxX, + final double minY, + final double maxY, + final double minZ, + final double maxZ) { super(planetModel); // Argument checking if (maxX - minX < Vector.MINIMUM_RESOLUTION) @@ -121,24 +123,24 @@ class StandardXYZSolid extends BaseXYZSolid { this.maxY = maxY; this.minZ = minZ; this.maxZ = maxZ; - + final double worldMinX = planetModel.getMinimumXValue(); final double worldMaxX = planetModel.getMaximumXValue(); final double worldMinY = planetModel.getMinimumYValue(); final double worldMaxY = planetModel.getMaximumYValue(); final double worldMinZ = planetModel.getMinimumZValue(); final double worldMaxZ = planetModel.getMaximumZValue(); - + // We must distinguish between the case where the solid represents the entire world, // and when the solid has no overlap with any part of the surface. In both cases, // there will be no edgepoints. 
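A hedged illustration of the whole-world test computed just below, using a unit sphere (world extents of ±1.0) for the numbers: every bound must clear the corresponding world extent by more than Vector.MINIMUM_RESOLUTION on its own side, for all six bounds at once:

    double minX = -1.5, maxX = 1.5; // and likewise for Y and Z
    boolean coversX =
        (minX - (-1.0) < -Vector.MINIMUM_RESOLUTION) && (maxX - 1.0 > Vector.MINIMUM_RESOLUTION);
    // isWholeWorld is the conjunction of coversX, coversY, and coversZ; when it is
    // true the solid has no boundary on the surface, hence no edge points.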
isWholeWorld = - (minX - worldMinX < -Vector.MINIMUM_RESOLUTION) && - (maxX - worldMaxX > Vector.MINIMUM_RESOLUTION) && - (minY - worldMinY < -Vector.MINIMUM_RESOLUTION) && - (maxY - worldMaxY > Vector.MINIMUM_RESOLUTION) && - (minZ - worldMinZ < -Vector.MINIMUM_RESOLUTION) && - (maxZ - worldMaxZ > Vector.MINIMUM_RESOLUTION); + (minX - worldMinX < -Vector.MINIMUM_RESOLUTION) + && (maxX - worldMaxX > Vector.MINIMUM_RESOLUTION) + && (minY - worldMinY < -Vector.MINIMUM_RESOLUTION) + && (maxY - worldMaxY > Vector.MINIMUM_RESOLUTION) + && (minZ - worldMinZ < -Vector.MINIMUM_RESOLUTION) + && (maxZ - worldMaxZ > Vector.MINIMUM_RESOLUTION); if (isWholeWorld) { minXPlane = null; @@ -162,32 +164,56 @@ class StandardXYZSolid extends BaseXYZSolid { edgePoints = null; } else { // Construct the planes - minXPlane = new SidedPlane(maxX,0.0,0.0,xUnitVector,-minX); - maxXPlane = new SidedPlane(minX,0.0,0.0,xUnitVector,-maxX); - minYPlane = new SidedPlane(0.0,maxY,0.0,yUnitVector,-minY); - maxYPlane = new SidedPlane(0.0,minY,0.0,yUnitVector,-maxY); - minZPlane = new SidedPlane(0.0,0.0,maxZ,zUnitVector,-minZ); - maxZPlane = new SidedPlane(0.0,0.0,minZ,zUnitVector,-maxZ); - + minXPlane = new SidedPlane(maxX, 0.0, 0.0, xUnitVector, -minX); + maxXPlane = new SidedPlane(minX, 0.0, 0.0, xUnitVector, -maxX); + minYPlane = new SidedPlane(0.0, maxY, 0.0, yUnitVector, -minY); + maxYPlane = new SidedPlane(0.0, minY, 0.0, yUnitVector, -maxY); + minZPlane = new SidedPlane(0.0, 0.0, maxZ, zUnitVector, -minZ); + maxZPlane = new SidedPlane(0.0, 0.0, minZ, zUnitVector, -maxZ); + // We need at least one point on the planet surface for each manifestation of the shape. // There can be up to 2 (on opposite sides of the world). But we have to go through - // 12 combinations of adjacent planes in order to find out if any have 2 intersection solution. - // Typically, this requires 12 square root operations. - final GeoPoint[] minXminY = minXPlane.findIntersections(planetModel,minYPlane,maxXPlane,maxYPlane,minZPlane,maxZPlane); - final GeoPoint[] minXmaxY = minXPlane.findIntersections(planetModel,maxYPlane,maxXPlane,minYPlane,minZPlane,maxZPlane); - final GeoPoint[] minXminZ = minXPlane.findIntersections(planetModel,minZPlane,maxXPlane,maxZPlane,minYPlane,maxYPlane); - final GeoPoint[] minXmaxZ = minXPlane.findIntersections(planetModel,maxZPlane,maxXPlane,minZPlane,minYPlane,maxYPlane); + // 12 combinations of adjacent planes in order to find out if any have 2 intersection + // solution. Typically, this requires 12 square root operations. 
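One of the twelve corner probes that follow, annotated; the arguments after the second plane are Membership bounds that filter the candidate intersection points:

    final GeoPoint[] corner =
        minXPlane.findIntersections(
            planetModel, minYPlane, // intersect these two planes with the surface
            maxXPlane, maxYPlane,   // keep only points inside the opposite X and Y bounds
            minZPlane, maxZPlane);  // and inside both Z bounds
    // corner.length is 0, 1, or 2: two planes can meet an ellipsoid in at most two points.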
+ final GeoPoint[] minXminY = + minXPlane.findIntersections( + planetModel, minYPlane, maxXPlane, maxYPlane, minZPlane, maxZPlane); + final GeoPoint[] minXmaxY = + minXPlane.findIntersections( + planetModel, maxYPlane, maxXPlane, minYPlane, minZPlane, maxZPlane); + final GeoPoint[] minXminZ = + minXPlane.findIntersections( + planetModel, minZPlane, maxXPlane, maxZPlane, minYPlane, maxYPlane); + final GeoPoint[] minXmaxZ = + minXPlane.findIntersections( + planetModel, maxZPlane, maxXPlane, minZPlane, minYPlane, maxYPlane); + + final GeoPoint[] maxXminY = + maxXPlane.findIntersections( + planetModel, minYPlane, minXPlane, maxYPlane, minZPlane, maxZPlane); + final GeoPoint[] maxXmaxY = + maxXPlane.findIntersections( + planetModel, maxYPlane, minXPlane, minYPlane, minZPlane, maxZPlane); + final GeoPoint[] maxXminZ = + maxXPlane.findIntersections( + planetModel, minZPlane, minXPlane, maxZPlane, minYPlane, maxYPlane); + final GeoPoint[] maxXmaxZ = + maxXPlane.findIntersections( + planetModel, maxZPlane, minXPlane, minZPlane, minYPlane, maxYPlane); + + final GeoPoint[] minYminZ = + minYPlane.findIntersections( + planetModel, minZPlane, maxYPlane, maxZPlane, minXPlane, maxXPlane); + final GeoPoint[] minYmaxZ = + minYPlane.findIntersections( + planetModel, maxZPlane, maxYPlane, minZPlane, minXPlane, maxXPlane); + final GeoPoint[] maxYminZ = + maxYPlane.findIntersections( + planetModel, minZPlane, minYPlane, maxZPlane, minXPlane, maxXPlane); + final GeoPoint[] maxYmaxZ = + maxYPlane.findIntersections( + planetModel, maxZPlane, minYPlane, minZPlane, minXPlane, maxXPlane); - final GeoPoint[] maxXminY = maxXPlane.findIntersections(planetModel,minYPlane,minXPlane,maxYPlane,minZPlane,maxZPlane); - final GeoPoint[] maxXmaxY = maxXPlane.findIntersections(planetModel,maxYPlane,minXPlane,minYPlane,minZPlane,maxZPlane); - final GeoPoint[] maxXminZ = maxXPlane.findIntersections(planetModel,minZPlane,minXPlane,maxZPlane,minYPlane,maxYPlane); - final GeoPoint[] maxXmaxZ = maxXPlane.findIntersections(planetModel,maxZPlane,minXPlane,minZPlane,minYPlane,maxYPlane); - - final GeoPoint[] minYminZ = minYPlane.findIntersections(planetModel,minZPlane,maxYPlane,maxZPlane,minXPlane,maxXPlane); - final GeoPoint[] minYmaxZ = minYPlane.findIntersections(planetModel,maxZPlane,maxYPlane,minZPlane,minXPlane,maxXPlane); - final GeoPoint[] maxYminZ = maxYPlane.findIntersections(planetModel,minZPlane,minYPlane,maxZPlane,minXPlane,maxXPlane); - final GeoPoint[] maxYmaxZ = maxYPlane.findIntersections(planetModel,maxZPlane,minYPlane,minZPlane,minXPlane,maxXPlane); - notableMinXPoints = glueTogether(minXminY, minXmaxY, minXminZ, minXmaxZ); notableMaxXPoints = glueTogether(maxXminY, maxXmaxY, maxXminZ, maxXmaxZ); notableMinYPoints = glueTogether(minXminY, maxXminY, minYminZ, minYmaxZ); @@ -195,22 +221,26 @@ class StandardXYZSolid extends BaseXYZSolid { notableMinZPoints = glueTogether(minXminZ, maxXminZ, minYminZ, maxYminZ); notableMaxZPoints = glueTogether(minXmaxZ, maxXmaxZ, minYmaxZ, maxYmaxZ); - - //System.err.println( - // " notableMinXPoints="+Arrays.asList(notableMinXPoints)+" notableMaxXPoints="+Arrays.asList(notableMaxXPoints)+ - // " notableMinYPoints="+Arrays.asList(notableMinYPoints)+" notableMaxYPoints="+Arrays.asList(notableMaxYPoints)+ - // " notableMinZPoints="+Arrays.asList(notableMinZPoints)+" notableMaxZPoints="+Arrays.asList(notableMaxZPoints)); + // System.err.println( + // " notableMinXPoints="+Arrays.asList(notableMinXPoints)+" + // notableMaxXPoints="+Arrays.asList(notableMaxXPoints)+ + // " 
notableMinYPoints="+Arrays.asList(notableMinYPoints)+" + // notableMaxYPoints="+Arrays.asList(notableMaxYPoints)+ + // " notableMinZPoints="+Arrays.asList(notableMinZPoints)+" + // notableMaxZPoints="+Arrays.asList(notableMaxZPoints)); // Now, compute the edge points. - // This is the trickiest part of setting up an XYZSolid. We've computed intersections already, so - // we'll start there. - // There can be a number of shapes, each of which needs an edgepoint. Each side by itself might contribute - // an edgepoint, for instance, if the plane describing that side intercepts the planet in such a way that the ellipse - // of interception does not meet any other planes. Plane intersections can each contribute 0, 1, or 2 edgepoints. + // This is the trickiest part of setting up an XYZSolid. We've computed intersections + // already, so we'll start there. + // There can be a number of shapes, each of which needs an edgepoint. Each side by itself + // might contribute an edgepoint, for instance, if the plane describing that side + // intercepts the planet in such a way that the ellipse of interception does not meet any + // other planes. Plane intersections can each contribute 0, 1, or 2 edgepoints. // - // All of this makes for a lot of potential edgepoints, but I believe these can be pruned back with careful analysis. - // I haven't yet done that analysis, however, so I will treat them all as individual edgepoints. - + // All of this makes for a lot of potential edgepoints, but I believe these can be pruned back + // with careful analysis. I haven't yet done that analysis, however, so I will treat them all + // as individual edgepoints. + // The cases we are looking for are when the four corner points for any given // plane are all outside of the world, AND that plane intersects the world. // There are eight corner points all told; we must evaluate these WRT the planet surface. @@ -222,25 +252,34 @@ class StandardXYZSolid extends BaseXYZSolid { final boolean maxXminYmaxZ = planetModel.pointOutside(maxX, minY, maxZ); final boolean maxXmaxYminZ = planetModel.pointOutside(maxX, maxY, minZ); final boolean maxXmaxYmaxZ = planetModel.pointOutside(maxX, maxY, maxZ); - - //System.err.println("Outside world: minXminYminZ="+minXminYminZ+" minXminYmaxZ="+minXminYmaxZ+" minXmaxYminZ="+minXmaxYminZ+ - // " minXmaxYmaxZ="+minXmaxYmaxZ+" maxXminYminZ="+maxXminYminZ+" maxXminYmaxZ="+maxXminYmaxZ+" maxXmaxYminZ="+maxXmaxYminZ+ + + // System.err.println("Outside world: minXminYminZ="+minXminYminZ+" + // minXminYmaxZ="+minXminYmaxZ+" minXmaxYminZ="+minXmaxYminZ+ + // " minXmaxYmaxZ="+minXmaxYmaxZ+" maxXminYminZ="+maxXminYminZ+" + // maxXminYmaxZ="+maxXminYmaxZ+" maxXmaxYminZ="+maxXmaxYminZ+ // " maxXmaxYmaxZ="+maxXmaxYmaxZ); // Look at single-plane/world intersections. // We detect these by looking at the world model and noting its x, y, and z bounds. final GeoPoint[] minXEdges; - if (minX - worldMinX >= -Vector.MINIMUM_RESOLUTION && minX - worldMaxX <= Vector.MINIMUM_RESOLUTION && - minY < 0.0 && maxY > 0.0 && minZ < 0.0 && maxZ > 0.0 && - minXminYminZ && minXminYmaxZ && minXmaxYminZ && minXmaxYmaxZ) { + if (minX - worldMinX >= -Vector.MINIMUM_RESOLUTION + && minX - worldMaxX <= Vector.MINIMUM_RESOLUTION + && minY < 0.0 + && maxY > 0.0 + && minZ < 0.0 + && maxZ > 0.0 + && minXminYminZ + && minXminYmaxZ + && minXmaxYminZ + && minXmaxYmaxZ) { // Find any point on the minX plane that intersects the world // First construct a perpendicular plane that will allow us to find a sample point. 
// This plane is vertical and goes through the points (0,0,0) and (1,0,0) // Then use it to compute a sample point. final GeoPoint intPoint = minXPlane.getSampleIntersectionPoint(planetModel, xVerticalPlane); if (intPoint != null) { - minXEdges = new GeoPoint[]{intPoint}; + minXEdges = new GeoPoint[] {intPoint}; } else { // No intersection found? minXEdges = EMPTY_POINTS; @@ -248,98 +287,134 @@ class StandardXYZSolid extends BaseXYZSolid { } else { minXEdges = EMPTY_POINTS; } - + final GeoPoint[] maxXEdges; - if (maxX - worldMinX >= -Vector.MINIMUM_RESOLUTION && maxX - worldMaxX <= Vector.MINIMUM_RESOLUTION && - minY < 0.0 && maxY > 0.0 && minZ < 0.0 && maxZ > 0.0 && - maxXminYminZ && maxXminYmaxZ && maxXmaxYminZ && maxXmaxYmaxZ) { + if (maxX - worldMinX >= -Vector.MINIMUM_RESOLUTION + && maxX - worldMaxX <= Vector.MINIMUM_RESOLUTION + && minY < 0.0 + && maxY > 0.0 + && minZ < 0.0 + && maxZ > 0.0 + && maxXminYminZ + && maxXminYmaxZ + && maxXmaxYminZ + && maxXmaxYmaxZ) { // Find any point on the maxX plane that intersects the world // First construct a perpendicular plane that will allow us to find a sample point. // This plane is vertical and goes through the points (0,0,0) and (1,0,0) // Then use it to compute a sample point. final GeoPoint intPoint = maxXPlane.getSampleIntersectionPoint(planetModel, xVerticalPlane); if (intPoint != null) { - maxXEdges = new GeoPoint[]{intPoint}; + maxXEdges = new GeoPoint[] {intPoint}; } else { maxXEdges = EMPTY_POINTS; } } else { maxXEdges = EMPTY_POINTS; } - + final GeoPoint[] minYEdges; - if (minY - worldMinY >= -Vector.MINIMUM_RESOLUTION && minY - worldMaxY <= Vector.MINIMUM_RESOLUTION && - minX < 0.0 && maxX > 0.0 && minZ < 0.0 && maxZ > 0.0 && - minXminYminZ && minXminYmaxZ && maxXminYminZ && maxXminYmaxZ) { + if (minY - worldMinY >= -Vector.MINIMUM_RESOLUTION + && minY - worldMaxY <= Vector.MINIMUM_RESOLUTION + && minX < 0.0 + && maxX > 0.0 + && minZ < 0.0 + && maxZ > 0.0 + && minXminYminZ + && minXminYmaxZ + && maxXminYminZ + && maxXminYmaxZ) { // Find any point on the minY plane that intersects the world // First construct a perpendicular plane that will allow us to find a sample point. // This plane is vertical and goes through the points (0,0,0) and (0,1,0) // Then use it to compute a sample point. final GeoPoint intPoint = minYPlane.getSampleIntersectionPoint(planetModel, yVerticalPlane); if (intPoint != null) { - minYEdges = new GeoPoint[]{intPoint}; + minYEdges = new GeoPoint[] {intPoint}; } else { minYEdges = EMPTY_POINTS; } } else { minYEdges = EMPTY_POINTS; } - + final GeoPoint[] maxYEdges; - if (maxY - worldMinY >= -Vector.MINIMUM_RESOLUTION && maxY - worldMaxY <= Vector.MINIMUM_RESOLUTION && - minX < 0.0 && maxX > 0.0 && minZ < 0.0 && maxZ > 0.0 && - minXmaxYminZ && minXmaxYmaxZ && maxXmaxYminZ && maxXmaxYmaxZ) { + if (maxY - worldMinY >= -Vector.MINIMUM_RESOLUTION + && maxY - worldMaxY <= Vector.MINIMUM_RESOLUTION + && minX < 0.0 + && maxX > 0.0 + && minZ < 0.0 + && maxZ > 0.0 + && minXmaxYminZ + && minXmaxYmaxZ + && maxXmaxYminZ + && maxXmaxYmaxZ) { // Find any point on the maxY plane that intersects the world // First construct a perpendicular plane that will allow us to find a sample point. // This plane is vertical and goes through the points (0,0,0) and (0,1,0) // Then use it to compute a sample point. 
final GeoPoint intPoint = maxYPlane.getSampleIntersectionPoint(planetModel, yVerticalPlane); if (intPoint != null) { - maxYEdges = new GeoPoint[]{intPoint}; + maxYEdges = new GeoPoint[] {intPoint}; } else { maxYEdges = EMPTY_POINTS; } } else { maxYEdges = EMPTY_POINTS; } - + final GeoPoint[] minZEdges; - if (minZ - worldMinZ >= -Vector.MINIMUM_RESOLUTION && minZ - worldMaxZ <= Vector.MINIMUM_RESOLUTION && - minX < 0.0 && maxX > 0.0 && minY < 0.0 && maxY > 0.0 && - minXminYminZ && minXmaxYminZ && maxXminYminZ && maxXmaxYminZ) { + if (minZ - worldMinZ >= -Vector.MINIMUM_RESOLUTION + && minZ - worldMaxZ <= Vector.MINIMUM_RESOLUTION + && minX < 0.0 + && maxX > 0.0 + && minY < 0.0 + && maxY > 0.0 + && minXminYminZ + && minXmaxYminZ + && maxXminYminZ + && maxXmaxYminZ) { // Find any point on the minZ plane that intersects the world // First construct a perpendicular plane that will allow us to find a sample point. // This plane is vertical and goes through the points (0,0,0) and (1,0,0) // Then use it to compute a sample point. final GeoPoint intPoint = minZPlane.getSampleIntersectionPoint(planetModel, xVerticalPlane); if (intPoint != null) { - minZEdges = new GeoPoint[]{intPoint}; + minZEdges = new GeoPoint[] {intPoint}; } else { minZEdges = EMPTY_POINTS; } } else { minZEdges = EMPTY_POINTS; } - + final GeoPoint[] maxZEdges; - if (maxZ - worldMinZ >= -Vector.MINIMUM_RESOLUTION && maxZ - worldMaxZ <= Vector.MINIMUM_RESOLUTION && - minX < 0.0 && maxX > 0.0 && minY < 0.0 && maxY > 0.0 && - minXminYmaxZ && minXmaxYmaxZ && maxXminYmaxZ && maxXmaxYmaxZ) { + if (maxZ - worldMinZ >= -Vector.MINIMUM_RESOLUTION + && maxZ - worldMaxZ <= Vector.MINIMUM_RESOLUTION + && minX < 0.0 + && maxX > 0.0 + && minY < 0.0 + && maxY > 0.0 + && minXminYmaxZ + && minXmaxYmaxZ + && maxXminYmaxZ + && maxXmaxYmaxZ) { // Find any point on the maxZ plane that intersects the world // First construct a perpendicular plane that will allow us to find a sample point. - // This plane is vertical and goes through the points (0,0,0) and (1,0,0) (that is, its orientation doesn't matter) + // This plane is vertical and goes through the points (0,0,0) and (1,0,0) (that is, its + // orientation doesn't matter) // Then use it to compute a sample point. final GeoPoint intPoint = maxZPlane.getSampleIntersectionPoint(planetModel, xVerticalPlane); if (intPoint != null) { - maxZEdges = new GeoPoint[]{intPoint}; + maxZEdges = new GeoPoint[] {intPoint}; } else { maxZEdges = EMPTY_POINTS; } } else { maxZEdges = EMPTY_POINTS; } - - //System.err.println( + + // System.err.println( // " minXEdges="+Arrays.asList(minXEdges)+" maxXEdges="+Arrays.asList(maxXEdges)+ // " minYEdges="+Arrays.asList(minYEdges)+" maxYEdges="+Arrays.asList(maxYEdges)+ // " minZEdges="+Arrays.asList(minZEdges)+" maxZEdges="+Arrays.asList(maxZEdges)); @@ -351,28 +426,32 @@ class StandardXYZSolid extends BaseXYZSolid { minZPlaneIntersects = notableMinZPoints.length + minZEdges.length > 0; maxZPlaneIntersects = notableMaxZPoints.length + maxZEdges.length > 0; - // Glue everything together. This is not a minimal set of edgepoints, as of now, but it does completely describe all shapes on the - // planet. - this.edgePoints = glueTogether(minXminY, minXmaxY, minXminZ, minXmaxZ, - maxXminY, maxXmaxY, maxXminZ, maxXmaxZ, - minYminZ, minYmaxZ, maxYminZ, maxYmaxZ, - minXEdges, maxXEdges, minYEdges, maxYEdges, minZEdges, maxZEdges); + // Glue everything together. This is not a minimal set of edgepoints, as of now, but it does + // completely describe all shapes on the planet. 
+ this.edgePoints = + glueTogether( + minXminY, minXmaxY, minXminZ, minXmaxZ, maxXminY, maxXmaxY, maxXminZ, maxXmaxZ, + minYminZ, minYmaxZ, maxYminZ, maxYmaxZ, minXEdges, maxXEdges, minYEdges, maxYEdges, + minZEdges, maxZEdges); } } /** * Constructor for deserialization. + * * @param planetModel is the planet model. * @param inputStream is the input stream. */ - public StandardXYZSolid(final PlanetModel planetModel, final InputStream inputStream) throws IOException { - this(planetModel, - SerializableObject.readDouble(inputStream), - SerializableObject.readDouble(inputStream), - SerializableObject.readDouble(inputStream), - SerializableObject.readDouble(inputStream), - SerializableObject.readDouble(inputStream), - SerializableObject.readDouble(inputStream)); + public StandardXYZSolid(final PlanetModel planetModel, final InputStream inputStream) + throws IOException { + this( + planetModel, + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream)); } @Override @@ -389,96 +468,145 @@ class StandardXYZSolid extends BaseXYZSolid { protected GeoPoint[] getEdgePoints() { return edgePoints; } - + @Override public boolean isWithin(final double x, final double y, final double z) { if (isWholeWorld) { return true; } - return minXPlane.isWithin(x, y, z) && - maxXPlane.isWithin(x, y, z) && - minYPlane.isWithin(x, y, z) && - maxYPlane.isWithin(x, y, z) && - minZPlane.isWithin(x, y, z) && - maxZPlane.isWithin(x, y, z); + return minXPlane.isWithin(x, y, z) + && maxXPlane.isWithin(x, y, z) + && minYPlane.isWithin(x, y, z) + && maxYPlane.isWithin(x, y, z) + && minZPlane.isWithin(x, y, z) + && maxZPlane.isWithin(x, y, z); } @Override public int getRelationship(final GeoShape path) { if (isWholeWorld) { - if (path.getEdgePoints().length > 0) - return WITHIN; + if (path.getEdgePoints().length > 0) return WITHIN; return OVERLAPS; } - + /* for (GeoPoint p : getEdgePoints()) { System.err.println(" Edge point "+p+" path.isWithin()? "+path.isWithin(p)); } - + for (GeoPoint p : path.getEdgePoints()) { - System.err.println(" path edge point "+p+" isWithin()? "+isWithin(p)+" minx="+minXPlane.evaluate(p)+" maxx="+maxXPlane.evaluate(p)+" miny="+minYPlane.evaluate(p)+" maxy="+maxYPlane.evaluate(p)+" minz="+minZPlane.evaluate(p)+" maxz="+maxZPlane.evaluate(p)); + System.err.println(" path edge point "+p+" isWithin()? "+isWithin(p)+" minx="+minXPlane.evaluate(p) + +" maxx="+maxXPlane.evaluate(p)+" miny="+minYPlane.evaluate(p)+" maxy="+maxYPlane.evaluate(p) + +" minz="+minZPlane.evaluate(p)+" maxz="+maxZPlane.evaluate(p)); } */ - - //System.err.println(this+" getrelationship with "+path); + + // System.err.println(this+" getrelationship with " + path); final int insideRectangle = isShapeInsideArea(path); if (insideRectangle == SOME_INSIDE) { - //System.err.println(" some shape points inside area"); + // System.err.println(" some shape points inside area"); return OVERLAPS; } // Figure out if the entire XYZArea is contained by the shape. 
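For orientation, a hedged summary of the full decision order this method works through, matching the checks above and below:

    // 1. Some shape edge points inside the solid               -> OVERLAPS
    // 2. Some solid edge points inside the shape               -> OVERLAPS
    // 3. Each entirely inside the other (coincident boundary)  -> OVERLAPS
    // 4. Any bounding plane actually crosses the shape         -> OVERLAPS
    // 5. Every shape edge point inside the solid               -> WITHIN
    // 6. Every solid edge point inside the shape               -> CONTAINS
    // 7. Otherwise                                             -> DISJOINT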
final int insideShape = isAreaInsideShape(path); if (insideShape == SOME_INSIDE) { - //System.err.println(" some area points inside shape"); + // System.err.println(" some area points inside shape"); return OVERLAPS; } if (insideRectangle == ALL_INSIDE && insideShape == ALL_INSIDE) { - //System.err.println(" inside of each other"); + // System.err.println(" inside of each other"); return OVERLAPS; } - if ((minXPlaneIntersects && path.intersects(minXPlane, notableMinXPoints, maxXPlane, minYPlane, maxYPlane, minZPlane, maxZPlane)) || - (maxXPlaneIntersects && path.intersects(maxXPlane, notableMaxXPoints, minXPlane, minYPlane, maxYPlane, minZPlane, maxZPlane)) || - (minYPlaneIntersects && path.intersects(minYPlane, notableMinYPoints, maxYPlane, minXPlane, maxXPlane, minZPlane, maxZPlane)) || - (maxYPlaneIntersects && path.intersects(maxYPlane, notableMaxYPoints, minYPlane, minXPlane, maxXPlane, minZPlane, maxZPlane)) || - (minZPlaneIntersects && path.intersects(minZPlane, notableMinZPoints, maxZPlane, minXPlane, maxXPlane, minYPlane, maxYPlane)) || - (maxZPlaneIntersects && path.intersects(maxZPlane, notableMaxZPoints, minZPlane, minXPlane, maxXPlane, minYPlane, maxYPlane))) { - //System.err.println(" edges intersect"); + if ((minXPlaneIntersects + && path.intersects( + minXPlane, + notableMinXPoints, + maxXPlane, + minYPlane, + maxYPlane, + minZPlane, + maxZPlane)) + || (maxXPlaneIntersects + && path.intersects( + maxXPlane, + notableMaxXPoints, + minXPlane, + minYPlane, + maxYPlane, + minZPlane, + maxZPlane)) + || (minYPlaneIntersects + && path.intersects( + minYPlane, + notableMinYPoints, + maxYPlane, + minXPlane, + maxXPlane, + minZPlane, + maxZPlane)) + || (maxYPlaneIntersects + && path.intersects( + maxYPlane, + notableMaxYPoints, + minYPlane, + minXPlane, + maxXPlane, + minZPlane, + maxZPlane)) + || (minZPlaneIntersects + && path.intersects( + minZPlane, + notableMinZPoints, + maxZPlane, + minXPlane, + maxXPlane, + minYPlane, + maxYPlane)) + || (maxZPlaneIntersects + && path.intersects( + maxZPlane, + notableMaxZPoints, + minZPlane, + minXPlane, + maxXPlane, + minYPlane, + maxYPlane))) { + // System.err.println(" edges intersect"); return OVERLAPS; } if (insideRectangle == ALL_INSIDE) { - //System.err.println(" all shape points inside area"); + // System.err.println(" all shape points inside area"); return WITHIN; } if (insideShape == ALL_INSIDE) { - //System.err.println(" all area points inside shape"); + // System.err.println(" all area points inside shape"); return CONTAINS; } - //System.err.println(" disjoint"); + // System.err.println(" disjoint"); return DISJOINT; } @Override public boolean equals(Object o) { - if (!(o instanceof StandardXYZSolid)) + if (!(o instanceof StandardXYZSolid)) { return false; + } StandardXYZSolid other = (StandardXYZSolid) o; - if (!super.equals(other) || - other.isWholeWorld != isWholeWorld) { + if (!super.equals(other) || other.isWholeWorld != isWholeWorld) { return false; } if (!isWholeWorld) { - return other.minXPlane.equals(minXPlane) && - other.maxXPlane.equals(maxXPlane) && - other.minYPlane.equals(minYPlane) && - other.maxYPlane.equals(maxYPlane) && - other.minZPlane.equals(minZPlane) && - other.maxZPlane.equals(maxZPlane); + return other.minXPlane.equals(minXPlane) + && other.maxXPlane.equals(maxXPlane) + && other.minYPlane.equals(minYPlane) + && other.maxYPlane.equals(maxYPlane) + && other.minZPlane.equals(minZPlane) + && other.maxZPlane.equals(maxZPlane); } return true; } @@ -486,22 +614,36 @@ class StandardXYZSolid extends BaseXYZSolid { 
@Override public int hashCode() { int result = super.hashCode(); - result = 31 * result + (isWholeWorld?1:0); + result = 31 * result + (isWholeWorld ? 1 : 0); if (!isWholeWorld) { - result = 31 * result + minXPlane.hashCode(); - result = 31 * result + maxXPlane.hashCode(); - result = 31 * result + minYPlane.hashCode(); - result = 31 * result + maxYPlane.hashCode(); - result = 31 * result + minZPlane.hashCode(); - result = 31 * result + maxZPlane.hashCode(); + result = 31 * result + minXPlane.hashCode(); + result = 31 * result + maxXPlane.hashCode(); + result = 31 * result + minYPlane.hashCode(); + result = 31 * result + maxYPlane.hashCode(); + result = 31 * result + minZPlane.hashCode(); + result = 31 * result + maxZPlane.hashCode(); } return result; } @Override public String toString() { - return "StandardXYZSolid: {planetmodel="+planetModel+", isWholeWorld="+isWholeWorld+", minXplane="+minXPlane+", maxXplane="+maxXPlane+", minYplane="+minYPlane+", maxYplane="+maxYPlane+", minZplane="+minZPlane+", maxZplane="+maxZPlane+"}"; + return "StandardXYZSolid: {planetmodel=" + + planetModel + + ", isWholeWorld=" + + isWholeWorld + + ", minXplane=" + + minXPlane + + ", maxXplane=" + + maxXPlane + + ", minYplane=" + + minYPlane + + ", maxYplane=" + + maxYPlane + + ", minZplane=" + + minZPlane + + ", maxZplane=" + + maxZPlane + + "}"; } - } - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/Tools.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/Tools.java index e8ee29e530e..a3a070af431 100755 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/Tools.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/Tools.java @@ -22,20 +22,18 @@ package org.apache.lucene.spatial3d.geom; * @lucene.experimental */ public class Tools { - private Tools() { - } + private Tools() {} /** - * Java acos yields a NAN if you take an arc-cos of an - * angle that's just a tiny bit greater than 1.0, so - * here's a more resilient version. + * Java acos yields a NAN if you take an arc-cos of an angle that's just a tiny bit greater than + * 1.0, so here's a more resilient version. */ public static double safeAcos(double value) { - if (value > 1.0) + if (value > 1.0) { value = 1.0; - else if (value < -1.0) + } else if (value < -1.0) { value = -1.0; + } return Math.acos(value); } - } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/Vector.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/Vector.java index 46605c13d8a..b0b15c8fac8 100755 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/Vector.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/Vector.java @@ -17,29 +17,20 @@ package org.apache.lucene.spatial3d.geom; /** - * A 3d vector in space, not necessarily - * going through the origin. + * A 3d vector in space, not necessarily going through the origin. * * @lucene.experimental */ public class Vector { - /** - * Values that are all considered to be essentially zero have a magnitude - * less than this. - */ + /** Values that are all considered to be essentially zero have a magnitude less than this. */ public static final double MINIMUM_RESOLUTION = 1.0e-12; - /** - * Angular version of minimum resolution. - */ + /** Angular version of minimum resolution. */ public static final double MINIMUM_ANGULAR_RESOLUTION = Math.PI * MINIMUM_RESOLUTION; - /** - * For squared quantities, the bound is squared too. - */ + /** For squared quantities, the bound is squared too. 
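The Tools.safeAcos clamp reformatted above exists because floating-point round-off can push a cosine value just past 1.0, where Math.acos returns NaN (the "NAN" in the comment). A minimal standalone demonstration, copying the clamped method as it appears in the patch:

public class SafeAcosDemo {
  // Copied from the reformatted Tools.safeAcos above: clamp into [-1, 1]
  // before calling Math.acos.
  static double safeAcos(double value) {
    if (value > 1.0) {
      value = 1.0;
    } else if (value < -1.0) {
      value = -1.0;
    }
    return Math.acos(value);
  }

  public static void main(String[] args) {
    double almostOne = Math.nextUp(1.0); // 1.0 plus one ulp, as round-off can produce
    System.out.println(Math.acos(almostOne)); // NaN
    System.out.println(safeAcos(almostOne)); // 0.0
  }
}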
*/ public static final double MINIMUM_RESOLUTION_SQUARED = MINIMUM_RESOLUTION * MINIMUM_RESOLUTION; - /** - * For cubed quantities, cube the bound. - */ - public static final double MINIMUM_RESOLUTION_CUBED = MINIMUM_RESOLUTION_SQUARED * MINIMUM_RESOLUTION; + /** For cubed quantities, cube the bound. */ + public static final double MINIMUM_RESOLUTION_CUBED = + MINIMUM_RESOLUTION_SQUARED * MINIMUM_RESOLUTION; /** The x value */ public final double x; @@ -49,16 +40,17 @@ public class Vector { public final double z; /** - * Gram-Schmidt convergence envelope is a bit smaller than we really need because we don't want the math to fail afterwards in - * other places. - */ + * Gram-Schmidt convergence envelope is a bit smaller than we really need because we don't want + * the math to fail afterwards in other places. + */ private static final double MINIMUM_GRAM_SCHMIDT_ENVELOPE = MINIMUM_RESOLUTION * 0.5; - + /** * Construct from (U.S.) x,y,z coordinates. - *@param x is the x value. - *@param y is the y value. - *@param z is the z value. + * + * @param x is the x value. + * @param y is the y value. + * @param z is the z value. */ public Vector(double x, double y, double z) { this.x = x; @@ -67,13 +59,11 @@ public class Vector { } /** - * Construct a vector that is perpendicular to - * two other (non-zero) vectors. If the vectors are parallel, - * IllegalArgumentException will be thrown. - * Produces a normalized final vector. + * Construct a vector that is perpendicular to two other (non-zero) vectors. If the vectors are + * parallel, IllegalArgumentException will be thrown. Produces a normalized final vector. * * @param A is the first vector - * @param BX is the X value of the second + * @param BX is the X value of the second * @param BY is the Y value of the second * @param BZ is the Z value of the second */ @@ -81,42 +71,46 @@ public class Vector { // We're really looking at two vectors and computing a perpendicular one from that. this(A.x, A.y, A.z, BX, BY, BZ); } - + /** - * Construct a vector that is perpendicular to - * two other (non-zero) vectors. If the vectors are parallel, - * IllegalArgumentException will be thrown. - * Produces a normalized final vector. + * Construct a vector that is perpendicular to two other (non-zero) vectors. If the vectors are + * parallel, IllegalArgumentException will be thrown. Produces a normalized final vector. * - * @param AX is the X value of the first + * @param AX is the X value of the first * @param AY is the Y value of the first * @param AZ is the Z value of the first - * @param BX is the X value of the second + * @param BX is the X value of the second * @param BY is the Y value of the second * @param BZ is the Z value of the second */ - public Vector(final double AX, final double AY, final double AZ, final double BX, final double BY, final double BZ) { + public Vector( + final double AX, + final double AY, + final double AZ, + final double BX, + final double BY, + final double BZ) { // We're really looking at two vectors and computing a perpendicular one from that. - // We can think of this as having three points -- the origin, and two points that aren't the origin. - // Normally, we can compute the perpendicular vector this way: + // We can think of this as having three points -- the origin, and two points that aren't the + // origin. 
Normally, we can compute the perpendicular vector this way: // x = u2v3 - u3v2 // y = u3v1 - u1v3 // z = u1v2 - u2v1 - // Sometimes that produces a plane that does not contain the original three points, however, due to - // numerical precision issues. Then we continue making the answer more precise using the + // Sometimes that produces a plane that does not contain the original three points, however, due + // to numerical precision issues. Then we continue making the answer more precise using the // Gram-Schmidt process: https://en.wikipedia.org/wiki/Gram%E2%80%93Schmidt_process - + // Compute the naive perpendicular final double thisX = AY * BZ - AZ * BY; final double thisY = AZ * BX - AX * BZ; final double thisZ = AX * BY - AY * BX; - + final double magnitude = magnitude(thisX, thisY, thisZ); if (magnitude == 0.0) { throw new IllegalArgumentException("Degenerate/parallel vector constructed"); } - final double inverseMagnitude = 1.0/magnitude; - + final double inverseMagnitude = 1.0 / magnitude; + double normalizeX = thisX * inverseMagnitude; double normalizeY = thisY * inverseMagnitude; double normalizeZ = thisZ * inverseMagnitude; @@ -128,7 +122,8 @@ public class Vector { while (true) { final double currentDotProdA = AX * normalizeX + AY * normalizeY + AZ * normalizeZ; final double currentDotProdB = BX * normalizeX + BY * normalizeY + BZ * normalizeZ; - if (Math.abs(currentDotProdA) < MINIMUM_GRAM_SCHMIDT_ENVELOPE && Math.abs(currentDotProdB) < MINIMUM_GRAM_SCHMIDT_ENVELOPE) { + if (Math.abs(currentDotProdA) < MINIMUM_GRAM_SCHMIDT_ENVELOPE + && Math.abs(currentDotProdB) < MINIMUM_GRAM_SCHMIDT_ENVELOPE) { break; } // Converge on the one that has largest dot product @@ -158,10 +153,11 @@ public class Vector { normalizeX = normalizeX * inverseCorrectedMagnitude; normalizeY = normalizeY * inverseCorrectedMagnitude; normalizeZ = normalizeZ * inverseCorrectedMagnitude; - //This is probably not needed as the method seems to converge - //quite quickly. But it is safer to have a way out. + // This is probably not needed as the method seems to converge + // quite quickly. But it is safer to have a way out. if (i++ > 10) { - throw new IllegalArgumentException("Plane could not be constructed! Could not find a normal vector."); + throw new IllegalArgumentException( + "Plane could not be constructed! Could not find a normal vector."); } } this.x = normalizeX; @@ -170,10 +166,8 @@ public class Vector { } /** - * Construct a vector that is perpendicular to - * two other (non-zero) vectors. If the vectors are parallel, - * IllegalArgumentException will be thrown. - * Produces a normalized final vector. + * Construct a vector that is perpendicular to two other (non-zero) vectors. If the vectors are + * parallel, IllegalArgumentException will be thrown. Produces a normalized final vector. * * @param A is the first vector * @param B is the second @@ -182,17 +176,15 @@ public class Vector { this(A, B.x, B.y, B.z); } - /** Compute a magnitude of an x,y,z value. - */ + /** Compute a magnitude of an x,y,z value. */ public static double magnitude(final double x, final double y, final double z) { - return Math.sqrt(x*x + y*y + z*z); + return Math.sqrt(x * x + y * y + z * z); } - + /** * Compute a normalized unit vector based on the current vector. * - * @return the normalized vector, or null if the current vector has - * a magnitude of zero. + * @return the normalized vector, or null if the current vector has a magnitude of zero. 
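The two-vector constructor above first takes the naive cross product and then iteratively strips any residual component along A and B (the Gram-Schmidt correction the comments reference) until both dot products fall inside the envelope. Below is a condensed standalone sketch of that technique; the class and method names are hypothetical, and the projection-subtraction step is a plausible reading of the elided loop body rather than a verbatim copy:

public class PerpendicularSketch {
  // Mirrors MINIMUM_GRAM_SCHMIDT_ENVELOPE = MINIMUM_RESOLUTION * 0.5.
  static final double ENVELOPE = 1.0e-12 * 0.5;

  static double[] perpendicular(double ax, double ay, double az,
                                double bx, double by, double bz) {
    // Naive cross product A x B.
    double x = ay * bz - az * by;
    double y = az * bx - ax * bz;
    double z = ax * by - ay * bx;
    double mag = Math.sqrt(x * x + y * y + z * z);
    if (mag == 0.0) {
      throw new IllegalArgumentException("Degenerate/parallel vector constructed");
    }
    x /= mag; y /= mag; z /= mag;
    for (int i = 0; i <= 10; i++) {
      double dotA = ax * x + ay * y + az * z;
      double dotB = bx * x + by * y + bz * z;
      if (Math.abs(dotA) < ENVELOPE && Math.abs(dotB) < ENVELOPE) {
        return new double[] {x, y, z};
      }
      // Converge on whichever input has the larger residual projection,
      // subtract that projection, then renormalize.
      if (Math.abs(dotA) >= Math.abs(dotB)) {
        double k = dotA / (ax * ax + ay * ay + az * az);
        x -= k * ax; y -= k * ay; z -= k * az;
      } else {
        double k = dotB / (bx * bx + by * by + bz * bz);
        x -= k * bx; y -= k * by; z -= k * bz;
      }
      double m = Math.sqrt(x * x + y * y + z * z);
      x /= m; y /= m; z /= m;
    }
    throw new IllegalArgumentException("Could not find a normal vector");
  }

  public static void main(String[] args) {
    double[] n = perpendicular(1, 0, 0, 0, 1, 0);
    System.out.println(n[0] + "," + n[1] + "," + n[2]); // ~ 0,0,1
  }
}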
*/ public Vector normalize() { double denom = magnitude(); @@ -204,28 +196,29 @@ public class Vector { } /** - * Evaluate the cross product of two vectors against a point. - * If the dot product of the resultant vector resolves to "zero", then - * return true. - * @param A is the first vector to use for the cross product. - * @param B is the second vector to use for the cross product. - * @param point is the point to evaluate. - * @return true if we get a zero dot product. - */ - public static boolean crossProductEvaluateIsZero(final Vector A, final Vector B, final Vector point) { + * Evaluate the cross product of two vectors against a point. If the dot product of the resultant + * vector resolves to "zero", then return true. + * + * @param A is the first vector to use for the cross product. + * @param B is the second vector to use for the cross product. + * @param point is the point to evaluate. + * @return true if we get a zero dot product. + */ + public static boolean crossProductEvaluateIsZero( + final Vector A, final Vector B, final Vector point) { // Include Gram-Schmidt in-line so we avoid creating objects unnecessarily // Compute the naive perpendicular final double thisX = A.y * B.z - A.z * B.y; final double thisY = A.z * B.x - A.x * B.z; final double thisZ = A.x * B.y - A.y * B.x; - + final double magnitude = magnitude(thisX, thisY, thisZ); if (magnitude == 0.0) { return true; } - - final double inverseMagnitude = 1.0/magnitude; - + + final double inverseMagnitude = 1.0 / magnitude; + double normalizeX = thisX * inverseMagnitude; double normalizeY = thisY * inverseMagnitude; double normalizeZ = thisZ * inverseMagnitude; @@ -237,7 +230,8 @@ public class Vector { while (true) { final double currentDotProdA = A.x * normalizeX + A.y * normalizeY + A.z * normalizeZ; final double currentDotProdB = B.x * normalizeX + B.y * normalizeY + B.z * normalizeZ; - if (Math.abs(currentDotProdA) < MINIMUM_GRAM_SCHMIDT_ENVELOPE && Math.abs(currentDotProdB) < MINIMUM_GRAM_SCHMIDT_ENVELOPE) { + if (Math.abs(currentDotProdA) < MINIMUM_GRAM_SCHMIDT_ENVELOPE + && Math.abs(currentDotProdB) < MINIMUM_GRAM_SCHMIDT_ENVELOPE) { break; } // Converge on the one that has largest dot product @@ -267,13 +261,15 @@ public class Vector { normalizeX = normalizeX * inverseCorrectedMagnitude; normalizeY = normalizeY * inverseCorrectedMagnitude; normalizeZ = normalizeZ * inverseCorrectedMagnitude; - //This is probably not needed as the method seems to converge - //quite quickly. But it is safer to have a way out. + // This is probably not needed as the method seems to converge + // quite quickly. But it is safer to have a way out. if (i++ > 10) { - throw new IllegalArgumentException("Plane could not be constructed! Could not find a normal vector."); + throw new IllegalArgumentException( + "Plane could not be constructed! Could not find a normal vector."); } } - return Math.abs(normalizeX * point.x + normalizeY * point.y + normalizeZ * point.z) < MINIMUM_RESOLUTION; + return Math.abs(normalizeX * point.x + normalizeY * point.y + normalizeZ * point.z) + < MINIMUM_RESOLUTION; } /** @@ -299,85 +295,69 @@ public class Vector { } /** - * Determine if this vector, taken from the origin, - * describes a point within a set of planes. + * Determine if this vector, taken from the origin, describes a point within a set of planes. * - * @param bounds is the first part of the set of planes. + * @param bounds is the first part of the set of planes. * @param moreBounds is the second part of the set of planes. 
* @return true if the point is within the bounds. */ public boolean isWithin(final Membership[] bounds, final Membership... moreBounds) { // Return true if the point described is within all provided bounds - //System.err.println(" checking if "+this+" is within bounds"); + // System.err.println(" checking if " + this + " is within bounds"); for (final Membership bound : bounds) { if (bound != null && !bound.isWithin(this)) { - //System.err.println(" NOT within "+bound); + // System.err.println(" NOT within "+bound); return false; } } for (final Membership bound : moreBounds) { if (bound != null && !bound.isWithin(this)) { - //System.err.println(" NOT within "+bound); + // System.err.println(" NOT within " + bound); return false; } } - //System.err.println(" is within"); + // System.err.println(" is within"); return true; } - /** - * Translate vector. - */ + /** Translate vector. */ public Vector translate(final double xOffset, final double yOffset, final double zOffset) { return new Vector(x - xOffset, y - yOffset, z - zOffset); } - /** - * Rotate vector counter-clockwise in x-y by an angle. - */ + /** Rotate vector counter-clockwise in x-y by an angle. */ public Vector rotateXY(final double angle) { return rotateXY(Math.sin(angle), Math.cos(angle)); } - /** - * Rotate vector counter-clockwise in x-y by an angle, expressed as sin and cos. - */ + /** Rotate vector counter-clockwise in x-y by an angle, expressed as sin and cos. */ public Vector rotateXY(final double sinAngle, final double cosAngle) { return new Vector(x * cosAngle - y * sinAngle, x * sinAngle + y * cosAngle, z); } - /** - * Rotate vector counter-clockwise in x-z by an angle. - */ + /** Rotate vector counter-clockwise in x-z by an angle. */ public Vector rotateXZ(final double angle) { return rotateXZ(Math.sin(angle), Math.cos(angle)); } - /** - * Rotate vector counter-clockwise in x-z by an angle, expressed as sin and cos. - */ + /** Rotate vector counter-clockwise in x-z by an angle, expressed as sin and cos. */ public Vector rotateXZ(final double sinAngle, final double cosAngle) { return new Vector(x * cosAngle - z * sinAngle, y, x * sinAngle + z * cosAngle); } - /** - * Rotate vector counter-clockwise in z-y by an angle. - */ + /** Rotate vector counter-clockwise in z-y by an angle. */ public Vector rotateZY(final double angle) { return rotateZY(Math.sin(angle), Math.cos(angle)); } - /** - * Rotate vector counter-clockwise in z-y by an angle, expressed as sin and cos. - */ + /** Rotate vector counter-clockwise in z-y by an angle, expressed as sin and cos. */ public Vector rotateZY(final double sinAngle, final double cosAngle) { return new Vector(x, z * sinAngle + y * cosAngle, z * cosAngle - y * sinAngle); } /** - * Compute the square of a straight-line distance to a point described by the - * vector taken from the origin. - * Monotonically increasing for arc distances up to PI. + * Compute the square of a straight-line distance to a point described by the vector taken from + * the origin. Monotonically increasing for arc distances up to PI. * * @param v is the vector to compute a distance to. * @return the square of the linear distance. @@ -390,9 +370,8 @@ public class Vector { } /** - * Compute the square of a straight-line distance to a point described by the - * vector taken from the origin. - * Monotonically increasing for arc distances up to PI. + * Compute the square of a straight-line distance to a point described by the vector taken from + * the origin. Monotonically increasing for arc distances up to PI. 
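The rotateXY/rotateXZ/rotateZY helpers reformatted above all apply the standard 2x2 rotation matrix in the named plane and pass the third coordinate through. A quick standalone check of the counter-clockwise convention (the names here are illustrative):

public class RotateXYDemo {
  // Same formula as Vector.rotateXY above: rotate (x, y) counter-clockwise,
  // leave z untouched.
  static double[] rotateXY(double x, double y, double z, double angle) {
    double sin = Math.sin(angle);
    double cos = Math.cos(angle);
    return new double[] {x * cos - y * sin, x * sin + y * cos, z};
  }

  public static void main(String[] args) {
    // Rotating the x unit vector by 90 degrees should yield (0, 1, 0).
    double[] r = rotateXY(1.0, 0.0, 0.0, Math.PI / 2);
    System.out.printf("%.3f %.3f %.3f%n", r[0], r[1], r[2]); // 0.000 1.000 0.000
  }
}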
* * @param x is the x part of the vector to compute a distance to. * @param y is the y part of the vector to compute a distance to. @@ -407,8 +386,7 @@ public class Vector { } /** - * Compute the straight-line distance to a point described by the - * vector taken from the origin. + * Compute the straight-line distance to a point described by the vector taken from the origin. * Monotonically increasing for arc distances up to PI. * * @param v is the vector to compute a distance to. @@ -419,8 +397,7 @@ public class Vector { } /** - * Compute the straight-line distance to a point described by the - * vector taken from the origin. + * Compute the straight-line distance to a point described by the vector taken from the origin. * Monotonically increasing for arc distances up to PI. * * @param x is the x part of the vector to compute a distance to. @@ -433,9 +410,8 @@ public class Vector { } /** - * Compute the square of the normal distance to a vector described by a - * vector taken from the origin. - * Monotonically increasing for arc distances up to PI/2. + * Compute the square of the normal distance to a vector described by a vector taken from the + * origin. Monotonically increasing for arc distances up to PI/2. * * @param v is the vector to compute a distance to. * @return the square of the normal distance. @@ -449,9 +425,8 @@ public class Vector { } /** - * Compute the square of the normal distance to a vector described by a - * vector taken from the origin. - * Monotonically increasing for arc distances up to PI/2. + * Compute the square of the normal distance to a vector described by a vector taken from the + * origin. Monotonically increasing for arc distances up to PI/2. * * @param x is the x part of the vector to compute a distance to. * @param y is the y part of the vector to compute a distance to. @@ -467,9 +442,8 @@ public class Vector { } /** - * Compute the normal (perpendicular) distance to a vector described by a - * vector taken from the origin. - * Monotonically increasing for arc distances up to PI/2. + * Compute the normal (perpendicular) distance to a vector described by a vector taken from the + * origin. Monotonically increasing for arc distances up to PI/2. * * @param v is the vector to compute a distance to. * @return the normal distance. @@ -479,9 +453,8 @@ public class Vector { } /** - * Compute the normal (perpendicular) distance to a vector described by a - * vector taken from the origin. - * Monotonically increasing for arc distances up to PI/2. + * Compute the normal (perpendicular) distance to a vector described by a vector taken from the + * origin. Monotonically increasing for arc distances up to PI/2. * * @param x is the x part of the vector to compute a distance to. * @param y is the y part of the vector to compute a distance to. @@ -498,17 +471,19 @@ public class Vector { * @return the magnitude. */ public double magnitude() { - return magnitude(x,y,z); + return magnitude(x, y, z); } /** * Compute whether two vectors are numerically identical. + * * @param otherX is the other vector X. * @param otherY is the other vector Y. * @param otherZ is the other vector Z. * @return true if they are numerically identical. 
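isNumericallyIdentical, whose reformatted signature follows, compares the squared length of the difference vector against MINIMUM_RESOLUTION_SQUARED. A standalone sketch of that comparison; the final threshold test is inferred from the constants defined earlier in Vector rather than visible in this hunk:

public class NumericIdentityDemo {
  // Mirrors Vector.MINIMUM_RESOLUTION_SQUARED.
  static final double MINIMUM_RESOLUTION_SQUARED = 1.0e-12 * 1.0e-12;

  static boolean isNumericallyIdentical(double x, double y, double z,
                                        double ox, double oy, double oz) {
    double dx = x - ox, dy = y - oy, dz = z - oz;
    // "Identical" means the squared separation is below the squared resolution.
    return dx * dx + dy * dy + dz * dz < MINIMUM_RESOLUTION_SQUARED;
  }

  public static void main(String[] args) {
    System.out.println(isNumericallyIdentical(1, 0, 0, 1 + 1e-14, 0, 0)); // true
    System.out.println(isNumericallyIdentical(1, 0, 0, 1 + 1e-9, 0, 0));  // false
  }
}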
*/ - public boolean isNumericallyIdentical(final double otherX, final double otherY, final double otherZ) { + public boolean isNumericallyIdentical( + final double otherX, final double otherY, final double otherZ) { final double deltaX = x - otherX; final double deltaY = y - otherY; final double deltaZ = z - otherZ; @@ -517,6 +492,7 @@ public class Vector { /** * Compute whether two vectors are numerically identical. + * * @param other is the other vector. * @return true if they are numerically identical. */ @@ -529,6 +505,7 @@ public class Vector { /** * Compute whether two vectors are parallel. + * * @param otherX is the other vector X. * @param otherY is the other vector Y. * @param otherZ is the other vector Z. @@ -543,6 +520,7 @@ public class Vector { /** * Compute whether two vectors are numerically identical. + * * @param other is the other vector. * @return true if they are parallel. */ @@ -553,32 +531,46 @@ public class Vector { return thisX * thisX + thisY * thisY + thisZ * thisZ < MINIMUM_RESOLUTION_SQUARED; } - /** Compute the desired magnitude of a unit vector projected to a given - * planet model. + /** + * Compute the desired magnitude of a unit vector projected to a given planet model. + * * @param planetModel is the planet model. * @param x is the unit vector x value. * @param y is the unit vector y value. * @param z is the unit vector z value. - * @return a magnitude value for that (x,y,z) that projects the vector onto the specified ellipsoid. + * @return a magnitude value for that (x,y,z) that projects the vector onto the specified + * ellipsoid. */ - static double computeDesiredEllipsoidMagnitude(final PlanetModel planetModel, final double x, final double y, final double z) { - return 1.0 / Math.sqrt(x*x*planetModel.inverseXYScalingSquared + y*y*planetModel.inverseXYScalingSquared + z*z*planetModel.inverseZScalingSquared); + static double computeDesiredEllipsoidMagnitude( + final PlanetModel planetModel, final double x, final double y, final double z) { + return 1.0 + / Math.sqrt( + x * x * planetModel.inverseXYScalingSquared + + y * y * planetModel.inverseXYScalingSquared + + z * z * planetModel.inverseZScalingSquared); } - /** Compute the desired magnitude of a unit vector projected to a given - * planet model. The unit vector is specified only by a z value. + /** + * Compute the desired magnitude of a unit vector projected to a given planet model. The unit + * vector is specified only by a z value. + * * @param planetModel is the planet model. * @param z is the unit vector z value. - * @return a magnitude value for that z value that projects the vector onto the specified ellipsoid. + * @return a magnitude value for that z value that projects the vector onto the specified + * ellipsoid. 
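computeDesiredEllipsoidMagnitude, reformatted above, returns the scale t that places a direction vector (x, y, z) on the ellipsoid x^2/a^2 + y^2/a^2 + z^2/c^2 = 1, i.e. t = 1 / sqrt(x^2/a^2 + y^2/a^2 + z^2/c^2). A standalone sketch, assuming inverseXYScalingSquared corresponds to 1/a^2 and inverseZScalingSquared to 1/c^2; the a and c values below are made up for illustration, with PlanetModel supplying the real scalings:

public class EllipsoidMagnitudeDemo {
  static double desiredMagnitude(double x, double y, double z, double a, double c) {
    double invA2 = 1.0 / (a * a); // stands in for inverseXYScalingSquared
    double invC2 = 1.0 / (c * c); // stands in for inverseZScalingSquared
    return 1.0 / Math.sqrt(x * x * invA2 + y * y * invA2 + z * z * invC2);
  }

  public static void main(String[] args) {
    double a = 1.0, c = 0.9; // hypothetical flattened planet
    double t = desiredMagnitude(0.0, 0.0, 1.0, a, c);
    System.out.println(t); // 0.9: the unit z vector scales to the pole at z = c
  }
}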
*/ static double computeDesiredEllipsoidMagnitude(final PlanetModel planetModel, final double z) { - return 1.0 / Math.sqrt((1.0-z*z)*planetModel.inverseXYScalingSquared + z*z*planetModel.inverseZScalingSquared); + return 1.0 + / Math.sqrt( + (1.0 - z * z) * planetModel.inverseXYScalingSquared + + z * z * planetModel.inverseZScalingSquared); } @Override public boolean equals(Object o) { - if (!(o instanceof Vector)) + if (!(o instanceof Vector)) { return false; + } Vector other = (Vector) o; return (other.x == x && other.y == y && other.z == z); } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/XYZBounds.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/XYZBounds.java index 0b0b701ce6f..c675de11f1a 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/XYZBounds.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/XYZBounds.java @@ -23,15 +23,15 @@ package org.apache.lucene.spatial3d.geom; */ public class XYZBounds implements Bounds { - /** A 'fudge factor', which is added to maximums and subtracted from minimums, - * in order to compensate for potential error deltas. This would not be necessary - * except that our 'bounds' is defined as always equaling or exceeding the boundary - * of the shape, and we cannot guarantee that without making MINIMUM_RESOLUTION - * unacceptably large. - * Also, see LUCENE-7290 for a description of how geometry can magnify the bounds delta. + /** + * A 'fudge factor', which is added to maximums and subtracted from minimums, in order to + * compensate for potential error deltas. This would not be necessary except that our 'bounds' is + * defined as always equaling or exceeding the boundary of the shape, and we cannot guarantee that + * without making MINIMUM_RESOLUTION unacceptably large. Also, see LUCENE-7290 for a description + * of how geometry can magnify the bounds delta. */ private static final double FUDGE_FACTOR = Vector.MINIMUM_RESOLUTION * 1e3; - + /** Minimum x */ private Double minX = null; /** Maximum x */ @@ -44,7 +44,7 @@ public class XYZBounds implements Bounds { private Double minZ = null; /** Maximum z */ private Double maxZ = null; - + /** Set to true if no longitude bounds can be stated */ private boolean noLongitudeBound = false; /** Set to true if no top latitude bound can be stated */ @@ -53,144 +53,178 @@ public class XYZBounds implements Bounds { private boolean noBottomLatitudeBound = false; /** Construct an empty bounds object */ - public XYZBounds() { - } + public XYZBounds() {} // Accessor methods - - /** Return the minimum X value. - *@return minimum X value. + + /** + * Return the minimum X value. + * + * @return minimum X value. */ public Double getMinimumX() { return minX; } - /** Return the maximum X value. - *@return maximum X value. + /** + * Return the maximum X value. + * + * @return maximum X value. */ public Double getMaximumX() { return maxX; } - /** Return the minimum Y value. - *@return minimum Y value. + /** + * Return the minimum Y value. + * + * @return minimum Y value. */ public Double getMinimumY() { return minY; } - /** Return the maximum Y value. - *@return maximum Y value. + /** + * Return the maximum Y value. + * + * @return maximum Y value. */ public Double getMaximumY() { return maxY; } - - /** Return the minimum Z value. - *@return minimum Z value. + + /** + * Return the minimum Z value. + * + * @return minimum Z value. */ public Double getMinimumZ() { return minZ; } - /** Return the maximum Z value. - *@return maximum Z value. 
+ /** + * Return the maximum Z value. + * + * @return maximum Z value. */ public Double getMaximumZ() { return maxZ; } - /** Return true if minX is as small as the planet model allows. - *@return true if minX has reached its bound. + /** + * Return true if minX is as small as the planet model allows. + * + * @return true if minX has reached its bound. */ public boolean isSmallestMinX(final PlanetModel planetModel) { - if (minX == null) + if (minX == null) { return false; + } return minX - planetModel.getMinimumXValue() < Vector.MINIMUM_RESOLUTION; } - - /** Return true if maxX is as large as the planet model allows. - *@return true if maxX has reached its bound. + + /** + * Return true if maxX is as large as the planet model allows. + * + * @return true if maxX has reached its bound. */ public boolean isLargestMaxX(final PlanetModel planetModel) { - if (maxX == null) + if (maxX == null) { return false; + } return planetModel.getMaximumXValue() - maxX < Vector.MINIMUM_RESOLUTION; } - /** Return true if minY is as small as the planet model allows. - *@return true if minY has reached its bound. + /** + * Return true if minY is as small as the planet model allows. + * + * @return true if minY has reached its bound. */ public boolean isSmallestMinY(final PlanetModel planetModel) { - if (minY == null) + if (minY == null) { return false; + } return minY - planetModel.getMinimumYValue() < Vector.MINIMUM_RESOLUTION; } - - /** Return true if maxY is as large as the planet model allows. - *@return true if maxY has reached its bound. + + /** + * Return true if maxY is as large as the planet model allows. + * + * @return true if maxY has reached its bound. */ public boolean isLargestMaxY(final PlanetModel planetModel) { - if (maxY == null) + if (maxY == null) { return false; + } return planetModel.getMaximumYValue() - maxY < Vector.MINIMUM_RESOLUTION; } - - /** Return true if minZ is as small as the planet model allows. - *@return true if minZ has reached its bound. + + /** + * Return true if minZ is as small as the planet model allows. + * + * @return true if minZ has reached its bound. */ public boolean isSmallestMinZ(final PlanetModel planetModel) { - if (minZ == null) + if (minZ == null) { return false; + } return minZ - planetModel.getMinimumZValue() < Vector.MINIMUM_RESOLUTION; } - - /** Return true if maxZ is as large as the planet model allows. - *@return true if maxZ has reached its bound. + + /** + * Return true if maxZ is as large as the planet model allows. + * + * @return true if maxZ has reached its bound. */ public boolean isLargestMaxZ(final PlanetModel planetModel) { - if (maxZ == null) + if (maxZ == null) { return false; + } return planetModel.getMaximumZValue() - maxZ < Vector.MINIMUM_RESOLUTION; } // Modification methods - + @Override - public Bounds addPlane(final PlanetModel planetModel, final Plane plane, final Membership... bounds) { + public Bounds addPlane( + final PlanetModel planetModel, final Plane plane, final Membership... bounds) { plane.recordBounds(planetModel, this, bounds); return this; } - /** Add a horizontal plane to the bounds description. - * This method should EITHER use the supplied latitude, OR use the supplied - * plane, depending on what is most efficient. - *@param planetModel is the planet model. - *@param latitude is the latitude. - *@param horizontalPlane is the plane. - *@param bounds are the constraints on the plane. - *@return updated Bounds object. + /** + * Add a horizontal plane to the bounds description. 
This method should EITHER use the supplied + * latitude, OR use the supplied plane, depending on what is most efficient. + * + * @param planetModel is the planet model. + * @param latitude is the latitude. + * @param horizontalPlane is the plane. + * @param bounds are the constraints on the plane. + * @return updated Bounds object. */ - public Bounds addHorizontalPlane(final PlanetModel planetModel, - final double latitude, - final Plane horizontalPlane, - final Membership... bounds) { + public Bounds addHorizontalPlane( + final PlanetModel planetModel, + final double latitude, + final Plane horizontalPlane, + final Membership... bounds) { return addPlane(planetModel, horizontalPlane, bounds); } - - /** Add a vertical plane to the bounds description. - * This method should EITHER use the supplied longitude, OR use the supplied - * plane, depending on what is most efficient. - *@param planetModel is the planet model. - *@param longitude is the longitude. - *@param verticalPlane is the plane. - *@param bounds are the constraints on the plane. - *@return updated Bounds object. + + /** + * Add a vertical plane to the bounds description. This method should EITHER use the supplied + * longitude, OR use the supplied plane, depending on what is most efficient. + * + * @param planetModel is the planet model. + * @param longitude is the longitude. + * @param verticalPlane is the plane. + * @param bounds are the constraints on the plane. + * @return updated Bounds object. */ - public Bounds addVerticalPlane(final PlanetModel planetModel, - final double longitude, - final Plane verticalPlane, - final Membership... bounds) { + public Bounds addVerticalPlane( + final PlanetModel planetModel, + final double longitude, + final Plane verticalPlane, + final Membership... bounds) { return addPlane(planetModel, verticalPlane, bounds); } @@ -198,8 +232,10 @@ public class XYZBounds implements Bounds { public Bounds addXValue(final GeoPoint point) { return addXValue(point.x); } - - /** Add a specific X value. + + /** + * Add a specific X value. + * * @param x is the value to add. * @return the bounds object. */ @@ -219,8 +255,10 @@ public class XYZBounds implements Bounds { public Bounds addYValue(final GeoPoint point) { return addYValue(point.y); } - - /** Add a specific Y value. + + /** + * Add a specific Y value. + * * @param y is the value to add. * @return the bounds object. */ @@ -240,8 +278,10 @@ public class XYZBounds implements Bounds { public Bounds addZValue(final GeoPoint point) { return addZValue(point.z); } - - /** Add a specific Z value. + + /** + * Add a specific Z value. + * * @param z is the value to add. * @return the bounds object. */ @@ -258,7 +298,11 @@ public class XYZBounds implements Bounds { } @Override - public Bounds addIntersection(final PlanetModel planetModel, final Plane plane1, final Plane plane2, final Membership... bounds) { + public Bounds addIntersection( + final PlanetModel planetModel, + final Plane plane1, + final Plane plane2, + final Membership... 
bounds) { plane1.recordBounds(planetModel, this, plane2, bounds); return this; } @@ -305,7 +349,18 @@ public class XYZBounds implements Bounds { @Override public String toString() { - return "XYZBounds: [xmin="+minX+" xmax="+maxX+" ymin="+minY+" ymax="+maxY+" zmin="+minZ+" zmax="+maxZ+"]"; + return "XYZBounds: [xmin=" + + minX + + " xmax=" + + maxX + + " ymin=" + + minY + + " ymax=" + + maxY + + " zmin=" + + minZ + + " zmax=" + + maxZ + + "]"; } - } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/XYZSolid.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/XYZSolid.java index cceb11083bf..ee2dd2352c6 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/XYZSolid.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/XYZSolid.java @@ -21,6 +21,4 @@ package org.apache.lucene.spatial3d.geom; * * @lucene.internal */ -public interface XYZSolid extends GeoArea, PlanetObject { -} - +public interface XYZSolid extends GeoArea, PlanetObject {} diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/XYZSolidFactory.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/XYZSolidFactory.java index 14b9bb62a8a..79b68a4797f 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/XYZSolidFactory.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/XYZSolidFactory.java @@ -22,11 +22,11 @@ package org.apache.lucene.spatial3d.geom; * @lucene.experimental */ public class XYZSolidFactory { - private XYZSolidFactory() { - } + private XYZSolidFactory() {} /** * Create a XYZSolid of the right kind given (x,y,z) bounds. + * * @param planetModel is the planet model * @param minX is the min X boundary * @param maxX is the max X boundary @@ -35,45 +35,57 @@ public class XYZSolidFactory { * @param minZ is the min Z boundary * @param maxZ is the max Z boundary */ - public static XYZSolid makeXYZSolid(final PlanetModel planetModel, final double minX, final double maxX, final double minY, final double maxY, final double minZ, final double maxZ) { + public static XYZSolid makeXYZSolid( + final PlanetModel planetModel, + final double minX, + final double maxX, + final double minY, + final double maxY, + final double minZ, + final double maxZ) { if (Math.abs(maxX - minX) < Vector.MINIMUM_RESOLUTION) { if (Math.abs(maxY - minY) < Vector.MINIMUM_RESOLUTION) { if (Math.abs(maxZ - minZ) < Vector.MINIMUM_RESOLUTION) { - return new dXdYdZSolid(planetModel, (minX+maxX) * 0.5, (minY+maxY) * 0.5, minZ); + return new dXdYdZSolid(planetModel, (minX + maxX) * 0.5, (minY + maxY) * 0.5, minZ); } else { - return new dXdYZSolid(planetModel, (minX+maxX) * 0.5, (minY+maxY) * 0.5, minZ, maxZ); + return new dXdYZSolid(planetModel, (minX + maxX) * 0.5, (minY + maxY) * 0.5, minZ, maxZ); } } else { if (Math.abs(maxZ - minZ) < Vector.MINIMUM_RESOLUTION) { - return new dXYdZSolid(planetModel, (minX+maxX) * 0.5, minY, maxY, (minZ+maxZ) * 0.5); + return new dXYdZSolid(planetModel, (minX + maxX) * 0.5, minY, maxY, (minZ + maxZ) * 0.5); } else { - return new dXYZSolid(planetModel, (minX+maxX) * 0.5, minY, maxY, minZ, maxZ); + return new dXYZSolid(planetModel, (minX + maxX) * 0.5, minY, maxY, minZ, maxZ); } } } if (Math.abs(maxY - minY) < Vector.MINIMUM_RESOLUTION) { if (Math.abs(maxZ - minZ) < Vector.MINIMUM_RESOLUTION) { - return new XdYdZSolid(planetModel, minX, maxX, (minY+maxY) * 0.5, (minZ+maxZ) * 0.5); + return new XdYdZSolid(planetModel, minX, maxX, (minY + maxY) * 0.5, (minZ + maxZ) * 0.5); } else { - return new 
XdYZSolid(planetModel, minX, maxX, (minY+maxY) * 0.5, minZ, maxZ); + return new XdYZSolid(planetModel, minX, maxX, (minY + maxY) * 0.5, minZ, maxZ); } } if (Math.abs(maxZ - minZ) < Vector.MINIMUM_RESOLUTION) { - return new XYdZSolid(planetModel, minX, maxX, minY, maxY, (minZ+maxZ) * 0.5); + return new XYdZSolid(planetModel, minX, maxX, minY, maxY, (minZ + maxZ) * 0.5); } return new StandardXYZSolid(planetModel, minX, maxX, minY, maxY, minZ, maxZ); } - + /** * Create a XYZSolid of the right kind given (x,y,z) bounds. + * * @param planetModel is the planet model * @param bounds is the XYZ bounds object. * @return the solid. */ public static XYZSolid makeXYZSolid(final PlanetModel planetModel, final XYZBounds bounds) { - return makeXYZSolid(planetModel, bounds.getMinimumX(), bounds.getMaximumX(), - bounds.getMinimumY(), bounds.getMaximumY(), - bounds.getMinimumZ(), bounds.getMaximumZ()); + return makeXYZSolid( + planetModel, + bounds.getMinimumX(), + bounds.getMaximumX(), + bounds.getMinimumY(), + bounds.getMaximumY(), + bounds.getMinimumZ(), + bounds.getMaximumZ()); } - } diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/XYdZSolid.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/XYdZSolid.java index f5b2ae6d982..2a0079bfa46 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/XYdZSolid.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/XYdZSolid.java @@ -16,9 +16,9 @@ */ package org.apache.lucene.spatial3d.geom; +import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.io.IOException; /** * 3D rectangle, bounded on six sides by X,Y,Z limits, degenerate in Z @@ -48,11 +48,12 @@ class XYdZSolid extends BaseXYZSolid { protected final SidedPlane maxYPlane; /** Z plane */ protected final Plane zPlane; - - /** These are the edge points of the shape, which are defined to be at least one point on - * each surface area boundary. In the case of a solid, this includes points which represent - * the intersection of XYZ bounding planes and the planet, as well as points representing - * the intersection of single bounding planes with the planet itself. + + /** + * These are the edge points of the shape, which are defined to be at least one point on each + * surface area boundary. In the case of a solid, this includes points which represent the + * intersection of XYZ bounding planes and the planet, as well as points representing the + * intersection of single bounding planes with the planet itself. */ protected final GeoPoint[] edgePoints; @@ -62,19 +63,20 @@ class XYdZSolid extends BaseXYZSolid { /** * Sole constructor * - *@param planetModel is the planet model. - *@param minX is the minimum X value. - *@param maxX is the maximum X value. - *@param minY is the minimum Y value. - *@param maxY is the maximum Y value. - *@param Z is the Z value. + * @param planetModel is the planet model. + * @param minX is the minimum X value. + * @param maxX is the maximum X value. + * @param minY is the minimum Y value. + * @param maxY is the maximum Y value. + * @param Z is the Z value. 
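The makeXYZSolid factory above treats any axis whose extent falls below Vector.MINIMUM_RESOLUTION as degenerate, and the eight combinations of degenerate axes select among the eight solid implementations. A standalone sketch of that dispatch rule; the class names are the real ones from the diff, while the rest of the scaffolding is illustrative:

public class SolidDispatchSketch {
  // Mirrors Vector.MINIMUM_RESOLUTION.
  static final double MINIMUM_RESOLUTION = 1.0e-12;

  static String pick(double minX, double maxX, double minY, double maxY,
                     double minZ, double maxZ) {
    boolean dX = Math.abs(maxX - minX) < MINIMUM_RESOLUTION;
    boolean dY = Math.abs(maxY - minY) < MINIMUM_RESOLUTION;
    boolean dZ = Math.abs(maxZ - minZ) < MINIMUM_RESOLUTION;
    if (dX && dY && dZ) return "dXdYdZSolid"; // degenerate in all three: a point
    if (dX && dY) return "dXdYZSolid";        // a segment along z
    if (dX && dZ) return "dXYdZSolid";
    if (dX) return "dXYZSolid";               // a slice at fixed x
    if (dY && dZ) return "XdYdZSolid";
    if (dY) return "XdYZSolid";
    if (dZ) return "XYdZSolid";
    return "StandardXYZSolid";                // the fully general case
  }

  public static void main(String[] args) {
    System.out.println(pick(-1, 1, -1, 1, 0.25, 0.25)); // XYdZSolid
  }
}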
*/ - public XYdZSolid(final PlanetModel planetModel, - final double minX, - final double maxX, - final double minY, - final double maxY, - final double Z) { + public XYdZSolid( + final PlanetModel planetModel, + final double minX, + final double maxX, + final double minY, + final double maxY, + final double Z) { super(planetModel); // Argument checking if (maxX - minX < Vector.MINIMUM_RESOLUTION) @@ -90,32 +92,38 @@ class XYdZSolid extends BaseXYZSolid { final double worldMinZ = planetModel.getMinimumZValue(); final double worldMaxZ = planetModel.getMaximumZValue(); - + // Construct the planes - minXPlane = new SidedPlane(maxX,0.0,0.0,xUnitVector,-minX); - maxXPlane = new SidedPlane(minX,0.0,0.0,xUnitVector,-maxX); - minYPlane = new SidedPlane(0.0,maxY,0.0,yUnitVector,-minY); - maxYPlane = new SidedPlane(0.0,minY,0.0,yUnitVector,-maxY); - zPlane = new Plane(zUnitVector,-Z); - + minXPlane = new SidedPlane(maxX, 0.0, 0.0, xUnitVector, -minX); + maxXPlane = new SidedPlane(minX, 0.0, 0.0, xUnitVector, -maxX); + minYPlane = new SidedPlane(0.0, maxY, 0.0, yUnitVector, -minY); + maxYPlane = new SidedPlane(0.0, minY, 0.0, yUnitVector, -maxY); + zPlane = new Plane(zUnitVector, -Z); + // We need at least one point on the planet surface for each manifestation of the shape. // There can be up to 2 (on opposite sides of the world). But we have to go through // 4 combinations of adjacent planes in order to find out if any have 2 intersection solution. - // Typically, this requires 4 square root operations. - final GeoPoint[] minXZ = minXPlane.findIntersections(planetModel,zPlane,maxXPlane,minYPlane,maxYPlane); - final GeoPoint[] maxXZ = maxXPlane.findIntersections(planetModel,zPlane,minXPlane,minYPlane,maxYPlane); - final GeoPoint[] minYZ = minYPlane.findIntersections(planetModel,zPlane,maxYPlane,minXPlane,maxXPlane); - final GeoPoint[] maxYZ = maxYPlane.findIntersections(planetModel,zPlane,minYPlane,minXPlane,maxXPlane); - + // Typically, this requires 4 square root operations. + final GeoPoint[] minXZ = + minXPlane.findIntersections(planetModel, zPlane, maxXPlane, minYPlane, maxYPlane); + final GeoPoint[] maxXZ = + maxXPlane.findIntersections(planetModel, zPlane, minXPlane, minYPlane, maxYPlane); + final GeoPoint[] minYZ = + minYPlane.findIntersections(planetModel, zPlane, maxYPlane, minXPlane, maxXPlane); + final GeoPoint[] maxYZ = + maxYPlane.findIntersections(planetModel, zPlane, minYPlane, minXPlane, maxXPlane); + notableZPoints = glueTogether(minXZ, maxXZ, minYZ, maxYZ); // Now, compute the edge points. - // This is the trickiest part of setting up an XYZSolid. We've computed intersections already, so - // we'll start there. We know that at most there will be two disconnected shapes on the planet surface. - // But there's also a case where exactly one plane slices through the world, and none of the bounding plane - // intersections do. Thus, if we don't find any of the edge intersection cases, we have to look for that last case. - - // If we still haven't encountered anything, we need to look at single-plane/world intersections. + // This is the trickiest part of setting up an XYZSolid. We've computed intersections already, + // so we'll start there. We know that at most there will be two disconnected shapes on the + // planet surface. But there's also a case where exactly one plane slices through the world, + // and none of the bounding plane intersections do. Thus, if we don't find any of the edge + // intersection cases, we have to look for that last case. 
+ + // If we still haven't encountered anything, we need to look at single-plane/world + // intersections. // We detect these by looking at the world model and noting its x, y, and z bounds. // The cases we are looking for are when the four corner points for any given // plane are all outside of the world, AND that plane intersects the world. @@ -126,21 +134,28 @@ class XYdZSolid extends BaseXYZSolid { final boolean maxXmaxYZ = planetModel.pointOutside(maxX, maxY, Z); final GeoPoint[] zEdges; - if (Z - worldMinZ >= -Vector.MINIMUM_RESOLUTION && Z - worldMaxZ <= Vector.MINIMUM_RESOLUTION && - minX < 0.0 && maxX > 0.0 && minY < 0.0 && maxY > 0.0 && - minXminYZ && minXmaxYZ && maxXminYZ && maxXmaxYZ) { + if (Z - worldMinZ >= -Vector.MINIMUM_RESOLUTION + && Z - worldMaxZ <= Vector.MINIMUM_RESOLUTION + && minX < 0.0 + && maxX > 0.0 + && minY < 0.0 + && maxY > 0.0 + && minXminYZ + && minXmaxYZ + && maxXminYZ + && maxXmaxYZ) { // Find any point on the minZ plane that intersects the world // First construct a perpendicular plane that will allow us to find a sample point. // This plane is vertical and goes through the points (0,0,0) and (1,0,0) // Then use it to compute a sample point. final GeoPoint intPoint = zPlane.getSampleIntersectionPoint(planetModel, xVerticalPlane); if (intPoint != null) { - zEdges = new GeoPoint[]{intPoint}; + zEdges = new GeoPoint[] {intPoint}; } else { zEdges = EMPTY_POINTS; } } else { - zEdges= EMPTY_POINTS; + zEdges = EMPTY_POINTS; } this.edgePoints = glueTogether(minXZ, maxXZ, minYZ, maxYZ, zEdges); @@ -148,16 +163,19 @@ class XYdZSolid extends BaseXYZSolid { /** * Constructor for deserialization. + * * @param planetModel is the planet model. * @param inputStream is the input stream. */ - public XYdZSolid(final PlanetModel planetModel, final InputStream inputStream) throws IOException { - this(planetModel, - SerializableObject.readDouble(inputStream), - SerializableObject.readDouble(inputStream), - SerializableObject.readDouble(inputStream), - SerializableObject.readDouble(inputStream), - SerializableObject.readDouble(inputStream)); + public XYdZSolid(final PlanetModel planetModel, final InputStream inputStream) + throws IOException { + this( + planetModel, + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream)); } @Override @@ -173,23 +191,23 @@ class XYdZSolid extends BaseXYZSolid { protected GeoPoint[] getEdgePoints() { return edgePoints; } - + @Override public boolean isWithin(final double x, final double y, final double z) { - return minXPlane.isWithin(x, y, z) && - maxXPlane.isWithin(x, y, z) && - minYPlane.isWithin(x, y, z) && - maxYPlane.isWithin(x, y, z) && - zPlane.evaluateIsZero(x, y, z); + return minXPlane.isWithin(x, y, z) + && maxXPlane.isWithin(x, y, z) + && minYPlane.isWithin(x, y, z) + && maxYPlane.isWithin(x, y, z) + && zPlane.evaluateIsZero(x, y, z); } @Override public int getRelationship(final GeoShape path) { - - //System.err.println(this+" getrelationship with "+path); + + // System.err.println(this + " getrelationship with " + path); final int insideRectangle = isShapeInsideArea(path); if (insideRectangle == SOME_INSIDE) { - //System.err.println(" some inside"); + // System.err.println(" some inside"); return OVERLAPS; } @@ -200,58 +218,69 @@ class XYdZSolid extends BaseXYZSolid { } if (insideRectangle == ALL_INSIDE && insideShape == ALL_INSIDE) { - //System.err.println(" 
inside of each other"); + // System.err.println(" inside of each other"); return OVERLAPS; } if (path.intersects(zPlane, notableZPoints, minXPlane, maxXPlane, minYPlane, maxYPlane)) { - //System.err.println(" edges intersect"); + // System.err.println(" edges intersect"); return OVERLAPS; } if (insideRectangle == ALL_INSIDE) { - //System.err.println(" shape inside rectangle"); + // System.err.println(" shape inside rectangle"); return WITHIN; } if (insideShape == ALL_INSIDE) { - //System.err.println(" shape contains rectangle"); + // System.err.println(" shape contains rectangle"); return CONTAINS; } - //System.err.println(" disjoint"); + // System.err.println(" disjoint"); return DISJOINT; } @Override public boolean equals(Object o) { - if (!(o instanceof XYdZSolid)) + if (!(o instanceof XYdZSolid)) { return false; + } XYdZSolid other = (XYdZSolid) o; if (!super.equals(other)) { return false; } - return other.minXPlane.equals(minXPlane) && - other.maxXPlane.equals(maxXPlane) && - other.minYPlane.equals(minYPlane) && - other.maxYPlane.equals(maxYPlane) && - other.zPlane.equals(zPlane); + return other.minXPlane.equals(minXPlane) + && other.maxXPlane.equals(maxXPlane) + && other.minYPlane.equals(minYPlane) + && other.maxYPlane.equals(maxYPlane) + && other.zPlane.equals(zPlane); } @Override public int hashCode() { int result = super.hashCode(); - result = 31 * result + minXPlane.hashCode(); - result = 31 * result + maxXPlane.hashCode(); - result = 31 * result + minYPlane.hashCode(); - result = 31 * result + maxYPlane.hashCode(); - result = 31 * result + zPlane.hashCode(); + result = 31 * result + minXPlane.hashCode(); + result = 31 * result + maxXPlane.hashCode(); + result = 31 * result + minYPlane.hashCode(); + result = 31 * result + maxYPlane.hashCode(); + result = 31 * result + zPlane.hashCode(); return result; } @Override public String toString() { - return "XYdZSolid: {planetmodel="+planetModel+", minXplane="+minXPlane+", maxXplane="+maxXPlane+", minYplane="+minYPlane+", maxYplane="+maxYPlane+", zplane="+zPlane+"}"; + return "XYdZSolid: {planetmodel=" + + planetModel + + ", minXplane=" + + minXPlane + + ", maxXplane=" + + maxXPlane + + ", minYplane=" + + minYPlane + + ", maxYplane=" + + maxYPlane + + ", zplane=" + + zPlane + + "}"; } - } - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/XdYZSolid.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/XdYZSolid.java index 3c11b4b7e52..b24aaf2a790 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/XdYZSolid.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/XdYZSolid.java @@ -16,9 +16,9 @@ */ package org.apache.lucene.spatial3d.geom; +import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.io.IOException; /** * 3D rectangle, bounded on six sides by X,Y,Z limits, degenerate in Y @@ -48,11 +48,12 @@ class XdYZSolid extends BaseXYZSolid { protected final SidedPlane minZPlane; /** Max-Z plane */ protected final SidedPlane maxZPlane; - - /** These are the edge points of the shape, which are defined to be at least one point on - * each surface area boundary. In the case of a solid, this includes points which represent - * the intersection of XYZ bounding planes and the planet, as well as points representing - * the intersection of single bounding planes with the planet itself. + + /** + * These are the edge points of the shape, which are defined to be at least one point on each + * surface area boundary. 
In the case of a solid, this includes points which represent the + * intersection of XYZ bounding planes and the planet, as well as points representing the + * intersection of single bounding planes with the planet itself. */ protected final GeoPoint[] edgePoints; @@ -62,19 +63,20 @@ class XdYZSolid extends BaseXYZSolid { /** * Sole constructor * - *@param planetModel is the planet model. - *@param minX is the minimum X value. - *@param maxX is the maximum X value. - *@param Y is the Y value. - *@param minZ is the minimum Z value. - *@param maxZ is the maximum Z value. + * @param planetModel is the planet model. + * @param minX is the minimum X value. + * @param maxX is the maximum X value. + * @param Y is the Y value. + * @param minZ is the minimum Z value. + * @param maxZ is the maximum Z value. */ - public XdYZSolid(final PlanetModel planetModel, - final double minX, - final double maxX, - final double Y, - final double minZ, - final double maxZ) { + public XdYZSolid( + final PlanetModel planetModel, + final double minX, + final double maxX, + final double Y, + final double minZ, + final double maxZ) { super(planetModel); // Argument checking if (maxX - minX < Vector.MINIMUM_RESOLUTION) @@ -90,31 +92,36 @@ class XdYZSolid extends BaseXYZSolid { final double worldMinY = planetModel.getMinimumYValue(); final double worldMaxY = planetModel.getMaximumYValue(); - + // Construct the planes - minXPlane = new SidedPlane(maxX,0.0,0.0,xUnitVector,-minX); - maxXPlane = new SidedPlane(minX,0.0,0.0,xUnitVector,-maxX); - yPlane = new Plane(yUnitVector,-Y); - minZPlane = new SidedPlane(0.0,0.0,maxZ,zUnitVector,-minZ); - maxZPlane = new SidedPlane(0.0,0.0,minZ,zUnitVector,-maxZ); - + minXPlane = new SidedPlane(maxX, 0.0, 0.0, xUnitVector, -minX); + maxXPlane = new SidedPlane(minX, 0.0, 0.0, xUnitVector, -maxX); + yPlane = new Plane(yUnitVector, -Y); + minZPlane = new SidedPlane(0.0, 0.0, maxZ, zUnitVector, -minZ); + maxZPlane = new SidedPlane(0.0, 0.0, minZ, zUnitVector, -maxZ); + // We need at least one point on the planet surface for each manifestation of the shape. // There can be up to 2 (on opposite sides of the world). But we have to go through // 4 combinations of adjacent planes in order to find out if any have 2 intersection solution. - // Typically, this requires 4 square root operations. - final GeoPoint[] minXY = minXPlane.findIntersections(planetModel,yPlane,maxXPlane,minZPlane,maxZPlane); - final GeoPoint[] maxXY = maxXPlane.findIntersections(planetModel,yPlane,minXPlane,minZPlane,maxZPlane); - final GeoPoint[] YminZ = yPlane.findIntersections(planetModel,minZPlane,maxZPlane,minXPlane,maxXPlane); - final GeoPoint[] YmaxZ = yPlane.findIntersections(planetModel,maxZPlane,minZPlane,minXPlane,maxXPlane); - + // Typically, this requires 4 square root operations. + final GeoPoint[] minXY = + minXPlane.findIntersections(planetModel, yPlane, maxXPlane, minZPlane, maxZPlane); + final GeoPoint[] maxXY = + maxXPlane.findIntersections(planetModel, yPlane, minXPlane, minZPlane, maxZPlane); + final GeoPoint[] YminZ = + yPlane.findIntersections(planetModel, minZPlane, maxZPlane, minXPlane, maxXPlane); + final GeoPoint[] YmaxZ = + yPlane.findIntersections(planetModel, maxZPlane, minZPlane, minXPlane, maxXPlane); + notableYPoints = glueTogether(minXY, maxXY, YminZ, YmaxZ); // Now, compute the edge points. - // This is the trickiest part of setting up an XYZSolid. We've computed intersections already, so - // we'll start there. 
We know that at most there will be two disconnected shapes on the planet surface. - // But there's also a case where exactly one plane slices through the world, and none of the bounding plane - // intersections do. Thus, if we don't find any of the edge intersection cases, we have to look for that last case. - + // This is the trickiest part of setting up an XYZSolid. We've computed intersections already, + // so we'll start there. We know that at most there will be two disconnected shapes on the + // planet surface. But there's also a case where exactly one plane slices through the world, + // and none of the bounding plane intersections do. Thus, if we don't find any of the edge + // intersection cases, we have to look for that last case. + // We need to look at single-plane/world intersections. // We detect these by looking at the world model and noting its x, y, and z bounds. // The cases we are looking for are when the four corner points for any given @@ -126,16 +133,23 @@ class XdYZSolid extends BaseXYZSolid { final boolean maxXYmaxZ = planetModel.pointOutside(maxX, Y, maxZ); final GeoPoint[] yEdges; - if (Y - worldMinY >= -Vector.MINIMUM_RESOLUTION && Y - worldMaxY <= Vector.MINIMUM_RESOLUTION && - minX < 0.0 && maxX > 0.0 && minZ < 0.0 && maxZ > 0.0 && - minXYminZ && minXYmaxZ && maxXYminZ && maxXYmaxZ) { + if (Y - worldMinY >= -Vector.MINIMUM_RESOLUTION + && Y - worldMaxY <= Vector.MINIMUM_RESOLUTION + && minX < 0.0 + && maxX > 0.0 + && minZ < 0.0 + && maxZ > 0.0 + && minXYminZ + && minXYmaxZ + && maxXYminZ + && maxXYmaxZ) { // Find any point on the minY plane that intersects the world // First construct a perpendicular plane that will allow us to find a sample point. // This plane is vertical and goes through the points (0,0,0) and (0,1,0) // Then use it to compute a sample point. final GeoPoint intPoint = yPlane.getSampleIntersectionPoint(planetModel, yVerticalPlane); if (intPoint != null) { - yEdges = new GeoPoint[]{intPoint}; + yEdges = new GeoPoint[] {intPoint}; } else { yEdges = EMPTY_POINTS; } @@ -148,16 +162,19 @@ class XdYZSolid extends BaseXYZSolid { /** * Constructor for deserialization. + * * @param planetModel is the planet model. * @param inputStream is the input stream. 
*/ - public XdYZSolid(final PlanetModel planetModel, final InputStream inputStream) throws IOException { - this(planetModel, - SerializableObject.readDouble(inputStream), - SerializableObject.readDouble(inputStream), - SerializableObject.readDouble(inputStream), - SerializableObject.readDouble(inputStream), - SerializableObject.readDouble(inputStream)); + public XdYZSolid(final PlanetModel planetModel, final InputStream inputStream) + throws IOException { + this( + planetModel, + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream)); } @Override @@ -173,22 +190,22 @@ class XdYZSolid extends BaseXYZSolid { protected GeoPoint[] getEdgePoints() { return edgePoints; } - + @Override public boolean isWithin(final double x, final double y, final double z) { - return minXPlane.isWithin(x, y, z) && - maxXPlane.isWithin(x, y, z) && - yPlane.evaluateIsZero(x, y, z) && - minZPlane.isWithin(x, y, z) && - maxZPlane.isWithin(x, y, z); + return minXPlane.isWithin(x, y, z) + && maxXPlane.isWithin(x, y, z) + && yPlane.evaluateIsZero(x, y, z) + && minZPlane.isWithin(x, y, z) + && maxZPlane.isWithin(x, y, z); } @Override public int getRelationship(final GeoShape path) { - //System.err.println(this+" getrelationship with "+path); + // System.err.println(this + " getrelationship with " + path); final int insideRectangle = isShapeInsideArea(path); if (insideRectangle == SOME_INSIDE) { - //System.err.println(" some inside"); + // System.err.println(" some inside"); return OVERLAPS; } @@ -199,58 +216,69 @@ class XdYZSolid extends BaseXYZSolid { } if (insideRectangle == ALL_INSIDE && insideShape == ALL_INSIDE) { - //System.err.println(" inside of each other"); + // System.err.println(" inside of each other"); return OVERLAPS; } if (path.intersects(yPlane, notableYPoints, minXPlane, maxXPlane, minZPlane, maxZPlane)) { - //System.err.println(" edges intersect"); + // System.err.println(" edges intersect"); return OVERLAPS; } if (insideRectangle == ALL_INSIDE) { - //System.err.println(" shape inside rectangle"); + // System.err.println(" shape inside rectangle"); return WITHIN; } if (insideShape == ALL_INSIDE) { - //System.err.println(" shape contains rectangle"); + // System.err.println(" shape contains rectangle"); return CONTAINS; } - //System.err.println(" disjoint"); + // System.err.println(" disjoint"); return DISJOINT; } @Override public boolean equals(Object o) { - if (!(o instanceof XdYZSolid)) + if (!(o instanceof XdYZSolid)) { return false; + } XdYZSolid other = (XdYZSolid) o; if (!super.equals(other)) { return false; } - return other.minXPlane.equals(minXPlane) && - other.maxXPlane.equals(maxXPlane) && - other.yPlane.equals(yPlane) && - other.minZPlane.equals(minZPlane) && - other.maxZPlane.equals(maxZPlane); + return other.minXPlane.equals(minXPlane) + && other.maxXPlane.equals(maxXPlane) + && other.yPlane.equals(yPlane) + && other.minZPlane.equals(minZPlane) + && other.maxZPlane.equals(maxZPlane); } @Override public int hashCode() { int result = super.hashCode(); - result = 31 * result + minXPlane.hashCode(); - result = 31 * result + maxXPlane.hashCode(); - result = 31 * result + yPlane.hashCode(); - result = 31 * result + minZPlane.hashCode(); - result = 31 * result + maxZPlane.hashCode(); + result = 31 * result + minXPlane.hashCode(); + result = 31 * result + maxXPlane.hashCode(); + result = 31 * result + yPlane.hashCode(); + 
result = 31 * result + minZPlane.hashCode(); + result = 31 * result + maxZPlane.hashCode(); return result; } @Override public String toString() { - return "XdYZSolid: {planetmodel="+planetModel+", minXplane="+minXPlane+", maxXplane="+maxXPlane+", yplane="+yPlane+", minZplane="+minZPlane+", maxZplane="+maxZPlane+"}"; + return "XdYZSolid: {planetmodel=" + + planetModel + + ", minXplane=" + + minXPlane + + ", maxXplane=" + + maxXPlane + + ", yplane=" + + yPlane + + ", minZplane=" + + minZPlane + + ", maxZplane=" + + maxZPlane + + "}"; } - } - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/XdYdZSolid.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/XdYdZSolid.java index 7f0d9d154ed..8e17742524b 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/XdYdZSolid.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/XdYdZSolid.java @@ -16,14 +16,13 @@ */ package org.apache.lucene.spatial3d.geom; +import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.io.IOException; /** - * 3D rectangle, bounded on six sides by X,Y,Z limits, degenerate in Y and Z. - * This figure, in fact, represents either zero, one, or two points, so the - * actual data stored is minimal. + * 3D rectangle, bounded on six sides by X,Y,Z limits, degenerate in Y and Z. This figure, in fact, + * represents either zero, one, or two points, so the actual data stored is minimal. * * @lucene.internal */ @@ -40,21 +39,22 @@ class XdYdZSolid extends BaseXYZSolid { /** The points in this figure on the planet surface; also doubles for edge points */ protected final GeoPoint[] surfacePoints; - + /** * Sole constructor * - *@param planetModel is the planet model. - *@param minX is the minimum X value. - *@param maxX is the maximum X value. - *@param Y is the Y value. - *@param Z is the Z value. + * @param planetModel is the planet model. + * @param minX is the minimum X value. + * @param maxX is the maximum X value. + * @param Y is the Y value. + * @param Z is the Z value. */ - public XdYdZSolid(final PlanetModel planetModel, - final double minX, - final double maxX, - final double Y, - final double Z) { + public XdYdZSolid( + final PlanetModel planetModel, + final double minX, + final double maxX, + final double Y, + final double Z) { super(planetModel); // Argument checking if (maxX - minX < Vector.MINIMUM_RESOLUTION) @@ -66,24 +66,27 @@ class XdYdZSolid extends BaseXYZSolid { this.Z = Z; // Build the planes and intersect them. - final Plane yPlane = new Plane(yUnitVector,-Y); - final Plane zPlane = new Plane(zUnitVector,-Z); - final SidedPlane minXPlane = new SidedPlane(maxX,0.0,0.0,xUnitVector,-minX); - final SidedPlane maxXPlane = new SidedPlane(minX,0.0,0.0,xUnitVector,-maxX); - surfacePoints = yPlane.findIntersections(planetModel,zPlane,minXPlane,maxXPlane); + final Plane yPlane = new Plane(yUnitVector, -Y); + final Plane zPlane = new Plane(zUnitVector, -Z); + final SidedPlane minXPlane = new SidedPlane(maxX, 0.0, 0.0, xUnitVector, -minX); + final SidedPlane maxXPlane = new SidedPlane(minX, 0.0, 0.0, xUnitVector, -maxX); + surfacePoints = yPlane.findIntersections(planetModel, zPlane, minXPlane, maxXPlane); } /** * Constructor for deserialization. + * * @param planetModel is the planet model. * @param inputStream is the input stream. 
*/ - public XdYdZSolid(final PlanetModel planetModel, final InputStream inputStream) throws IOException { - this(planetModel, - SerializableObject.readDouble(inputStream), - SerializableObject.readDouble(inputStream), - SerializableObject.readDouble(inputStream), - SerializableObject.readDouble(inputStream)); + public XdYdZSolid(final PlanetModel planetModel, final InputStream inputStream) + throws IOException { + this( + planetModel, + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream)); } @Override @@ -98,22 +101,21 @@ class XdYdZSolid extends BaseXYZSolid { protected GeoPoint[] getEdgePoints() { return surfacePoints; } - + @Override public boolean isWithin(final double x, final double y, final double z) { for (final GeoPoint p : surfacePoints) { - if (p.isIdentical(x,y,z)) - return true; + if (p.isIdentical(x, y, z)) return true; } return false; } @Override public int getRelationship(final GeoShape path) { - //System.err.println(this+" getrelationship with "+path); + // System.err.println(this+" getrelationship with " + path); final int insideRectangle = isShapeInsideArea(path); if (insideRectangle == SOME_INSIDE) { - //System.err.println(" some inside"); + // System.err.println(" some inside"); return OVERLAPS; } @@ -124,33 +126,35 @@ class XdYdZSolid extends BaseXYZSolid { } if (insideRectangle == ALL_INSIDE && insideShape == ALL_INSIDE) { - //System.err.println(" inside of each other"); + // System.err.println(" inside of each other"); return OVERLAPS; } if (insideRectangle == ALL_INSIDE) { return WITHIN; } - + if (insideShape == ALL_INSIDE) { - //System.err.println(" shape contains rectangle"); + // System.err.println(" shape contains rectangle"); return CONTAINS; } - //System.err.println(" disjoint"); + // System.err.println(" disjoint"); return DISJOINT; } @Override public boolean equals(Object o) { - if (!(o instanceof XdYdZSolid)) + if (!(o instanceof XdYdZSolid)) { return false; + } XdYdZSolid other = (XdYdZSolid) o; - if (!super.equals(other) || surfacePoints.length != other.surfacePoints.length ) { + if (!super.equals(other) || surfacePoints.length != other.surfacePoints.length) { return false; } for (int i = 0; i < surfacePoints.length; i++) { - if (!surfacePoints[i].equals(other.surfacePoints[i])) + if (!surfacePoints[i].equals(other.surfacePoints[i])) { return false; + } } return true; } @@ -159,7 +163,7 @@ class XdYdZSolid extends BaseXYZSolid { public int hashCode() { int result = super.hashCode(); for (final GeoPoint p : surfacePoints) { - result = 31 * result + p.hashCode(); + result = 31 * result + p.hashCode(); } return result; } @@ -170,8 +174,6 @@ class XdYdZSolid extends BaseXYZSolid { for (final GeoPoint p : surfacePoints) { sb.append(" ").append(p).append(" "); } - return "XdYdZSolid: {planetmodel="+planetModel+", "+sb.toString()+"}"; + return "XdYdZSolid: {planetmodel=" + planetModel + ", " + sb.toString() + "}"; } - } - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/dXYZSolid.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/dXYZSolid.java index e4f5adaca4e..dbc5af474c8 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/dXYZSolid.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/dXYZSolid.java @@ -16,9 +16,9 @@ */ package org.apache.lucene.spatial3d.geom; +import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import 
java.io.IOException; /** * 3D rectangle, bounded on six sides by X,Y,Z limits, degenerate in X. @@ -48,11 +48,12 @@ class dXYZSolid extends BaseXYZSolid { protected final SidedPlane minZPlane; /** Max-Z plane */ protected final SidedPlane maxZPlane; - - /** These are the edge points of the shape, which are defined to be at least one point on - * each surface area boundary. In the case of a solid, this includes points which represent - * the intersection of XYZ bounding planes and the planet, as well as points representing - * the intersection of single bounding planes with the planet itself. + + /** + * These are the edge points of the shape, which are defined to be at least one point on each + * surface area boundary. In the case of a solid, this includes points which represent the + * intersection of XYZ bounding planes and the planet, as well as points representing the + * intersection of single bounding planes with the planet itself. */ protected final GeoPoint[] edgePoints; @@ -62,19 +63,20 @@ class dXYZSolid extends BaseXYZSolid { /** * Sole constructor * - *@param planetModel is the planet model. - *@param X is the X value. - *@param minY is the minimum Y value. - *@param maxY is the maximum Y value. - *@param minZ is the minimum Z value. - *@param maxZ is the maximum Z value. + * @param planetModel is the planet model. + * @param X is the X value. + * @param minY is the minimum Y value. + * @param maxY is the maximum Y value. + * @param minZ is the minimum Z value. + * @param maxZ is the maximum Z value. */ - public dXYZSolid(final PlanetModel planetModel, - final double X, - final double minY, - final double maxY, - final double minZ, - final double maxZ) { + public dXYZSolid( + final PlanetModel planetModel, + final double X, + final double minY, + final double maxY, + final double minZ, + final double maxZ) { super(planetModel); // Argument checking if (maxY - minY < Vector.MINIMUM_RESOLUTION) @@ -87,37 +89,46 @@ class dXYZSolid extends BaseXYZSolid { this.maxY = maxY; this.minZ = minZ; this.maxZ = maxZ; - + final double worldMinX = planetModel.getMinimumXValue(); final double worldMaxX = planetModel.getMaximumXValue(); - + // Construct the planes - xPlane = new Plane(xUnitVector,-X); - minYPlane = new SidedPlane(0.0,maxY,0.0,yUnitVector,-minY); - maxYPlane = new SidedPlane(0.0,minY,0.0,yUnitVector,-maxY); - minZPlane = new SidedPlane(0.0,0.0,maxZ,zUnitVector,-minZ); - maxZPlane = new SidedPlane(0.0,0.0,minZ,zUnitVector,-maxZ); - + xPlane = new Plane(xUnitVector, -X); + minYPlane = new SidedPlane(0.0, maxY, 0.0, yUnitVector, -minY); + maxYPlane = new SidedPlane(0.0, minY, 0.0, yUnitVector, -maxY); + minZPlane = new SidedPlane(0.0, 0.0, maxZ, zUnitVector, -minZ); + maxZPlane = new SidedPlane(0.0, 0.0, minZ, zUnitVector, -maxZ); + // We need at least one point on the planet surface for each manifestation of the shape. // There can be up to 2 (on opposite sides of the world). But we have to go through // 4 combinations of adjacent planes in order to find out if any have 2 intersection solution. - // Typically, this requires 4 square root operations. 
-    final GeoPoint[] XminY = xPlane.findIntersections(planetModel,minYPlane,maxYPlane,minZPlane,maxZPlane);
-    final GeoPoint[] XmaxY = xPlane.findIntersections(planetModel,maxYPlane,minYPlane,minZPlane,maxZPlane);
-    final GeoPoint[] XminZ = xPlane.findIntersections(planetModel,minZPlane,maxZPlane,minYPlane,maxYPlane);
-    final GeoPoint[] XmaxZ = xPlane.findIntersections(planetModel,maxZPlane,minZPlane,minYPlane,maxYPlane);
+    // Typically, this requires 4 square root operations.
+    final GeoPoint[] XminY =
+        xPlane.findIntersections(planetModel, minYPlane, maxYPlane, minZPlane, maxZPlane);
+    final GeoPoint[] XmaxY =
+        xPlane.findIntersections(planetModel, maxYPlane, minYPlane, minZPlane, maxZPlane);
+    final GeoPoint[] XminZ =
+        xPlane.findIntersections(planetModel, minZPlane, maxZPlane, minYPlane, maxYPlane);
+    final GeoPoint[] XmaxZ =
+        xPlane.findIntersections(planetModel, maxZPlane, minZPlane, minYPlane, maxYPlane);
     notableXPoints = glueTogether(XminY, XmaxY, XminZ, XmaxZ);

     // Now, compute the edge points.
-    // This is the trickiest part of setting up an XYZSolid. We've computed intersections already, so
-    // we'll start there. We know that at most there will be two disconnected shapes on the planet surface.
-    // But there's also a case where exactly one plane slices through the world, and none of the bounding plane
-    // intersections do. Thus, if we don't find any of the edge intersection cases, we have to look for that last case.
-
+    // This is the trickiest part of setting up an XYZSolid. We've computed intersections already,
+    // so we'll start there. We know that at most there will be two disconnected shapes on the
+    // planet surface. But there's also a case where exactly one plane slices through the world,
+    // and none of the bounding plane intersections do. Thus, if we don't find any of the edge
+    // intersection cases, we have to look for that last case.
+
     // We need to look at single-plane/world intersections.
     // We detect these by looking at the world model and noting its x, y, and z bounds.
-    // For the single-dimension degenerate case, there's really only one plane that can possibly intersect the world.
+    // For the single-dimension degenerate case, there's really only one plane that can possibly
+    // intersect the world.
     // The cases we are looking for are when the four corner points for any given
     // plane are all outside of the world, AND that plane intersects the world.
     // There are four corner points all told; we must evaluate these WRT the planet surface.
@@ -127,16 +138,23 @@ class dXYZSolid extends BaseXYZSolid {
     final boolean XmaxYmaxZ = planetModel.pointOutside(X, maxY, maxZ);

     final GeoPoint[] xEdges;
-    if (X - worldMinX >= -Vector.MINIMUM_RESOLUTION && X - worldMaxX <= Vector.MINIMUM_RESOLUTION &&
-      minY < 0.0 && maxY > 0.0 && minZ < 0.0 && maxZ > 0.0 &&
-      XminYminZ && XminYmaxZ && XmaxYminZ && XmaxYmaxZ) {
+    if (X - worldMinX >= -Vector.MINIMUM_RESOLUTION
+        && X - worldMaxX <= Vector.MINIMUM_RESOLUTION
+        && minY < 0.0
+        && maxY > 0.0
+        && minZ < 0.0
+        && maxZ > 0.0
+        && XminYminZ
+        && XminYmaxZ
+        && XmaxYminZ
+        && XmaxYmaxZ) {
       // Find any point on the X plane that intersects the world
       // First construct a perpendicular plane that will allow us to find a sample point.
       // This plane is vertical and goes through the points (0,0,0) and (1,0,0)
       // Then use it to compute a sample point.
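The sample-point trick described in the comment above is easiest to see on a unit sphere. The sketch below is illustrative only (a hypothetical standalone class, not Lucene's PlanetModel API): the degenerate plane x = X and a perpendicular vertical plane jointly pin down one surface point whenever the plane actually cuts the world, which is the job getSampleIntersectionPoint does against the real ellipsoid.

    // A minimal sketch, assuming a unit sphere x^2 + y^2 + z^2 = 1 rather than a real
    // PlanetModel; the class and method names are hypothetical.
    public final class SamplePointSketch {
      /** Returns a point on the unit sphere lying on the plane x = X, or null if the plane misses it. */
      static double[] samplePoint(double X) {
        // Intersect the plane x = X with the perpendicular vertical plane y = 0:
        // what remains of the sphere equation is z^2 = 1 - X^2.
        final double zSquared = 1.0 - X * X;
        if (zSquared < 0.0) {
          return null; // the plane does not intersect the world at all
        }
        return new double[] {X, 0.0, Math.sqrt(zSquared)};
      }

      public static void main(String[] args) {
        // The plane x = 0.5 cuts the unit sphere; the sample point is (0.5, 0, ~0.866).
        final double[] p = samplePoint(0.5);
        System.out.println(p[0] + ", " + p[1] + ", " + p[2]);
      }
    }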
      final GeoPoint intPoint = xPlane.getSampleIntersectionPoint(planetModel, xVerticalPlane);
      if (intPoint != null) {
-        xEdges = new GeoPoint[]{intPoint};
+        xEdges = new GeoPoint[] {intPoint};
      } else {
        xEdges = EMPTY_POINTS;
      }
@@ -144,21 +162,24 @@ class dXYZSolid extends BaseXYZSolid {
       xEdges = EMPTY_POINTS;
     }

-    this.edgePoints = glueTogether(XminY,XmaxY,XminZ,XmaxZ,xEdges);
+    this.edgePoints = glueTogether(XminY, XmaxY, XminZ, XmaxZ, xEdges);
   }

   /**
    * Constructor for deserialization.
+   *
    * @param planetModel is the planet model.
    * @param inputStream is the input stream.
    */
-  public dXYZSolid(final PlanetModel planetModel, final InputStream inputStream) throws IOException {
-    this(planetModel,
-      SerializableObject.readDouble(inputStream),
-      SerializableObject.readDouble(inputStream),
-      SerializableObject.readDouble(inputStream),
-      SerializableObject.readDouble(inputStream),
-      SerializableObject.readDouble(inputStream));
+  public dXYZSolid(final PlanetModel planetModel, final InputStream inputStream)
+      throws IOException {
+    this(
+        planetModel,
+        SerializableObject.readDouble(inputStream),
+        SerializableObject.readDouble(inputStream),
+        SerializableObject.readDouble(inputStream),
+        SerializableObject.readDouble(inputStream),
+        SerializableObject.readDouble(inputStream));
   }

   @Override
@@ -174,87 +195,99 @@ class dXYZSolid extends BaseXYZSolid {
   protected GeoPoint[] getEdgePoints() {
     return edgePoints;
   }
-
+
   @Override
   public boolean isWithin(final double x, final double y, final double z) {
-    return xPlane.evaluateIsZero(x, y, z) &&
-      minYPlane.isWithin(x, y, z) &&
-      maxYPlane.isWithin(x, y, z) &&
-      minZPlane.isWithin(x, y, z) &&
-      maxZPlane.isWithin(x, y, z);
+    return xPlane.evaluateIsZero(x, y, z)
+        && minYPlane.isWithin(x, y, z)
+        && maxYPlane.isWithin(x, y, z)
+        && minZPlane.isWithin(x, y, z)
+        && maxZPlane.isWithin(x, y, z);
   }

   @Override
   public int getRelationship(final GeoShape path) {
-    //System.err.println(this+" getrelationship with "+path);
+    // System.err.println(this + " getRelationship with " + path);
    final int insideRectangle = isShapeInsideArea(path);
    if (insideRectangle == SOME_INSIDE) {
-      //System.err.println(" some shape points inside area");
+      // System.err.println(" some shape points inside area");
      return OVERLAPS;
    }

    // Figure out if the entire XYZArea is contained by the shape.
    final int insideShape = isAreaInsideShape(path);
    if (insideShape == SOME_INSIDE) {
-      //System.err.println(" some area points inside shape");
+      // System.err.println(" some area points inside shape");
      return OVERLAPS;
    }

    if (insideRectangle == ALL_INSIDE && insideShape == ALL_INSIDE) {
-      //System.err.println(" inside of each other");
+      // System.err.println(" inside of each other");
      return OVERLAPS;
    }

-    // The entire locus of points in this shape is on a single plane, so we only need ot look for an intersection with that plane.
-    //System.err.println("xPlane = "+xPlane);
+    // The entire locus of points in this shape is on a single plane, so we only need to look for an
+    // intersection with that plane.
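Every getRelationship() in these files follows the same decision cascade, condensed below. This is an illustrative sketch, not Lucene's GeoArea API; the constant values and the Inside enum summarizing the two membership scans are placeholders.

    final class RelationshipSketch {
      enum Inside { ALL, SOME, NONE } // summary of one membership scan

      static final int CONTAINS = 0, WITHIN = 1, OVERLAPS = 2, DISJOINT = 3; // placeholder values

      static int relate(Inside shapeInSolid, Inside solidInShape, boolean edgesIntersect) {
        if (shapeInSolid == Inside.SOME || solidInShape == Inside.SOME) {
          return OVERLAPS; // partial containment in either direction
        }
        if (shapeInSolid == Inside.ALL && solidInShape == Inside.ALL) {
          return OVERLAPS; // each is inside the other
        }
        if (edgesIntersect) {
          return OVERLAPS; // the shape's edge crosses a bounding plane
        }
        if (shapeInSolid == Inside.ALL) {
          return WITHIN; // the shape fits entirely inside the solid
        }
        if (solidInShape == Inside.ALL) {
          return CONTAINS; // the solid fits entirely inside the shape
        }
        return DISJOINT;
      }
    }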
+ // System.err.println("xPlane = " + xPlane); if (path.intersects(xPlane, notableXPoints, minYPlane, maxYPlane, minZPlane, maxZPlane)) { - //System.err.println(" edges intersect"); + // System.err.println(" edges intersect"); return OVERLAPS; } if (insideRectangle == ALL_INSIDE) { - //System.err.println(" shape points inside area"); + // System.err.println(" shape points inside area"); return WITHIN; } if (insideShape == ALL_INSIDE) { - //System.err.println(" shape contains all area"); + // System.err.println(" shape contains all area"); return CONTAINS; } - //System.err.println(" disjoint"); + // System.err.println(" disjoint"); return DISJOINT; } @Override public boolean equals(Object o) { - if (!(o instanceof dXYZSolid)) + if (!(o instanceof dXYZSolid)) { return false; + } dXYZSolid other = (dXYZSolid) o; if (!super.equals(other)) { return false; } - return other.xPlane.equals(xPlane) && - other.minYPlane.equals(minYPlane) && - other.maxYPlane.equals(maxYPlane) && - other.minZPlane.equals(minZPlane) && - other.maxZPlane.equals(maxZPlane); + return other.xPlane.equals(xPlane) + && other.minYPlane.equals(minYPlane) + && other.maxYPlane.equals(maxYPlane) + && other.minZPlane.equals(minZPlane) + && other.maxZPlane.equals(maxZPlane); } @Override public int hashCode() { int result = super.hashCode(); - result = 31 * result + xPlane.hashCode(); - result = 31 * result + minYPlane.hashCode(); - result = 31 * result + maxYPlane.hashCode(); - result = 31 * result + minZPlane.hashCode(); - result = 31 * result + maxZPlane.hashCode(); + result = 31 * result + xPlane.hashCode(); + result = 31 * result + minYPlane.hashCode(); + result = 31 * result + maxYPlane.hashCode(); + result = 31 * result + minZPlane.hashCode(); + result = 31 * result + maxZPlane.hashCode(); return result; } @Override public String toString() { - return "dXYZSolid: {planetmodel="+planetModel+", xplane="+xPlane+", minYplane="+minYPlane+", maxYplane="+maxYPlane+", minZplane="+minZPlane+", maxZplane="+maxZPlane+"}"; + return "dXYZSolid: {planetmodel=" + + planetModel + + ", xplane=" + + xPlane + + ", minYplane=" + + minYPlane + + ", maxYplane=" + + maxYPlane + + ", minZplane=" + + minZPlane + + ", maxZplane=" + + maxZPlane + + "}"; } - } - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/dXYdZSolid.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/dXYdZSolid.java index 0587bc292f9..0350f22e41e 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/dXYdZSolid.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/dXYdZSolid.java @@ -16,14 +16,13 @@ */ package org.apache.lucene.spatial3d.geom; +import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.io.IOException; /** - * 3D rectangle, bounded on six sides by X,Y,Z limits, degenerate in X and Z. - * This figure, in fact, represents either zero, one, or two points, so the - * actual data stored is minimal. + * 3D rectangle, bounded on six sides by X,Y,Z limits, degenerate in X and Z. This figure, in fact, + * represents either zero, one, or two points, so the actual data stored is minimal. * * @lucene.internal */ @@ -40,21 +39,22 @@ class dXYdZSolid extends BaseXYZSolid { /** The points in this figure on the planet surface; also doubles for edge points */ protected final GeoPoint[] surfacePoints; - + /** * Sole constructor * - *@param planetModel is the planet model. - *@param X is the X value. - *@param minY is the minimum Y value. - *@param maxY is the maximum Y value. 
- *@param Z is the Z value. + * @param planetModel is the planet model. + * @param X is the X value. + * @param minY is the minimum Y value. + * @param maxY is the maximum Y value. + * @param Z is the Z value. */ - public dXYdZSolid(final PlanetModel planetModel, - final double X, - final double minY, - final double maxY, - final double Z) { + public dXYdZSolid( + final PlanetModel planetModel, + final double X, + final double minY, + final double maxY, + final double Z) { super(planetModel); // Argument checking if (maxY - minY < Vector.MINIMUM_RESOLUTION) @@ -66,24 +66,27 @@ class dXYdZSolid extends BaseXYZSolid { this.Z = Z; // Build the planes and intersect them. - final Plane xPlane = new Plane(xUnitVector,-X); - final Plane zPlane = new Plane(zUnitVector,-Z); - final SidedPlane minYPlane = new SidedPlane(0.0,maxY,0.0,yUnitVector,-minY); - final SidedPlane maxYPlane = new SidedPlane(0.0,minY,0.0,yUnitVector,-maxY); - surfacePoints = xPlane.findIntersections(planetModel,zPlane,minYPlane,maxYPlane); + final Plane xPlane = new Plane(xUnitVector, -X); + final Plane zPlane = new Plane(zUnitVector, -Z); + final SidedPlane minYPlane = new SidedPlane(0.0, maxY, 0.0, yUnitVector, -minY); + final SidedPlane maxYPlane = new SidedPlane(0.0, minY, 0.0, yUnitVector, -maxY); + surfacePoints = xPlane.findIntersections(planetModel, zPlane, minYPlane, maxYPlane); } /** * Constructor for deserialization. + * * @param planetModel is the planet model. * @param inputStream is the input stream. */ - public dXYdZSolid(final PlanetModel planetModel, final InputStream inputStream) throws IOException { - this(planetModel, - SerializableObject.readDouble(inputStream), - SerializableObject.readDouble(inputStream), - SerializableObject.readDouble(inputStream), - SerializableObject.readDouble(inputStream)); + public dXYdZSolid(final PlanetModel planetModel, final InputStream inputStream) + throws IOException { + this( + planetModel, + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream)); } @Override @@ -98,22 +101,23 @@ class dXYdZSolid extends BaseXYZSolid { protected GeoPoint[] getEdgePoints() { return surfacePoints; } - + @Override public boolean isWithin(final double x, final double y, final double z) { for (final GeoPoint p : surfacePoints) { - if (p.isIdentical(x,y,z)) + if (p.isIdentical(x, y, z)) { return true; + } } return false; } @Override public int getRelationship(final GeoShape path) { - //System.err.println(this+" getrelationship with "+path); + // System.err.println(this + " getRelationship with " + path); final int insideRectangle = isShapeInsideArea(path); if (insideRectangle == SOME_INSIDE) { - //System.err.println(" some inside"); + // System.err.println(" some inside"); return OVERLAPS; } @@ -124,33 +128,35 @@ class dXYdZSolid extends BaseXYZSolid { } if (insideRectangle == ALL_INSIDE && insideShape == ALL_INSIDE) { - //System.err.println(" inside of each other"); + // System.err.println(" inside of each other"); return OVERLAPS; } if (insideRectangle == ALL_INSIDE) { return WITHIN; } - + if (insideShape == ALL_INSIDE) { - //System.err.println(" shape contains rectangle"); + // System.err.println(" shape contains rectangle"); return CONTAINS; } - //System.err.println(" disjoint"); + // System.err.println(" disjoint"); return DISJOINT; } @Override public boolean equals(Object o) { - if (!(o instanceof dXYdZSolid)) + if (!(o instanceof dXYdZSolid)) { return false; + } 
dXYdZSolid other = (dXYdZSolid) o; - if (!super.equals(other) || surfacePoints.length != other.surfacePoints.length ) { + if (!super.equals(other) || surfacePoints.length != other.surfacePoints.length) { return false; } for (int i = 0; i < surfacePoints.length; i++) { - if (!surfacePoints[i].equals(other.surfacePoints[i])) + if (!surfacePoints[i].equals(other.surfacePoints[i])) { return false; + } } return true; } @@ -159,7 +165,7 @@ class dXYdZSolid extends BaseXYZSolid { public int hashCode() { int result = super.hashCode(); for (final GeoPoint p : surfacePoints) { - result = 31 * result + p.hashCode(); + result = 31 * result + p.hashCode(); } return result; } @@ -170,8 +176,6 @@ class dXYdZSolid extends BaseXYZSolid { for (final GeoPoint p : surfacePoints) { sb.append(" ").append(p).append(" "); } - return "dXYdZSolid: {planetmodel="+planetModel+", "+sb.toString()+"}"; + return "dXYdZSolid: {planetmodel=" + planetModel + ", " + sb.toString() + "}"; } - } - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/dXdYZSolid.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/dXdYZSolid.java index 3dfa6ec6d55..45f399a4283 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/dXdYZSolid.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/dXdYZSolid.java @@ -16,14 +16,13 @@ */ package org.apache.lucene.spatial3d.geom; +import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.io.IOException; /** - * 3D rectangle, bounded on six sides by X,Y,Z limits, degenerate in X and Y. - * This figure, in fact, represents either zero, one, or two points, so the - * actual data stored is minimal. + * 3D rectangle, bounded on six sides by X,Y,Z limits, degenerate in X and Y. This figure, in fact, + * represents either zero, one, or two points, so the actual data stored is minimal. * * @lucene.internal */ @@ -40,21 +39,22 @@ class dXdYZSolid extends BaseXYZSolid { /** The points in this figure on the planet surface; also doubles for edge points */ protected final GeoPoint[] surfacePoints; - + /** * Sole constructor * - *@param planetModel is the planet model. - *@param X is the X value. - *@param Y is the Y value. - *@param minZ is the minimum Z value. - *@param maxZ is the maximum Z value. + * @param planetModel is the planet model. + * @param X is the X value. + * @param Y is the Y value. + * @param minZ is the minimum Z value. + * @param maxZ is the maximum Z value. */ - public dXdYZSolid(final PlanetModel planetModel, - final double X, - final double Y, - final double minZ, - final double maxZ) { + public dXdYZSolid( + final PlanetModel planetModel, + final double X, + final double Y, + final double minZ, + final double maxZ) { super(planetModel); // Argument checking if (maxZ - minZ < Vector.MINIMUM_RESOLUTION) @@ -66,24 +66,27 @@ class dXdYZSolid extends BaseXYZSolid { this.maxZ = maxZ; // Build the planes and intersect them. 
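Before the plane construction below, it may help to see why a solid that is degenerate in two dimensions stores only a handful of points. On a unit sphere (an illustrative stand-in for the real PlanetModel; the class and method names here are hypothetical), intersecting the two degenerate planes with the surface and clipping by the remaining bounds leaves at most two candidates:

    import java.util.ArrayList;
    import java.util.List;

    final class DegeneratePointsSketch {
      /** Surface points of a unit sphere cut by the planes x = X and y = Y, clipped to [minZ, maxZ]. */
      static List<double[]> surfacePoints(double X, double Y, double minZ, double maxZ) {
        final List<double[]> kept = new ArrayList<>();
        final double zSquared = 1.0 - X * X - Y * Y; // from x^2 + y^2 + z^2 = 1
        if (zSquared < 0.0) {
          return kept; // the two planes miss the sphere entirely: zero points
        }
        final double z = Math.sqrt(zSquared);
        final double[] candidates = zSquared == 0.0 ? new double[] {0.0} : new double[] {z, -z};
        for (double candidate : candidates) {
          if (candidate >= minZ && candidate <= maxZ) {
            kept.add(new double[] {X, Y, candidate}); // at most two survive the Z clipping
          }
        }
        return kept;
      }
    }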
- final Plane xPlane = new Plane(xUnitVector,-X); - final Plane yPlane = new Plane(yUnitVector,-Y); - final SidedPlane minZPlane = new SidedPlane(0.0,0.0,maxZ,zUnitVector,-minZ); - final SidedPlane maxZPlane = new SidedPlane(0.0,0.0,minZ,zUnitVector,-maxZ); - surfacePoints = xPlane.findIntersections(planetModel,yPlane,minZPlane,maxZPlane); + final Plane xPlane = new Plane(xUnitVector, -X); + final Plane yPlane = new Plane(yUnitVector, -Y); + final SidedPlane minZPlane = new SidedPlane(0.0, 0.0, maxZ, zUnitVector, -minZ); + final SidedPlane maxZPlane = new SidedPlane(0.0, 0.0, minZ, zUnitVector, -maxZ); + surfacePoints = xPlane.findIntersections(planetModel, yPlane, minZPlane, maxZPlane); } /** * Constructor for deserialization. + * * @param planetModel is the planet model. * @param inputStream is the input stream. */ - public dXdYZSolid(final PlanetModel planetModel, final InputStream inputStream) throws IOException { - this(planetModel, - SerializableObject.readDouble(inputStream), - SerializableObject.readDouble(inputStream), - SerializableObject.readDouble(inputStream), - SerializableObject.readDouble(inputStream)); + public dXdYZSolid(final PlanetModel planetModel, final InputStream inputStream) + throws IOException { + this( + planetModel, + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream)); } @Override @@ -98,22 +101,23 @@ class dXdYZSolid extends BaseXYZSolid { protected GeoPoint[] getEdgePoints() { return surfacePoints; } - + @Override public boolean isWithin(final double x, final double y, final double z) { for (final GeoPoint p : surfacePoints) { - if (p.isIdentical(x,y,z)) + if (p.isIdentical(x, y, z)) { return true; + } } return false; } @Override public int getRelationship(final GeoShape path) { - //System.err.println(this+" getrelationship with "+path); + // System.err.println(this + " getRelationship with " + path); final int insideRectangle = isShapeInsideArea(path); if (insideRectangle == SOME_INSIDE) { - //System.err.println(" some inside"); + // System.err.println(" some inside"); return OVERLAPS; } @@ -124,33 +128,35 @@ class dXdYZSolid extends BaseXYZSolid { } if (insideRectangle == ALL_INSIDE && insideShape == ALL_INSIDE) { - //System.err.println(" inside of each other"); + // System.err.println(" inside of each other"); return OVERLAPS; } if (insideRectangle == ALL_INSIDE) { return WITHIN; } - + if (insideShape == ALL_INSIDE) { - //System.err.println(" shape contains rectangle"); + // System.err.println(" shape contains rectangle"); return CONTAINS; } - //System.err.println(" disjoint"); + // System.err.println(" disjoint"); return DISJOINT; } @Override public boolean equals(Object o) { - if (!(o instanceof dXdYZSolid)) + if (!(o instanceof dXdYZSolid)) { return false; + } dXdYZSolid other = (dXdYZSolid) o; - if (!super.equals(other) || surfacePoints.length != other.surfacePoints.length ) { + if (!super.equals(other) || surfacePoints.length != other.surfacePoints.length) { return false; } for (int i = 0; i < surfacePoints.length; i++) { - if (!surfacePoints[i].equals(other.surfacePoints[i])) + if (!surfacePoints[i].equals(other.surfacePoints[i])) { return false; + } } return true; } @@ -159,7 +165,7 @@ class dXdYZSolid extends BaseXYZSolid { public int hashCode() { int result = super.hashCode(); for (final GeoPoint p : surfacePoints) { - result = 31 * result + p.hashCode(); + result = 31 * result + p.hashCode(); } return result; } @@ 
-170,8 +176,6 @@ class dXdYZSolid extends BaseXYZSolid { for (final GeoPoint p : surfacePoints) { sb.append(" ").append(p).append(" "); } - return "dXdYZSolid: {planetmodel="+planetModel+", "+sb.toString()+"}"; + return "dXdYZSolid: {planetmodel=" + planetModel + ", " + sb.toString() + "}"; } - } - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/dXdYdZSolid.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/dXdYdZSolid.java index 61b2608dd0c..9cd5924da47 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/dXdYdZSolid.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/dXdYdZSolid.java @@ -16,9 +16,9 @@ */ package org.apache.lucene.spatial3d.geom; +import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.io.IOException; /** * 3D rectangle, bounded on six sides by X,Y,Z limits, degenerate in all dimensions @@ -38,39 +38,38 @@ class dXdYdZSolid extends BaseXYZSolid { protected final boolean isOnSurface; /** The point */ protected final GeoPoint thePoint; - - /** These are the edge points of the shape, which are defined to be at least one point on - * each surface area boundary. In the case of a solid, this includes points which represent - * the intersection of XYZ bounding planes and the planet, as well as points representing - * the intersection of single bounding planes with the planet itself. + + /** + * These are the edge points of the shape, which are defined to be at least one point on each + * surface area boundary. In the case of a solid, this includes points which represent the + * intersection of XYZ bounding planes and the planet, as well as points representing the + * intersection of single bounding planes with the planet itself. */ protected final GeoPoint[] edgePoints; /** Empty array of {@link GeoPoint}. */ protected static final GeoPoint[] nullPoints = new GeoPoint[0]; - + /** * Sole constructor * - *@param planetModel is the planet model. - *@param X is the X value. - *@param Y is the Y value. - *@param Z is the Z value. + * @param planetModel is the planet model. + * @param X is the X value. + * @param Y is the Y value. + * @param Z is the Z value. */ - public dXdYdZSolid(final PlanetModel planetModel, - final double X, - final double Y, - final double Z) { + public dXdYdZSolid( + final PlanetModel planetModel, final double X, final double Y, final double Z) { super(planetModel); - + this.X = X; this.Y = Y; this.Z = Z; - isOnSurface = planetModel.pointOnSurface(X,Y,Z); + isOnSurface = planetModel.pointOnSurface(X, Y, Z); if (isOnSurface) { - thePoint = new GeoPoint(X,Y,Z); - edgePoints = new GeoPoint[]{thePoint}; + thePoint = new GeoPoint(X, Y, Z); + edgePoints = new GeoPoint[] {thePoint}; } else { thePoint = null; edgePoints = nullPoints; @@ -79,14 +78,17 @@ class dXdYdZSolid extends BaseXYZSolid { /** * Constructor for deserialization. + * * @param planetModel is the planet model. * @param inputStream is the input stream. 
*/ - public dXdYdZSolid(final PlanetModel planetModel, final InputStream inputStream) throws IOException { - this(planetModel, - SerializableObject.readDouble(inputStream), - SerializableObject.readDouble(inputStream), - SerializableObject.readDouble(inputStream)); + public dXdYdZSolid(final PlanetModel planetModel, final InputStream inputStream) + throws IOException { + this( + planetModel, + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream), + SerializableObject.readDouble(inputStream)); } @Override @@ -100,13 +102,13 @@ class dXdYdZSolid extends BaseXYZSolid { protected GeoPoint[] getEdgePoints() { return edgePoints; } - + @Override public boolean isWithin(final double x, final double y, final double z) { if (!isOnSurface) { return false; } - return thePoint.isIdentical(x,y,z); + return thePoint.isIdentical(x, y, z); } @Override @@ -114,46 +116,46 @@ class dXdYdZSolid extends BaseXYZSolid { if (!isOnSurface) { return DISJOINT; } - - //System.err.println(this+" getrelationship with "+path); + + // System.err.println(this+" getrelationship with "+path); final int insideRectangle = isShapeInsideArea(path); if (insideRectangle == SOME_INSIDE) { - //System.err.println(" some shape points inside area"); + // System.err.println(" some shape points inside area"); return OVERLAPS; } // Figure out if the entire XYZArea is contained by the shape. final int insideShape = isAreaInsideShape(path); if (insideShape == SOME_INSIDE) { - //System.err.println(" some area points inside shape"); + // System.err.println(" some area points inside shape"); return OVERLAPS; } if (insideRectangle == ALL_INSIDE && insideShape == ALL_INSIDE) { - //System.err.println(" inside of each other"); + // System.err.println(" inside of each other"); return OVERLAPS; } if (insideRectangle == ALL_INSIDE) { - //System.err.println(" shape inside area entirely"); + // System.err.println(" shape inside area entirely"); return WITHIN; } if (insideShape == ALL_INSIDE) { - //System.err.println(" shape contains area entirely"); + // System.err.println(" shape contains area entirely"); return CONTAINS; } - //System.err.println(" disjoint"); + // System.err.println(" disjoint"); return DISJOINT; } @Override public boolean equals(Object o) { - if (!(o instanceof dXdYdZSolid)) + if (!(o instanceof dXdYdZSolid)) { return false; + } dXdYdZSolid other = (dXdYdZSolid) o; - if (!super.equals(other) || - other.isOnSurface != isOnSurface) { + if (!super.equals(other) || other.isOnSurface != isOnSurface) { return false; } if (isOnSurface) { @@ -165,17 +167,21 @@ class dXdYdZSolid extends BaseXYZSolid { @Override public int hashCode() { int result = super.hashCode(); - result = 31 * result + (isOnSurface?1:0); + result = 31 * result + (isOnSurface ? 
1 : 0); if (isOnSurface) { - result = 31 * result + thePoint.hashCode(); + result = 31 * result + thePoint.hashCode(); } return result; } @Override public String toString() { - return "dXdYdZSolid: {planetmodel="+planetModel+", isOnSurface="+isOnSurface+", thePoint="+thePoint+"}"; + return "dXdYdZSolid: {planetmodel=" + + planetModel + + ", isOnSurface=" + + isOnSurface + + ", thePoint=" + + thePoint + + "}"; } - } - diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/package-info.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/package-info.java index 446365cbd24..10f12a08d02 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/package-info.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/package-info.java @@ -16,7 +16,7 @@ */ /** - * Shapes implemented using 3D planar geometry. This package has no dependencies aside from Java. + * Shapes implemented using 3D planar geometry. This package has no dependencies aside from Java. * This code was contributed under the name "Geo3D". */ package org.apache.lucene.spatial3d.geom; diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/package-info.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/package-info.java index 032d26f7393..a950bbadb03 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/package-info.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/package-info.java @@ -16,6 +16,7 @@ */ /** - * Lucene field & query support for the spatial geometry implemented in {@link org.apache.lucene.spatial3d.geom}. + * Lucene field & query support for the spatial geometry implemented in {@link + * org.apache.lucene.spatial3d.geom}. */ package org.apache.lucene.spatial3d; diff --git a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/TestGeo3DDocValues.java b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/TestGeo3DDocValues.java index 41471c84e46..185fd9e0f41 100644 --- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/TestGeo3DDocValues.java +++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/TestGeo3DDocValues.java @@ -19,7 +19,6 @@ package org.apache.lucene.spatial3d; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.lucene.spatial3d.geom.GeoPoint; import org.apache.lucene.spatial3d.geom.PlanetModel; - import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; @@ -31,18 +30,23 @@ public class TestGeo3DDocValues extends LuceneTestCase { checkPointEncoding(-45.0, -100.0); final int testAmt = TestUtil.nextInt(random(), 1000, 2000); for (int i = 0; i < testAmt; i++) { - checkPointEncoding(random().nextDouble() * 180.0 - 90.0, random().nextDouble() * 360.0 - 180.0); + checkPointEncoding( + random().nextDouble() * 180.0 - 90.0, random().nextDouble() * 360.0 - 180.0); } } - + void checkPointEncoding(final double latitude, final double longitude) { - PlanetModel planetModel = RandomPicks.randomFrom(random(), new PlanetModel[] {PlanetModel.WGS84, PlanetModel.CLARKE_1866}); - final GeoPoint point = new GeoPoint(planetModel, Geo3DUtil.fromDegrees(latitude), Geo3DUtil.fromDegrees(longitude)); + PlanetModel planetModel = + RandomPicks.randomFrom( + random(), new PlanetModel[] {PlanetModel.WGS84, PlanetModel.CLARKE_1866}); + final GeoPoint point = + new GeoPoint( + planetModel, Geo3DUtil.fromDegrees(latitude), Geo3DUtil.fromDegrees(longitude)); long pointValue = planetModel.getDocValueEncoder().encodePoint(point); final double x = 
planetModel.getDocValueEncoder().decodeXValue(pointValue); final double y = planetModel.getDocValueEncoder().decodeYValue(pointValue); final double z = planetModel.getDocValueEncoder().decodeZValue(pointValue); - final GeoPoint pointR = new GeoPoint(x,y,z); + final GeoPoint pointR = new GeoPoint(x, y, z); // Check whether stable pointValue = planetModel.getDocValueEncoder().encodePoint(x, y, z); assertEquals(x, planetModel.getDocValueEncoder().decodeXValue(pointValue), 0.0); @@ -51,5 +55,4 @@ public class TestGeo3DDocValues extends LuceneTestCase { // Check whether has some relationship with original point assertEquals(0.0, point.arcDistance(pointR), 0.02); } - } diff --git a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/TestGeo3DPoint.java b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/TestGeo3DPoint.java index b3ded508d03..d4616e1d6c5 100644 --- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/TestGeo3DPoint.java +++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/TestGeo3DPoint.java @@ -16,6 +16,8 @@ */ package org.apache.lucene.spatial3d; +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; +import com.carrotsearch.randomizedtesting.generators.RandomPicks; import java.io.IOException; import java.io.PrintWriter; import java.io.StringWriter; @@ -25,8 +27,6 @@ import java.util.HashSet; import java.util.List; import java.util.Random; import java.util.Set; - -import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.FilterCodec; import org.apache.lucene.codecs.PointsFormat; @@ -82,20 +82,24 @@ import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.NumericUtils; import org.apache.lucene.util.TestUtil; -import com.carrotsearch.randomizedtesting.generators.RandomNumbers; - public class TestGeo3DPoint extends LuceneTestCase { protected PlanetModel randomPlanetModel() { - return RandomPicks.randomFrom(random(), new PlanetModel[] {PlanetModel.WGS84, PlanetModel.CLARKE_1866, PlanetModel.SPHERE}); + return RandomPicks.randomFrom( + random(), + new PlanetModel[] {PlanetModel.WGS84, PlanetModel.CLARKE_1866, PlanetModel.SPHERE}); } private static Codec getCodec() { if (Codec.getDefault().getName().equals("Lucene84")) { int maxPointsInLeafNode = TestUtil.nextInt(random(), 16, 2048); - double maxMBSortInHeap = 3.0 + (3*random().nextDouble()); + double maxMBSortInHeap = 3.0 + (3 * random().nextDouble()); if (VERBOSE) { - System.out.println("TEST: using Lucene60PointsFormat with maxPointsInLeafNode=" + maxPointsInLeafNode + " and maxMBSortInHeap=" + maxMBSortInHeap); + System.out.println( + "TEST: using Lucene60PointsFormat with maxPointsInLeafNode=" + + maxPointsInLeafNode + + " and maxMBSortInHeap=" + + maxMBSortInHeap); } return new FilterCodec("Lucene84", Codec.getDefault()) { @@ -131,8 +135,16 @@ public class TestGeo3DPoint extends LuceneTestCase { IndexReader r = DirectoryReader.open(w); // We can't wrap with "exotic" readers because the query must see the BKD3DDVFormat: IndexSearcher s = newSearcher(r, false); - assertEquals(1, s.search(Geo3DPoint.newShapeQuery("field", - GeoCircleFactory.makeGeoCircle(planetModel, toRadians(50), toRadians(-97), Math.PI/180.)), 1).totalHits.value); + assertEquals( + 1, + s.search( + Geo3DPoint.newShapeQuery( + "field", + GeoCircleFactory.makeGeoCircle( + planetModel, toRadians(50), toRadians(-97), Math.PI / 180.)), + 1) + .totalHits + .value); w.close(); r.close(); dir.close(); @@ -153,12 +165,16 @@ public class 
TestGeo3DPoint extends LuceneTestCase { final int splitCount; final PlanetModel planetModel; - public Cell(Cell parent, - int xMinEnc, int xMaxEnc, - int yMinEnc, int yMaxEnc, - int zMinEnc, int zMaxEnc, - final PlanetModel planetModel, - int splitCount) { + public Cell( + Cell parent, + int xMinEnc, + int xMaxEnc, + int yMinEnc, + int yMaxEnc, + int zMinEnc, + int zMaxEnc, + final PlanetModel planetModel, + int splitCount) { this.parent = parent; this.xMinEnc = xMinEnc; this.xMaxEnc = xMaxEnc; @@ -177,14 +193,33 @@ public class TestGeo3DPoint extends LuceneTestCase { int docY = planetModel.encodeValue(point.y); int docZ = planetModel.encodeValue(point.z); - return docX >= xMinEnc && docX <= xMaxEnc && - docY >= yMinEnc && docY <= yMaxEnc && - docZ >= zMinEnc && docZ <= zMaxEnc; + return docX >= xMinEnc + && docX <= xMaxEnc + && docY >= yMinEnc + && docY <= yMaxEnc + && docZ >= zMinEnc + && docZ <= zMaxEnc; } @Override public String toString() { - return "cell=" + cellID + (parent == null ? "" : " parentCellID=" + parent.cellID) + " x: " + xMinEnc + " TO " + xMaxEnc + ", y: " + yMinEnc + " TO " + yMaxEnc + ", z: " + zMinEnc + " TO " + zMaxEnc + ", splits: " + splitCount; + return "cell=" + + cellID + + (parent == null ? "" : " parentCellID=" + parent.cellID) + + " x: " + + xMinEnc + + " TO " + + xMaxEnc + + ", y: " + + yMinEnc + + " TO " + + yMaxEnc + + ", z: " + + zMinEnc + + " TO " + + zMaxEnc + + ", splits: " + + splitCount; } } @@ -193,7 +228,10 @@ public class TestGeo3DPoint extends LuceneTestCase { } private static GeoPoint quantize(GeoPoint point, final PlanetModel planetModel) { - return new GeoPoint(quantize(point.x, planetModel), quantize(point.y, planetModel), quantize(point.z, planetModel)); + return new GeoPoint( + quantize(point.x, planetModel), + quantize(point.y, planetModel), + quantize(point.z, planetModel)); } /** Tests consistency of GeoArea.getRelationship vs GeoShape.isWithin */ @@ -207,19 +245,24 @@ public class TestGeo3DPoint extends LuceneTestCase { GeoPoint[] docs = new GeoPoint[numDocs]; GeoPoint[] unquantizedDocs = new GeoPoint[numDocs]; PlanetModel planetModel = PlanetModel.CLARKE_1866; - for(int docID=0;docID geoPoints = new ArrayList<>(); - while (geoPoints.size() < vertexCount) { - final GeoPoint gPt = new GeoPoint(planetModel, toRadians(GeoTestUtil.nextLatitude()), toRadians(GeoTestUtil.nextLongitude())); - geoPoints.add(gPt); - } - try { - final GeoShape rval = GeoPolygonFactory.makeGeoPolygon(planetModel, geoPoints); - if (rval == null) { - // Degenerate polygon - continue; + case 0: + { + // Polygons + final int vertexCount = random().nextInt(3) + 3; + final List geoPoints = new ArrayList<>(); + while (geoPoints.size() < vertexCount) { + final GeoPoint gPt = + new GeoPoint( + planetModel, + toRadians(GeoTestUtil.nextLatitude()), + toRadians(GeoTestUtil.nextLongitude())); + geoPoints.add(gPt); + } + try { + final GeoShape rval = GeoPolygonFactory.makeGeoPolygon(planetModel, geoPoints); + if (rval == null) { + // Degenerate polygon + continue; + } + return rval; + } catch (IllegalArgumentException e) { + // This is what happens when we create a shape that is invalid. Although it is + // conceivable that there are cases where + // the exception is thrown incorrectly, we aren't going to be able to do that in this + // random test. + continue; + } } - return rval; - } catch (IllegalArgumentException e) { - // This is what happens when we create a shape that is invalid. 
Although it is conceivable that there are cases where - // the exception is thrown incorrectly, we aren't going to be able to do that in this random test. - continue; - } - } - case 1: { - // Circles + case 1: + { + // Circles - double lat = toRadians(GeoTestUtil.nextLatitude()); - double lon = toRadians(GeoTestUtil.nextLongitude()); + double lat = toRadians(GeoTestUtil.nextLatitude()); + double lon = toRadians(GeoTestUtil.nextLongitude()); - double angle = random().nextDouble() * Math.PI/2.0; + double angle = random().nextDouble() * Math.PI / 2.0; - try { - return GeoCircleFactory.makeGeoCircle(planetModel, lat, lon, angle); - } catch (IllegalArgumentException iae) { - // angle is too small; try again: - continue; - } - } + try { + return GeoCircleFactory.makeGeoCircle(planetModel, lat, lon, angle); + } catch (IllegalArgumentException iae) { + // angle is too small; try again: + continue; + } + } - case 2: { - // Rectangles - double lat0 = toRadians(GeoTestUtil.nextLatitude()); - double lat1 = toRadians(GeoTestUtil.nextLatitude()); - if (lat1 < lat0) { - double x = lat0; - lat0 = lat1; - lat1 = x; - } - double lon0 = toRadians(GeoTestUtil.nextLongitude()); - double lon1 = toRadians(GeoTestUtil.nextLongitude()); - if (lon1 < lon0) { - double x = lon0; - lon0 = lon1; - lon1 = x; - } + case 2: + { + // Rectangles + double lat0 = toRadians(GeoTestUtil.nextLatitude()); + double lat1 = toRadians(GeoTestUtil.nextLatitude()); + if (lat1 < lat0) { + double x = lat0; + lat0 = lat1; + lat1 = x; + } + double lon0 = toRadians(GeoTestUtil.nextLongitude()); + double lon1 = toRadians(GeoTestUtil.nextLongitude()); + if (lon1 < lon0) { + double x = lon0; + lon0 = lon1; + lon1 = x; + } - return GeoBBoxFactory.makeGeoBBox(planetModel, lat1, lat0, lon0, lon1); - } + return GeoBBoxFactory.makeGeoBBox(planetModel, lat1, lat0, lon0, lon1); + } - case 3: { - // Paths - final int pointCount = random().nextInt(5) + 1; - final double width = toRadians(random().nextInt(89)+1); - final GeoPoint[] points = new GeoPoint[pointCount]; - for (int i = 0; i < pointCount; i++) { - points[i] = new GeoPoint(planetModel, toRadians(GeoTestUtil.nextLatitude()), toRadians(GeoTestUtil.nextLongitude())); - } - try { - return GeoPathFactory.makeGeoPath(planetModel, width, points); - } catch (IllegalArgumentException e) { - // This is what happens when we create a shape that is invalid. Although it is conceivable that there are cases where - // the exception is thrown incorrectly, we aren't going to be able to do that in this random test. - continue; - } - } + case 3: + { + // Paths + final int pointCount = random().nextInt(5) + 1; + final double width = toRadians(random().nextInt(89) + 1); + final GeoPoint[] points = new GeoPoint[pointCount]; + for (int i = 0; i < pointCount; i++) { + points[i] = + new GeoPoint( + planetModel, + toRadians(GeoTestUtil.nextLatitude()), + toRadians(GeoTestUtil.nextLongitude())); + } + try { + return GeoPathFactory.makeGeoPath(planetModel, width, points); + } catch (IllegalArgumentException e) { + // This is what happens when we create a shape that is invalid. Although it is + // conceivable that there are cases where + // the exception is thrown incorrectly, we aren't going to be able to do that in this + // random test. 
+ continue; + } + } - default: - throw new IllegalStateException("Unexpected shape type"); + default: + throw new IllegalStateException("Unexpected shape type"); } } } - private static void verify(double[] lats, double[] lons, final PlanetModel planetModel) throws Exception { + private static void verify(double[] lats, double[] lons, final PlanetModel planetModel) + throws Exception { IndexWriterConfig iwc = newIndexWriterConfig(); GeoPoint[] points = new GeoPoint[lats.length]; GeoPoint[] unquantizedPoints = new GeoPoint[lats.length]; - + // Pre-quantize all lat/lons: - for(int i=0;i 0 && random().nextInt(100) == 42) { int idToDelete = random().nextInt(id); - w.deleteDocuments(new Term("id", ""+idToDelete)); + w.deleteDocuments(new Term("id", "" + idToDelete)); deleted.add(idToDelete); if (VERBOSE) { System.err.println(" delete id=" + idToDelete); @@ -804,7 +1006,7 @@ public class TestGeo3DPoint extends LuceneTestCase { final int iters = atLeast(100); - for (int iter=0;iter Math.PI) { maximumAngle = Math.PI; } - // The minimum angle is MINIMUM_EDGE_ANGLE, or enough to be sure nobody afterwards needs more than - // 180 degrees. And since we have three points to start with, we already know that. + // The minimum angle is MINIMUM_EDGE_ANGLE, or enough to be sure nobody afterwards needs + // more than 180 degrees. And since we have three points to start with, we already know + // that. final double minimumAngle = MINIMUM_EDGE_ANGLE; // Pick the angle final double angle = random().nextDouble() * (maximumAngle - minimumAngle) + minimumAngle; @@ -981,7 +1242,8 @@ public class TestGeo3DPoint extends LuceneTestCase { accumulatedAngle += angle; } // Pick the arc distance randomly; not quite the full range though - arcDistance[i] = random().nextDouble() * (Math.PI * 0.5 - MINIMUM_ARC_ANGLE) + MINIMUM_ARC_ANGLE; + arcDistance[i] = + random().nextDouble() * (Math.PI * 0.5 - MINIMUM_ARC_ANGLE) + MINIMUM_ARC_ANGLE; } if (clockwiseDesired) { // Reverse the signs @@ -989,19 +1251,21 @@ public class TestGeo3DPoint extends LuceneTestCase { angles[i] = -angles[i]; } } - - // Now, use the pole's information plus angles and arcs to create GeoPoints in the right order. + + // Now, use the pole's information plus angles and arcs to create GeoPoints in the right + // order. final List polyPoints = convertToPoints(pm, pole, angles, arcDistance); - - // Next, do some holes. No more than 2 of these. The poles for holes must always be within the polygon, so we're - // going to use Geo3D to help us select those given the points we just made. - - final int holeCount = createHoles?TestUtil.nextInt(random(), 0, 2):0; - + + // Next, do some holes. No more than 2 of these. The poles for holes must always be within + // the polygon, so we're going to use Geo3D to help us select those given the points we just + // made. + + final int holeCount = createHoles ? TestUtil.nextInt(random(), 0, 2) : 0; + final List holeList = new ArrayList<>(); - + /* Hole logic is broken and needs rethinking - + // Create the geo3d polygon, so we can test out our poles. final GeoPolygon poly; try { @@ -1010,7 +1274,7 @@ public class TestGeo3DPoint extends LuceneTestCase { // This is what happens when three adjacent points are colinear, so try again. continue; } - + for (int i = 0; i < holeCount; i++) { // Choose a pole. The poly has to be within the polygon, but it also cannot be on the polygon edge. // If we can't find a good pole we have to give it up and not do the hole. 
@@ -1037,13 +1301,13 @@ public class TestGeo3DPoint extends LuceneTestCase {
       }
     }
     */
-
+
     final Polygon[] holes = holeList.toArray(new Polygon[0]);
-
+
     // Finally, build the polygon and return it
     final double[] lats = new double[polyPoints.size() + 1];
     final double[] lons = new double[polyPoints.size() + 1];
-
+
     for (int i = 0; i < polyPoints.size(); i++) {
       lats[i] = polyPoints.get(i).getLatitude() * 180.0 / Math.PI;
       lons[i] = polyPoints.get(i).getLongitude() * 180.0 / Math.PI;
@@ -1053,36 +1317,51 @@ public class TestGeo3DPoint extends LuceneTestCase {
     return new Polygon(lats, lons, holes);
   }
 }
-
-  protected static List<GeoPoint> convertToPoints(final PlanetModel pm, final GeoPoint pole, final double[] angles, final double[] arcDistances) {
-    // To do the point rotations, we need the sine and cosine of the pole latitude and longitude. Get it here for performance.
+
+  protected static List<GeoPoint> convertToPoints(
+      final PlanetModel pm,
+      final GeoPoint pole,
+      final double[] angles,
+      final double[] arcDistances) {
+    // To do the point rotations, we need the sine and cosine of the pole latitude and longitude.
+    // Get it here for performance.
     final double sinLatitude = Math.sin(pole.getLatitude());
     final double cosLatitude = Math.cos(pole.getLatitude());
     final double sinLongitude = Math.sin(pole.getLongitude());
     final double cosLongitude = Math.cos(pole.getLongitude());
     final List<GeoPoint> rval = new ArrayList<>();
     for (int i = 0; i < angles.length; i++) {
-      rval.add(createPoint(pm, angles[i], arcDistances[i], sinLatitude, cosLatitude, sinLongitude, cosLongitude));
+      rval.add(
+          createPoint(
+              pm,
+              angles[i],
+              arcDistances[i],
+              sinLatitude,
+              cosLatitude,
+              sinLongitude,
+              cosLongitude));
     }
     return rval;
   }
-
-  protected static GeoPoint createPoint(final PlanetModel pm,
-    final double angle,
-    final double arcDistance,
-    final double sinLatitude,
-    final double cosLatitude,
-    final double sinLongitude,
-    final double cosLongitude) {
+
+  protected static GeoPoint createPoint(
+      final PlanetModel pm,
+      final double angle,
+      final double arcDistance,
+      final double sinLatitude,
+      final double cosLatitude,
+      final double sinLongitude,
+      final double cosLongitude) {
     // From the angle and arc distance, convert to (x,y,z) in unit space.
-    // We want the perspective to be looking down the x axis. The "angle" measurement is thus in the Y-Z plane.
-    // The arcdistance is in X.
+    // We want the perspective to be looking down the x axis. The "angle" measurement is thus in
+    // the Y-Z plane. The arcdistance is in X.
     final double x = Math.cos(arcDistance);
     final double yzScale = Math.sin(arcDistance);
     final double y = Math.cos(angle) * yzScale;
     final double z = Math.sin(angle) * yzScale;
     // Now, rotate coordinates so that we shift everything from pole = x-axis to actual coordinates.
-    // This transformation should take the point (1,0,0) and transform it to the pole's actual (x,y,z) coordinates.
+    // This transformation should take the point (1,0,0) and transform it to the pole's actual
+    // (x,y,z) coordinates.
     // Coordinate rotation formula:
     // x1 = x0 cos T - y0 sin T
     // y1 = x0 sin T + y0 cos T
@@ -1094,7 +1373,8 @@ public class TestGeo3DPoint extends LuceneTestCase {
     // y2 = y1
     // z2 = - x1 sin al + z1 cos al
     // So, we reverse the order of the transformations, AND we transform backwards.
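The two rotations spelled out in the comments above can be sanity-checked numerically. The sketch below is a standalone illustration (not the patch's createPoint): rotating the reference point (1,0,0) by latitude about the y-axis and then by longitude about the z-axis must land exactly on the pole's unit-sphere coordinates.

    final class RotationSketch {
      public static void main(String[] args) {
        final double lat = Math.toRadians(40.0);
        final double lon = Math.toRadians(-75.0);
        // Rotate (1,0,0) by lat about the y-axis: (cos lat, 0, sin lat).
        final double x1 = Math.cos(lat), y1 = 0.0, z1 = Math.sin(lat);
        // Rotate by lon about the z-axis: x2 = x1 cos T - y1 sin T, y2 = x1 sin T + y1 cos T.
        final double x2 = x1 * Math.cos(lon) - y1 * Math.sin(lon);
        final double y2 = x1 * Math.sin(lon) + y1 * Math.cos(lon);
        final double z2 = z1;
        // All three differences print as (approximately) zero, matching the expected
        // pole position (cos(lat)cos(lon), cos(lat)sin(lon), sin(lat)):
        System.out.println(x2 - Math.cos(lat) * Math.cos(lon));
        System.out.println(y2 - Math.cos(lat) * Math.sin(lon));
        System.out.println(z2 - Math.sin(lat));
      }
    }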
- // Transforming backwards means using these identities: sin(-angle) = -sin(angle), cos(-angle) = cos(angle) + // Transforming backwards means using these identities: sin(-angle) = -sin(angle), cos(-angle) = + // cos(angle) // So: // x1 = x0 cos al - z0 sin al // y1 = y0 @@ -1111,12 +1391,14 @@ public class TestGeo3DPoint extends LuceneTestCase { // Scale to put the point on the surface return pm.createSurfacePoint(x2, y2, z2); } - - protected static boolean verifyPolygon(final PlanetModel pm, final Polygon polygon, final GeoPolygon outsidePolygon) { - // Each point in the new poly should be inside the outside poly, and each edge should not intersect the outside poly edge + + protected static boolean verifyPolygon( + final PlanetModel pm, final Polygon polygon, final GeoPolygon outsidePolygon) { + // Each point in the new poly should be inside the outside poly, and each edge should not + // intersect the outside poly edge final double[] lats = polygon.getPolyLats(); final double[] lons = polygon.getPolyLons(); - final List polyPoints = new ArrayList<>(lats.length-1); + final List polyPoints = new ArrayList<>(lats.length - 1); for (int i = 0; i < lats.length - 1; i++) { final GeoPoint newPoint = new GeoPoint(pm, toRadians(lats[i]), toRadians(lons[i])); if (!outsidePolygon.isWithin(newPoint)) { @@ -1124,16 +1406,17 @@ public class TestGeo3DPoint extends LuceneTestCase { } polyPoints.add(newPoint); } - // We don't need to construct the world to find intersections -- just the bordering planes. + // We don't need to construct the world to find intersections -- just the bordering planes. for (int planeIndex = 0; planeIndex < polyPoints.size(); planeIndex++) { final GeoPoint startPoint = polyPoints.get(planeIndex); final GeoPoint endPoint = polyPoints.get(legalIndex(planeIndex + 1, polyPoints.size())); - final GeoPoint beforeStartPoint = polyPoints.get(legalIndex(planeIndex - 1, polyPoints.size())); + final GeoPoint beforeStartPoint = + polyPoints.get(legalIndex(planeIndex - 1, polyPoints.size())); final GeoPoint afterEndPoint = polyPoints.get(legalIndex(planeIndex + 2, polyPoints.size())); final SidedPlane beforePlane = new SidedPlane(endPoint, beforeStartPoint, startPoint); final SidedPlane afterPlane = new SidedPlane(startPoint, endPoint, afterEndPoint); final Plane plane = new Plane(startPoint, endPoint); - + // Check for intersections!! 
if (outsidePolygon.intersects(plane, null, beforePlane, afterPlane)) { return false; @@ -1141,7 +1424,7 @@ public class TestGeo3DPoint extends LuceneTestCase { } return true; } - + protected static int legalIndex(int index, int size) { if (index >= size) { index -= size; @@ -1158,22 +1441,39 @@ public class TestGeo3DPoint extends LuceneTestCase { final double ENCODING_TOLERANCE = planetModel.DECODE; int iters = atLeast(10000); - for(int iter=0;iter planetMax) { @@ -1380,10 +1694,19 @@ public class TestGeo3DPoint extends LuceneTestCase { private int targetStackUpto; final StringBuilder b; - // In the first phase, we always return CROSSES to do a full scan of the BKD tree to see which leaf block the document lives in + // In the first phase, we always return CROSSES to do a full scan of the BKD tree to see which + // leaf block the document lives in boolean firstPhase = true; - public ExplainingVisitor(GeoShape shape, GeoPoint targetDocPoint, GeoPoint scaledDocPoint, IntersectVisitor in, int targetDocID, int numDims, int bytesPerDim, StringBuilder b) { + public ExplainingVisitor( + GeoShape shape, + GeoPoint targetDocPoint, + GeoPoint scaledDocPoint, + IntersectVisitor in, + int targetDocID, + int numDims, + int bytesPerDim, + StringBuilder b) { this.shape = shape; this.targetDocPoint = targetDocPoint; this.scaledDocPoint = scaledDocPoint; @@ -1420,7 +1743,7 @@ public class TestGeo3DPoint extends LuceneTestCase { assert stackToTargetDoc == null; stackToTargetDoc = new ArrayList<>(stack); b.append(" full BKD path to target doc:\n"); - for(Cell cell : stack) { + for (Cell cell : stack) { b.append(" " + cell + "\n"); } } @@ -1428,7 +1751,8 @@ public class TestGeo3DPoint extends LuceneTestCase { if (docID == targetDocID) { double x = Geo3DPoint.decodeDimension(packedValue, 0, shape.getPlanetModel()); double y = Geo3DPoint.decodeDimension(packedValue, Integer.BYTES, shape.getPlanetModel()); - double z = Geo3DPoint.decodeDimension(packedValue, 2 * Integer.BYTES, shape.getPlanetModel()); + double z = + Geo3DPoint.decodeDimension(packedValue, 2 * Integer.BYTES, shape.getPlanetModel()); b.append("leaf visit docID=" + docID + " x=" + x + " y=" + y + " z=" + z + "\n"); in.visit(docID, packedValue); } @@ -1438,27 +1762,34 @@ public class TestGeo3DPoint extends LuceneTestCase { @Override public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) { Cell cell = new Cell(minPackedValue, maxPackedValue); - //System.out.println("compare: " + cell); + // System.out.println("compare: " + cell); - // TODO: this is a bit hacky, having to reverse-engineer where we are in the BKD tree's recursion ... but it's the lesser evil vs e.g. - // polluting this visitor API, or implementing this "under the hood" in BKDReader instead? + // TODO: this is a bit hacky, having to reverse-engineer where we are in the BKD tree's + // recursion ... but it's the lesser evil vs e.g. polluting this visitor API, or + // implementing this "under the hood" in BKDReader instead? 
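      // Editorial note, not in the patch: the "reverse-engineering" the TODO describes
      // works because intersect() walks the BKD tree depth-first, so compare() sees cells
      // in DFS order. Maintaining the stack with the invariant "every cell contains the
      // cell pushed after it" therefore reconstructs the ancestor path: pop cells until
      // the top contains the incoming cell, then push the incoming cell.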
if (firstPhase) { // Pop stack: - while (stack.size() > 0 && stack.get(stack.size()-1).contains(cell) == false) { - stack.remove(stack.size()-1); - //System.out.println(" pop"); + while (stack.size() > 0 && stack.get(stack.size() - 1).contains(cell) == false) { + stack.remove(stack.size() - 1); + // System.out.println(" pop"); } // Push stack: stack.add(cell); - //System.out.println(" push"); + // System.out.println(" push"); return Relation.CELL_CROSSES_QUERY; } else { Relation result = in.compare(minPackedValue, maxPackedValue); - if (targetStackUpto < stackToTargetDoc.size() && cell.equals(stackToTargetDoc.get(targetStackUpto))) { - b.append(" on cell " + stackToTargetDoc.get(targetStackUpto) + ", wrapped visitor returned " + result + "\n"); + if (targetStackUpto < stackToTargetDoc.size() + && cell.equals(stackToTargetDoc.get(targetStackUpto))) { + b.append( + " on cell " + + stackToTargetDoc.get(targetStackUpto) + + ", wrapped visitor returned " + + result + + "\n"); targetStackUpto++; } return result; @@ -1476,14 +1807,28 @@ public class TestGeo3DPoint extends LuceneTestCase { /** Returns true if this cell fully contains the other one */ public boolean contains(Cell other) { - for(int dim=0;dim 0) { + if (Arrays.compareUnsigned( + other.maxPackedValue, + offset, + offset + bytesPerDim, + maxPackedValue, + offset, + offset + bytesPerDim) + > 0) { return false; } } @@ -1493,39 +1838,71 @@ public class TestGeo3DPoint extends LuceneTestCase { @Override public String toString() { - double xMin = Geo3DUtil.decodeValueFloor(NumericUtils.sortableBytesToInt(minPackedValue, 0), shape.getPlanetModel()); - double xMax = Geo3DUtil.decodeValueCeil(NumericUtils.sortableBytesToInt(maxPackedValue, 0), shape.getPlanetModel()); - double yMin = Geo3DUtil.decodeValueFloor(NumericUtils.sortableBytesToInt(minPackedValue, 1 * Integer.BYTES), shape.getPlanetModel()); - double yMax = Geo3DUtil.decodeValueCeil(NumericUtils.sortableBytesToInt(maxPackedValue, 1 * Integer.BYTES), shape.getPlanetModel()); - double zMin = Geo3DUtil.decodeValueFloor(NumericUtils.sortableBytesToInt(minPackedValue, 2 * Integer.BYTES), shape.getPlanetModel()); - double zMax = Geo3DUtil.decodeValueCeil(NumericUtils.sortableBytesToInt(maxPackedValue, 2 * Integer.BYTES), shape.getPlanetModel()); - final XYZSolid xyzSolid = XYZSolidFactory.makeXYZSolid(shape.getPlanetModel(), xMin, xMax, yMin, yMax, zMin, zMax); + double xMin = + Geo3DUtil.decodeValueFloor( + NumericUtils.sortableBytesToInt(minPackedValue, 0), shape.getPlanetModel()); + double xMax = + Geo3DUtil.decodeValueCeil( + NumericUtils.sortableBytesToInt(maxPackedValue, 0), shape.getPlanetModel()); + double yMin = + Geo3DUtil.decodeValueFloor( + NumericUtils.sortableBytesToInt(minPackedValue, 1 * Integer.BYTES), + shape.getPlanetModel()); + double yMax = + Geo3DUtil.decodeValueCeil( + NumericUtils.sortableBytesToInt(maxPackedValue, 1 * Integer.BYTES), + shape.getPlanetModel()); + double zMin = + Geo3DUtil.decodeValueFloor( + NumericUtils.sortableBytesToInt(minPackedValue, 2 * Integer.BYTES), + shape.getPlanetModel()); + double zMax = + Geo3DUtil.decodeValueCeil( + NumericUtils.sortableBytesToInt(maxPackedValue, 2 * Integer.BYTES), + shape.getPlanetModel()); + final XYZSolid xyzSolid = + XYZSolidFactory.makeXYZSolid( + shape.getPlanetModel(), xMin, xMax, yMin, yMax, zMin, zMax); final int relationship = xyzSolid.getRelationship(shape); final boolean pointWithinCell = xyzSolid.isWithin(targetDocPoint); final boolean scaledWithinCell = xyzSolid.isWithin(scaledDocPoint); final String 
relationshipString; switch (relationship) { - case GeoArea.CONTAINS: - relationshipString = "CONTAINS"; - break; - case GeoArea.WITHIN: - relationshipString = "WITHIN"; - break; - case GeoArea.OVERLAPS: - relationshipString = "OVERLAPS"; - break; - case GeoArea.DISJOINT: - relationshipString = "DISJOINT"; - break; - default: - relationshipString = "UNKNOWN"; - break; + case GeoArea.CONTAINS: + relationshipString = "CONTAINS"; + break; + case GeoArea.WITHIN: + relationshipString = "WITHIN"; + break; + case GeoArea.OVERLAPS: + relationshipString = "OVERLAPS"; + break; + case GeoArea.DISJOINT: + relationshipString = "DISJOINT"; + break; + default: + relationshipString = "UNKNOWN"; + break; } - return "Cell(x=" + xMin + " TO " + xMax + " y=" + yMin + " TO " + yMax + " z=" + zMin + " TO " + zMax + - "); Shape relationship = "+relationshipString+ - "; Quantized point within cell = "+pointWithinCell+ - "; Unquantized point within cell = "+scaledWithinCell; + return "Cell(x=" + + xMin + + " TO " + + xMax + + " y=" + + yMin + + " TO " + + yMax + + " z=" + + zMin + + " TO " + + zMax + + "); Shape relationship = " + + relationshipString + + "; Quantized point within cell = " + + pointWithinCell + + "; Unquantized point within cell = " + + scaledWithinCell; } @Override @@ -1535,7 +1912,8 @@ public class TestGeo3DPoint extends LuceneTestCase { } Cell otherCell = (Cell) other; - return Arrays.equals(minPackedValue, otherCell.minPackedValue) && Arrays.equals(maxPackedValue, otherCell.maxPackedValue); + return Arrays.equals(minPackedValue, otherCell.minPackedValue) + && Arrays.equals(maxPackedValue, otherCell.maxPackedValue); } @Override @@ -1545,11 +1923,18 @@ public class TestGeo3DPoint extends LuceneTestCase { } } - public static String explain(String fieldName, GeoShape shape, GeoPoint targetDocPoint, GeoPoint scaledDocPoint, IndexReader reader, int docID) throws Exception { + public static String explain( + String fieldName, + GeoShape shape, + GeoPoint targetDocPoint, + GeoPoint scaledDocPoint, + IndexReader reader, + int docID) + throws Exception { final XYZBounds bounds = new XYZBounds(); shape.getBounds(bounds); - + // First find the leaf reader that owns this doc: int subIndex = ReaderUtil.subIndex(docID, reader.leaves()); LeafReader leafReader = reader.leaves().get(subIndex).reader(); @@ -1558,9 +1943,16 @@ public class TestGeo3DPoint extends LuceneTestCase { b.append("target is in leaf " + leafReader + " of full reader " + reader + "\n"); DocIdSetBuilder hits = new DocIdSetBuilder(leafReader.maxDoc()); - ExplainingVisitor visitor = new ExplainingVisitor(shape, targetDocPoint, scaledDocPoint, - new PointInShapeIntersectVisitor(hits, shape, bounds), - docID - reader.leaves().get(subIndex).docBase, 3, Integer.BYTES, b); + ExplainingVisitor visitor = + new ExplainingVisitor( + shape, + targetDocPoint, + scaledDocPoint, + new PointInShapeIntersectVisitor(hits, shape, bounds), + docID - reader.leaves().get(subIndex).docBase, + 3, + Integer.BYTES, + b); // Do first phase, where we just figure out the "path" that leads to the target docID: leafReader.getPointValues(fieldName).intersect(visitor); diff --git a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/Geo3DUtil.java b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/Geo3DUtil.java index a4b25d09018..0000a96c823 100644 --- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/Geo3DUtil.java +++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/Geo3DUtil.java @@ -19,10 +19,10 @@ package 
org.apache.lucene.spatial3d.geom; class Geo3DUtil { /** How many radians are in one degree */ - final static double RADIANS_PER_DEGREE = Math.PI / 180.0; + static final double RADIANS_PER_DEGREE = Math.PI / 180.0; /** How many degrees in a radian */ - final static double DEGREES_PER_RADIAN = 180.0 / Math.PI; - + static final double DEGREES_PER_RADIAN = 180.0 / Math.PI; + /** Converts degrees to radians */ static double fromDegrees(final double degrees) { return degrees * RADIANS_PER_DEGREE; diff --git a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/RandomGeo3dShapeGenerator.java b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/RandomGeo3dShapeGenerator.java index 153571f9424..ca89e1b9057 100644 --- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/RandomGeo3dShapeGenerator.java +++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/RandomGeo3dShapeGenerator.java @@ -1,132 +1,128 @@ /* -* Licensed to the Apache Software Foundation (ASF) under one or more -* contributor license agreements. See the NOTICE file distributed with -* this work for additional information regarding copyright ownership. -* The ASF licenses this file to You under the Apache License, Version 2.0 -* (the "License"); you may not use this file except in compliance with -* the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package org.apache.lucene.spatial3d.geom; +import static com.carrotsearch.randomizedtesting.RandomizedTest.randomDouble; + import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; - import org.apache.lucene.util.LuceneTestCase; -import static com.carrotsearch.randomizedtesting.RandomizedTest.randomDouble; - /** - * Class for generating random Geo3dShapes. They can be generated under - * given constraints which are expressed as a shape and a relationship. - * - * note that convexity for polygons is defined as polygons that contains - * antipodal points, otherwise they are convex. Internally they can be - * created using GeoConvexPolygons and GeoConcavePolygons. + * Class for generating random Geo3dShapes. They can be generated under given constraints which are + * expressed as a shape and a relationship. * + *
<p>
    note that concavity for polygons is defined as polygons that contain antipodal points, + * otherwise they are convex. Internally they can be created using GeoConvexPolygons and + * GeoConcavePolygons. */ public class RandomGeo3dShapeGenerator extends LuceneTestCase { /* Max num of iterations to find right shape under given constraints */ - final private static int MAX_SHAPE_ITERATIONS = 20; + private static final int MAX_SHAPE_ITERATIONS = 20; /* Max num of iterations to find right point under given constraints */ - final private static int MAX_POINT_ITERATIONS = 1000; + private static final int MAX_POINT_ITERATIONS = 1000; /* Supported shapes */ - final protected static int CONVEX_POLYGON = 0; - final protected static int CONVEX_POLYGON_WITH_HOLES = 1; - final protected static int CONCAVE_POLYGON = 2; - final protected static int CONCAVE_POLYGON_WITH_HOLES = 3; - final protected static int COMPLEX_POLYGON = 4; - final protected static int CIRCLE = 5; - final protected static int RECTANGLE = 6; - final protected static int PATH = 7; - final protected static int COLLECTION = 8; - final protected static int POINT = 9; - final protected static int LINE = 10; - final protected static int EXACT_CIRCLE = 11; + protected static final int CONVEX_POLYGON = 0; + protected static final int CONVEX_POLYGON_WITH_HOLES = 1; + protected static final int CONCAVE_POLYGON = 2; + protected static final int CONCAVE_POLYGON_WITH_HOLES = 3; + protected static final int COMPLEX_POLYGON = 4; + protected static final int CIRCLE = 5; + protected static final int RECTANGLE = 6; + protected static final int PATH = 7; + protected static final int COLLECTION = 8; + protected static final int POINT = 9; + protected static final int LINE = 10; + protected static final int EXACT_CIRCLE = 11; /* Helper shapes for generating constraints which are just three sided polygons */ - final protected static int CONVEX_SIMPLE_POLYGON = 500; - final protected static int CONCAVE_SIMPLE_POLYGON = 501; + protected static final int CONVEX_SIMPLE_POLYGON = 500; + protected static final int CONCAVE_SIMPLE_POLYGON = 501; /** - * Method that returns a random generated Planet model from the supported - * Planet models. currently SPHERE and WGS84 + * Method that returns a random generated Planet model from the supported Planet models; currently + * SPHERE and WGS84 * * @return a random generated Planet model */ public PlanetModel randomPlanetModel() { final int shapeType = random().nextInt(2); switch (shapeType) { - case 0: { - return PlanetModel.SPHERE; - } - case 1: { - return PlanetModel.WGS84; - } + case 0: + { + return PlanetModel.SPHERE; + } + case 1: + { + return PlanetModel.WGS84; + } default: throw new IllegalStateException("Unexpected planet model"); } } /** - * Method that returns a random generated a random Shape code from all - * supported shapes. + * Method that returns a random generated Shape code from all supported shapes. * * @return a random generated shape code */ - public int randomShapeType(){ + public int randomShapeType() { return random().nextInt(12); } /** - * Method that returns a random generated GeoAreaShape code from all - * supported GeoAreaShapes. * - * We are removing Collections because it is difficult to create shapes - * with properties in some cases. + *
<p>
    We are removing Collections because it is difficult to create shapes with properties in some + * cases. * * @return a random generated polygon code */ - public int randomGeoAreaShapeType(){ + public int randomGeoAreaShapeType() { return random().nextInt(12); } /** - * Method that returns a random generated a random Shape code from all - * convex supported shapes. + * Method that returns a random generated a random Shape code from all convex supported shapes. * * @return a random generated convex shape code */ - public int randomConvexShapeType(){ + public int randomConvexShapeType() { int shapeType = randomShapeType(); - while (isConcave(shapeType)){ + while (isConcave(shapeType)) { shapeType = randomShapeType(); } return shapeType; } /** - * Method that returns a random generated a random Shape code from all - * concave supported shapes. + * Method that returns a random generated a random Shape code from all concave supported shapes. * * @return a random generated concave shape code */ - public int randomConcaveShapeType(){ + public int randomConcaveShapeType() { int shapeType = randomShapeType(); - while (!isConcave(shapeType)){ + while (!isConcave(shapeType)) { shapeType = randomShapeType(); } return shapeType; @@ -137,7 +133,7 @@ public class RandomGeo3dShapeGenerator extends LuceneTestCase { * * @return true if the shape represented by the code is concave */ - public boolean isConcave(int shapeType){ + public boolean isConcave(int shapeType) { return (shapeType == CONCAVE_POLYGON); } @@ -146,7 +142,7 @@ public class RandomGeo3dShapeGenerator extends LuceneTestCase { * * @return an empty Constraints object */ - public Constraints getEmptyConstraint(){ + public Constraints getEmptyConstraint() { return new Constraints(); } @@ -165,8 +161,8 @@ public class RandomGeo3dShapeGenerator extends LuceneTestCase { } /** - * Method that returns a random generated GeoPoint under given constraints. Returns - * NULL if it cannot find a point under the given constraints. + * Method that returns a random generated GeoPoint under given constraints. Returns NULL if it + * cannot find a point under the given constraints. * * @param planetModel The planet model. * @param constraints The given constraints. @@ -175,13 +171,13 @@ public class RandomGeo3dShapeGenerator extends LuceneTestCase { public GeoPoint randomGeoPoint(PlanetModel planetModel, Constraints constraints) { int iterations = 0; while (iterations < MAX_POINT_ITERATIONS) { - double lat = randomDouble() * Math.PI/2; + double lat = randomDouble() * Math.PI / 2; if (random().nextBoolean()) { - lat = (-1)*lat; + lat = (-1) * lat; } - double lon = randomDouble() * Math.PI; + double lon = randomDouble() * Math.PI; if (random().nextBoolean()) { - lon = (-1)*lon; + lon = (-1) * lon; } iterations++; GeoPoint point = new GeoPoint(planetModel, lat, lon); @@ -199,25 +195,26 @@ public class RandomGeo3dShapeGenerator extends LuceneTestCase { * @param planetModel The planet model. * @return The random generated GeoAreaShape. */ - public GeoAreaShape randomGeoAreaShape(int shapeType, PlanetModel planetModel){ + public GeoAreaShape randomGeoAreaShape(int shapeType, PlanetModel planetModel) { GeoAreaShape geoAreaShape = null; - while (geoAreaShape == null){ - geoAreaShape = randomGeoAreaShape(shapeType,planetModel,new Constraints()); + while (geoAreaShape == null) { + geoAreaShape = randomGeoAreaShape(shapeType, planetModel, new Constraints()); } return geoAreaShape; } /** - * Method that returns a random generated GeoAreaShape under given constraints. 
Returns - * NULL if it cannot build the GeoAreaShape under the given constraints. + * Method that returns a random generated GeoAreaShape under given constraints. Returns NULL if it + * cannot build the GeoAreaShape under the given constraints. * * @param shapeType The GeoAreaShape code. * @param planetModel The planet model. * @param constraints The given constraints. * @return The random generated GeoAreaShape. */ - public GeoAreaShape randomGeoAreaShape(int shapeType, PlanetModel planetModel, Constraints constraints){ - return (GeoAreaShape)randomGeoShape(shapeType, planetModel, constraints); + public GeoAreaShape randomGeoAreaShape( + int shapeType, PlanetModel planetModel, Constraints constraints) { + return (GeoAreaShape) randomGeoShape(shapeType, planetModel, constraints); } /** @@ -227,91 +224,107 @@ public class RandomGeo3dShapeGenerator extends LuceneTestCase { * @param planetModel The planet model. * @return The random generated GeoShape. */ - public GeoShape randomGeoShape(int shapeType, PlanetModel planetModel){ + public GeoShape randomGeoShape(int shapeType, PlanetModel planetModel) { GeoShape geoShape = null; - while (geoShape == null){ - geoShape = randomGeoShape(shapeType,planetModel,new Constraints()); + while (geoShape == null) { + geoShape = randomGeoShape(shapeType, planetModel, new Constraints()); } return geoShape; } /** - * Method that returns a random generated GeoShape under given constraints. Returns - * NULL if it cannot build the GeoShape under the given constraints. + * Method that returns a random generated GeoShape under given constraints. Returns NULL if it + * cannot build the GeoShape under the given constraints. * * @param shapeType The polygon code. * @param planetModel The planet model. * @param constraints The given constraints. * @return The random generated GeoShape. 
*/ - public GeoShape randomGeoShape(int shapeType, PlanetModel planetModel, Constraints constraints){ + public GeoShape randomGeoShape(int shapeType, PlanetModel planetModel, Constraints constraints) { switch (shapeType) { - case CONVEX_POLYGON: { - return convexPolygon(planetModel, constraints); - } - case CONVEX_POLYGON_WITH_HOLES: { - return convexPolygonWithHoles(planetModel, constraints); - } - case CONCAVE_POLYGON: { - return concavePolygon(planetModel, constraints); - } - case CONCAVE_POLYGON_WITH_HOLES: { - return concavePolygonWithHoles(planetModel, constraints); - } - case COMPLEX_POLYGON: { - return complexPolygon(planetModel, constraints); - } - case CIRCLE: { - return circle(planetModel, constraints); - } - case RECTANGLE: { - return rectangle(planetModel, constraints); - } - case PATH: { - return path(planetModel, constraints); - } - case COLLECTION: { - return collection(planetModel, constraints); - } - case POINT: { - return point(planetModel, constraints); - } - case LINE: { - return line(planetModel, constraints); - } - case CONVEX_SIMPLE_POLYGON: { - return simpleConvexPolygon(planetModel, constraints); - } - case CONCAVE_SIMPLE_POLYGON: { - return concaveSimplePolygon(planetModel, constraints); - } - case EXACT_CIRCLE: { - return exactCircle(planetModel, constraints); - } + case CONVEX_POLYGON: + { + return convexPolygon(planetModel, constraints); + } + case CONVEX_POLYGON_WITH_HOLES: + { + return convexPolygonWithHoles(planetModel, constraints); + } + case CONCAVE_POLYGON: + { + return concavePolygon(planetModel, constraints); + } + case CONCAVE_POLYGON_WITH_HOLES: + { + return concavePolygonWithHoles(planetModel, constraints); + } + case COMPLEX_POLYGON: + { + return complexPolygon(planetModel, constraints); + } + case CIRCLE: + { + return circle(planetModel, constraints); + } + case RECTANGLE: + { + return rectangle(planetModel, constraints); + } + case PATH: + { + return path(planetModel, constraints); + } + case COLLECTION: + { + return collection(planetModel, constraints); + } + case POINT: + { + return point(planetModel, constraints); + } + case LINE: + { + return line(planetModel, constraints); + } + case CONVEX_SIMPLE_POLYGON: + { + return simpleConvexPolygon(planetModel, constraints); + } + case CONCAVE_SIMPLE_POLYGON: + { + return concaveSimplePolygon(planetModel, constraints); + } + case EXACT_CIRCLE: + { + return exactCircle(planetModel, constraints); + } default: throw new IllegalStateException("Unexpected shape type"); } } /** - * Method that returns a random generated a GeoPointShape under given constraints. Returns - * NULL if it cannot build the GeoCircle under the given constraints. + * Method that returns a random generated a GeoPointShape under given constraints. Returns NULL if + * it cannot build the GeoCircle under the given constraints. * * @param planetModel The planet model. * @param constraints The given constraints. * @return The random generated GeoPointShape. 
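Taken together with the overloads above, typical use is a short sequence of calls; a usage sketch (not part of the patch, but calling only methods shown in this file):

  RandomGeo3dShapeGenerator generator = new RandomGeo3dShapeGenerator();
  PlanetModel planetModel = generator.randomPlanetModel(); // SPHERE or WGS84
  int shapeType = generator.randomShapeType(); // one of the twelve shape codes above
  GeoShape shape = generator.randomGeoShape(shapeType, planetModel); // loops until non-null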
*/ - private GeoPointShape point(PlanetModel planetModel , Constraints constraints) { - int iterations=0; + private GeoPointShape point(PlanetModel planetModel, Constraints constraints) { + int iterations = 0; while (iterations < MAX_SHAPE_ITERATIONS) { iterations++; final GeoPoint point = randomGeoPoint(planetModel, constraints); - if (point == null){ + if (point == null) { continue; } try { - GeoPointShape pointShape = GeoPointShapeFactory.makeGeoPointShape(planetModel, point.getLatitude(), point.getLongitude()); + GeoPointShape pointShape = + GeoPointShapeFactory.makeGeoPointShape( + planetModel, point.getLatitude(), point.getLongitude()); if (!constraints.valid(pointShape)) { continue; } @@ -324,25 +337,27 @@ public class RandomGeo3dShapeGenerator extends LuceneTestCase { } /** - * Method that returns a random generated a GeoCircle under given constraints. Returns - * NULL if it cannot build the GeoCircle under the given constraints. + * Method that returns a random generated a GeoCircle under given constraints. Returns NULL if it + * cannot build the GeoCircle under the given constraints. * * @param planetModel The planet model. * @param constraints The given constraints. * @return The random generated GeoCircle. */ - private GeoCircle circle(PlanetModel planetModel , Constraints constraints) { - int iterations=0; + private GeoCircle circle(PlanetModel planetModel, Constraints constraints) { + int iterations = 0; while (iterations < MAX_SHAPE_ITERATIONS) { iterations++; final GeoPoint center = randomGeoPoint(planetModel, constraints); - if (center == null){ + if (center == null) { continue; } final double radius = randomCutoffAngle(); try { - GeoCircle circle = GeoCircleFactory.makeGeoCircle(planetModel, center.getLatitude(), center.getLongitude(), radius); + GeoCircle circle = + GeoCircleFactory.makeGeoCircle( + planetModel, center.getLatitude(), center.getLongitude(), radius); if (!constraints.valid(circle)) { continue; } @@ -355,26 +370,28 @@ public class RandomGeo3dShapeGenerator extends LuceneTestCase { } /** - * Method that returns a random generated a GeoCircle under given constraints. Returns - * NULL if it cannot build the GeoCircle under the given constraints. + * Method that returns a random generated a GeoCircle under given constraints. Returns NULL if it + * cannot build the GeoCircle under the given constraints. * * @param planetModel The planet model. * @param constraints The given constraints. * @return The random generated GeoCircle. 
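Each factory method above follows the same rejection-sampling loop: draw random inputs, try to build the shape, discard it if construction fails or a constraint is violated, and give up after MAX_SHAPE_ITERATIONS attempts. Distilled into a generic helper (an illustrative sketch with invented names, not code from the patch):

  import java.util.function.Predicate;
  import java.util.function.Supplier;

  final class RejectionSampler {
    /** Returns the first accepted candidate, or null after maxIterations attempts. */
    static <T> T firstValid(Supplier<T> candidate, Predicate<T> accept, int maxIterations) {
      for (int i = 0; i < maxIterations; i++) {
        T value = candidate.get(); // may be null when the underlying generator gives up
        if (value != null && accept.test(value)) {
          return value;
        }
      }
      return null; // mirrors the "Returns NULL" contract documented above
    }
  }

The real methods additionally wrap the factory call in a try block, treating a construction failure the same as a rejected candidate.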
*/ - private GeoCircle exactCircle(PlanetModel planetModel , Constraints constraints) { - int iterations=0; + private GeoCircle exactCircle(PlanetModel planetModel, Constraints constraints) { + int iterations = 0; while (iterations < MAX_SHAPE_ITERATIONS) { iterations++; final GeoPoint center = randomGeoPoint(planetModel, constraints); - if (center == null){ + if (center == null) { continue; } final double radius = randomCutoffAngle(); - final int pow = random().nextInt(10) +3; + final int pow = random().nextInt(10) + 3; final double accuracy = random().nextDouble() * Math.pow(10, (-1) * pow); try { - GeoCircle circle = GeoCircleFactory.makeExactGeoCircle(planetModel, center.getLatitude(), center.getLongitude(), radius, accuracy); + GeoCircle circle = + GeoCircleFactory.makeExactGeoCircle( + planetModel, center.getLatitude(), center.getLongitude(), radius, accuracy); if (!constraints.valid(circle)) { continue; } @@ -387,8 +404,8 @@ public class RandomGeo3dShapeGenerator extends LuceneTestCase { } /** - * Method that returns a random generated a GeoBBox under given constraints. Returns - * NULL if it cannot build the GeoBBox under the given constraints. + * Method that returns a random generated a GeoBBox under given constraints. Returns NULL if it + * cannot build the GeoBBox under the given constraints. * * @param planetModel The planet model. * @param constraints The given constraints. @@ -400,11 +417,11 @@ public class RandomGeo3dShapeGenerator extends LuceneTestCase { while (iterations < MAX_SHAPE_ITERATIONS) { iterations++; final GeoPoint point1 = randomGeoPoint(planetModel, constraints); - if (point1 == null){ + if (point1 == null) { continue; } final GeoPoint point2 = randomGeoPoint(planetModel, constraints); - if (point2 == null){ + if (point2 == null) { continue; } @@ -427,8 +444,8 @@ public class RandomGeo3dShapeGenerator extends LuceneTestCase { } /** - * Method that returns a random generated degenerate GeoPath under given constraints. Returns - * NULL if it cannot build the degenerate GeoPath under the given constraints. + * Method that returns a random generated degenerate GeoPath under given constraints. Returns NULL + * if it cannot build the degenerate GeoPath under the given constraints. * * @param planetModel The planet model. * @param constraints The given constraints. @@ -438,13 +455,15 @@ public class RandomGeo3dShapeGenerator extends LuceneTestCase { int iterations = 0; while (iterations < MAX_SHAPE_ITERATIONS) { iterations++; - int vertexCount = random().nextInt(2) + 2; + int vertexCount = random().nextInt(2) + 2; List geoPoints = points(vertexCount, planetModel, constraints); - if (geoPoints.size() < 2){ + if (geoPoints.size() < 2) { continue; } try { - GeoPath path = GeoPathFactory.makeGeoPath(planetModel, 0, geoPoints.toArray(new GeoPoint[geoPoints.size()])); + GeoPath path = + GeoPathFactory.makeGeoPath( + planetModel, 0, geoPoints.toArray(new GeoPoint[geoPoints.size()])); if (!constraints.valid(path)) { continue; } @@ -457,8 +476,8 @@ public class RandomGeo3dShapeGenerator extends LuceneTestCase { } /** - * Method that returns a random generated a GeoPath under given constraints. Returns - * NULL if it cannot build the GeoPath under the given constraints. + * Method that returns a random generated a GeoPath under given constraints. Returns NULL if it + * cannot build the GeoPath under the given constraints. * * @param planetModel The planet model. * @param constraints The given constraints. 
@@ -468,14 +487,16 @@ public class RandomGeo3dShapeGenerator extends LuceneTestCase { int iterations = 0; while (iterations < MAX_SHAPE_ITERATIONS) { iterations++; - int vertexCount = random().nextInt(2) + 2; + int vertexCount = random().nextInt(2) + 2; List geoPoints = points(vertexCount, planetModel, constraints); - if (geoPoints.size() < 2){ + if (geoPoints.size() < 2) { continue; } - double width =randomCutoffAngle(); + double width = randomCutoffAngle(); try { - GeoPath path = GeoPathFactory.makeGeoPath(planetModel, width, geoPoints.toArray(new GeoPoint[geoPoints.size()])); + GeoPath path = + GeoPathFactory.makeGeoPath( + planetModel, width, geoPoints.toArray(new GeoPoint[geoPoints.size()])); if (!constraints.valid(path)) { continue; } @@ -488,8 +509,8 @@ } /** - * Method that returns a random generated a GeoCompositeMembershipShape under given constraints. Returns - * NULL if it cannot build the GGeoCompositeMembershipShape under the given constraints. + * Method that returns a random generated GeoCompositeMembershipShape under given constraints. + * Returns NULL if it cannot build the GeoCompositeMembershipShape under the given constraints. * * @param planetModel The planet model. * @param constraints The given constraints. @@ -499,15 +520,15 @@ int iterations = 0; while (iterations < MAX_SHAPE_ITERATIONS) { iterations++; - int numberShapes = random().nextInt(3) + 2; + int numberShapes = random().nextInt(3) + 2; GeoCompositeAreaShape collection = new GeoCompositeAreaShape(planetModel); - for(int i=0; i geoPoints = points(vertexCount,planetModel, constraints); - if (geoPoints.size() < 3){ + List geoPoints = points(vertexCount, planetModel, constraints); + if (geoPoints.size() < 3) { continue; } List orderedGeoPoints = orderPoints(geoPoints); @@ -547,8 +568,8 @@ } /** - * Method that returns a random generated a convex GeoPolygon with holes under given constraints. Returns - * NULL if it cannot build the GeoPolygon with holes under the given constraints. + * Method that returns a random generated convex GeoPolygon with holes under given constraints. + * Returns NULL if it cannot build the GeoPolygon with holes under the given constraints. * * @param planetModel The planet model. * @param constraints The given constraints.
@@ -559,38 +580,39 @@ public class RandomGeo3dShapeGenerator extends LuceneTestCase { int iterations = 0; while (iterations < MAX_SHAPE_ITERATIONS) { iterations++; - List geoPoints = points(vertexCount,planetModel, constraints); - if (geoPoints.size() < 3){ + List geoPoints = points(vertexCount, planetModel, constraints); + if (geoPoints.size() < 3) { continue; } List orderedGeoPoints = orderPoints(geoPoints); try { GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(planetModel, orderedGeoPoints); - //polygon should comply with all constraints except disjoint as we have holes + // polygon should comply with all constraints except disjoint as we have holes Constraints polygonConstraints = new Constraints(); polygonConstraints.putAll(constraints.getContains()); polygonConstraints.putAll(constraints.getWithin()); polygonConstraints.putAll(constraints.getDisjoint()); - if (!polygonConstraints.valid(polygon) || isConcave(planetModel, polygon)){ + if (!polygonConstraints.valid(polygon) || isConcave(planetModel, polygon)) { continue; } - //hole must overlap with polygon and comply with any CONTAINS constraint. + // hole must overlap with polygon and comply with any CONTAINS constraint. Constraints holeConstraints = new Constraints(); holeConstraints.putAll(constraints.getContains()); - holeConstraints.put(polygon,GeoArea.OVERLAPS); - //Points must be with in the polygon and must comply + holeConstraints.put(polygon, GeoArea.OVERLAPS); + // Points must be within the polygon and must comply // CONTAINS and DISJOINT constraints Constraints pointsConstraints = new Constraints(); - pointsConstraints.put(polygon,GeoArea.WITHIN); + pointsConstraints.put(polygon, GeoArea.WITHIN); pointsConstraints.putAll(constraints.getContains()); pointsConstraints.putAll(constraints.getDisjoint()); - List holes = concavePolygonHoles(planetModel, polygon, holeConstraints, pointsConstraints); - //we should have at least one hole - if (holes.size() == 0){ + List holes = + concavePolygonHoles(planetModel, polygon, holeConstraints, pointsConstraints); + // we should have at least one hole + if (holes.size() == 0) { continue; } - polygon = GeoPolygonFactory.makeGeoPolygon(planetModel,orderedGeoPoints,holes); - if (!constraints.valid(polygon) || isConcave(planetModel, polygon)){ + polygon = GeoPolygonFactory.makeGeoPolygon(planetModel, orderedGeoPoints, holes); + if (!constraints.valid(polygon) || isConcave(planetModel, polygon)) { continue; } return polygon; @@ -602,8 +624,8 @@ public class RandomGeo3dShapeGenerator extends LuceneTestCase { } /** - * Method that returns a random list if concave GeoPolygons under given constraints. Method - * use to generate convex holes. Note that constraints for points and holes are different, + * Method that returns a random list of concave GeoPolygons under given constraints. Method used to + * generate convex holes. Note that constraints for points and holes are different. * * @param planetModel The planet model. * @param polygon The polygon where the holes are within. @@ -611,27 +633,28 @@ * @param pointConstraints The given constraints that a point must comply. * @return The random generated GeoPolygon.
*/ - private List concavePolygonHoles(PlanetModel planetModel, - GeoPolygon polygon, - Constraints holeConstraints, - Constraints pointConstraints) { - int iterations =0; + private List concavePolygonHoles( + PlanetModel planetModel, + GeoPolygon polygon, + Constraints holeConstraints, + Constraints pointConstraints) { + int iterations = 0; int holesCount = random().nextInt(3) + 1; List holes = new ArrayList<>(); while (iterations < MAX_SHAPE_ITERATIONS) { iterations++; int vertexCount = random().nextInt(3) + 3; List geoPoints = points(vertexCount, planetModel, pointConstraints); - if (geoPoints.size() < 3){ + if (geoPoints.size() < 3) { continue; } geoPoints = orderPoints(geoPoints); - GeoPolygon inversePolygon = GeoPolygonFactory.makeGeoPolygon(planetModel, geoPoints); - //The convex polygon must be within + GeoPolygon inversePolygon = GeoPolygonFactory.makeGeoPolygon(planetModel, geoPoints); + // The convex polygon must be within if (inversePolygon == null || polygon.getRelationship(inversePolygon) != GeoArea.WITHIN) { continue; } - //make it concave + // make it concave Collections.reverse(geoPoints); try { GeoPolygon hole = GeoPolygonFactory.makeGeoPolygon(planetModel, geoPoints); @@ -639,7 +662,7 @@ public class RandomGeo3dShapeGenerator extends LuceneTestCase { continue; } holes.add(hole); - if (holes.size() == holesCount){ + if (holes.size() == holesCount) { return holes; } pointConstraints.put(hole, GeoArea.DISJOINT); @@ -664,8 +687,8 @@ public class RandomGeo3dShapeGenerator extends LuceneTestCase { int iterations = 0; while (iterations < MAX_SHAPE_ITERATIONS) { iterations++; - List geoPoints = points(vertexCount,planetModel, constraints); - if (geoPoints.size() < 3){ + List geoPoints = points(vertexCount, planetModel, constraints); + if (geoPoints.size() < 3) { continue; } List orderedGeoPoints = orderPoints(geoPoints); @@ -684,9 +707,9 @@ public class RandomGeo3dShapeGenerator extends LuceneTestCase { } /** - * Method that returns a random generated a concave GeoPolygon with holes under given constraints. Returns - * NULL if it cannot build the GeoPolygon under the given constraints. Note that the final GeoPolygon is - * convex as the hole wraps the convex GeoPolygon. + * Method that returns a random generated a concave GeoPolygon with holes under given constraints. + * Returns NULL if it cannot build the GeoPolygon under the given constraints. Note that the final + * GeoPolygon is convex as the hole wraps the convex GeoPolygon. * * @param planetModel The planet model. * @param constraints The given constraints. @@ -697,14 +720,14 @@ public class RandomGeo3dShapeGenerator extends LuceneTestCase { int iterations = 0; while (iterations < MAX_SHAPE_ITERATIONS) { iterations++; - //we first build the hole. We consider all constraints except + // we first build the hole. We consider all constraints except // disjoint as we have a hole Constraints holeConstraints = new Constraints(); holeConstraints.putAll(constraints.getContains()); holeConstraints.putAll(constraints.getWithin()); holeConstraints.putAll(constraints.getOverlaps()); GeoPolygon hole = convexPolygon(planetModel, holeConstraints); - if (hole == null){ + if (hole == null) { continue; } // Now we get points for polygon. 
Must be within the hole @@ -712,21 +735,23 @@ public class RandomGeo3dShapeGenerator extends LuceneTestCase { Constraints pointConstraints = new Constraints(); pointConstraints.put(hole, GeoArea.WITHIN); pointConstraints.putAll(constraints.getContains()); - List geoPoints = points(vertexCount,planetModel, pointConstraints); - if (geoPoints.size() < 3){ + List geoPoints = points(vertexCount, planetModel, pointConstraints); + if (geoPoints.size() < 3) { continue; } try { List orderedGeoPoints = orderPoints(geoPoints); - GeoPolygon inversePolygon = GeoPolygonFactory.makeGeoPolygon(planetModel, geoPoints); - //The convex polygon must be within the hole + GeoPolygon inversePolygon = GeoPolygonFactory.makeGeoPolygon(planetModel, geoPoints); + // The convex polygon must be within the hole if (inversePolygon == null || hole.getRelationship(inversePolygon) != GeoArea.WITHIN) { continue; } Collections.reverse(orderedGeoPoints); - GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(planetModel, orderedGeoPoints, Collections.singletonList(hole)); - //final polygon must be convex - if (!constraints.valid(polygon) || isConcave(planetModel,polygon)) { + GeoPolygon polygon = + GeoPolygonFactory.makeGeoPolygon( + planetModel, orderedGeoPoints, Collections.singletonList(hole)); + // final polygon must be convex + if (!constraints.valid(polygon) || isConcave(planetModel, polygon)) { continue; } return polygon; @@ -738,30 +763,30 @@ public class RandomGeo3dShapeGenerator extends LuceneTestCase { } /** - * Method that returns a random generated complex GeoPolygon under given constraints. Returns - * NULL if it cannot build the complex GeoPolygon under the given constraints. + * Method that returns a random generated complex GeoPolygon under given constraints. Returns NULL + * if it cannot build the complex GeoPolygon under the given constraints. * * @param planetModel The planet model. * @param constraints The given constraints. * @return The random generated GeoPolygon. */ private GeoPolygon complexPolygon(PlanetModel planetModel, Constraints constraints) { - int polygonsCount =random().nextInt(2) + 1; + int polygonsCount = random().nextInt(2) + 1; int iterations = 0; while (iterations < MAX_SHAPE_ITERATIONS) { iterations++; List polDescription = new ArrayList<>(); - while(polDescription.size() < polygonsCount){ + while (polDescription.size() < polygonsCount) { int vertexCount = random().nextInt(14) + 3; - List geoPoints = points(vertexCount,planetModel, constraints); - if (geoPoints.size() < 3){ + List geoPoints = points(vertexCount, planetModel, constraints); + if (geoPoints.size() < 3) { break; } orderPoints(geoPoints); polDescription.add(new GeoPolygonFactory.PolygonDescription(geoPoints)); } try { - GeoPolygon polygon = GeoPolygonFactory.makeLargeGeoPolygon(planetModel,polDescription); + GeoPolygon polygon = GeoPolygonFactory.makeLargeGeoPolygon(planetModel, polDescription); if (!constraints.valid(polygon)) { continue; } @@ -774,9 +799,9 @@ public class RandomGeo3dShapeGenerator extends LuceneTestCase { } /** - * Method that returns a random generated a concave square GeoPolygon under given constraints. Returns - * NULL if it cannot build the concave GeoPolygon under the given constraints. This shape is an utility - * to build constraints. + * Method that returns a random generated concave square GeoPolygon under given constraints. + * Returns NULL if it cannot build the concave GeoPolygon under the given constraints. This shape + * is a utility to build constraints.
* * @param planetModel The planet model. * @param constraints The given constraints. @@ -786,14 +811,14 @@ public class RandomGeo3dShapeGenerator extends LuceneTestCase { int iterations = 0; while (iterations < MAX_SHAPE_ITERATIONS) { iterations++; - List points = points(3,planetModel,constraints); - if (points.size() < 3){ + List points = points(3, planetModel, constraints); + if (points.size() < 3) { continue; } points = orderPoints(points); try { - GeoPolygon polygon = GeoPolygonFactory.makeGeoConvexPolygon(planetModel, points); - if(!constraints.valid(polygon) || isConcave(planetModel,polygon)){ + GeoPolygon polygon = GeoPolygonFactory.makeGeoConvexPolygon(planetModel, points); + if (!constraints.valid(polygon) || isConcave(planetModel, polygon)) { continue; } return polygon; @@ -805,9 +830,9 @@ public class RandomGeo3dShapeGenerator extends LuceneTestCase { } /** - * Method that returns a random generated a convex square GeoPolygon under given constraints. Returns - * NULL if it cannot build the convex GeoPolygon under the given constraints. This shape is an utility - * to build constraints. + * Method that returns a random generated convex square GeoPolygon under given constraints. + * Returns NULL if it cannot build the convex GeoPolygon under the given constraints. This shape + * is a utility to build constraints. * * @param planetModel The planet model. * @param constraints The given constraints. @@ -818,14 +843,14 @@ while (iterations < MAX_SHAPE_ITERATIONS) { iterations++; List points = points(3, planetModel, constraints); - if (points.size() < 3){ + if (points.size() < 3) { continue; } points = orderPoints(points); Collections.reverse(points); try { - GeoPolygon polygon = GeoPolygonFactory.makeGeoConcavePolygon(planetModel, points); - if(!constraints.valid(polygon) || isConvex(planetModel, polygon)){ + GeoPolygon polygon = GeoPolygonFactory.makeGeoConcavePolygon(planetModel, points); + if (!constraints.valid(polygon) || isConvex(planetModel, polygon)) { continue; } return polygon; @@ -837,49 +862,49 @@ public class RandomGeo3dShapeGenerator extends LuceneTestCase { } /** - * Method that returns a random list of generated GeoPoints under given constraints. The - * number of points returned might be lower than the requested. + * Method that returns a random list of generated GeoPoints under given constraints. The number of + * points returned might be lower than requested. * * @param count The number of points * @param planetModel The planet model. * @param constraints The given constraints. * @return The random generated List of GeoPoints. */ - private List points(int count, PlanetModel planetModel, Constraints constraints){ + private List points(int count, PlanetModel planetModel, Constraints constraints) { List geoPoints = new ArrayList<>(count); - for(int i= 0; i< count; i++) { + for (int i = 0; i < count; i++) { GeoPoint point = randomGeoPoint(planetModel, constraints); - if (point != null){ + if (point != null) { geoPoints.add(point); } } - return geoPoints; + return geoPoints; } /** - * Check if a GeoPolygon is pure concave. Note that our definition for concavity is that the polygon - * contains antipodal points. + * Check if a GeoPolygon is pure concave. Note that our definition for concavity is that the + * polygon contains antipodal points. * * @param planetModel The planet model. * @param shape The polygon to check. * @return True if the polygon contains antipodal points.
*/ - private boolean isConcave(PlanetModel planetModel, GeoPolygon shape){ - return (shape.isWithin(planetModel.NORTH_POLE) && shape.isWithin(planetModel.SOUTH_POLE))|| - (shape.isWithin(planetModel.MAX_X_POLE) && shape.isWithin(planetModel.MIN_X_POLE)) || - (shape.isWithin(planetModel.MAX_Y_POLE) && shape.isWithin(planetModel.MIN_Y_POLE)); + private boolean isConcave(PlanetModel planetModel, GeoPolygon shape) { + return (shape.isWithin(planetModel.NORTH_POLE) && shape.isWithin(planetModel.SOUTH_POLE)) + || (shape.isWithin(planetModel.MAX_X_POLE) && shape.isWithin(planetModel.MIN_X_POLE)) + || (shape.isWithin(planetModel.MAX_Y_POLE) && shape.isWithin(planetModel.MIN_Y_POLE)); } /** - * Check if a GeoPolygon is pure convex. Note that our definition for convexity is that the polygon - * does not contain antipodal points. + * Check if a GeoPolygon is pure convex. Note that our definition for convexity is that the + * polygon does not contain antipodal points. * * @param planetModel The planet model. * @param shape The polygon to check. * @return True if the polygon does not contain antipodal points. */ - private boolean isConvex(PlanetModel planetModel, GeoPolygon shape){ - return !isConcave(planetModel,shape); + private boolean isConvex(PlanetModel planetModel, GeoPolygon shape) { + return !isConcave(planetModel, shape); } /** @@ -901,23 +926,30 @@ public class RandomGeo3dShapeGenerator extends LuceneTestCase { double x = 0; double y = 0; double z = 0; - //get center of mass + // get center of mass for (GeoPoint point : points) { x += point.x; y += point.y; z += point.z; } Map pointWithAngle = new HashMap<>(); - //get angle respect center of mass + // get angle with respect to center of mass for (GeoPoint point : points) { GeoPoint center = new GeoPoint(x / points.size(), y / points.size(), z / points.size()); - double cs = Math.sin(center.getLatitude()) * Math.sin(point.getLatitude()) - + Math.cos(center.getLatitude()) * Math.cos(point.getLatitude()) * Math.cos(point.getLongitude() - center.getLongitude()); - double posAng = Math.atan2(Math.cos(center.getLatitude()) * Math.cos(point.getLatitude()) * Math.sin(point.getLongitude() - center.getLongitude()), - Math.sin(point.getLatitude()) - Math.sin(center.getLatitude())*cs); + double cs = + Math.sin(center.getLatitude()) * Math.sin(point.getLatitude()) + + Math.cos(center.getLatitude()) + * Math.cos(point.getLatitude()) + * Math.cos(point.getLongitude() - center.getLongitude()); + double posAng = + Math.atan2( + Math.cos(center.getLatitude()) + * Math.cos(point.getLatitude()) + * Math.sin(point.getLongitude() - center.getLongitude()), + Math.sin(point.getLatitude()) - Math.sin(center.getLatitude()) * cs); pointWithAngle.put(posAng, point); } - //order points + // order points List angles = new ArrayList<>(pointWithAngle.keySet()); Collections.sort(angles); Collections.reverse(angles); @@ -929,11 +961,10 @@ public class RandomGeo3dShapeGenerator extends LuceneTestCase { } /** - * Class that holds the constraints that are given to - * build shapes. It consists in a list of GeoAreaShapes - * and relationships the new shape needs to satisfy. + * Class that holds the constraints that are given to build shapes. It consists of a list of + * GeoAreaShapes and relationships the new shape needs to satisfy. */ - class Constraints extends HashMap{ + class Constraints extends HashMap { /** * Check if the shape is valid under the constraints.
@@ -942,7 +973,7 @@ public class RandomGeo3dShapeGenerator extends LuceneTestCase { * @return true if the shape satisfy the constraints, else false. */ public boolean valid(GeoShape shape) { - if (shape == null){ + if (shape == null) { return false; } for (GeoAreaShape constraint : keySet()) { @@ -977,7 +1008,7 @@ public class RandomGeo3dShapeGenerator extends LuceneTestCase { * @return true if the point satisfy the constraint, else false. */ private boolean validPoint(GeoPoint point, GeoShape shape, int relationship) { - //For GeoCompositeMembershipShape we only consider the first shape to help + // For GeoCompositeMembershipShape we only consider the first shape to help // converging if (relationship == GeoArea.WITHIN && shape instanceof GeoCompositeMembershipShape) { shape = (((GeoCompositeMembershipShape) shape).shapes.get(0)); @@ -1001,7 +1032,7 @@ public class RandomGeo3dShapeGenerator extends LuceneTestCase { * * @return the CONTAINS constraints. */ - public Constraints getContains(){ + public Constraints getContains() { return getConstraintsOfType(GeoArea.CONTAINS); } @@ -1010,7 +1041,7 @@ public class RandomGeo3dShapeGenerator extends LuceneTestCase { * * @return the WITHIN constraints. */ - public Constraints getWithin(){ + public Constraints getWithin() { return getConstraintsOfType(GeoArea.WITHIN); } @@ -1019,7 +1050,7 @@ public class RandomGeo3dShapeGenerator extends LuceneTestCase { * * @return the OVERLAPS constraints. */ - public Constraints getOverlaps(){ + public Constraints getOverlaps() { return getConstraintsOfType(GeoArea.OVERLAPS); } @@ -1028,11 +1059,11 @@ public class RandomGeo3dShapeGenerator extends LuceneTestCase { * * @return the DISJOINT constraints. */ - public Constraints getDisjoint(){ + public Constraints getDisjoint() { return getConstraintsOfType(GeoArea.DISJOINT); } - private Constraints getConstraintsOfType(int type){ + private Constraints getConstraintsOfType(int type) { Constraints constraints = new Constraints(); for (GeoAreaShape constraint : keySet()) { if (type == get(constraint)) { diff --git a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestCompositeGeoPolygonRelationships.java b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestCompositeGeoPolygonRelationships.java index db857e5e535..37d71dee283 100644 --- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestCompositeGeoPolygonRelationships.java +++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestCompositeGeoPolygonRelationships.java @@ -17,61 +17,70 @@ package org.apache.lucene.spatial3d.geom; +import static org.junit.Assert.assertEquals; + import java.util.ArrayList; import java.util.Collections; import java.util.List; - import org.junit.Test; -import static org.junit.Assert.assertEquals; - /** - * Check relationship between polygon and GeoShapes of composite polygons. Normally we construct - * the composite polygon (when possible) and the complex one. + * Check relationship between polygon and GeoShapes of composite polygons. Normally we construct the + * composite polygon (when possible) and the complex one. 
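Looking back at the Constraints map above: since it is keyed by constraint shape with the required GeoArea relationship as value, composing requirements is just a couple of put calls. A usage sketch (shapeA and shapeB are hypothetical pre-built GeoAreaShapes; the snippet is not part of the patch):

  RandomGeo3dShapeGenerator generator = new RandomGeo3dShapeGenerator();
  PlanetModel planetModel = generator.randomPlanetModel();
  RandomGeo3dShapeGenerator.Constraints constraints = generator.getEmptyConstraint();
  constraints.put(shapeA, GeoArea.OVERLAPS); // generated geometry must overlap shapeA
  constraints.put(shapeB, GeoArea.DISJOINT); // and be disjoint from shapeB
  GeoPoint point = generator.randomGeoPoint(planetModel, constraints); // null if none found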
*/ public class TestCompositeGeoPolygonRelationships { @Test public void testGeoCompositePolygon1() { - //POLYGON ((19.845091 -60.452631, 20.119948 -61.655652, 23.207901 -61.453298, 22.820804 -60.257713, 21 -61,19.845091 -60.452631)) - GeoPolygon originalConvexPol = buildGeoPolygon(19.84509, -60.452631, - 20.119948, -61.655652, - 23.207901, -61.453298, - 22.820804, -60.257713, - 21, -61); + // POLYGON ((19.845091 -60.452631, 20.119948 -61.655652, 23.207901 -61.453298, 22.820804 + // -60.257713, 21 -61,19.845091 -60.452631)) + GeoPolygon originalConvexPol = + buildGeoPolygon( + 19.84509, + -60.452631, + 20.119948, + -61.655652, + 23.207901, + -61.453298, + 22.820804, + -60.257713, + 21, + -61); - //POLYGON ((19.845091 -60.452631, 21 -61,22.820804 -60.257713,23.207901 -61.453298, 20.119948 -61.655652, 19.845091 -60.452631)) - GeoPolygon originalConcavePol = buildGeoPolygon(19.84509, -60.452631, - 21, -61, - 22.820804, -60.257713, - 23.207901, -61.453298, - 20.119948, -61.655652); + // POLYGON ((19.845091 -60.452631, 21 -61,22.820804 -60.257713,23.207901 -61.453298, 20.119948 + // -61.655652, 19.845091 -60.452631)) + GeoPolygon originalConcavePol = + buildGeoPolygon( + 19.84509, + -60.452631, + 21, + -61, + 22.820804, + -60.257713, + 23.207901, + -61.453298, + 20.119948, + -61.655652); - GeoPolygon polConvex = buildGeoPolygon(20.0, -60.4, - 20.1, -60.4, - 20.1, -60.3, - 20.0, -60.3, - 20.0, -60.3); + GeoPolygon polConvex = + buildGeoPolygon(20.0, -60.4, 20.1, -60.4, 20.1, -60.3, 20.0, -60.3, 20.0, -60.3); - GeoPolygon polConcave = buildConcaveGeoPolygon(20.0, -60.4, - 20.1, -60.4, - 20.1, -60.3, - 20.0, -60.3); + GeoPolygon polConcave = + buildConcaveGeoPolygon(20.0, -60.4, 20.1, -60.4, 20.1, -60.3, 20.0, -60.3); - //convex + // convex int rel = originalConvexPol.getRelationship(polConvex); assertEquals(GeoArea.DISJOINT, rel); rel = polConvex.getRelationship(originalConvexPol); assertEquals(GeoArea.DISJOINT, rel); - rel = originalConvexPol.getRelationship(polConcave); assertEquals(GeoArea.CONTAINS, rel); rel = polConcave.getRelationship(originalConvexPol); assertEquals(GeoArea.WITHIN, rel); - //concave + // concave rel = originalConcavePol.getRelationship(polConvex); assertEquals(GeoArea.WITHIN, rel); rel = polConvex.getRelationship(originalConcavePol); @@ -86,33 +95,44 @@ public class TestCompositeGeoPolygonRelationships { @Test public void testGeoCompositePolygon2() { - //POLYGON ((19.845091 -60.452631, 20.119948 -61.655652, 23.207901 -61.453298, 22.820804 -60.257713, 21 -61,19.845091 -60.452631)) - GeoPolygon originalConvexPol = buildGeoPolygon(19.84509, -60.452631, - 20.119948, -61.655652, - 23.207901, -61.453298, - 22.820804, -60.257713, - 21, -61); + // POLYGON ((19.845091 -60.452631, 20.119948 -61.655652, 23.207901 -61.453298, 22.820804 + // -60.257713, 21 -61,19.845091 -60.452631)) + GeoPolygon originalConvexPol = + buildGeoPolygon( + 19.84509, + -60.452631, + 20.119948, + -61.655652, + 23.207901, + -61.453298, + 22.820804, + -60.257713, + 21, + -61); - //POLYGON ((19.845091 -60.452631, 21 -61,22.820804 -60.257713,23.207901 -61.453298, 20.119948 -61.655652, 19.845091 -60.452631)) - GeoPolygon originalConcavePol = buildGeoPolygon(19.84509, -60.452631, - 21, -61, - 22.820804, -60.257713, - 23.207901, -61.453298, - 20.119948, -61.655652); + // POLYGON ((19.845091 -60.452631, 21 -61,22.820804 -60.257713,23.207901 -61.453298, 20.119948 + // -61.655652, 19.845091 -60.452631)) + GeoPolygon originalConcavePol = + buildGeoPolygon( + 19.84509, + -60.452631, + 21, + -61, + 22.820804, + 
-60.257713, + 23.207901, + -61.453298, + 20.119948, + -61.655652); - //POLYGON ((20.9 -60.8, 21.1 -60.8, 21.1 -60.6, 20.9 -60.6,20.9 -60.8)) - GeoPolygon polConvex = buildGeoPolygon(20.9, -60.8, - 21.1, -60.8, - 21.1, -60.6, - 20.9, -60.6, - 20.9, -60.6); + // POLYGON ((20.9 -60.8, 21.1 -60.8, 21.1 -60.6, 20.9 -60.6,20.9 -60.8)) + GeoPolygon polConvex = + buildGeoPolygon(20.9, -60.8, 21.1, -60.8, 21.1, -60.6, 20.9, -60.6, 20.9, -60.6); - GeoPolygon polConcave = buildConcaveGeoPolygon(20.9, -60.8, - 21.1, -60.8, - 21.1, -60.6, - 20.9, -60.6); + GeoPolygon polConcave = + buildConcaveGeoPolygon(20.9, -60.8, 21.1, -60.8, 21.1, -60.6, 20.9, -60.6); - //convex + // convex int rel = originalConvexPol.getRelationship(polConvex); assertEquals(GeoArea.DISJOINT, rel); rel = polConvex.getRelationship(originalConvexPol); @@ -123,7 +143,7 @@ public class TestCompositeGeoPolygonRelationships { rel = polConcave.getRelationship(originalConvexPol); assertEquals(GeoArea.WITHIN, rel); - //concave + // concave rel = originalConcavePol.getRelationship(polConvex); assertEquals(GeoArea.WITHIN, rel); rel = polConvex.getRelationship(originalConcavePol); @@ -138,33 +158,44 @@ public class TestCompositeGeoPolygonRelationships { @Test public void testGeoCompositePolygon3() { - //POLYGON ((19.845091 -60.452631, 20.119948 -61.655652, 23.207901 -61.453298, 22.820804 -60.257713, 21 -61,19.845091 -60.452631)) - GeoPolygon originalConvexPol = buildGeoPolygon(19.84509, -60.452631, - 20.119948, -61.655652, - 23.207901, -61.453298, - 22.820804, -60.257713, - 21, -61); + // POLYGON ((19.845091 -60.452631, 20.119948 -61.655652, 23.207901 -61.453298, 22.820804 + // -60.257713, 21 -61,19.845091 -60.452631)) + GeoPolygon originalConvexPol = + buildGeoPolygon( + 19.84509, + -60.452631, + 20.119948, + -61.655652, + 23.207901, + -61.453298, + 22.820804, + -60.257713, + 21, + -61); - //POLYGON ((19.845091 -60.452631, 21 -61,22.820804 -60.257713,23.207901 -61.453298, 20.119948 -61.655652, 19.845091 -60.452631)) - GeoPolygon originalConcavePol = buildGeoPolygon(19.84509, -60.452631, - 21, -61, - 22.820804, -60.257713, - 23.207901, -61.453298, - 20.119948, -61.655652); + // POLYGON ((19.845091 -60.452631, 21 -61,22.820804 -60.257713,23.207901 -61.453298, 20.119948 + // -61.655652, 19.845091 -60.452631)) + GeoPolygon originalConcavePol = + buildGeoPolygon( + 19.84509, + -60.452631, + 21, + -61, + 22.820804, + -60.257713, + 23.207901, + -61.453298, + 20.119948, + -61.655652); - //POLYGON ((20.9 -61.1, 21.1 -61.1, 21.1 -60.9, 20.9 -60.9,20.9 -61.1)) - GeoPolygon polConvex = buildGeoPolygon(20.9, -61.1, - 21.1, -61.1, - 21.1, -60.9, - 20.9, -60.9, - 20.9, -60.9); + // POLYGON ((20.9 -61.1, 21.1 -61.1, 21.1 -60.9, 20.9 -60.9,20.9 -61.1)) + GeoPolygon polConvex = + buildGeoPolygon(20.9, -61.1, 21.1, -61.1, 21.1, -60.9, 20.9, -60.9, 20.9, -60.9); - GeoPolygon polConcave = buildConcaveGeoPolygon(20.9, -61.1, - 21.1, -61.1, - 21.1, -60.9, - 20.9, -60.9); + GeoPolygon polConcave = + buildConcaveGeoPolygon(20.9, -61.1, 21.1, -61.1, 21.1, -60.9, 20.9, -60.9); - //convex + // convex int rel = originalConvexPol.getRelationship(polConvex); assertEquals(GeoArea.OVERLAPS, rel); rel = polConvex.getRelationship(originalConvexPol); @@ -175,7 +206,7 @@ public class TestCompositeGeoPolygonRelationships { rel = polConcave.getRelationship(originalConvexPol); assertEquals(GeoArea.OVERLAPS, rel); - //concave + // concave rel = originalConcavePol.getRelationship(polConvex); assertEquals(GeoArea.OVERLAPS, rel); rel = polConvex.getRelationship(originalConcavePol); @@ 
-190,33 +221,44 @@ public class TestCompositeGeoPolygonRelationships { @Test public void testGeoCompositePolygon4() { - //POLYGON ((19.845091 -60.452631, 20.119948 -61.655652, 23.207901 -61.453298, 22.820804 -60.257713, 21 -61,19.845091 -60.452631)) - GeoPolygon originalConvexPol = buildGeoPolygon(19.84509, -60.452631, - 20.119948, -61.655652, - 23.207901, -61.453298, - 22.820804, -60.257713, - 21, -61); + // POLYGON ((19.845091 -60.452631, 20.119948 -61.655652, 23.207901 -61.453298, 22.820804 + // -60.257713, 21 -61,19.845091 -60.452631)) + GeoPolygon originalConvexPol = + buildGeoPolygon( + 19.84509, + -60.452631, + 20.119948, + -61.655652, + 23.207901, + -61.453298, + 22.820804, + -60.257713, + 21, + -61); - //POLYGON ((19.845091 -60.452631, 21 -61,22.820804 -60.257713,23.207901 -61.453298, 20.119948 -61.655652, 19.845091 -60.452631)) - GeoPolygon originalConcavePol = buildGeoPolygon(19.84509, -60.452631, - 21, -61, - 22.820804, -60.257713, - 23.207901, -61.453298, - 20.119948, -61.655652); + // POLYGON ((19.845091 -60.452631, 21 -61,22.820804 -60.257713,23.207901 -61.453298, 20.119948 + // -61.655652, 19.845091 -60.452631)) + GeoPolygon originalConcavePol = + buildGeoPolygon( + 19.84509, + -60.452631, + 21, + -61, + 22.820804, + -60.257713, + 23.207901, + -61.453298, + 20.119948, + -61.655652); - //POLYGON ((20.9 -61.4, 21.1 -61.4, 21.1 -61.2, 20.9 -61.2,20.9 -61.4)) - GeoPolygon polConvex = buildGeoPolygon(20.9, -61.4, - 21.1, -61.4, - 21.1, -61.2, - 20.9, -61.2, - 20.9, -61.2); + // POLYGON ((20.9 -61.4, 21.1 -61.4, 21.1 -61.2, 20.9 -61.2,20.9 -61.4)) + GeoPolygon polConvex = + buildGeoPolygon(20.9, -61.4, 21.1, -61.4, 21.1, -61.2, 20.9, -61.2, 20.9, -61.2); - GeoPolygon polConcave = buildConcaveGeoPolygon(20.9, -61.4, - 21.1, -61.4, - 21.1, -61.2, - 20.9, -61.2); + GeoPolygon polConcave = + buildConcaveGeoPolygon(20.9, -61.4, 21.1, -61.4, 21.1, -61.2, 20.9, -61.2); - //convex + // convex int rel = originalConvexPol.getRelationship(polConvex); assertEquals(GeoArea.WITHIN, rel); rel = polConvex.getRelationship(originalConvexPol); @@ -227,7 +269,7 @@ public class TestCompositeGeoPolygonRelationships { rel = polConcave.getRelationship(originalConvexPol); assertEquals(GeoArea.OVERLAPS, rel); - //concave + // concave rel = originalConcavePol.getRelationship(polConvex); assertEquals(GeoArea.DISJOINT, rel); rel = polConvex.getRelationship(originalConcavePol); @@ -237,39 +279,47 @@ public class TestCompositeGeoPolygonRelationships { assertEquals(GeoArea.CONTAINS, rel); rel = polConcave.getRelationship(originalConcavePol); assertEquals(GeoArea.WITHIN, rel); - } @Test public void testGeoCompositePolygon5() { - //POLYGON ((19.845091 -60.452631, 20.119948 -61.655652, 23.207901 -61.453298, 22.820804 -60.257713, 21 -61,19.845091 -60.452631)) - GeoPolygon originaConvexlPol = buildGeoPolygon(19.84509, -60.452631, - 20.119948, -61.655652, - 23.207901, -61.453298, - 22.820804, -60.257713, - 21, -61); + // POLYGON ((19.845091 -60.452631, 20.119948 -61.655652, 23.207901 -61.453298, 22.820804 + // -60.257713, 21 -61,19.845091 -60.452631)) + GeoPolygon originaConvexlPol = + buildGeoPolygon( + 19.84509, + -60.452631, + 20.119948, + -61.655652, + 23.207901, + -61.453298, + 22.820804, + -60.257713, + 21, + -61); - //POLYGON ((19.845091 -60.452631, 21 -61,22.820804 -60.257713,23.207901 -61.453298, 20.119948 -61.655652, 19.845091 -60.452631)) - GeoPolygon originalConcavePol = buildGeoPolygon(19.84509, -60.452631, - 21, -61, - 22.820804, -60.257713, - 23.207901, -61.453298, - 20.119948, -61.655652); + // 
POLYGON ((19.845091 -60.452631, 21 -61,22.820804 -60.257713,23.207901 -61.453298, 20.119948 + // -61.655652, 19.845091 -60.452631)) + GeoPolygon originalConcavePol = + buildGeoPolygon( + 19.84509, + -60.452631, + 21, + -61, + 22.820804, + -60.257713, + 23.207901, + -61.453298, + 20.119948, + -61.655652); - //POLYGON ((19 -62, 23 -62, 23 -60, 19 -60,19 -62)) - GeoPolygon polConvex = buildGeoPolygon(19, -62, - 23, -62, - 23, -60, - 19, -60, - 19, -60); + // POLYGON ((19 -62, 23 -62, 23 -60, 19 -60,19 -62)) + GeoPolygon polConvex = buildGeoPolygon(19, -62, 23, -62, 23, -60, 19, -60, 19, -60); - GeoPolygon polConcave = buildConcaveGeoPolygon(19, -62, - 23, -62, - 23, -60, - 19, -60); + GeoPolygon polConcave = buildConcaveGeoPolygon(19, -62, 23, -62, 23, -60, 19, -60); - //convex + // convex int rel = originaConvexlPol.getRelationship(polConvex); assertEquals(GeoArea.OVERLAPS, rel); rel = polConvex.getRelationship(originaConvexlPol); @@ -280,7 +330,7 @@ public class TestCompositeGeoPolygonRelationships { rel = polConcave.getRelationship(originaConvexlPol); assertEquals(GeoArea.OVERLAPS, rel); - //concave + // concave rel = originalConcavePol.getRelationship(polConvex); assertEquals(GeoArea.OVERLAPS, rel); rel = polConvex.getRelationship(originalConcavePol); @@ -295,33 +345,42 @@ public class TestCompositeGeoPolygonRelationships { @Test public void testGeoCompositePolygon6() { - //POLYGON ((19.845091 -60.452631, 20.119948 -61.655652, 23.207901 -61.453298, 22.820804 -60.257713, 21 -61,19.845091 -60.452631)) - GeoPolygon originalConvexPol = buildGeoPolygon(19.84509, -60.452631, - 20.119948, -61.655652, - 23.207901, -61.453298, - 22.820804, -60.257713, - 21, -61); + // POLYGON ((19.845091 -60.452631, 20.119948 -61.655652, 23.207901 -61.453298, 22.820804 + // -60.257713, 21 -61,19.845091 -60.452631)) + GeoPolygon originalConvexPol = + buildGeoPolygon( + 19.84509, + -60.452631, + 20.119948, + -61.655652, + 23.207901, + -61.453298, + 22.820804, + -60.257713, + 21, + -61); - //POLYGON ((19.845091 -60.452631, 21 -61,22.820804 -60.257713,23.207901 -61.453298, 20.119948 -61.655652, 19.845091 -60.452631)) - GeoPolygon originalConcavePol = buildGeoPolygon(19.84509, -60.452631, - 21, -61, - 22.820804, -60.257713, - 23.207901, -61.453298, - 20.119948, -61.655652); + // POLYGON ((19.845091 -60.452631, 21 -61,22.820804 -60.257713,23.207901 -61.453298, 20.119948 + // -61.655652, 19.845091 -60.452631)) + GeoPolygon originalConcavePol = + buildGeoPolygon( + 19.84509, + -60.452631, + 21, + -61, + 22.820804, + -60.257713, + 23.207901, + -61.453298, + 20.119948, + -61.655652); - //POLYGON ((19 -62, 24 -62, 24 -60, 19 -60,19 -62)) - GeoPolygon polConvex = buildGeoPolygon(19, -62, - 24, -62, - 24, -60, - 19, -60, - 19, -60); + // POLYGON ((19 -62, 24 -62, 24 -60, 19 -60,19 -62)) + GeoPolygon polConvex = buildGeoPolygon(19, -62, 24, -62, 24, -60, 19, -60, 19, -60); - GeoPolygon polConcave = buildConcaveGeoPolygon(19, -62, - 24, -62, - 24, -60, - 19, -60); + GeoPolygon polConcave = buildConcaveGeoPolygon(19, -62, 24, -62, 24, -60, 19, -60); - //convex + // convex int rel = originalConvexPol.getRelationship(polConvex); assertEquals(GeoArea.CONTAINS, rel); rel = polConvex.getRelationship(originalConvexPol); @@ -332,7 +391,7 @@ public class TestCompositeGeoPolygonRelationships { rel = polConcave.getRelationship(originalConvexPol); assertEquals(GeoArea.DISJOINT, rel); - //concave + // concave rel = originalConcavePol.getRelationship(polConvex); assertEquals(GeoArea.OVERLAPS, rel); rel = 
polConvex.getRelationship(originalConcavePol); @@ -344,38 +403,47 @@ public class TestCompositeGeoPolygonRelationships { assertEquals(GeoArea.CONTAINS, rel); } - - @Test public void testGeoCompositePolygon7() { - //POLYGON ((19.845091 -60.452631, 20.119948 -61.655652, 23.207901 -61.453298, 22.820804 -60.257713, 21 -61,19.845091 -60.452631)) - GeoPolygon originalConvexPol = buildGeoPolygon(19.84509, -60.452631, - 20.119948, -61.655652, - 23.207901, -61.453298, - 22.820804, -60.257713, - 21, -61); + // POLYGON ((19.845091 -60.452631, 20.119948 -61.655652, 23.207901 -61.453298, 22.820804 + // -60.257713, 21 -61,19.845091 -60.452631)) + GeoPolygon originalConvexPol = + buildGeoPolygon( + 19.84509, + -60.452631, + 20.119948, + -61.655652, + 23.207901, + -61.453298, + 22.820804, + -60.257713, + 21, + -61); - //POLYGON ((19.845091 -60.452631, 21 -61,22.820804 -60.257713,23.207901 -61.453298, 20.119948 -61.655652, 19.845091 -60.452631)) - GeoPolygon originalConcavePol = buildGeoPolygon(19.84509, -60.452631, - 21, -61, - 22.820804, -60.257713, - 23.207901, -61.453298, - 20.119948, -61.655652); + // POLYGON ((19.845091 -60.452631, 21 -61,22.820804 -60.257713,23.207901 -61.453298, 20.119948 + // -61.655652, 19.845091 -60.452631)) + GeoPolygon originalConcavePol = + buildGeoPolygon( + 19.84509, + -60.452631, + 21, + -61, + 22.820804, + -60.257713, + 23.207901, + -61.453298, + 20.119948, + -61.655652); - //POLYGON ((20.2 -61.4, 20.5 -61.4, 20.5 -60.8, 20.2 -60.8,20.2 -61.4)) - GeoPolygon polConvex = buildGeoPolygon(20.2, -61.4, - 20.5, -61.4, - 20.5, -60.8, - 20.2, -60.8, - 20.2, -60.8); + // POLYGON ((20.2 -61.4, 20.5 -61.4, 20.5 -60.8, 20.2 -60.8,20.2 -61.4)) + GeoPolygon polConvex = + buildGeoPolygon(20.2, -61.4, 20.5, -61.4, 20.5, -60.8, 20.2, -60.8, 20.2, -60.8); - GeoPolygon polConcave = buildConcaveGeoPolygon(20.2, -61.4, - 20.5, -61.4, - 20.5, -60.8, - 20.2, -60.8); + GeoPolygon polConcave = + buildConcaveGeoPolygon(20.2, -61.4, 20.5, -61.4, 20.5, -60.8, 20.2, -60.8); - //convex + // convex int rel = originalConvexPol.getRelationship(polConvex); assertEquals(GeoArea.WITHIN, rel); rel = polConvex.getRelationship(originalConvexPol); @@ -386,7 +454,7 @@ public class TestCompositeGeoPolygonRelationships { rel = polConcave.getRelationship(originalConvexPol); assertEquals(GeoArea.OVERLAPS, rel); - //concave + // concave rel = originalConcavePol.getRelationship(polConvex); assertEquals(GeoArea.DISJOINT, rel); rel = polConvex.getRelationship(originalConvexPol); @@ -401,39 +469,39 @@ public class TestCompositeGeoPolygonRelationships { @Test public void testGeoCompositePolygon8() { - //POLYGON ((19.845091 -60.452631, 20.119948 -61.655652, 23.207901 -61.453298, 22.820804 -60.257713,21 -61, 19.845091 -60.452631)) - GeoPolygon originalPol = buildGeoPolygon(19.84509, -60.452631, - 20.119948, -61.655652, - 23.207901, -61.453298, - 22.820804, -60.257713, - 21, -61); + // POLYGON ((19.845091 -60.452631, 20.119948 -61.655652, 23.207901 -61.453298, 22.820804 + // -60.257713,21 -61, 19.845091 -60.452631)) + GeoPolygon originalPol = + buildGeoPolygon( + 19.84509, + -60.452631, + 20.119948, + -61.655652, + 23.207901, + -61.453298, + 22.820804, + -60.257713, + 21, + -61); - - GeoShape shape = getInsideCompositeShape(); + GeoShape shape = getInsideCompositeShape(); int rel = originalPol.getRelationship(shape); assertEquals(GeoArea.WITHIN, rel); - } - @Test public void testGeoPolygonPole1() { - //POLYGON((0 80, 45 85 ,90 80,135 85,180 80, -135 85, -90 80, -45 85,0 80)) - GeoPolygon compositePol= getCompositePolygon(); 
- GeoPolygon complexPol= getComplexPolygon(); + // POLYGON((0 80, 45 85 ,90 80,135 85,180 80, -135 85, -90 80, -45 85,0 80)) + GeoPolygon compositePol = getCompositePolygon(); + GeoPolygon complexPol = getComplexPolygon(); - //POLYGON ((20.9 -61.4, 21.1 -61.4, 21.1 -61.2, 20.9 -61.2,20.9 -61.4)) - GeoPolygon polConvex = buildGeoPolygon(20.9, -61.4, - 21.1, -61.4, - 21.1, -61.2, - 20.9, -61.2, - 20.9, -61.2); + // POLYGON ((20.9 -61.4, 21.1 -61.4, 21.1 -61.2, 20.9 -61.2,20.9 -61.4)) + GeoPolygon polConvex = + buildGeoPolygon(20.9, -61.4, 21.1, -61.4, 21.1, -61.2, 20.9, -61.2, 20.9, -61.2); - GeoPolygon polConcave = buildConcaveGeoPolygon(20.9, -61.4, - 21.1, -61.4, - 21.1, -61.2, - 20.9, -61.2); + GeoPolygon polConcave = + buildConcaveGeoPolygon(20.9, -61.4, 21.1, -61.4, 21.1, -61.2, 20.9, -61.2); int rel = compositePol.getRelationship(polConvex); assertEquals(GeoArea.DISJOINT, rel); @@ -458,21 +526,14 @@ public class TestCompositeGeoPolygonRelationships { @Test public void testGeoPolygonPole2() { - //POLYGON((0 80, 45 85 ,90 80,135 85,180 80, -135 85, -90 80, -45 85,0 80)) - GeoPolygon compositePol= getCompositePolygon(); - GeoPolygon complexPol= getComplexPolygon(); + // POLYGON((0 80, 45 85 ,90 80,135 85,180 80, -135 85, -90 80, -45 85,0 80)) + GeoPolygon compositePol = getCompositePolygon(); + GeoPolygon complexPol = getComplexPolygon(); - //POLYGON((-1 81, -1 79,1 79,1 81, -1 81)) - GeoPolygon polConvex = buildGeoPolygon(-1,81, - -1,79, - 1,79, - 1,81, - 1,81); + // POLYGON((-1 81, -1 79,1 79,1 81, -1 81)) + GeoPolygon polConvex = buildGeoPolygon(-1, 81, -1, 79, 1, 79, 1, 81, 1, 81); - GeoPolygon polConcave = buildConcaveGeoPolygon(-1,81, - -1,79, - 1,79, - 1,81); + GeoPolygon polConcave = buildConcaveGeoPolygon(-1, 81, -1, 79, 1, 79, 1, 81); int rel = compositePol.getRelationship(polConvex); assertEquals(GeoArea.OVERLAPS, rel); @@ -497,21 +558,14 @@ public class TestCompositeGeoPolygonRelationships { @Test public void testGeoPolygonPole3() { - //POLYGON((0 80, 45 85 ,90 80,135 85,180 80, -135 85, -90 80, -45 85,0 80)) - GeoPolygon compositePol= getCompositePolygon(); - GeoPolygon complexPol= getComplexPolygon(); + // POLYGON((0 80, 45 85 ,90 80,135 85,180 80, -135 85, -90 80, -45 85,0 80)) + GeoPolygon compositePol = getCompositePolygon(); + GeoPolygon complexPol = getComplexPolygon(); - //POLYGON((-1 86, -1 84,1 84,1 86, -1 86)) - GeoPolygon polConvex = buildGeoPolygon(-1,86, - -1,84, - 1,84, - 1,86, - 1,86); + // POLYGON((-1 86, -1 84,1 84,1 86, -1 86)) + GeoPolygon polConvex = buildGeoPolygon(-1, 86, -1, 84, 1, 84, 1, 86, 1, 86); - GeoPolygon polConcave = buildConcaveGeoPolygon(-1,86, - -1,84, - 1,84, - 1,86); + GeoPolygon polConcave = buildConcaveGeoPolygon(-1, 86, -1, 84, 1, 84, 1, 86); int rel = compositePol.getRelationship(polConvex); assertEquals(GeoArea.WITHIN, rel); @@ -536,180 +590,180 @@ public class TestCompositeGeoPolygonRelationships { @Test public void testMultiPolygon1() { - //MULTIPOLYGON(((-145.790967486 -5.17543698881, -145.790854979 -5.11348060995, -145.853073512 -5.11339421216, -145.853192037 -5.17535061936, -145.790967486 -5.17543698881)), - //((-145.8563923 -5.17527125408, -145.856222168 -5.11332154814, -145.918433943 -5.11317773171, -145.918610092 -5.17512738429, -145.8563923 -5.17527125408))) - GeoPolygon multiPol= getMultiPolygon(); + // MULTIPOLYGON(((-145.790967486 -5.17543698881, -145.790854979 -5.11348060995, -145.853073512 + // -5.11339421216, -145.853192037 -5.17535061936, -145.790967486 -5.17543698881)), + // ((-145.8563923 -5.17527125408, 
-145.856222168 -5.11332154814, -145.918433943 -5.11317773171, + // -145.918610092 -5.17512738429, -145.8563923 -5.17527125408))) + GeoPolygon multiPol = getMultiPolygon(); - //POLYGON((-145.8555 -5.13, -145.8540 -5.13, -145.8540 -5.12, -145.8555 -5.12, -145.8555 -5.13)) - GeoPolygon polConvex = buildGeoPolygon(-145.8555, -5.13, - -145.8540, -5.13, - -145.8540, -5.12, - -145.8555, -5.12, - -145.8555, -5.12); + // POLYGON((-145.8555 -5.13, -145.8540 -5.13, -145.8540 -5.12, -145.8555 -5.12, -145.8555 + // -5.13)) + GeoPolygon polConvex = + buildGeoPolygon( + -145.8555, -5.13, -145.8540, -5.13, -145.8540, -5.12, -145.8555, -5.12, -145.8555, + -5.12); - GeoPolygon polConcave = buildConcaveGeoPolygon(-145.8555, -5.13, - -145.8540, -5.13, - -145.8540, -5.12, - -145.8555, -5.12); + GeoPolygon polConcave = + buildConcaveGeoPolygon( + -145.8555, -5.13, -145.8540, -5.13, -145.8540, -5.12, -145.8555, -5.12); int rel = multiPol.getRelationship(polConvex); assertEquals(GeoArea.DISJOINT, rel); rel = polConvex.getRelationship(multiPol); assertEquals(GeoArea.DISJOINT, rel); - assertEquals(false,multiPol.intersects(polConvex)); - assertEquals(false,polConvex.intersects(multiPol)); + assertEquals(false, multiPol.intersects(polConvex)); + assertEquals(false, polConvex.intersects(multiPol)); rel = multiPol.getRelationship(polConcave); assertEquals(GeoArea.CONTAINS, rel); rel = polConcave.getRelationship(multiPol); assertEquals(GeoArea.WITHIN, rel); - assertEquals(false,multiPol.intersects(polConcave)); - assertEquals(false,polConcave.intersects(multiPol)); + assertEquals(false, multiPol.intersects(polConcave)); + assertEquals(false, polConcave.intersects(multiPol)); } @Test public void testMultiPolygon2() { - //MULTIPOLYGON(((-145.790967486 -5.17543698881, -145.790854979 -5.11348060995, -145.853073512 -5.11339421216, -145.853192037 -5.17535061936, -145.790967486 -5.17543698881)), - //((-145.8563923 -5.17527125408, -145.856222168 -5.11332154814, -145.918433943 -5.11317773171, -145.918610092 -5.17512738429, -145.8563923 -5.17527125408))) - GeoPolygon multiPol= getMultiPolygon(); + // MULTIPOLYGON(((-145.790967486 -5.17543698881, -145.790854979 -5.11348060995, -145.853073512 + // -5.11339421216, -145.853192037 -5.17535061936, -145.790967486 -5.17543698881)), + // ((-145.8563923 -5.17527125408, -145.856222168 -5.11332154814, -145.918433943 -5.11317773171, + // -145.918610092 -5.17512738429, -145.8563923 -5.17527125408))) + GeoPolygon multiPol = getMultiPolygon(); - //POLYGON((-145.8555 -5.13, -145.85 -5.13, -145.85 -5.12, -145.8555 -5.12, -145.8555 -5.13)) - GeoPolygon polConvex = buildGeoPolygon(-145.8555, -5.13, - -145.85, -5.13, - -145.85, -5.12, - -145.8555, -5.12, - -145.8555, -5.12); + // POLYGON((-145.8555 -5.13, -145.85 -5.13, -145.85 -5.12, -145.8555 -5.12, -145.8555 -5.13)) + GeoPolygon polConvex = + buildGeoPolygon( + -145.8555, -5.13, -145.85, -5.13, -145.85, -5.12, -145.8555, -5.12, -145.8555, -5.12); - GeoPolygon polConcave = buildConcaveGeoPolygon(-145.8555, -5.13, - -145.85, -5.13, - -145.85, -5.12, - -145.8555, -5.12); + GeoPolygon polConcave = + buildConcaveGeoPolygon(-145.8555, -5.13, -145.85, -5.13, -145.85, -5.12, -145.8555, -5.12); int rel = multiPol.getRelationship(polConvex); assertEquals(GeoArea.OVERLAPS, rel); rel = polConvex.getRelationship(multiPol); assertEquals(GeoArea.OVERLAPS, rel); - assertEquals(true,multiPol.intersects(polConvex)); - assertEquals(true,polConvex.intersects(multiPol)); + assertEquals(true, multiPol.intersects(polConvex)); + assertEquals(true, 
polConvex.intersects(multiPol)); rel = multiPol.getRelationship(polConcave); assertEquals(GeoArea.OVERLAPS, rel); rel = polConcave.getRelationship(multiPol); assertEquals(GeoArea.OVERLAPS, rel); - assertEquals(true,multiPol.intersects(polConcave)); - assertEquals(true,polConcave.intersects(multiPol)); + assertEquals(true, multiPol.intersects(polConcave)); + assertEquals(true, polConcave.intersects(multiPol)); } @Test public void testMultiPolygon3() { - //MULTIPOLYGON(((-145.790967486 -5.17543698881, -145.790854979 -5.11348060995, -145.853073512 -5.11339421216, -145.853192037 -5.17535061936, -145.790967486 -5.17543698881)), - //((-145.8563923 -5.17527125408, -145.856222168 -5.11332154814, -145.918433943 -5.11317773171, -145.918610092 -5.17512738429, -145.8563923 -5.17527125408))) - GeoPolygon multiPol= getMultiPolygon(); + // MULTIPOLYGON(((-145.790967486 -5.17543698881, -145.790854979 -5.11348060995, -145.853073512 + // -5.11339421216, -145.853192037 -5.17535061936, -145.790967486 -5.17543698881)), + // ((-145.8563923 -5.17527125408, -145.856222168 -5.11332154814, -145.918433943 -5.11317773171, + // -145.918610092 -5.17512738429, -145.8563923 -5.17527125408))) + GeoPolygon multiPol = getMultiPolygon(); - //POLYGON((-146 -5.18, -145.854 -5.18, -145.854 -5.11, -146 -5.11, -146 -5.18)) - //Case overlapping one of the polygons so intersection is false! - GeoPolygon polConvex = buildGeoPolygon(-146, -5.18, - -145.854, -5.18, - -145.854, -5.11, - -146, -5.11, - -146, -5.11); + // POLYGON((-146 -5.18, -145.854 -5.18, -145.854 -5.11, -146 -5.11, -146 -5.18)) + // Case overlapping one of the polygons so intersection is false! + GeoPolygon polConvex = + buildGeoPolygon(-146, -5.18, -145.854, -5.18, -145.854, -5.11, -146, -5.11, -146, -5.11); - GeoPolygon polConcave = buildConcaveGeoPolygon(-146, -5.18, - -145.854, -5.18, - -145.854, -5.11, - -146, -5.11); + GeoPolygon polConcave = + buildConcaveGeoPolygon(-146, -5.18, -145.854, -5.18, -145.854, -5.11, -146, -5.11); int rel = multiPol.getRelationship(polConvex); assertEquals(GeoArea.OVERLAPS, rel); rel = polConvex.getRelationship(multiPol); assertEquals(GeoArea.OVERLAPS, rel); - assertEquals(false,multiPol.intersects(polConvex)); - assertEquals(false,polConvex.intersects(multiPol)); + assertEquals(false, multiPol.intersects(polConvex)); + assertEquals(false, polConvex.intersects(multiPol)); rel = multiPol.getRelationship(polConcave); assertEquals(GeoArea.OVERLAPS, rel); rel = polConcave.getRelationship(multiPol); assertEquals(GeoArea.OVERLAPS, rel); - assertEquals(false,multiPol.intersects(polConcave)); - assertEquals(false,polConcave.intersects(multiPol)); - + assertEquals(false, multiPol.intersects(polConcave)); + assertEquals(false, polConcave.intersects(multiPol)); } @Test public void testMultiPolygon4() { - //MULTIPOLYGON(((-145.790967486 -5.17543698881, -145.790854979 -5.11348060995, -145.853073512 -5.11339421216, -145.853192037 -5.17535061936, -145.790967486 -5.17543698881)), - //((-145.8563923 -5.17527125408, -145.856222168 -5.11332154814, -145.918433943 -5.11317773171, -145.918610092 -5.17512738429, -145.8563923 -5.17527125408))) - GeoPolygon multiPol= getMultiPolygon(); + // MULTIPOLYGON(((-145.790967486 -5.17543698881, -145.790854979 -5.11348060995, -145.853073512 + // -5.11339421216, -145.853192037 -5.17535061936, -145.790967486 -5.17543698881)), + // ((-145.8563923 -5.17527125408, -145.856222168 -5.11332154814, -145.918433943 -5.11317773171, + // -145.918610092 -5.17512738429, -145.8563923 -5.17527125408))) + GeoPolygon multiPol = 
getMultiPolygon(); - //POLYGON((-145.88 -5.13, -145.87 -5.13, -145.87 -5.12, -145.88 -5.12, -145.88 -5.13)) - GeoPolygon polConvex = buildGeoPolygon(-145.88, -5.13, - -145.87, -5.13, - -145.87, -5.12, - -145.88, -5.12, - -145.88, -5.12); + // POLYGON((-145.88 -5.13, -145.87 -5.13, -145.87 -5.12, -145.88 -5.12, -145.88 -5.13)) + GeoPolygon polConvex = + buildGeoPolygon( + -145.88, -5.13, -145.87, -5.13, -145.87, -5.12, -145.88, -5.12, -145.88, -5.12); - GeoPolygon polConcave = buildConcaveGeoPolygon(-145.88, -5.13, - -145.87, -5.13, - -145.87, -5.12, - -145.88, -5.12); + GeoPolygon polConcave = + buildConcaveGeoPolygon(-145.88, -5.13, -145.87, -5.13, -145.87, -5.12, -145.88, -5.12); int rel = multiPol.getRelationship(polConvex); assertEquals(GeoArea.WITHIN, rel); rel = polConvex.getRelationship(multiPol); assertEquals(GeoArea.CONTAINS, rel); - assertEquals(false,multiPol.intersects(polConvex)); - assertEquals(false,polConvex.intersects(multiPol)); + assertEquals(false, multiPol.intersects(polConvex)); + assertEquals(false, polConvex.intersects(multiPol)); rel = multiPol.getRelationship(polConcave); assertEquals(GeoArea.OVERLAPS, rel); rel = polConcave.getRelationship(multiPol); assertEquals(GeoArea.OVERLAPS, rel); - assertEquals(false,multiPol.intersects(polConcave)); - assertEquals(false,polConcave.intersects(multiPol)); + assertEquals(false, multiPol.intersects(polConcave)); + assertEquals(false, polConcave.intersects(multiPol)); } @Test public void testMultiPolygon5() { - //MULTIPOLYGON(((-145.790967486 -5.17543698881, -145.790854979 -5.11348060995, -145.853073512 -5.11339421216, -145.853192037 -5.17535061936, -145.790967486 -5.17543698881)), - //((-145.8563923 -5.17527125408, -145.856222168 -5.11332154814, -145.918433943 -5.11317773171, -145.918610092 -5.17512738429, -145.8563923 -5.17527125408))) - GeoPolygon multiPol= getMultiPolygon(); + // MULTIPOLYGON(((-145.790967486 -5.17543698881, -145.790854979 -5.11348060995, -145.853073512 + // -5.11339421216, -145.853192037 -5.17535061936, -145.790967486 -5.17543698881)), + // ((-145.8563923 -5.17527125408, -145.856222168 -5.11332154814, -145.918433943 -5.11317773171, + // -145.918610092 -5.17512738429, -145.8563923 -5.17527125408))) + GeoPolygon multiPol = getMultiPolygon(); - //POLYGON((-146 -5.18, -145 -5.18, -145 -5.11, -146 -5.11, -146 -5.18)) - GeoPolygon polConvex = buildGeoPolygon(-146, -5.18, - -145, -5.18, - -145, -5.11, - -146, -5.11, - -146, -5.11); + // POLYGON((-146 -5.18, -145 -5.18, -145 -5.11, -146 -5.11, -146 -5.18)) + GeoPolygon polConvex = + buildGeoPolygon(-146, -5.18, -145, -5.18, -145, -5.11, -146, -5.11, -146, -5.11); - GeoPolygon polConcave = buildConcaveGeoPolygon(-146, -5.18, - -145, -5.18, - -145, -5.11, - -146, -5.11); + GeoPolygon polConcave = + buildConcaveGeoPolygon(-146, -5.18, -145, -5.18, -145, -5.11, -146, -5.11); int rel = multiPol.getRelationship(polConvex); assertEquals(GeoArea.CONTAINS, rel); rel = polConvex.getRelationship(multiPol); assertEquals(GeoArea.WITHIN, rel); - assertEquals(false,multiPol.intersects(polConvex)); + assertEquals(false, multiPol.intersects(polConvex)); rel = multiPol.getRelationship(polConcave); assertEquals(GeoArea.DISJOINT, rel); rel = polConcave.getRelationship(multiPol); assertEquals(GeoArea.DISJOINT, rel); - assertEquals(false,multiPol.intersects(polConcave)); + assertEquals(false, multiPol.intersects(polConcave)); } - private GeoPolygon buildGeoPolygon(double lon1,double lat1, - double lon2,double lat2, - double lon3,double lat3, - double lon4,double lat4, - double 
lon5,double lat5) - { - GeoPoint point1 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat1), Geo3DUtil.fromDegrees(lon1)); - GeoPoint point2 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat2), Geo3DUtil.fromDegrees(lon2)); - GeoPoint point3 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat3), Geo3DUtil.fromDegrees(lon3)); - GeoPoint point4 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat4), Geo3DUtil.fromDegrees(lon4)); - GeoPoint point5 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat5), Geo3DUtil.fromDegrees(lon5)); + private GeoPolygon buildGeoPolygon( + double lon1, + double lat1, + double lon2, + double lat2, + double lon3, + double lat3, + double lon4, + double lat4, + double lon5, + double lat5) { + GeoPoint point1 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat1), Geo3DUtil.fromDegrees(lon1)); + GeoPoint point2 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat2), Geo3DUtil.fromDegrees(lon2)); + GeoPoint point3 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat3), Geo3DUtil.fromDegrees(lon3)); + GeoPoint point4 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat4), Geo3DUtil.fromDegrees(lon4)); + GeoPoint point5 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat5), Geo3DUtil.fromDegrees(lon5)); final List points = new ArrayList<>(); points.add(point1); points.add(point2); @@ -719,33 +773,49 @@ public class TestCompositeGeoPolygonRelationships { return GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points); } - private GeoPolygon buildConcaveGeoPolygon(double lon1,double lat1, - double lon2,double lat2, - double lon3,double lat3, - double lon4,double lat4) - { - GeoPoint point1 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat1), Geo3DUtil.fromDegrees(lon1)); - GeoPoint point2 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat2), Geo3DUtil.fromDegrees(lon2)); - GeoPoint point3 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat3), Geo3DUtil.fromDegrees(lon3)); - GeoPoint point4 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat4), Geo3DUtil.fromDegrees(lon4)); + private GeoPolygon buildConcaveGeoPolygon( + double lon1, + double lat1, + double lon2, + double lat2, + double lon3, + double lat3, + double lon4, + double lat4) { + GeoPoint point1 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat1), Geo3DUtil.fromDegrees(lon1)); + GeoPoint point2 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat2), Geo3DUtil.fromDegrees(lon2)); + GeoPoint point3 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat3), Geo3DUtil.fromDegrees(lon3)); + GeoPoint point4 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat4), Geo3DUtil.fromDegrees(lon4)); final List points = new ArrayList<>(); points.add(point1); points.add(point2); points.add(point3); points.add(point4); - return GeoPolygonFactory.makeGeoConcavePolygon(PlanetModel.SPHERE,points); + return GeoPolygonFactory.makeGeoConcavePolygon(PlanetModel.SPHERE, points); } - private GeoPolygon getCompositePolygon(){ - //POLYGON((0 80, 45 85 ,90 80,135 85,180 80, -135 85, -90 80, -45 85,0 80)) - GeoPoint point1 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(80), Geo3DUtil.fromDegrees(0)); - GeoPoint point2 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(85), Geo3DUtil.fromDegrees(45)); - GeoPoint point3 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(80), Geo3DUtil.fromDegrees(90)); - GeoPoint point4 = new 
GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(85), Geo3DUtil.fromDegrees(135)); - GeoPoint point5 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(80), Geo3DUtil.fromDegrees(180)); - GeoPoint point6 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(85), Geo3DUtil.fromDegrees(-135)); - GeoPoint point7 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(80), Geo3DUtil.fromDegrees(-90)); - GeoPoint point8 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(85), Geo3DUtil.fromDegrees(-45)); + private GeoPolygon getCompositePolygon() { + // POLYGON((0 80, 45 85 ,90 80,135 85,180 80, -135 85, -90 80, -45 85,0 80)) + GeoPoint point1 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(80), Geo3DUtil.fromDegrees(0)); + GeoPoint point2 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(85), Geo3DUtil.fromDegrees(45)); + GeoPoint point3 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(80), Geo3DUtil.fromDegrees(90)); + GeoPoint point4 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(85), Geo3DUtil.fromDegrees(135)); + GeoPoint point5 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(80), Geo3DUtil.fromDegrees(180)); + GeoPoint point6 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(85), Geo3DUtil.fromDegrees(-135)); + GeoPoint point7 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(80), Geo3DUtil.fromDegrees(-90)); + GeoPoint point8 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(85), Geo3DUtil.fromDegrees(-45)); final List points = new ArrayList<>(); points.add(point1); points.add(point2); @@ -758,16 +828,24 @@ public class TestCompositeGeoPolygonRelationships { return GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points); } - private GeoPolygon getComplexPolygon(){ - //POLYGON((0 80, 45 85 ,90 80,135 85,180 80, -135 85, -90 80, -45 85,0 80)) - GeoPoint point1 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(80), Geo3DUtil.fromDegrees(0)); - GeoPoint point2 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(85), Geo3DUtil.fromDegrees(45)); - GeoPoint point3 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(80), Geo3DUtil.fromDegrees(90)); - GeoPoint point4 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(85), Geo3DUtil.fromDegrees(135)); - GeoPoint point5 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(80), Geo3DUtil.fromDegrees(180)); - GeoPoint point6 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(85), Geo3DUtil.fromDegrees(-135)); - GeoPoint point7 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(80), Geo3DUtil.fromDegrees(-90)); - GeoPoint point8 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(85), Geo3DUtil.fromDegrees(-45)); + private GeoPolygon getComplexPolygon() { + // POLYGON((0 80, 45 85 ,90 80,135 85,180 80, -135 85, -90 80, -45 85,0 80)) + GeoPoint point1 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(80), Geo3DUtil.fromDegrees(0)); + GeoPoint point2 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(85), Geo3DUtil.fromDegrees(45)); + GeoPoint point3 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(80), Geo3DUtil.fromDegrees(90)); + GeoPoint point4 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(85), Geo3DUtil.fromDegrees(135)); + GeoPoint point5 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(80), Geo3DUtil.fromDegrees(180)); + GeoPoint point6 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(85), Geo3DUtil.fromDegrees(-135)); + GeoPoint point7 = + new 
GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(80), Geo3DUtil.fromDegrees(-90)); + GeoPoint point8 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(85), Geo3DUtil.fromDegrees(-45)); final List points = new ArrayList<>(); points.add(point1); points.add(point2); @@ -781,23 +859,57 @@ public class TestCompositeGeoPolygonRelationships { return GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.SPHERE, Collections.singletonList(pd)); } - private GeoPolygon getMultiPolygon(){ - //MULTIPOLYGON(((-145.790967486 -5.17543698881, -145.790854979 -5.11348060995, -145.853073512 -5.11339421216, -145.853192037 -5.17535061936, -145.790967486 -5.17543698881)), - //((-145.8563923 -5.17527125408, -145.856222168 -5.11332154814, -145.918433943 -5.11317773171, -145.918610092 -5.17512738429, -145.8563923 -5.17527125408))) - GeoPoint point1 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-5.17543698881), Geo3DUtil.fromDegrees(-145.790967486)); - GeoPoint point2 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-5.11348060995), Geo3DUtil.fromDegrees(-145.790854979)); - GeoPoint point3 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-5.11339421216), Geo3DUtil.fromDegrees(-145.853073512)); - GeoPoint point4 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-5.17535061936), Geo3DUtil.fromDegrees(-145.853192037)); + private GeoPolygon getMultiPolygon() { + // MULTIPOLYGON(((-145.790967486 -5.17543698881, -145.790854979 -5.11348060995, -145.853073512 + // -5.11339421216, -145.853192037 -5.17535061936, -145.790967486 -5.17543698881)), + // ((-145.8563923 -5.17527125408, -145.856222168 -5.11332154814, -145.918433943 -5.11317773171, + // -145.918610092 -5.17512738429, -145.8563923 -5.17527125408))) + GeoPoint point1 = + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-5.17543698881), + Geo3DUtil.fromDegrees(-145.790967486)); + GeoPoint point2 = + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-5.11348060995), + Geo3DUtil.fromDegrees(-145.790854979)); + GeoPoint point3 = + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-5.11339421216), + Geo3DUtil.fromDegrees(-145.853073512)); + GeoPoint point4 = + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-5.17535061936), + Geo3DUtil.fromDegrees(-145.853192037)); final List points1 = new ArrayList<>(); points1.add(point1); points1.add(point2); points1.add(point3); points1.add(point4); GeoPolygonFactory.PolygonDescription pd1 = new GeoPolygonFactory.PolygonDescription(points1); - GeoPoint point5 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-5.17527125408), Geo3DUtil.fromDegrees(-145.8563923)); - GeoPoint point6 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-5.11332154814), Geo3DUtil.fromDegrees(-145.856222168)); - GeoPoint point7 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-5.11317773171), Geo3DUtil.fromDegrees(-145.918433943)); - GeoPoint point8 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-5.17512738429), Geo3DUtil.fromDegrees(-145.918610092)); + GeoPoint point5 = + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-5.17527125408), + Geo3DUtil.fromDegrees(-145.8563923)); + GeoPoint point6 = + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-5.11332154814), + Geo3DUtil.fromDegrees(-145.856222168)); + GeoPoint point7 = + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-5.11317773171), + Geo3DUtil.fromDegrees(-145.918433943)); + GeoPoint point8 = + new GeoPoint( + PlanetModel.SPHERE, + 
Geo3DUtil.fromDegrees(-5.17512738429), + Geo3DUtil.fromDegrees(-145.918610092)); final List points2 = new ArrayList<>(); points2.add(point5); points2.add(point6); @@ -810,22 +922,43 @@ public class TestCompositeGeoPolygonRelationships { return GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.SPHERE, pds); } - public GeoShape getInsideCompositeShape(){ - //MULTIPOLYGON(((19.945091 -60.552631, 20.319948 -61.555652, 20.9 -61.5, 20.9 -61, 19.945091 -60.552631)), + public GeoShape getInsideCompositeShape() { + // MULTIPOLYGON(((19.945091 -60.552631, 20.319948 -61.555652, 20.9 -61.5, 20.9 -61, 19.945091 + // -60.552631)), // ((21.1 -61.5, 23.107901 -61.253298, 22.720804 -60.457713,21.1 -61, 21.1 -61.5))) - GeoPoint point1 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-60.552631), Geo3DUtil.fromDegrees(19.945091)); - GeoPoint point2 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-61.555652), Geo3DUtil.fromDegrees(20.319948)); - GeoPoint point3 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-61.5), Geo3DUtil.fromDegrees(20.9)); - GeoPoint point4 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-61), Geo3DUtil.fromDegrees(20.9)); + GeoPoint point1 = + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-60.552631), + Geo3DUtil.fromDegrees(19.945091)); + GeoPoint point2 = + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-61.555652), + Geo3DUtil.fromDegrees(20.319948)); + GeoPoint point3 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-61.5), Geo3DUtil.fromDegrees(20.9)); + GeoPoint point4 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-61), Geo3DUtil.fromDegrees(20.9)); final List points1 = new ArrayList<>(); points1.add(point1); points1.add(point2); points1.add(point3); points1.add(point4); - GeoPoint point5 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-61.5), Geo3DUtil.fromDegrees(21.1)); - GeoPoint point6 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-61.253298), Geo3DUtil.fromDegrees(23.107901)); - GeoPoint point7 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-60.457713), Geo3DUtil.fromDegrees(22.720804)); - GeoPoint point8 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-61), Geo3DUtil.fromDegrees(21.1)); + GeoPoint point5 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-61.5), Geo3DUtil.fromDegrees(21.1)); + GeoPoint point6 = + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-61.253298), + Geo3DUtil.fromDegrees(23.107901)); + GeoPoint point7 = + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-60.457713), + Geo3DUtil.fromDegrees(22.720804)); + GeoPoint point8 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-61), Geo3DUtil.fromDegrees(21.1)); final List points2 = new ArrayList<>(); points2.add(point5); points2.add(point6); @@ -833,10 +966,10 @@ public class TestCompositeGeoPolygonRelationships { points2.add(point8); GeoPolygon p1 = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points1); GeoPolygon p2 = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points2); - GeoCompositeMembershipShape compositeMembershipShape = new GeoCompositeMembershipShape(PlanetModel.SPHERE); + GeoCompositeMembershipShape compositeMembershipShape = + new GeoCompositeMembershipShape(PlanetModel.SPHERE); compositeMembershipShape.addShape(p1); compositeMembershipShape.addShape(p2); return compositeMembershipShape; } - } diff --git a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoBBox.java 
b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoBBox.java index 67eb8d2c41d..749c0923a3e 100755 --- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoBBox.java +++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoBBox.java @@ -16,15 +16,14 @@ */ package org.apache.lucene.spatial3d.geom; -import java.util.ArrayList; -import java.util.List; - -import org.junit.Test; - import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import java.util.ArrayList; +import java.util.List; +import org.junit.Test; + public class TestGeoBBox { protected static final double DEGREES_TO_RADIANS = Math.PI / 180.0; @@ -35,15 +34,29 @@ public class TestGeoBBox { GeoConvexPolygon cp; int relationship; List points = new ArrayList(); - points.add(new GeoPoint(PlanetModel.SPHERE, -49 * DEGREES_TO_RADIANS, -176 * DEGREES_TO_RADIANS)); - points.add(new GeoPoint(PlanetModel.SPHERE, -11 * DEGREES_TO_RADIANS, 101 * DEGREES_TO_RADIANS)); + points.add( + new GeoPoint(PlanetModel.SPHERE, -49 * DEGREES_TO_RADIANS, -176 * DEGREES_TO_RADIANS)); + points.add( + new GeoPoint(PlanetModel.SPHERE, -11 * DEGREES_TO_RADIANS, 101 * DEGREES_TO_RADIANS)); points.add(new GeoPoint(PlanetModel.SPHERE, 24 * DEGREES_TO_RADIANS, -30 * DEGREES_TO_RADIANS)); GeoMembershipShape shape = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points); - box = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, -64 * DEGREES_TO_RADIANS, -64 * DEGREES_TO_RADIANS, -180 * DEGREES_TO_RADIANS, 180 * DEGREES_TO_RADIANS); + box = + GeoBBoxFactory.makeGeoBBox( + PlanetModel.SPHERE, + -64 * DEGREES_TO_RADIANS, + -64 * DEGREES_TO_RADIANS, + -180 * DEGREES_TO_RADIANS, + 180 * DEGREES_TO_RADIANS); relationship = box.getRelationship(shape); assertEquals(GeoArea.CONTAINS, relationship); - box = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, -61.85 * DEGREES_TO_RADIANS, -67.5 * DEGREES_TO_RADIANS, -180 * DEGREES_TO_RADIANS, -168.75 * DEGREES_TO_RADIANS); - //System.out.println("Shape = " + shape + " Rect = " + box); + box = + GeoBBoxFactory.makeGeoBBox( + PlanetModel.SPHERE, + -61.85 * DEGREES_TO_RADIANS, + -67.5 * DEGREES_TO_RADIANS, + -180 * DEGREES_TO_RADIANS, + -168.75 * DEGREES_TO_RADIANS); + // System.out.println("Shape = " + shape + " Rect = " + box); relationship = box.getRelationship(shape); assertEquals(GeoArea.CONTAINS, relationship); } @@ -65,12 +78,14 @@ public class TestGeoBBox { assertFalse(box.isWithin(gp)); gp = new GeoPoint(PlanetModel.SPHERE, -0.1, -1.1); assertFalse(box.isWithin(gp)); - assertEquals(0.1,box.computeOutsideDistance(DistanceStyle.ARC,gp),1e-2); - assertEquals(0.1,box.computeOutsideDistance(DistanceStyle.NORMAL,gp),1e-2); - assertEquals(0.1,box.computeOutsideDistance(DistanceStyle.NORMAL,gp),1e-2); + assertEquals(0.1, box.computeOutsideDistance(DistanceStyle.ARC, gp), 1e-2); + assertEquals(0.1, box.computeOutsideDistance(DistanceStyle.NORMAL, gp), 1e-2); + assertEquals(0.1, box.computeOutsideDistance(DistanceStyle.NORMAL, gp), 1e-2); // Standard normal Rect box, crossing dateline - box = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, 0.0, -Math.PI * 0.25, Math.PI - 1.0, -Math.PI + 1.0); + box = + GeoBBoxFactory.makeGeoBBox( + PlanetModel.SPHERE, 0.0, -Math.PI * 0.25, Math.PI - 1.0, -Math.PI + 1.0); gp = new GeoPoint(PlanetModel.SPHERE, -0.1, -Math.PI); assertTrue(box.isWithin(gp)); gp = new GeoPoint(PlanetModel.SPHERE, 0.1, -Math.PI); @@ -96,7 +111,9 @@ public class TestGeoBBox { assertTrue(box.isWithin(gp)); 
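// Editorial aside, not part of the patch: the boxes above encode a
// dateline-crossing rectangle by passing leftLon > rightLon to makeGeoBBox,
// e.g. (Math.PI - 1.0, -Math.PI + 1.0). Assuming that convention, a
// longitude-membership test in radians flips from a conjunction to a
// disjunction once the range wraps:
static boolean lonWithin(double lon, double left, double right) {
  if (left <= right) {
    return lon >= left && lon <= right; // ordinary range
  }
  return lon >= left || lon <= right; // range wraps across the dateline
}
// For example, lonWithin(-Math.PI, Math.PI - 1.0, -Math.PI + 1.0) is true,
// which is consistent with the isWithin assertions at longitude -Math.PI above.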
// World - box = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, Math.PI * 0.5, -Math.PI * 0.5, -Math.PI, Math.PI); + box = + GeoBBoxFactory.makeGeoBBox( + PlanetModel.SPHERE, Math.PI * 0.5, -Math.PI * 0.5, -Math.PI, Math.PI); gp = new GeoPoint(PlanetModel.SPHERE, -0.1, -Math.PI); assertTrue(box.isWithin(gp)); gp = new GeoPoint(PlanetModel.SPHERE, 0.1, -Math.PI); @@ -107,7 +124,6 @@ public class TestGeoBBox { assertTrue(box.isWithin(gp)); gp = new GeoPoint(PlanetModel.SPHERE, -0.1, (-Math.PI - 1.1) + Math.PI * 2.0); assertTrue(box.isWithin(gp)); - } @Test @@ -143,23 +159,57 @@ public class TestGeoBBox { GeoArea solid; GeoPoint point; int relationship; - - c= GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, 0.7570958596622309, -0.7458670829264561, -0.9566079379002148, 1.4802570961901191); - solid = GeoAreaFactory.makeGeoArea(PlanetModel.SPHERE,0.10922258701604912,0.1248184603754517,-0.8172414690802067,0.9959041483215542,-0.6136586624726926,0.6821740363641521); + + c = + GeoBBoxFactory.makeGeoBBox( + PlanetModel.SPHERE, + 0.7570958596622309, + -0.7458670829264561, + -0.9566079379002148, + 1.4802570961901191); + solid = + GeoAreaFactory.makeGeoArea( + PlanetModel.SPHERE, + 0.10922258701604912, + 0.1248184603754517, + -0.8172414690802067, + 0.9959041483215542, + -0.6136586624726926, + 0.6821740363641521); point = new GeoPoint(PlanetModel.SPHERE, 0.3719987557178081, 1.4529582778845198); assertTrue(c.isWithin(point)); assertTrue(solid.isWithin(point)); relationship = solid.getRelationship(c); - assertTrue(relationship == GeoArea.OVERLAPS || relationship == GeoArea.CONTAINS || relationship == GeoArea.WITHIN); + assertTrue( + relationship == GeoArea.OVERLAPS + || relationship == GeoArea.CONTAINS + || relationship == GeoArea.WITHIN); - c= GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, 0.006607096847842122, -0.002828135860810422, -0.0012934461873348349, 0.006727418645092394); - solid = GeoAreaFactory.makeGeoArea(PlanetModel.SPHERE,0.9999995988328008,1.0000000002328306,-0.0012934708508166816,0.006727393021214471,-0.002828157275369464,0.006607074060760007); + c = + GeoBBoxFactory.makeGeoBBox( + PlanetModel.SPHERE, + 0.006607096847842122, + -0.002828135860810422, + -0.0012934461873348349, + 0.006727418645092394); + solid = + GeoAreaFactory.makeGeoArea( + PlanetModel.SPHERE, + 0.9999995988328008, + 1.0000000002328306, + -0.0012934708508166816, + 0.006727393021214471, + -0.002828157275369464, + 0.006607074060760007); point = new GeoPoint(PlanetModel.SPHERE, -5.236470872437899E-4, 3.992578692654256E-4); assertTrue(c.isWithin(point)); assertTrue(solid.isWithin(point)); relationship = solid.getRelationship(c); - assertTrue(relationship == GeoArea.OVERLAPS || relationship == GeoArea.CONTAINS || relationship == GeoArea.WITHIN); - + assertTrue( + relationship == GeoArea.OVERLAPS + || relationship == GeoArea.CONTAINS + || relationship == GeoArea.WITHIN); + c = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, Math.PI * 0.25, -Math.PI * 0.25, -1.0, 1.0); b = new LatLonBounds(); c.getBounds(b); @@ -178,14 +228,16 @@ public class TestGeoBBox { assertEquals(0.841471, xyzb.getMaximumY(), 0.000001); assertEquals(-0.707107, xyzb.getMinimumZ(), 0.000001); assertEquals(0.707107, xyzb.getMaximumZ(), 0.000001); - - GeoArea area = GeoAreaFactory.makeGeoArea(PlanetModel.SPHERE, - xyzb.getMinimumX() - 2.0 * Vector.MINIMUM_RESOLUTION, - xyzb.getMaximumX() + 2.0 * Vector.MINIMUM_RESOLUTION, - xyzb.getMinimumY() - 2.0 * Vector.MINIMUM_RESOLUTION, - xyzb.getMaximumY() + 2.0 * Vector.MINIMUM_RESOLUTION, - xyzb.getMinimumZ() - 2.0 * 
Vector.MINIMUM_RESOLUTION, - xyzb.getMaximumZ() + 2.0 * Vector.MINIMUM_RESOLUTION); + + GeoArea area = + GeoAreaFactory.makeGeoArea( + PlanetModel.SPHERE, + xyzb.getMinimumX() - 2.0 * Vector.MINIMUM_RESOLUTION, + xyzb.getMaximumX() + 2.0 * Vector.MINIMUM_RESOLUTION, + xyzb.getMinimumY() - 2.0 * Vector.MINIMUM_RESOLUTION, + xyzb.getMaximumY() + 2.0 * Vector.MINIMUM_RESOLUTION, + xyzb.getMinimumZ() - 2.0 * Vector.MINIMUM_RESOLUTION, + xyzb.getMaximumZ() + 2.0 * Vector.MINIMUM_RESOLUTION); assertEquals(GeoArea.WITHIN, area.getRelationship(c)); c = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, 0.0, -Math.PI * 0.25, -1.0, 1.0); @@ -216,8 +268,8 @@ public class TestGeoBBox { assertTrue(b.checkNoLongitudeBound()); assertFalse(b.checkNoTopLatitudeBound()); assertFalse(b.checkNoBottomLatitudeBound()); - //assertEquals(1.0,b.getLeftLongitude(),0.000001); - //assertEquals(-1.0,b.getRightLongitude(),0.000001); + // assertEquals(1.0,b.getLeftLongitude(),0.000001); + // assertEquals(-1.0,b.getRightLongitude(),0.000001); assertEquals(-Math.PI * 0.25, b.getMinLatitude(), 0.000001); assertEquals(0.0, b.getMaxLatitude(), 0.000001); assertEquals(-1.0, xyzb.getMinimumX(), 0.000001); @@ -227,7 +279,6 @@ public class TestGeoBBox { assertEquals(-0.707107, xyzb.getMinimumZ(), 0.000001); assertEquals(0.0, xyzb.getMaximumZ(), 0.000001); - c = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, Math.PI * 0.5, -Math.PI * 0.5, -1.0, 1.0); b = new LatLonBounds(); @@ -237,8 +288,8 @@ public class TestGeoBBox { assertTrue(b.checkNoLongitudeBound()); assertTrue(b.checkNoTopLatitudeBound()); assertTrue(b.checkNoBottomLatitudeBound()); - //assertEquals(-1.0, b.getLeftLongitude(), 0.000001); - //assertEquals(1.0, b.getRightLongitude(), 0.000001); + // assertEquals(-1.0, b.getLeftLongitude(), 0.000001); + // assertEquals(1.0, b.getRightLongitude(), 0.000001); assertEquals(0.0, xyzb.getMinimumX(), 0.000001); assertEquals(1.0, xyzb.getMaximumX(), 0.000001); assertEquals(-0.841471, xyzb.getMinimumY(), 0.000001); @@ -255,8 +306,8 @@ public class TestGeoBBox { assertTrue(b.checkNoLongitudeBound()); assertTrue(b.checkNoTopLatitudeBound()); assertTrue(b.checkNoBottomLatitudeBound()); - //assertEquals(1.0,b.getLeftLongitude(),0.000001); - //assertEquals(-1.0,b.getRightLongitude(),0.000001); + // assertEquals(1.0,b.getLeftLongitude(),0.000001); + // assertEquals(-1.0,b.getRightLongitude(),0.000001); assertEquals(-1.0, xyzb.getMinimumX(), 0.000001); assertEquals(0.540303, xyzb.getMaximumX(), 0.000001); assertEquals(-1.0, xyzb.getMinimumY(), 0.000001); @@ -266,19 +317,23 @@ public class TestGeoBBox { // Check wide variants of rectangle and longitude slice - c = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, 0.0, -Math.PI * 0.25, -Math.PI + 0.1, Math.PI - 0.1); + c = + GeoBBoxFactory.makeGeoBBox( + PlanetModel.SPHERE, 0.0, -Math.PI * 0.25, -Math.PI + 0.1, Math.PI - 0.1); b = new LatLonBounds(); c.getBounds(b); assertTrue(b.checkNoLongitudeBound()); assertFalse(b.checkNoTopLatitudeBound()); assertFalse(b.checkNoBottomLatitudeBound()); - //assertEquals(-Math.PI+0.1,b.getLeftLongitude(),0.000001); - //assertEquals(Math.PI-0.1,b.getRightLongitude(),0.000001); + // assertEquals(-Math.PI+0.1,b.getLeftLongitude(),0.000001); + // assertEquals(Math.PI-0.1,b.getRightLongitude(),0.000001); assertEquals(-Math.PI * 0.25, b.getMinLatitude(), 0.000001); assertEquals(0.0, b.getMaxLatitude(), 0.000001); - c = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, 0.0, -Math.PI * 0.25, Math.PI - 0.1, -Math.PI + 0.1); + c = + GeoBBoxFactory.makeGeoBBox( + 
PlanetModel.SPHERE, 0.0, -Math.PI * 0.25, Math.PI - 0.1, -Math.PI + 0.1); b = new LatLonBounds(); c.getBounds(b); @@ -290,25 +345,29 @@ public class TestGeoBBox { assertEquals(-Math.PI * 0.25, b.getMinLatitude(), 0.000001); assertEquals(0.0, b.getMaxLatitude(), 0.000001); - c = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, Math.PI * 0.5, -Math.PI * 0.5, -Math.PI + 0.1, Math.PI - 0.1); + c = + GeoBBoxFactory.makeGeoBBox( + PlanetModel.SPHERE, Math.PI * 0.5, -Math.PI * 0.5, -Math.PI + 0.1, Math.PI - 0.1); b = new LatLonBounds(); c.getBounds(b); assertTrue(b.checkNoLongitudeBound()); assertTrue(b.checkNoTopLatitudeBound()); assertTrue(b.checkNoBottomLatitudeBound()); - //assertEquals(-Math.PI+0.1,b.getLeftLongitude(),0.000001); - //assertEquals(Math.PI-0.1,b.getRightLongitude(),0.000001); + // assertEquals(-Math.PI+0.1,b.getLeftLongitude(),0.000001); + // assertEquals(Math.PI-0.1,b.getRightLongitude(),0.000001); - c = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, Math.PI * 0.5, -Math.PI * 0.5, Math.PI - 0.1, -Math.PI + 0.1); + c = + GeoBBoxFactory.makeGeoBBox( + PlanetModel.SPHERE, Math.PI * 0.5, -Math.PI * 0.5, Math.PI - 0.1, -Math.PI + 0.1); b = new LatLonBounds(); c.getBounds(b); assertTrue(b.checkNoLongitudeBound()); assertTrue(b.checkNoTopLatitudeBound()); assertTrue(b.checkNoBottomLatitudeBound()); - //assertEquals(Math.PI - 0.1, b.getLeftLongitude(), 0.000001); - //assertEquals(-Math.PI + 0.1, b.getRightLongitude(), 0.000001); + // assertEquals(Math.PI - 0.1, b.getLeftLongitude(), 0.000001); + // assertEquals(-Math.PI + 0.1, b.getRightLongitude(), 0.000001); // Check latitude zone c = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, 1.0, -1.0, -Math.PI, Math.PI); @@ -325,8 +384,11 @@ public class TestGeoBBox { GeoBBox c1; GeoBBox c2; - c1 = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, Math.PI * 0.5, -Math.PI * 0.5, -Math.PI, 0.0); - c2 = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, Math.PI * 0.5, -Math.PI * 0.5, 0.0, Math.PI); + c1 = + GeoBBoxFactory.makeGeoBBox( + PlanetModel.SPHERE, Math.PI * 0.5, -Math.PI * 0.5, -Math.PI, 0.0); + c2 = + GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, Math.PI * 0.5, -Math.PI * 0.5, 0.0, Math.PI); b = new LatLonBounds(); c1.getBounds(b); @@ -335,8 +397,12 @@ public class TestGeoBBox { assertTrue(b.checkNoTopLatitudeBound()); assertTrue(b.checkNoBottomLatitudeBound()); - c1 = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, Math.PI * 0.5, -Math.PI * 0.5, -Math.PI, 0.0); - c2 = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, Math.PI * 0.5, -Math.PI * 0.5, 0.0, Math.PI * 0.5); + c1 = + GeoBBoxFactory.makeGeoBBox( + PlanetModel.SPHERE, Math.PI * 0.5, -Math.PI * 0.5, -Math.PI, 0.0); + c2 = + GeoBBoxFactory.makeGeoBBox( + PlanetModel.SPHERE, Math.PI * 0.5, -Math.PI * 0.5, 0.0, Math.PI * 0.5); b = new LatLonBounds(); c1.getBounds(b); @@ -344,11 +410,14 @@ public class TestGeoBBox { assertTrue(b.checkNoLongitudeBound()); assertTrue(b.checkNoTopLatitudeBound()); assertTrue(b.checkNoBottomLatitudeBound()); - //assertEquals(-Math.PI,b.getLeftLongitude(),0.000001); - //assertEquals(Math.PI*0.5,b.getRightLongitude(),0.000001); + // assertEquals(-Math.PI,b.getLeftLongitude(),0.000001); + // assertEquals(Math.PI*0.5,b.getRightLongitude(),0.000001); - c1 = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, Math.PI * 0.5, -Math.PI * 0.5, -Math.PI * 0.5, 0.0); - c2 = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, Math.PI * 0.5, -Math.PI * 0.5, 0.0, Math.PI); + c1 = + GeoBBoxFactory.makeGeoBBox( + PlanetModel.SPHERE, Math.PI * 0.5, -Math.PI * 0.5, -Math.PI * 0.5, 0.0); + c2 
= + GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, Math.PI * 0.5, -Math.PI * 0.5, 0.0, Math.PI); b = new LatLonBounds(); c1.getBounds(b); @@ -356,35 +425,63 @@ public class TestGeoBBox { assertTrue(b.checkNoLongitudeBound()); assertTrue(b.checkNoTopLatitudeBound()); assertTrue(b.checkNoBottomLatitudeBound()); - //assertEquals(-Math.PI * 0.5,b.getLeftLongitude(),0.000001); - //assertEquals(Math.PI,b.getRightLongitude(),0.000001); + // assertEquals(-Math.PI * 0.5,b.getLeftLongitude(),0.000001); + // assertEquals(Math.PI,b.getRightLongitude(),0.000001); } - + @Test public void testFailureCase1() { - final GeoPoint point = new GeoPoint(-0.017413370801260174, -2.132522881412925E-18, 0.9976113450663769); - final GeoBBox box = new GeoNorthRectangle(PlanetModel.WGS84, 0.35451471030934045, 9.908337057950734E-15, 2.891004593509811E-11); + final GeoPoint point = + new GeoPoint(-0.017413370801260174, -2.132522881412925E-18, 0.9976113450663769); + final GeoBBox box = + new GeoNorthRectangle( + PlanetModel.WGS84, 0.35451471030934045, 9.908337057950734E-15, 2.891004593509811E-11); final XYZBounds bounds = new XYZBounds(); box.getBounds(bounds); - final XYZSolid solid = XYZSolidFactory.makeXYZSolid(PlanetModel.WGS84, bounds.getMinimumX(), bounds.getMaximumX(), bounds.getMinimumY(), bounds.getMaximumY(), bounds.getMinimumZ(), bounds.getMaximumZ()); - - assertTrue(box.isWithin(point)?solid.isWithin(point):true); + final XYZSolid solid = + XYZSolidFactory.makeXYZSolid( + PlanetModel.WGS84, + bounds.getMinimumX(), + bounds.getMaximumX(), + bounds.getMinimumY(), + bounds.getMaximumY(), + bounds.getMinimumZ(), + bounds.getMaximumZ()); + + assertTrue(box.isWithin(point) ? solid.isWithin(point) : true); } - + @Test public void testFailureCase2() { - //final GeoPoint point = new GeoPoint(-0.7375647084975573, -2.3309121299774915E-10, 0.6746626163258577); - final GeoPoint point = new GeoPoint(-0.737564708579924, -9.032562595264542E-17, 0.6746626165197899); - final GeoBBox box = new GeoRectangle(PlanetModel.WGS84, 0.7988584710911523, 0.25383311815493353, -1.2236144735575564E-12, 7.356011300929654E-49); + // final GeoPoint point = new GeoPoint(-0.7375647084975573, -2.3309121299774915E-10, + // 0.6746626163258577); + final GeoPoint point = + new GeoPoint(-0.737564708579924, -9.032562595264542E-17, 0.6746626165197899); + final GeoBBox box = + new GeoRectangle( + PlanetModel.WGS84, + 0.7988584710911523, + 0.25383311815493353, + -1.2236144735575564E-12, + 7.356011300929654E-49); final XYZBounds bounds = new XYZBounds(); box.getBounds(bounds); - final XYZSolid solid = XYZSolidFactory.makeXYZSolid(PlanetModel.WGS84, bounds.getMinimumX(), bounds.getMaximumX(), bounds.getMinimumY(), bounds.getMaximumY(), bounds.getMinimumZ(), bounds.getMaximumZ()); + final XYZSolid solid = + XYZSolidFactory.makeXYZSolid( + PlanetModel.WGS84, + bounds.getMinimumX(), + bounds.getMaximumX(), + bounds.getMinimumY(), + bounds.getMaximumY(), + bounds.getMinimumZ(), + bounds.getMaximumZ()); - //System.out.println("Is within Y value? "+(point.y >= bounds.getMinimumY() && point.y <= bounds.getMaximumY())); - //System.out.println("Shape = "+box+" is within? "+box.isWithin(point)); - //System.out.println("XYZBounds = "+bounds+" is within? "+solid.isWithin(point)+" solid="+solid); + // System.out.println("Is within Y value? "+(point.y >= bounds.getMinimumY() && point.y <= + // bounds.getMaximumY())); + // System.out.println("Shape = "+box+" is within? "+box.isWithin(point)); + // System.out.println("XYZBounds = "+bounds+" is within? 
"+solid.isWithin(point)+" + // solid="+solid); assertTrue(box.isWithin(point) == solid.isWithin(point)); } - } diff --git a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoCircle.java b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoCircle.java index 01d1031dbf5..cf5e31f82c4 100755 --- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoCircle.java +++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoCircle.java @@ -20,24 +20,24 @@ import org.apache.lucene.util.LuceneTestCase; import org.junit.Test; public class TestGeoCircle extends LuceneTestCase { - + @Test public void testCircleDistance() { GeoCircle c; GeoPoint gp; c = GeoCircleFactory.makeGeoCircle(PlanetModel.SPHERE, 0.0, -0.5, 0.1); gp = new GeoPoint(PlanetModel.SPHERE, 0.0, 0.0); - assertEquals(Double.POSITIVE_INFINITY, c.computeDistance(DistanceStyle.ARC,gp), 0.0); - assertEquals(Double.POSITIVE_INFINITY, c.computeDistance(DistanceStyle.NORMAL,gp), 0.0); - assertEquals(Double.POSITIVE_INFINITY, c.computeDistance(DistanceStyle.NORMAL,gp), 0.0); + assertEquals(Double.POSITIVE_INFINITY, c.computeDistance(DistanceStyle.ARC, gp), 0.0); + assertEquals(Double.POSITIVE_INFINITY, c.computeDistance(DistanceStyle.NORMAL, gp), 0.0); + assertEquals(Double.POSITIVE_INFINITY, c.computeDistance(DistanceStyle.NORMAL, gp), 0.0); gp = new GeoPoint(PlanetModel.SPHERE, 0.0, -0.5); - assertEquals(0.0, c.computeDistance(DistanceStyle.ARC,gp), 0.000001); - assertEquals(0.0, c.computeDistance(DistanceStyle.NORMAL,gp), 0.000001); - assertEquals(0.0, c.computeDistance(DistanceStyle.NORMAL,gp), 0.000001); + assertEquals(0.0, c.computeDistance(DistanceStyle.ARC, gp), 0.000001); + assertEquals(0.0, c.computeDistance(DistanceStyle.NORMAL, gp), 0.000001); + assertEquals(0.0, c.computeDistance(DistanceStyle.NORMAL, gp), 0.000001); gp = new GeoPoint(PlanetModel.SPHERE, 0.05, -0.5); - assertEquals(0.05, c.computeDistance(DistanceStyle.ARC,gp), 0.000001); - assertEquals(0.049995, c.computeDistance(DistanceStyle.LINEAR,gp), 0.000001); - assertEquals(0.049979, c.computeDistance(DistanceStyle.NORMAL,gp), 0.000001); + assertEquals(0.05, c.computeDistance(DistanceStyle.ARC, gp), 0.000001); + assertEquals(0.049995, c.computeDistance(DistanceStyle.LINEAR, gp), 0.000001); + assertEquals(0.049979, c.computeDistance(DistanceStyle.NORMAL, gp), 0.000001); } @Test @@ -71,9 +71,9 @@ public class TestGeoCircle extends LuceneTestCase { c = GeoCircleFactory.makeGeoCircle(PlanetModel.SPHERE, 0.0, -0.5, 0.1); gp = new GeoPoint(PlanetModel.SPHERE, 0.0, 0.0); assertFalse(c.isWithin(gp)); - assertEquals(0.4,c.computeOutsideDistance(DistanceStyle.ARC,gp),1e-12); - assertEquals(0.12,c.computeOutsideDistance(DistanceStyle.NORMAL,gp),0.01); - assertEquals(0.4,c.computeOutsideDistance(DistanceStyle.LINEAR,gp),0.01); + assertEquals(0.4, c.computeOutsideDistance(DistanceStyle.ARC, gp), 1e-12); + assertEquals(0.12, c.computeOutsideDistance(DistanceStyle.NORMAL, gp), 0.01); + assertEquals(0.4, c.computeOutsideDistance(DistanceStyle.LINEAR, gp), 0.01); gp = new GeoPoint(PlanetModel.SPHERE, 0.0, -0.5); assertTrue(c.isWithin(gp)); gp = new GeoPoint(PlanetModel.SPHERE, 0.0, -0.55); @@ -97,8 +97,18 @@ public class TestGeoCircle extends LuceneTestCase { int relationship; // ... 
- c = GeoCircleFactory.makeGeoCircle(PlanetModel.WGS84, -0.005931145568901605, -0.001942031539653079, 1.2991918568260272E-4); - area = GeoAreaFactory.makeGeoArea(PlanetModel.WGS84, 1.001098377143621, 1.001100011578687, -0.00207467080358696, -0.0018136665346280983, -0.006067808248760161, -0.005807683665759485); + c = + GeoCircleFactory.makeGeoCircle( + PlanetModel.WGS84, -0.005931145568901605, -0.001942031539653079, 1.2991918568260272E-4); + area = + GeoAreaFactory.makeGeoArea( + PlanetModel.WGS84, + 1.001098377143621, + 1.001100011578687, + -0.00207467080358696, + -0.0018136665346280983, + -0.006067808248760161, + -0.005807683665759485); p1 = new GeoPoint(PlanetModel.WGS84, -0.00591253844632244, -0.0020069187259065093); p2 = new GeoPoint(1.001099185736782, -0.0020091272069679327, -0.005919118245803968); assertTrue(c.isWithin(p1)); @@ -107,75 +117,151 @@ public class TestGeoCircle extends LuceneTestCase { assertTrue(relationship != GeoArea.DISJOINT); // Twelfth BKD discovered failure - c = GeoCircleFactory.makeGeoCircle(PlanetModel.WGS84,-0.00824379317765984,-0.0011677469001838581,0.0011530035396910402); - p1 = new GeoPoint(PlanetModel.WGS84,-0.006505092992723671,0.007654282718327381); - p2 = new GeoPoint(1.0010681673665647,0.007662608264336381,-0.006512324005914593); + c = + GeoCircleFactory.makeGeoCircle( + PlanetModel.WGS84, -0.00824379317765984, -0.0011677469001838581, 0.0011530035396910402); + p1 = new GeoPoint(PlanetModel.WGS84, -0.006505092992723671, 0.007654282718327381); + p2 = new GeoPoint(1.0010681673665647, 0.007662608264336381, -0.006512324005914593); assertTrue(!c.isWithin(p1)); assertTrue(!c.isWithin(p2)); xyzb = new XYZBounds(); c.getBounds(xyzb); - area = GeoAreaFactory.makeGeoArea(PlanetModel.WGS84, - xyzb.getMinimumX(), xyzb.getMaximumX(), xyzb.getMinimumY(), xyzb.getMaximumY(), xyzb.getMinimumZ(), xyzb.getMaximumZ()); + area = + GeoAreaFactory.makeGeoArea( + PlanetModel.WGS84, + xyzb.getMinimumX(), + xyzb.getMaximumX(), + xyzb.getMinimumY(), + xyzb.getMaximumY(), + xyzb.getMinimumZ(), + xyzb.getMaximumZ()); relationship = area.getRelationship(c); assertTrue(relationship == GeoArea.OVERLAPS || relationship == GeoArea.WITHIN); - + // Eleventh BKD discovered failure - c = GeoCircleFactory.makeGeoCircle(PlanetModel.SPHERE,-0.004431288600558495,-0.003687846671278374,1.704543429364245E-8); + c = + GeoCircleFactory.makeGeoCircle( + PlanetModel.SPHERE, -0.004431288600558495, -0.003687846671278374, 1.704543429364245E-8); xyzb = new XYZBounds(); c.getBounds(xyzb); - area = GeoAreaFactory.makeGeoArea(PlanetModel.SPHERE, - xyzb.getMinimumX(), xyzb.getMaximumX(), xyzb.getMinimumY(), xyzb.getMaximumY(), xyzb.getMinimumZ(), xyzb.getMaximumZ()); - //System.err.println(area); + area = + GeoAreaFactory.makeGeoArea( + PlanetModel.SPHERE, + xyzb.getMinimumX(), + xyzb.getMaximumX(), + xyzb.getMinimumY(), + xyzb.getMaximumY(), + xyzb.getMinimumZ(), + xyzb.getMaximumZ()); + // System.err.println(area); relationship = area.getRelationship(c); assertTrue(GeoArea.WITHIN == relationship || GeoArea.OVERLAPS == relationship); // Tenth BKD discovered failure - c = GeoCircleFactory.makeGeoCircle(PlanetModel.WGS84,-0.0018829770647349636,-0.001969499061382591,1.3045439293158305E-5); + c = + GeoCircleFactory.makeGeoCircle( + PlanetModel.WGS84, + -0.0018829770647349636, + -0.001969499061382591, + 1.3045439293158305E-5); xyzb = new XYZBounds(); c.getBounds(xyzb); - area = GeoAreaFactory.makeGeoArea(PlanetModel.WGS84, - xyzb.getMinimumX(), xyzb.getMaximumX(), xyzb.getMinimumY(), xyzb.getMaximumY(), 
xyzb.getMinimumZ(), xyzb.getMaximumZ()); - //System.err.println(area); + area = + GeoAreaFactory.makeGeoArea( + PlanetModel.WGS84, + xyzb.getMinimumX(), + xyzb.getMaximumX(), + xyzb.getMinimumY(), + xyzb.getMaximumY(), + xyzb.getMinimumZ(), + xyzb.getMaximumZ()); + // System.err.println(area); relationship = area.getRelationship(c); assertTrue(GeoArea.WITHIN == relationship || GeoArea.OVERLAPS == relationship); // Ninth BKD discovered failure - c = GeoCircleFactory.makeGeoCircle(PlanetModel.SPHERE,-4.211990380885122E-5,-0.0022958453508173044,1.4318475623498535E-5); + c = + GeoCircleFactory.makeGeoCircle( + PlanetModel.SPHERE, + -4.211990380885122E-5, + -0.0022958453508173044, + 1.4318475623498535E-5); xyzb = new XYZBounds(); c.getBounds(xyzb); - area = GeoAreaFactory.makeGeoArea(PlanetModel.SPHERE, - xyzb.getMinimumX(), xyzb.getMaximumX(), xyzb.getMinimumY(), xyzb.getMaximumY(), xyzb.getMinimumZ(), xyzb.getMaximumZ()); - //System.err.println(area); + area = + GeoAreaFactory.makeGeoArea( + PlanetModel.SPHERE, + xyzb.getMinimumX(), + xyzb.getMaximumX(), + xyzb.getMinimumY(), + xyzb.getMaximumY(), + xyzb.getMinimumZ(), + xyzb.getMaximumZ()); + // System.err.println(area); relationship = area.getRelationship(c); assertTrue(GeoArea.WITHIN == relationship || GeoArea.OVERLAPS == relationship); - + // Eighth BKD discovered failure - c = GeoCircleFactory.makeGeoCircle(PlanetModel.SPHERE,0.005321278689117842,-0.00216937368755372,1.5306034422500785E-4); + c = + GeoCircleFactory.makeGeoCircle( + PlanetModel.SPHERE, 0.005321278689117842, -0.00216937368755372, 1.5306034422500785E-4); xyzb = new XYZBounds(); c.getBounds(xyzb); - area = GeoAreaFactory.makeGeoArea(PlanetModel.SPHERE, - xyzb.getMinimumX(), xyzb.getMaximumX(), xyzb.getMinimumY(), xyzb.getMaximumY(), xyzb.getMinimumZ(), xyzb.getMaximumZ()); - //System.err.println(area); + area = + GeoAreaFactory.makeGeoArea( + PlanetModel.SPHERE, + xyzb.getMinimumX(), + xyzb.getMaximumX(), + xyzb.getMinimumY(), + xyzb.getMaximumY(), + xyzb.getMinimumZ(), + xyzb.getMaximumZ()); + // System.err.println(area); relationship = area.getRelationship(c); assertTrue(GeoArea.WITHIN == relationship || GeoArea.OVERLAPS == relationship); // Seventh BKD discovered failure - c = GeoCircleFactory.makeGeoCircle(PlanetModel.SPHERE,-0.0021627146783861745, -0.0017298167021592304,2.0818312293195752E-4); + c = + GeoCircleFactory.makeGeoCircle( + PlanetModel.SPHERE, + -0.0021627146783861745, + -0.0017298167021592304, + 2.0818312293195752E-4); xyzb = new XYZBounds(); c.getBounds(xyzb); - area = GeoAreaFactory.makeGeoArea(PlanetModel.SPHERE, - xyzb.getMinimumX(), xyzb.getMaximumX(), xyzb.getMinimumY(), xyzb.getMaximumY(), xyzb.getMinimumZ(), xyzb.getMaximumZ()); - //System.err.println(area); + area = + GeoAreaFactory.makeGeoArea( + PlanetModel.SPHERE, + xyzb.getMinimumX(), + xyzb.getMaximumX(), + xyzb.getMinimumY(), + xyzb.getMaximumY(), + xyzb.getMinimumZ(), + xyzb.getMaximumZ()); + // System.err.println(area); relationship = area.getRelationship(c); assertTrue(GeoArea.WITHIN == relationship || GeoArea.OVERLAPS == relationship); // Sixth BKD discovered failure - c = GeoCircleFactory.makeGeoCircle(PlanetModel.WGS84,-0.006450320645814321,0.004660694205115142,0.00489710732634323); - //xyzb = new XYZBounds(); - //zScaling.getBounds(xyzb); - //System.err.println("xmin="+xyzb.getMinimumX()+", xmax="+xyzb.getMaximumX()+",ymin="+xyzb.getMinimumY()+", ymax="+xyzb.getMaximumY()+",zmin="+xyzb.getMinimumZ()+", zmax="+xyzb.getMaximumZ()); - //xmin=1.0010356621420726, 
xmax=1.0011141249179447,ymin=-2.5326643901354566E-4, ymax=0.009584741915757169,zmin=-0.011359874956269283, zmax=-0.0015549504447452225 - area = GeoAreaFactory.makeGeoArea(PlanetModel.WGS84,1.0010822580620098,1.0010945779732867,0.007079167343247293,0.007541006774427837,-0.0021855011220022575,-0.001896122718181518); + c = + GeoCircleFactory.makeGeoCircle( + PlanetModel.WGS84, -0.006450320645814321, 0.004660694205115142, 0.00489710732634323); + // xyzb = new XYZBounds(); + // zScaling.getBounds(xyzb); + // System.err.println("xmin="+xyzb.getMinimumX()+", + // xmax="+xyzb.getMaximumX()+",ymin="+xyzb.getMinimumY()+", + // ymax="+xyzb.getMaximumY()+",zmin="+xyzb.getMinimumZ()+", zmax="+xyzb.getMaximumZ()); + // xmin=1.0010356621420726, xmax=1.0011141249179447,ymin=-2.5326643901354566E-4, + // ymax=0.009584741915757169,zmin=-0.011359874956269283, zmax=-0.0015549504447452225 + area = + GeoAreaFactory.makeGeoArea( + PlanetModel.WGS84, + 1.0010822580620098, + 1.0010945779732867, + 0.007079167343247293, + 0.007541006774427837, + -0.0021855011220022575, + -0.001896122718181518); assertTrue(GeoArea.CONTAINS != area.getRelationship(c)); /* p1 = new GeoPoint(1.0010893045436076,0.007380935180644008,-0.002140671370616495); @@ -189,36 +275,73 @@ public class TestGeoCircle extends LuceneTestCase { assertTrue(PlanetModel.WGS84.pointOnSurface(p1)); // This fails assertTrue(!area.isWithin(p1)); // This fails */ - + // Fifth BKD discovered failure - c = GeoCircleFactory.makeGeoCircle(PlanetModel.SPHERE, -0.004282454525970269, -1.6739831367422277E-4, 1.959639723134033E-6); + c = + GeoCircleFactory.makeGeoCircle( + PlanetModel.SPHERE, + -0.004282454525970269, + -1.6739831367422277E-4, + 1.959639723134033E-6); assertTrue(c.isWithin(c.getEdgePoints()[0])); xyzb = new XYZBounds(); c.getBounds(xyzb); - area = GeoAreaFactory.makeGeoArea(PlanetModel.SPHERE, - xyzb.getMinimumX(), xyzb.getMaximumX(), xyzb.getMinimumY(), xyzb.getMaximumY(), xyzb.getMinimumZ(), xyzb.getMaximumZ()); - assertTrue(GeoArea.WITHIN == area.getRelationship(c) || GeoArea.OVERLAPS == area.getRelationship(c)); - + area = + GeoAreaFactory.makeGeoArea( + PlanetModel.SPHERE, + xyzb.getMinimumX(), + xyzb.getMaximumX(), + xyzb.getMinimumY(), + xyzb.getMaximumY(), + xyzb.getMinimumZ(), + xyzb.getMaximumZ()); + assertTrue( + GeoArea.WITHIN == area.getRelationship(c) || GeoArea.OVERLAPS == area.getRelationship(c)); + // Fourth BKD discovered failure - c = GeoCircleFactory.makeGeoCircle(PlanetModel.SPHERE, -0.0048795517261255, 0.004053904306995974, 5.93699764258874E-6); + c = + GeoCircleFactory.makeGeoCircle( + PlanetModel.SPHERE, -0.0048795517261255, 0.004053904306995974, 5.93699764258874E-6); xyzb = new XYZBounds(); c.getBounds(xyzb); - area = GeoAreaFactory.makeGeoArea(PlanetModel.SPHERE, - xyzb.getMinimumX(), xyzb.getMaximumX(), xyzb.getMinimumY(), xyzb.getMaximumY(), xyzb.getMinimumZ(), xyzb.getMaximumZ()); - assertTrue(GeoArea.WITHIN == area.getRelationship(c) || GeoArea.OVERLAPS == area.getRelationship(c)); - + area = + GeoAreaFactory.makeGeoArea( + PlanetModel.SPHERE, + xyzb.getMinimumX(), + xyzb.getMaximumX(), + xyzb.getMinimumY(), + xyzb.getMaximumY(), + xyzb.getMinimumZ(), + xyzb.getMaximumZ()); + assertTrue( + GeoArea.WITHIN == area.getRelationship(c) || GeoArea.OVERLAPS == area.getRelationship(c)); + // Another test case from BKD - c = GeoCircleFactory.makeGeoCircle(PlanetModel.SPHERE, -0.005955031040627789, -0.0029274772647399153, 1.601488279374338E-5); + c = + GeoCircleFactory.makeGeoCircle( + PlanetModel.SPHERE, + -0.005955031040627789, + 
-0.0029274772647399153, + 1.601488279374338E-5); xyzb = new XYZBounds(); c.getBounds(xyzb); - area = GeoAreaFactory.makeGeoArea(PlanetModel.SPHERE, - xyzb.getMinimumX(), xyzb.getMaximumX(), xyzb.getMinimumY(), xyzb.getMaximumY(), xyzb.getMinimumZ(), xyzb.getMaximumZ()); - + area = + GeoAreaFactory.makeGeoArea( + PlanetModel.SPHERE, + xyzb.getMinimumX(), + xyzb.getMaximumX(), + xyzb.getMinimumY(), + xyzb.getMaximumY(), + xyzb.getMinimumZ(), + xyzb.getMaximumZ()); + relationship = area.getRelationship(c); assertTrue(relationship == GeoArea.WITHIN || relationship == GeoArea.OVERLAPS); - + // Test case from BKD - c = GeoCircleFactory.makeGeoCircle(PlanetModel.SPHERE, -0.765816119338, 0.991848766844, 0.8153163226330487); + c = + GeoCircleFactory.makeGeoCircle( + PlanetModel.SPHERE, -0.765816119338, 0.991848766844, 0.8153163226330487); p1 = new GeoPoint(0.7692262265236023, -0.055089298115534646, -0.6365973465711254); assertTrue(c.isWithin(p1)); xyzb = new XYZBounds(); @@ -226,7 +349,7 @@ public class TestGeoCircle extends LuceneTestCase { assertTrue(p1.x >= xyzb.getMinimumX() && p1.x <= xyzb.getMaximumX()); assertTrue(p1.y >= xyzb.getMinimumY() && p1.y <= xyzb.getMaximumY()); assertTrue(p1.z >= xyzb.getMinimumZ() && p1.z <= xyzb.getMaximumZ()); - + // Vertical circle cases c = GeoCircleFactory.makeGeoCircle(PlanetModel.SPHERE, 0.0, -0.5, 0.1); b = new LatLonBounds(); @@ -385,44 +508,67 @@ public class TestGeoCircle extends LuceneTestCase { assertEquals(-0.09, b.getMinLatitude(), 0.000001); assertEquals(-0.6, b.getLeftLongitude(), 0.00001); assertEquals(-0.4, b.getRightLongitude(), 0.00001); - } @Test public void testBoundsFailureCase1() { - // lat=2.7399499693409367E-13, lon=-3.141592653589793([X=-1.0011188539924791, Y=-1.226017000107956E-16, Z=2.743015573303327E-13])], radius=2.1814042682464985 - final GeoCircle gc = GeoCircleFactory.makeGeoCircle(PlanetModel.WGS84, 2.7399499693409367E-13, -3.141592653589793, 2.1814042682464985); + // lat=2.7399499693409367E-13, lon=-3.141592653589793([X=-1.0011188539924791, + // Y=-1.226017000107956E-16, Z=2.743015573303327E-13])], radius=2.1814042682464985 + final GeoCircle gc = + GeoCircleFactory.makeGeoCircle( + PlanetModel.WGS84, 2.7399499693409367E-13, -3.141592653589793, 2.1814042682464985); // With a circle like this, zmin should equal zmax, and xmin should be PlanetModel.minimumX. 
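The invariant exercised by these failure-case tests is worth stating on its own: any point accepted by a GeoShape must also be accepted by the XYZSolid built from that shape's XYZBounds, since the bounds are by contract a superset of the shape. The sketch below restates that check using the factory calls that appear throughout this patch; it is illustrative only (the helper name checkBoundsEnclose and the static JUnit imports are assumptions, not part of the patch):

    // Sketch: a shape's XYZBounds must enclose the shape, so the solid built
    // from those bounds must accept every point the shape itself accepts.
    static void checkBoundsEnclose(PlanetModel pm, GeoShape shape, GeoPoint point) {
      final XYZBounds bounds = new XYZBounds();
      shape.getBounds(bounds);
      final XYZSolid solid =
          XYZSolidFactory.makeXYZSolid(
              pm,
              bounds.getMinimumX(), bounds.getMaximumX(),
              bounds.getMinimumY(), bounds.getMaximumY(),
              bounds.getMinimumZ(), bounds.getMaximumZ());
      // Membership in the shape implies membership in the bounding solid.
      assertTrue(!shape.isWithin(point) || solid.isWithin(point));
    }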
- final GeoPoint gp = new GeoPoint(0.0054866241253590815, -0.004009749293376541, 0.997739304376186); + final GeoPoint gp = + new GeoPoint(0.0054866241253590815, -0.004009749293376541, 0.997739304376186); final GeoPoint gpOnSurface = PlanetModel.WGS84.createSurfacePoint(gp); final XYZBounds bounds = new XYZBounds(); gc.getBounds(bounds); - //System.out.println("Bounds: "+bounds); - final XYZSolid solid = XYZSolidFactory.makeXYZSolid(PlanetModel.WGS84, bounds.getMinimumX(), bounds.getMaximumX(), bounds.getMinimumY(), bounds.getMaximumY(), bounds.getMinimumZ(), bounds.getMaximumZ()); + // System.out.println("Bounds: "+bounds); + final XYZSolid solid = + XYZSolidFactory.makeXYZSolid( + PlanetModel.WGS84, + bounds.getMinimumX(), + bounds.getMaximumX(), + bounds.getMinimumY(), + bounds.getMaximumY(), + bounds.getMinimumZ(), + bounds.getMaximumZ()); assertTrue(gc.isWithin(gpOnSurface)); assertTrue(gc.isWithin(gp)); assertTrue(solid.isWithin(gpOnSurface)); // This fails assertTrue(solid.isWithin(gp)); } - + @Test public void testBoundsFailureCase2() { - final GeoCircle gc = GeoCircleFactory.makeGeoCircle(PlanetModel.WGS84, -2.7574435614238194E-13, 0.0, 1.5887859182593391); + final GeoCircle gc = + GeoCircleFactory.makeGeoCircle( + PlanetModel.WGS84, -2.7574435614238194E-13, 0.0, 1.5887859182593391); final GeoPoint gp = new GeoPoint(PlanetModel.WGS84, 0.7980359504429014, 1.5964981068121482); final XYZBounds bounds = new XYZBounds(); gc.getBounds(bounds); - System.out.println("Bounds = "+bounds); - System.out.println("Point = "+gp); - final XYZSolid solid = XYZSolidFactory.makeXYZSolid(PlanetModel.WGS84, bounds.getMinimumX(), bounds.getMaximumX(), bounds.getMinimumY(), bounds.getMaximumY(), bounds.getMinimumZ(), bounds.getMaximumZ()); - - assert gc.isWithin(gp)?solid.isWithin(gp):true; - + System.out.println("Bounds = " + bounds); + System.out.println("Point = " + gp); + final XYZSolid solid = + XYZSolidFactory.makeXYZSolid( + PlanetModel.WGS84, + bounds.getMinimumX(), + bounds.getMaximumX(), + bounds.getMinimumY(), + bounds.getMaximumY(), + bounds.getMinimumZ(), + bounds.getMaximumZ()); + + assert gc.isWithin(gp) ? 
solid.isWithin(gp) : true; } @Test public void testWholeWorld() { - final GeoCircle circle1 = GeoCircleFactory.makeGeoCircle(PlanetModel.SPHERE, 0.0, 0.0, 3.1415926535897913); - final GeoCircle circle2 = GeoCircleFactory.makeGeoCircle(PlanetModel.SPHERE, Math.PI * 0.5, Math.PI, 3.141592653589792); + final GeoCircle circle1 = + GeoCircleFactory.makeGeoCircle(PlanetModel.SPHERE, 0.0, 0.0, 3.1415926535897913); + final GeoCircle circle2 = + GeoCircleFactory.makeGeoCircle( + PlanetModel.SPHERE, Math.PI * 0.5, Math.PI, 3.141592653589792); assertTrue(circle1.getRelationship(circle2) == GeoArea.OVERLAPS); } diff --git a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoConvexPolygon.java b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoConvexPolygon.java index 53ac8d37308..831cffa5eaa 100755 --- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoConvexPolygon.java +++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoConvexPolygon.java @@ -16,14 +16,13 @@ */ package org.apache.lucene.spatial3d.geom; -import org.junit.Test; - import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; -public class TestGeoConvexPolygon { +import org.junit.Test; +public class TestGeoConvexPolygon { @Test public void testPolygonPointWithin() { @@ -48,9 +47,9 @@ public class TestGeoConvexPolygon { // Sample some nearby points outside, and compute distance-to-shape for them as well gp = new GeoPoint(PlanetModel.SPHERE, 0.0, -0.65); assertFalse(c.isWithin(gp)); - assertEquals(0.05,c.computeOutsideDistance(DistanceStyle.ARC,gp),1e-12); - assertEquals(0.05,c.computeOutsideDistance(DistanceStyle.NORMAL,gp),1e-3); - assertEquals(0.05,c.computeOutsideDistance(DistanceStyle.LINEAR,gp),1e-3); + assertEquals(0.05, c.computeOutsideDistance(DistanceStyle.ARC, gp), 1e-12); + assertEquals(0.05, c.computeOutsideDistance(DistanceStyle.NORMAL, gp), 1e-3); + assertEquals(0.05, c.computeOutsideDistance(DistanceStyle.LINEAR, gp), 1e-3); gp = new GeoPoint(PlanetModel.SPHERE, 0.0, -0.35); assertFalse(c.isWithin(gp)); gp = new GeoPoint(PlanetModel.SPHERE, -0.15, -0.5); @@ -87,5 +86,4 @@ public class TestGeoConvexPolygon { assertEquals(-0.1, b.getMinLatitude(), 0.000001); assertEquals(0.1, b.getMaxLatitude(), 0.000001); } - } diff --git a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoExactCircle.java b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoExactCircle.java index 0476901d78a..d3e354d14d8 100644 --- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoExactCircle.java +++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoExactCircle.java @@ -20,10 +20,8 @@ package org.apache.lucene.spatial3d.geom; import com.carrotsearch.randomizedtesting.annotations.Repeat; import org.junit.Test; -/** - * Tests for GeoExactCircle. - */ -public class TestGeoExactCircle extends RandomGeo3dShapeGenerator{ +/** Tests for GeoExactCircle. 
*/ +public class TestGeoExactCircle extends RandomGeo3dShapeGenerator { @Test public void testExactCircle() { @@ -53,15 +51,16 @@ public class TestGeoExactCircle extends RandomGeo3dShapeGenerator{ } @Test - public void testSurfacePointOnBearingScale(){ + public void testSurfacePointOnBearingScale() { PlanetModel p1 = PlanetModel.WGS84; - PlanetModel p2 = new PlanetModel(0.5 * PlanetModel.WGS84.xyScaling, 0.5 * PlanetModel.WGS84.zScaling); + PlanetModel p2 = + new PlanetModel(0.5 * PlanetModel.WGS84.xyScaling, 0.5 * PlanetModel.WGS84.zScaling); GeoPoint point1P1 = new GeoPoint(p1, 0, 0); - GeoPoint point2P1 = new GeoPoint(p1, 1, 1); + GeoPoint point2P1 = new GeoPoint(p1, 1, 1); GeoPoint point1P2 = new GeoPoint(p2, point1P1.getLatitude(), point1P1.getLongitude()); GeoPoint point2P2 = new GeoPoint(p2, point2P1.getLatitude(), point2P1.getLongitude()); - double dist = 0.2* Math.PI; + double dist = 0.2 * Math.PI; double bearing = 0.2 * Math.PI; GeoPoint new1 = p1.surfacePointOnBearing(point2P1, dist, bearing); @@ -69,7 +68,7 @@ public class TestGeoExactCircle extends RandomGeo3dShapeGenerator{ assertEquals(new1.getLatitude(), new2.getLatitude(), 1e-12); assertEquals(new1.getLongitude(), new2.getLongitude(), 1e-12); - //This is true if surfaceDistance return results always in radians + // This is true if surfaceDistance return results always in radians double d1 = p1.surfaceDistance(point1P1, point2P1); double d2 = p2.surfaceDistance(point1P2, point2P2); assertEquals(d1, d2, 1e-12); @@ -77,7 +76,7 @@ public class TestGeoExactCircle extends RandomGeo3dShapeGenerator{ @Test @Repeat(iterations = 100) - public void RandomPointBearingWGS84Test(){ + public void RandomPointBearingWGS84Test() { PlanetModel planetModel = PlanetModel.WGS84; RandomGeo3dShapeGenerator generator = new RandomGeo3dShapeGenerator(); GeoPoint center = generator.randomGeoPoint(planetModel); @@ -90,9 +89,9 @@ public class TestGeoExactCircle extends RandomGeo3dShapeGenerator{ @Test @Repeat(iterations = 100) - public void RandomPointBearingCardinalTest(){ - //surface distance calculations methods start not converging when - //planet scaledFlattening > 0.4 + public void RandomPointBearingCardinalTest() { + // surface distance calculations methods start not converging when + // planet scaledFlattening > 0.4 PlanetModel planetModel; do { double ab = random().nextDouble() * 2; @@ -104,17 +103,24 @@ public class TestGeoExactCircle extends RandomGeo3dShapeGenerator{ } } while (Math.abs(planetModel.scaledFlattening) > 0.4); GeoPoint center = randomGeoPoint(planetModel); - double radius = random().nextDouble() * 0.9 * planetModel.minimumPoleDistance; + double radius = random().nextDouble() * 0.9 * planetModel.minimumPoleDistance; checkBearingPoint(planetModel, center, radius, 0); checkBearingPoint(planetModel, center, radius, 0.5 * Math.PI); checkBearingPoint(planetModel, center, radius, Math.PI); checkBearingPoint(planetModel, center, radius, 1.5 * Math.PI); } - private void checkBearingPoint(PlanetModel planetModel, GeoPoint center, double radius, double bearingAngle) { + private void checkBearingPoint( + PlanetModel planetModel, GeoPoint center, double radius, double bearingAngle) { GeoPoint point = planetModel.surfacePointOnBearing(center, radius, bearingAngle); double surfaceDistance = planetModel.surfaceDistance(center, point); - assertTrue(planetModel.toString() + " " + Double.toString(surfaceDistance - radius) + " " + Double.toString(radius), surfaceDistance - radius < Vector.MINIMUM_ANGULAR_RESOLUTION); + assertTrue( + 
planetModel.toString() + + " " + + Double.toString(surfaceDistance - radius) + + " " + + Double.toString(radius), + surfaceDistance - radius < Vector.MINIMUM_ANGULAR_RESOLUTION); } @Test @@ -137,17 +143,21 @@ public class TestGeoExactCircle extends RandomGeo3dShapeGenerator{ } @Test - public void exactCircleLargeTest(){ + public void exactCircleLargeTest() { boolean success = true; try { - GeoCircle circle = GeoCircleFactory.makeExactGeoCircle(new PlanetModel(0.99, 1.05), 0.25 * Math.PI, 0,0.35 * Math.PI, 1e-12); + GeoCircle circle = + GeoCircleFactory.makeExactGeoCircle( + new PlanetModel(0.99, 1.05), 0.25 * Math.PI, 0, 0.35 * Math.PI, 1e-12); } catch (IllegalArgumentException e) { success = false; } assertTrue(success); success = false; try { - GeoCircle circle = GeoCircleFactory.makeExactGeoCircle(PlanetModel.WGS84, 0.25 * Math.PI, 0,0.9996 * Math.PI, 1e-12); + GeoCircle circle = + GeoCircleFactory.makeExactGeoCircle( + PlanetModel.WGS84, 0.25 * Math.PI, 0, 0.9996 * Math.PI, 1e-12); } catch (IllegalArgumentException e) { success = true; } @@ -158,7 +168,13 @@ public class TestGeoExactCircle extends RandomGeo3dShapeGenerator{ public void testExactCircleDoesNotFit() { boolean exception = false; try { - GeoCircle circle = GeoCircleFactory.makeExactGeoCircle(PlanetModel.WGS84, 1.5633796542562415, -1.0387149580695152,3.1409865861032844, 1e-12); + GeoCircle circle = + GeoCircleFactory.makeExactGeoCircle( + PlanetModel.WGS84, + 1.5633796542562415, + -1.0387149580695152, + 3.1409865861032844, + 1e-12); } catch (IllegalArgumentException e) { exception = true; } @@ -166,18 +182,23 @@ public class TestGeoExactCircle extends RandomGeo3dShapeGenerator{ } public void testBigCircleInSphere() { - //In Planet model Sphere if circle is close to Math.PI we can get the situation where - //circle slice planes are bigger than half of a hemisphere. We need to make - //sure we divide the circle in at least 4 slices. - GeoCircle circle1 = GeoCircleFactory.makeExactGeoCircle(PlanetModel.SPHERE, 1.1306735252307394, -0.7374283438171261, 3.1415760537549234, 4.816939220262406E-12); + // In Planet model Sphere if circle is close to Math.PI we can get the situation where + // circle slice planes are bigger than half of a hemisphere. We need to make + // sure we divide the circle in at least 4 slices. + GeoCircle circle1 = + GeoCircleFactory.makeExactGeoCircle( + PlanetModel.SPHERE, + 1.1306735252307394, + -0.7374283438171261, + 3.1415760537549234, + 4.816939220262406E-12); GeoPoint point = new GeoPoint(PlanetModel.SPHERE, -1.5707963267948966, 0.0); assertTrue(circle1.isWithin(point)); } /** - * in LUCENE-8054 we have problems with exact circles that have - * edges that are close together. This test creates those circles with the same - * center and slightly different radius. + * in LUCENE-8054 we have problems with exact circles that have edges that are close together. + * This test creates those circles with the same center and slightly different radius. */ @Test @Repeat(iterations = 100) @@ -185,57 +206,108 @@ public class TestGeoExactCircle extends RandomGeo3dShapeGenerator{ PlanetModel planetModel = randomPlanetModel(); GeoCircle circle1 = (GeoCircle) randomGeoAreaShape(EXACT_CIRCLE, planetModel); // new radius, a bit smaller than the generated one! 
-    double radius = circle1.getRadius() * (1 - 0.01 * random().nextDouble());
-    //circle with same center and new radius
-    GeoCircle circle2 = GeoCircleFactory.makeExactGeoCircle(planetModel,
-        circle1.getCenter().getLatitude(),
-        circle1.getCenter().getLongitude(),
-        radius, 1e-5 );
+    double radius = circle1.getRadius() * (1 - 0.01 * random().nextDouble());
+    // circle with same center and new radius
+    GeoCircle circle2 =
+        GeoCircleFactory.makeExactGeoCircle(
+            planetModel,
+            circle1.getCenter().getLatitude(),
+            circle1.getCenter().getLongitude(),
+            radius,
+            1e-5);
     StringBuilder b = new StringBuilder();
     b.append("circle1: " + circle1 + "\n");
     b.append("circle2: " + circle2);
-    //It cannot be disjoint, same center!
+    // It cannot be disjoint, same center!
     assertTrue(b.toString(), circle1.getRelationship(circle2) != GeoArea.DISJOINT);
   }

   @Test
-  public void testLUCENE8054(){
-    GeoCircle circle1 = GeoCircleFactory.makeExactGeoCircle(PlanetModel.WGS84, -1.0394053553992673, -1.9037325881389144, 1.1546166170607672, 4.231100485201301E-4);
-    GeoCircle circle2 = GeoCircleFactory.makeExactGeoCircle(PlanetModel.WGS84, -1.3165961602008989, -1.887137823746273, 1.432516663588956, 3.172052880854355E-4);
-    // Relationship between circles must be different than DISJOINT as centers are closer than the radius.
+  public void testLUCENE8054() {
+    GeoCircle circle1 =
+        GeoCircleFactory.makeExactGeoCircle(
+            PlanetModel.WGS84,
+            -1.0394053553992673,
+            -1.9037325881389144,
+            1.1546166170607672,
+            4.231100485201301E-4);
+    GeoCircle circle2 =
+        GeoCircleFactory.makeExactGeoCircle(
+            PlanetModel.WGS84,
+            -1.3165961602008989,
+            -1.887137823746273,
+            1.432516663588956,
+            3.172052880854355E-4);
+    // Relationship between circles must be different than DISJOINT as centers are closer than the
+    // radius.
     int rel = circle1.getRelationship(circle2);
     assertTrue(rel != GeoArea.DISJOINT);
   }

   @Test
-  public void testLUCENE8056(){
-    GeoCircle circle = GeoCircleFactory.makeExactGeoCircle(PlanetModel.WGS84, 0.647941905154693, 0.8542472362428436, 0.8917883700569315, 1.2173787103955335E-8);
-    GeoBBox bBox = GeoBBoxFactory.makeGeoBBox(PlanetModel.WGS84, 0.5890486225480862, 0.4908738521234052, 1.9634954084936207, 2.159844949342983);
-    //Center iis out of the shape
+  public void testLUCENE8056() {
+    GeoCircle circle =
+        GeoCircleFactory.makeExactGeoCircle(
+            PlanetModel.WGS84,
+            0.647941905154693,
+            0.8542472362428436,
+            0.8917883700569315,
+            1.2173787103955335E-8);
+    GeoBBox bBox =
+        GeoBBoxFactory.makeGeoBBox(
+            PlanetModel.WGS84,
+            0.5890486225480862,
+            0.4908738521234052,
+            1.9634954084936207,
+            2.159844949342983);
+    // Center is out of the shape
     assertFalse(circle.isWithin(bBox.getCenter()));
-    //Edge point is in the shape
+    // Edge point is in the shape
     assertTrue(circle.isWithin(bBox.getEdgePoints()[0]));
-    //Shape should intersect!!!
+    // Shape should intersect!!!
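The LUCENE-8056 case above generalizes to a simple consistency rule: when a circle excludes an area's center but contains one of its edge points, their boundaries must cross, so the relationship can only be OVERLAPS. A hedged restatement (checkMustOverlap is a hypothetical helper, not code from this patch):

    // Sketch: an excluded center plus an included edge point rules out
    // DISJOINT, WITHIN and CONTAINS, leaving OVERLAPS as the only answer.
    static void checkMustOverlap(GeoBBox box, GeoCircle circle) {
      if (!circle.isWithin(box.getCenter()) && circle.isWithin(box.getEdgePoints()[0])) {
        assertEquals(GeoArea.OVERLAPS, box.getRelationship(circle));
      }
    }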
assertTrue(bBox.getRelationship(circle) == GeoArea.OVERLAPS); } @Test public void testExactCircleLUCENE8054() { // [junit4] > Throwable #1: java.lang.AssertionError: circle1: GeoExactCircle: - // {planetmodel=PlanetModel.WGS84, center=[lat=-1.2097332228999564, lon=0.749061883738567([X=0.25823775418663625, Y=0.2401212674846636, Z=-0.9338185278804293])], + // {planetmodel=PlanetModel.WGS84, center=[lat=-1.2097332228999564, + // lon=0.749061883738567([X=0.25823775418663625, Y=0.2401212674846636, Z=-0.9338185278804293])], // radius=0.20785254459485322(11.909073566339822), accuracy=6.710701666727661E-9} - // [junit4] > circle2: GeoExactCircle: {planetmodel=PlanetModel.WGS84, center=[lat=-1.2097332228999564, lon=0.749061883738567([X=0.25823775418663625, Y=0.2401212674846636, Z=-0.9338185278804293])], + // [junit4] > circle2: GeoExactCircle: {planetmodel=PlanetModel.WGS84, + // center=[lat=-1.2097332228999564, lon=0.749061883738567([X=0.25823775418663625, + // Y=0.2401212674846636, Z=-0.9338185278804293])], // radius=0.20701584142315682(11.861134005896407), accuracy=1.0E-5} - final GeoCircle c1 = new GeoExactCircle(PlanetModel.WGS84, -1.2097332228999564, 0.749061883738567, 0.20785254459485322, 6.710701666727661E-9); - final GeoCircle c2 = new GeoExactCircle(PlanetModel.WGS84, -1.2097332228999564, 0.749061883738567, 0.20701584142315682, 1.0E-5); + final GeoCircle c1 = + new GeoExactCircle( + PlanetModel.WGS84, + -1.2097332228999564, + 0.749061883738567, + 0.20785254459485322, + 6.710701666727661E-9); + final GeoCircle c2 = + new GeoExactCircle( + PlanetModel.WGS84, -1.2097332228999564, 0.749061883738567, 0.20701584142315682, 1.0E-5); assertTrue("cannot be disjoint", c1.getRelationship(c2) != GeoArea.DISJOINT); } @Test - public void testLUCENE8065(){ - //Circle planes are convex - GeoCircle circle1 = GeoCircleFactory.makeExactGeoCircle(PlanetModel.WGS84, 0.03186456479560385, -2.2254294002683617, 1.5702573535090856, 8.184299676008562E-6); - GeoCircle circle2 = GeoCircleFactory.makeExactGeoCircle(PlanetModel.WGS84, 0.03186456479560385, -2.2254294002683617 , 1.5698163157923914, 1.0E-5); + public void testLUCENE8065() { + // Circle planes are convex + GeoCircle circle1 = + GeoCircleFactory.makeExactGeoCircle( + PlanetModel.WGS84, + 0.03186456479560385, + -2.2254294002683617, + 1.5702573535090856, + 8.184299676008562E-6); + GeoCircle circle2 = + GeoCircleFactory.makeExactGeoCircle( + PlanetModel.WGS84, + 0.03186456479560385, + -2.2254294002683617, + 1.5698163157923914, + 1.0E-5); assertTrue(circle1.getRelationship(circle2) != GeoArea.DISJOINT); } @@ -243,11 +315,12 @@ public class TestGeoExactCircle extends RandomGeo3dShapeGenerator{ PlanetModel planetModel = new PlanetModel(1.6304230055804751, 1.0199671157571204); boolean fail = false; try { - GeoCircle circle = GeoCircleFactory.makeExactGeoCircle(planetModel, 0.8853814403571284, 0.9784990176851283, 0.9071033527030907, 1e-11); + GeoCircle circle = + GeoCircleFactory.makeExactGeoCircle( + planetModel, 0.8853814403571284, 0.9784990176851283, 0.9071033527030907, 1e-11); } catch (IllegalArgumentException e) { fail = true; } assertTrue(fail); } - } diff --git a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoModel.java b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoModel.java index db92574a70a..72a04ac4ce9 100644 --- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoModel.java +++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoModel.java @@ -16,28 +16,27 @@ */ package 
org.apache.lucene.spatial3d.geom; -import org.junit.Test; - import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; -/** - * Test basic plane functionality. - */ +import org.junit.Test; + +/** Test basic plane functionality. */ public class TestGeoModel { - protected final static PlanetModel scaledModel = new PlanetModel(1.2,1.5); - + protected static final PlanetModel scaledModel = new PlanetModel(1.2, 1.5); + @Test public void testBasicCircle() { - // The point of this test is just to make sure nothing blows up doing normal things with a quite non-spherical model + // The point of this test is just to make sure nothing blows up doing normal things with a quite + // non-spherical model // Make sure that the north pole is in the circle, and south pole isn't final GeoPoint northPole = new GeoPoint(scaledModel, Math.PI * 0.5, 0.0); final GeoPoint southPole = new GeoPoint(scaledModel, -Math.PI * 0.5, 0.0); final GeoPoint point1 = new GeoPoint(scaledModel, Math.PI * 0.25, 0.0); final GeoPoint point2 = new GeoPoint(scaledModel, Math.PI * 0.125, 0.0); - + GeoCircle circle = new GeoStandardCircle(scaledModel, Math.PI * 0.5, 0.0, 0.01); assertTrue(circle.isWithin(northPole)); assertFalse(circle.isWithin(southPole)); @@ -78,7 +77,6 @@ public class TestGeoModel { assertEquals(Math.PI * 0.125 + 0.01, bounds.getMaxLatitude(), 0.00001); assertEquals(-0.0089, bounds.getLeftLongitude(), 0.0001); assertEquals(0.0089, bounds.getRightLongitude(), 0.0001); - } @Test @@ -104,7 +102,4 @@ public class TestGeoModel { assertEquals(1.0, bounds.getRightLongitude(), 0.00001); assertEquals(0.0, bounds.getLeftLongitude(), 0.00001); } - } - - diff --git a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoPath.java b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoPath.java index d945bb69455..262a622fc94 100755 --- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoPath.java +++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoPath.java @@ -16,9 +16,8 @@ */ package org.apache.lucene.spatial3d.geom; -import org.junit.Test; - import org.apache.lucene.util.LuceneTestCase; +import org.junit.Test; public class TestGeoPath extends LuceneTestCase { @@ -33,17 +32,17 @@ public class TestGeoPath extends LuceneTestCase { p.addPoint(0.0, 0.2); p.done(); gp = new GeoPoint(PlanetModel.SPHERE, Math.PI * 0.5, 0.15); - assertEquals(Double.POSITIVE_INFINITY, p.computeDistance(DistanceStyle.ARC,gp), 0.0); + assertEquals(Double.POSITIVE_INFINITY, p.computeDistance(DistanceStyle.ARC, gp), 0.0); gp = new GeoPoint(PlanetModel.SPHERE, 0.05, 0.15); - assertEquals(0.15 + 0.05, p.computeDistance(DistanceStyle.ARC,gp), 0.000001); + assertEquals(0.15 + 0.05, p.computeDistance(DistanceStyle.ARC, gp), 0.000001); gp = new GeoPoint(PlanetModel.SPHERE, 0.0, 0.12); - assertEquals(0.12 + 0.0, p.computeDistance(DistanceStyle.ARC,gp), 0.000001); + assertEquals(0.12 + 0.0, p.computeDistance(DistanceStyle.ARC, gp), 0.000001); gp = new GeoPoint(PlanetModel.SPHERE, -0.15, 0.05); - assertEquals(Double.POSITIVE_INFINITY, p.computeDistance(DistanceStyle.ARC,gp), 0.000001); + assertEquals(Double.POSITIVE_INFINITY, p.computeDistance(DistanceStyle.ARC, gp), 0.000001); gp = new GeoPoint(PlanetModel.SPHERE, 0.0, 0.25); - assertEquals(0.20 + 0.05, p.computeDistance(DistanceStyle.ARC,gp), 0.000001); + assertEquals(0.20 + 0.05, p.computeDistance(DistanceStyle.ARC, gp), 0.000001); gp = new GeoPoint(PlanetModel.SPHERE, 0.0, 
-0.05); - assertEquals(0.0 + 0.05, p.computeDistance(DistanceStyle.ARC,gp), 0.000001); + assertEquals(0.0 + 0.05, p.computeDistance(DistanceStyle.ARC, gp), 0.000001); // Compute path distances now p = new GeoStandardPath(PlanetModel.SPHERE, 0.1); @@ -52,27 +51,27 @@ public class TestGeoPath extends LuceneTestCase { p.addPoint(0.0, 0.2); p.done(); gp = new GeoPoint(PlanetModel.SPHERE, 0.05, 0.15); - assertEquals(0.15 + 0.05, p.computeDistance(DistanceStyle.ARC,gp), 0.000001); - assertEquals(0.15, p.computeNearestDistance(DistanceStyle.ARC,gp), 0.000001); - assertEquals(0.10, p.computeDeltaDistance(DistanceStyle.ARC,gp), 0.000001); + assertEquals(0.15 + 0.05, p.computeDistance(DistanceStyle.ARC, gp), 0.000001); + assertEquals(0.15, p.computeNearestDistance(DistanceStyle.ARC, gp), 0.000001); + assertEquals(0.10, p.computeDeltaDistance(DistanceStyle.ARC, gp), 0.000001); gp = new GeoPoint(PlanetModel.SPHERE, 0.0, 0.12); - assertEquals(0.12, p.computeDistance(DistanceStyle.ARC,gp), 0.000001); - assertEquals(0.12, p.computeNearestDistance(DistanceStyle.ARC,gp), 0.000001); - assertEquals(0.0, p.computeDeltaDistance(DistanceStyle.ARC,gp), 0.000001); - + assertEquals(0.12, p.computeDistance(DistanceStyle.ARC, gp), 0.000001); + assertEquals(0.12, p.computeNearestDistance(DistanceStyle.ARC, gp), 0.000001); + assertEquals(0.0, p.computeDeltaDistance(DistanceStyle.ARC, gp), 0.000001); + // Now try a vertical path, and make sure distances are as expected p = new GeoStandardPath(PlanetModel.SPHERE, 0.1); p.addPoint(-Math.PI * 0.25, -0.5); p.addPoint(Math.PI * 0.25, -0.5); p.done(); gp = new GeoPoint(PlanetModel.SPHERE, 0.0, 0.0); - assertEquals(Double.POSITIVE_INFINITY, p.computeDistance(DistanceStyle.ARC,gp), 0.0); + assertEquals(Double.POSITIVE_INFINITY, p.computeDistance(DistanceStyle.ARC, gp), 0.0); gp = new GeoPoint(PlanetModel.SPHERE, -0.1, -1.0); - assertEquals(Double.POSITIVE_INFINITY, p.computeDistance(DistanceStyle.ARC,gp), 0.0); + assertEquals(Double.POSITIVE_INFINITY, p.computeDistance(DistanceStyle.ARC, gp), 0.0); gp = new GeoPoint(PlanetModel.SPHERE, Math.PI * 0.25 + 0.05, -0.5); - assertEquals(Math.PI * 0.5 + 0.05, p.computeDistance(DistanceStyle.ARC,gp), 0.000001); + assertEquals(Math.PI * 0.5 + 0.05, p.computeDistance(DistanceStyle.ARC, gp), 0.000001); gp = new GeoPoint(PlanetModel.SPHERE, -Math.PI * 0.25 - 0.05, -0.5); - assertEquals(0.0 + 0.05, p.computeDistance(DistanceStyle.ARC,gp), 0.000001); + assertEquals(0.0 + 0.05, p.computeDistance(DistanceStyle.ARC, gp), 0.000001); } @Test @@ -123,7 +122,6 @@ public class TestGeoPath extends LuceneTestCase { assertFalse(p.isWithin(gp)); gp = new GeoPoint(PlanetModel.SPHERE, 0.0, 0.0); assertFalse(p.isWithin(gp)); - } @Test @@ -136,17 +134,25 @@ public class TestGeoPath extends LuceneTestCase { int relationship; GeoArea area; PlanetModel planetModel; - + planetModel = new PlanetModel(1.151145876105594, 0.8488541238944061); c = new GeoStandardPath(planetModel, 0.008726646259971648); c.addPoint(-0.6925658899376476, 0.6316613927914589); c.addPoint(0.27828548161836364, 0.6785795524104564); c.done(); - point = new GeoPoint(planetModel,-0.49298555067758226, 0.9892440995026406); + point = new GeoPoint(planetModel, -0.49298555067758226, 0.9892440995026406); pointApprox = new GeoPoint(0.5110940362119821, 0.7774603209946239, -0.49984312299556544); - area = GeoAreaFactory.makeGeoArea(planetModel, 0.49937141144985997, 0.5161765426256085, 0.3337218719537796,0.8544419570901649, -0.6347692823688085, 0.3069696588119369); + area = + GeoAreaFactory.makeGeoArea( + 
planetModel, + 0.49937141144985997, + 0.5161765426256085, + 0.3337218719537796, + 0.8544419570901649, + -0.6347692823688085, + 0.3069696588119369); assertTrue(!c.isWithin(point)); - + // Start by testing the basic kinds of relationship, increasing in order of difficulty. p = new GeoStandardPath(PlanetModel.SPHERE, 0.1); @@ -175,7 +181,6 @@ public class TestGeoPath extends LuceneTestCase { assertEquals(GeoArea.OVERLAPS, rect.getRelationship(p)); rect = new GeoRectangle(PlanetModel.SPHERE, 0.5, -0.5, -0.5, -0.45); assertEquals(GeoArea.DISJOINT, rect.getRelationship(p)); - } @Test @@ -187,25 +192,32 @@ public class TestGeoPath extends LuceneTestCase { int relationship; GeoArea area; PlanetModel planetModel; - - planetModel = new PlanetModel(0.751521665790406,1.248478334209594); + + planetModel = new PlanetModel(0.751521665790406, 1.248478334209594); c = new GeoStandardPath(planetModel, 0.7504915783575618); c.addPoint(0.10869761172400265, 0.08895880215465272); c.addPoint(0.22467878641991612, 0.10972973084229565); c.addPoint(-0.7398772468744732, -0.4465812941383364); c.addPoint(-0.18462055300079366, -0.6713857796763727); c.done(); - point = new GeoPoint(planetModel,-0.626645355125733,-1.409304625439381); + point = new GeoPoint(planetModel, -0.626645355125733, -1.409304625439381); xyzb = new XYZBounds(); c.getBounds(xyzb); - area = GeoAreaFactory.makeGeoArea(planetModel, - xyzb.getMinimumX(), xyzb.getMaximumX(), xyzb.getMinimumY(), xyzb.getMaximumY(), xyzb.getMinimumZ(), xyzb.getMaximumZ()); + area = + GeoAreaFactory.makeGeoArea( + planetModel, + xyzb.getMinimumX(), + xyzb.getMaximumX(), + xyzb.getMinimumY(), + xyzb.getMaximumY(), + xyzb.getMinimumZ(), + xyzb.getMaximumZ()); relationship = area.getRelationship(c); assertTrue(relationship == GeoArea.WITHIN || relationship == GeoArea.OVERLAPS); assertTrue(area.isWithin(point)); // No longer true due to fixed GeoStandardPath waypoints. 
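Several of these path tests repeat one pattern: derive a GeoArea from the XYZBounds of a GeoStandardPath and assert that the relationship comes back WITHIN or OVERLAPS, never DISJOINT or CONTAINS. A standalone sketch of that pattern, using only factory calls that appear in this patch (the helper name is hypothetical):

    // Sketch: an area built from the path's own bounds must report the path
    // as WITHIN it, or at worst OVERLAPS; anything else signals a bounds bug.
    static void checkPathBoundsRelationship(PlanetModel pm, GeoStandardPath path) {
      final XYZBounds xyzb = new XYZBounds();
      path.getBounds(xyzb);
      final GeoArea area =
          GeoAreaFactory.makeGeoArea(
              pm,
              xyzb.getMinimumX(), xyzb.getMaximumX(),
              xyzb.getMinimumY(), xyzb.getMaximumY(),
              xyzb.getMinimumZ(), xyzb.getMaximumZ());
      final int relationship = area.getRelationship(path);
      assertTrue(relationship == GeoArea.WITHIN || relationship == GeoArea.OVERLAPS);
    }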
- //assertTrue(zScaling.isWithin(point)); - + // assertTrue(zScaling.isWithin(point)); + c = new GeoStandardPath(PlanetModel.WGS84, 0.6894050545377601); c.addPoint(-0.0788176065762948, 0.9431251741731624); c.addPoint(0.510387871458147, 0.5327078872484678); @@ -213,18 +225,27 @@ public class TestGeoPath extends LuceneTestCase { c.addPoint(-0.5025171434638661, -0.5895998642788894); c.done(); point = new GeoPoint(PlanetModel.WGS84, 0.023652082107211682, 0.023131910152748437); - //System.err.println("Point.x = "+point.x+"; point.y="+point.y+"; point.z="+point.z); + // System.err.println("Point.x = "+point.x+"; point.y="+point.y+"; point.z="+point.z); assertTrue(c.isWithin(point)); xyzb = new XYZBounds(); c.getBounds(xyzb); - area = GeoAreaFactory.makeGeoArea(PlanetModel.WGS84, - xyzb.getMinimumX(), xyzb.getMaximumX(), xyzb.getMinimumY(), xyzb.getMaximumY(), xyzb.getMinimumZ(), xyzb.getMaximumZ()); - //System.err.println("minx="+xyzb.getMinimumX()+" maxx="+xyzb.getMaximumX()+" miny="+xyzb.getMinimumY()+" maxy="+xyzb.getMaximumY()+" minz="+xyzb.getMinimumZ()+" maxz="+xyzb.getMaximumZ()); - //System.err.println("point.x="+point.x+" point.y="+point.y+" point.z="+point.z); + area = + GeoAreaFactory.makeGeoArea( + PlanetModel.WGS84, + xyzb.getMinimumX(), + xyzb.getMaximumX(), + xyzb.getMinimumY(), + xyzb.getMaximumY(), + xyzb.getMinimumZ(), + xyzb.getMaximumZ()); + // System.err.println("minx="+xyzb.getMinimumX()+" maxx="+xyzb.getMaximumX()+" + // miny="+xyzb.getMinimumY()+" maxy="+xyzb.getMaximumY()+" minz="+xyzb.getMinimumZ()+" + // maxz="+xyzb.getMaximumZ()); + // System.err.println("point.x="+point.x+" point.y="+point.y+" point.z="+point.z); relationship = area.getRelationship(c); assertTrue(relationship == GeoArea.WITHIN || relationship == GeoArea.OVERLAPS); assertTrue(area.isWithin(point)); - + c = new GeoStandardPath(PlanetModel.WGS84, 0.7766715171374766); c.addPoint(-0.2751718361148076, -0.7786721269011477); c.addPoint(0.5728375851539309, -1.2700115736820465); @@ -233,10 +254,19 @@ public class TestGeoPath extends LuceneTestCase { assertTrue(c.isWithin(point)); xyzb = new XYZBounds(); c.getBounds(xyzb); - area = GeoAreaFactory.makeGeoArea(PlanetModel.WGS84, - xyzb.getMinimumX(), xyzb.getMaximumX(), xyzb.getMinimumY(), xyzb.getMaximumY(), xyzb.getMinimumZ(), xyzb.getMaximumZ()); - //System.err.println("minx="+xyzb.getMinimumX()+" maxx="+xyzb.getMaximumX()+" miny="+xyzb.getMinimumY()+" maxy="+xyzb.getMaximumY()+" minz="+xyzb.getMinimumZ()+" maxz="+xyzb.getMaximumZ()); - //System.err.println("point.x="+point.x+" point.y="+point.y+" point.z="+point.z); + area = + GeoAreaFactory.makeGeoArea( + PlanetModel.WGS84, + xyzb.getMinimumX(), + xyzb.getMaximumX(), + xyzb.getMinimumY(), + xyzb.getMaximumY(), + xyzb.getMinimumZ(), + xyzb.getMaximumZ()); + // System.err.println("minx="+xyzb.getMinimumX()+" maxx="+xyzb.getMaximumX()+" + // miny="+xyzb.getMinimumY()+" maxy="+xyzb.getMaximumY()+" minz="+xyzb.getMinimumZ()+" + // maxz="+xyzb.getMaximumZ()); + // System.err.println("point.x="+point.x+" point.y="+point.y+" point.z="+point.z); relationship = area.getRelationship(c); assertTrue(relationship == GeoArea.WITHIN || relationship == GeoArea.OVERLAPS); assertTrue(area.isWithin(point)); @@ -255,67 +285,82 @@ public class TestGeoPath extends LuceneTestCase { assertEquals(0.4046919, b.getRightLongitude(), 0.000001); assertEquals(-0.3999999, b.getMinLatitude(), 0.000001); assertEquals(0.3999999, b.getMaxLatitude(), 0.000001); - } @Test public void testCoLinear() { // p1: (12,-90), p2: (11, -55), (129, -90) 
     GeoStandardPath p = new GeoStandardPath(PlanetModel.SPHERE, 0.1);
-    p.addPoint(Math.toRadians(-90), Math.toRadians(12));//south pole
+    p.addPoint(Math.toRadians(-90), Math.toRadians(12)); // south pole
     p.addPoint(Math.toRadians(-55), Math.toRadians(11));
-    p.addPoint(Math.toRadians(-90), Math.toRadians(129));//south pole again
-    p.done();//at least test this doesn't bomb like it used too -- LUCENE-6520
+    p.addPoint(Math.toRadians(-90), Math.toRadians(129)); // south pole again
+    p.done(); // at least test this doesn't bomb like it used to -- LUCENE-6520
   }

   @Test
   public void testFailure1() {
     /*
-    GeoStandardPath: {planetmodel=PlanetModel.WGS84, width=1.117010721276371(64.0), points={[
-    [lat=2.18531083006635E-12, lon=-3.141592653589793([X=-1.0011188539924791, Y=-1.226017000107956E-16, Z=2.187755873813378E-12])],
-    [lat=0.0, lon=-3.141592653589793([X=-1.0011188539924791, Y=-1.226017000107956E-16, Z=0.0])]]}}
-    */
-    final GeoPoint[] points = new GeoPoint[]{
-      new GeoPoint(PlanetModel.WGS84, 2.18531083006635E-12, -3.141592653589793),
-      new GeoPoint(PlanetModel.WGS84, 0.0, -3.141592653589793)};
-
+    GeoStandardPath: {planetmodel=PlanetModel.WGS84, width=1.117010721276371(64.0), points={[
+    [lat=2.18531083006635E-12, lon=-3.141592653589793([X=-1.0011188539924791, Y=-1.226017000107956E-16, Z=2.187755873813378E-12])],
+    [lat=0.0, lon=-3.141592653589793([X=-1.0011188539924791, Y=-1.226017000107956E-16, Z=0.0])]]}}
+    */
+    final GeoPoint[] points =
+        new GeoPoint[] {
+          new GeoPoint(PlanetModel.WGS84, 2.18531083006635E-12, -3.141592653589793),
+          new GeoPoint(PlanetModel.WGS84, 0.0, -3.141592653589793)
+        };
+
     final GeoPath path;
     try {
-      path = GeoPathFactory.makeGeoPath(PlanetModel.WGS84,
-        1.117010721276371, points);
+      path = GeoPathFactory.makeGeoPath(PlanetModel.WGS84, 1.117010721276371, points);
     } catch (IllegalArgumentException e) {
       return;
     }
     assertTrue(false);
-
-    final GeoPoint point = new GeoPoint(PlanetModel.WGS84, -2.848117399637174E-91, -1.1092122135274942);
-    System.err.println("point = "+point);
-
+
+    final GeoPoint point =
+        new GeoPoint(PlanetModel.WGS84, -2.848117399637174E-91, -1.1092122135274942);
+    System.err.println("point = " + point);
+
     final XYZBounds bounds = new XYZBounds();
     path.getBounds(bounds);
-
-    final XYZSolid solid = XYZSolidFactory.makeXYZSolid(PlanetModel.WGS84,
-      bounds.getMinimumX(), bounds.getMaximumX(),
-      bounds.getMinimumY(), bounds.getMaximumY(),
-      bounds.getMinimumZ(), bounds.getMaximumZ());
-
+
+    final XYZSolid solid =
+        XYZSolidFactory.makeXYZSolid(
+            PlanetModel.WGS84,
+            bounds.getMinimumX(),
+            bounds.getMaximumX(),
+            bounds.getMinimumY(),
+            bounds.getMaximumY(),
+            bounds.getMinimumZ(),
+            bounds.getMaximumZ());
+
     assertTrue(path.isWithin(point));
     assertTrue(solid.isWithin(point));
   }
-
+
   @Test
   public void testInterpolation() {
     final double lat = 52.51607;
     final double lon = 13.37698;
-    final double[] pathLats = new double[] {52.5355,52.54,52.5626,52.5665,52.6007,52.6135,52.6303,52.6651,52.7074};
-    final double[] pathLons = new double[] {13.3634,13.3704,13.3307,13.3076,13.2806,13.2484,13.2406,13.241,13.1926};
+    final double[] pathLats =
+        new double[] {
+          52.5355, 52.54, 52.5626, 52.5665, 52.6007, 52.6135, 52.6303, 52.6651, 52.7074
+        };
+    final double[] pathLons =
+        new double[] {
+          13.3634, 13.3704, 13.3307, 13.3076, 13.2806, 13.2484, 13.2406, 13.241, 13.1926
+        };
     // Set up a point in the right way
-    final GeoPoint carPoint = new GeoPoint(PlanetModel.SPHERE, Math.toRadians(lat), Math.toRadians(lon));
+    final GeoPoint carPoint =
+        new
GeoPoint(PlanetModel.SPHERE, Math.toRadians(lat), Math.toRadians(lon)); // Create the path, but use a tiny width (e.g. zero) final GeoPoint[] pathPoints = new GeoPoint[pathLats.length]; for (int i = 0; i < pathPoints.length; i++) { - pathPoints[i] = new GeoPoint(PlanetModel.SPHERE, Math.toRadians(pathLats[i]), Math.toRadians(pathLons[i])); + pathPoints[i] = + new GeoPoint( + PlanetModel.SPHERE, Math.toRadians(pathLats[i]), Math.toRadians(pathLons[i])); } // Construct a path with no width final GeoPath thisPath = GeoPathFactory.makeGeoPath(PlanetModel.SPHERE, 0.0, pathPoints); @@ -334,7 +379,7 @@ public class TestGeoPath extends LuceneTestCase { assertEquals(legacyDistance, distance, 1e-12); assertEquals(oldFormulaLegacyDistance, oldFormulaDistance, 1e-12); // This isn't true because example search center is off of the path. - //assertEquals(oldFormulaDistance, distance, 1e-12); + // assertEquals(oldFormulaDistance, distance, 1e-12); } @@ -342,25 +387,34 @@ public class TestGeoPath extends LuceneTestCase { public void testInterpolation2() { final double lat = 52.5665; final double lon = 13.3076; - final double[] pathLats = new double[] {52.5355,52.54,52.5626,52.5665,52.6007,52.6135,52.6303,52.6651,52.7074}; - final double[] pathLons = new double[] {13.3634,13.3704,13.3307,13.3076,13.2806,13.2484,13.2406,13.241,13.1926}; + final double[] pathLats = + new double[] { + 52.5355, 52.54, 52.5626, 52.5665, 52.6007, 52.6135, 52.6303, 52.6651, 52.7074 + }; + final double[] pathLons = + new double[] { + 13.3634, 13.3704, 13.3307, 13.3076, 13.2806, 13.2484, 13.2406, 13.241, 13.1926 + }; - final GeoPoint carPoint = new GeoPoint(PlanetModel.SPHERE, Math.toRadians(lat), Math.toRadians(lon)); + final GeoPoint carPoint = + new GeoPoint(PlanetModel.SPHERE, Math.toRadians(lat), Math.toRadians(lon)); final GeoPoint[] pathPoints = new GeoPoint[pathLats.length]; for (int i = 0; i < pathPoints.length; i++) { - pathPoints[i] = new GeoPoint(PlanetModel.SPHERE, Math.toRadians(pathLats[i]), Math.toRadians(pathLons[i])); + pathPoints[i] = + new GeoPoint( + PlanetModel.SPHERE, Math.toRadians(pathLats[i]), Math.toRadians(pathLons[i])); } - + // Construct a path with no width final GeoPath thisPath = GeoPathFactory.makeGeoPath(PlanetModel.SPHERE, 0.0, pathPoints); // Construct a path with a width final GeoPath legacyPath = GeoPathFactory.makeGeoPath(PlanetModel.SPHERE, 1e-6, pathPoints); - + // Compute the inside distance to the atPoint using zero-width path final double distance = thisPath.computeNearestDistance(DistanceStyle.ARC, carPoint); // Compute the inside distance using legacy path final double legacyDistance = legacyPath.computeNearestDistance(DistanceStyle.ARC, carPoint); - + // Compute the inside distance using the legacy formula final double oldFormulaDistance = thisPath.computeDistance(DistanceStyle.ARC, carPoint); // Compute the inside distance using the legacy formula with the legacy shape @@ -368,12 +422,11 @@ public class TestGeoPath extends LuceneTestCase { // These should be about the same assertEquals(legacyDistance, distance, 1e-12); - + assertEquals(oldFormulaLegacyDistance, oldFormulaDistance, 1e-12); - + // Since the point we picked is actually on the path, this should also be true assertEquals(oldFormulaDistance, distance, 1e-12); - } @Test @@ -381,45 +434,50 @@ public class TestGeoPath extends LuceneTestCase { PlanetModel planetModel = PlanetModel.WGS84; GeoPoint point1 = new GeoPoint(planetModel, 1.5707963267948963, -2.4818290647609542E-148); GeoPoint point2 = new GeoPoint(planetModel, 
1.570796326794895, -3.5E-323); - GeoPoint point3 = new GeoPoint(planetModel,4.4E-323, -3.1415926535897896); - GeoPath path = GeoPathFactory.makeGeoPath(planetModel, 0, new GeoPoint[] {point1, point2, point3}); - GeoPoint point = new GeoPoint(planetModel, -1.5707963267948952,2.369064805649877E-284); - //If not filtered the point is wrongly in set + GeoPoint point3 = new GeoPoint(planetModel, 4.4E-323, -3.1415926535897896); + GeoPath path = + GeoPathFactory.makeGeoPath(planetModel, 0, new GeoPoint[] {point1, point2, point3}); + GeoPoint point = new GeoPoint(planetModel, -1.5707963267948952, 2.369064805649877E-284); + // If not filtered the point is wrongly in set assertFalse(path.isWithin(point)); - //If not filtered it throws error + // If not filtered it throws error path = GeoPathFactory.makeGeoPath(planetModel, 1e-6, new GeoPoint[] {point1, point2, point3}); assertFalse(path.isWithin(point)); GeoPoint point4 = new GeoPoint(planetModel, 1.5, 0); GeoPoint point5 = new GeoPoint(planetModel, 1.5, 0); - GeoPoint point6 = new GeoPoint(planetModel,4.4E-323, -3.1415926535897896); - //If not filtered creates a degenerated Vector + GeoPoint point6 = new GeoPoint(planetModel, 4.4E-323, -3.1415926535897896); + // If not filtered creates a degenerated Vector path = GeoPathFactory.makeGeoPath(planetModel, 0, new GeoPoint[] {point4, point5, point6}); path = GeoPathFactory.makeGeoPath(planetModel, 0.5, new GeoPoint[] {point4, point5, point6}); - } @Test - //@AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/LUCENE-8696") + // @AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/LUCENE-8696") public void testLUCENE8696() { GeoPoint[] points = new GeoPoint[4]; points[0] = new GeoPoint(PlanetModel.WGS84, 2.4457272005608357E-47, 0.017453291479645996); points[1] = new GeoPoint(PlanetModel.WGS84, 2.4457272005608357E-47, 0.8952476719156919); points[2] = new GeoPoint(PlanetModel.WGS84, 2.4457272005608357E-47, 0.6491968536639036); points[3] = new GeoPoint(PlanetModel.WGS84, -0.7718789008737459, 0.9236607495528212); - GeoPath path = GeoPathFactory.makeGeoPath(PlanetModel.WGS84, 1.3439035240356338, points); + GeoPath path = GeoPathFactory.makeGeoPath(PlanetModel.WGS84, 1.3439035240356338, points); GeoPoint check = new GeoPoint(0.02071783020158524, 0.9523290535474472, 0.30699177256064203); // Map to surface point, to remove that source of confusion GeoPoint surfaceCheck = PlanetModel.WGS84.createSurfacePoint(check); /* - [junit4] 1> cycle: cell=12502 parentCellID=12500 x: -1658490249 TO 2147483041, y: 2042111310 TO 2147483041, z: -2140282940 TO 2140277970, splits: 1 queue.size()=1 - [junit4] 1> minx=-0.7731590077686981 maxx=1.0011188539924791 miny=0.9519964046486451 maxy=1.0011188539924791 minz=-0.9977622932859775 maxz=0.9977599768255027 - [junit4] 1> GeoArea.CONTAINS: now addAll - */ - XYZSolid solid = XYZSolidFactory.makeXYZSolid(PlanetModel.WGS84, - -0.7731590077686981, 1.0011188539924791, - 0.9519964046486451, 1.0011188539924791, - -0.9977622932859775, 0.9977599768255027); + [junit4] 1> cycle: cell=12502 parentCellID=12500 x: -1658490249 TO 2147483041, y: 2042111310 TO 2147483041, z: -2140282940 TO 2140277970, splits: 1 queue.size()=1 + [junit4] 1> minx=-0.7731590077686981 maxx=1.0011188539924791 miny=0.9519964046486451 maxy=1.0011188539924791 minz=-0.9977622932859775 maxz=0.9977599768255027 + [junit4] 1> GeoArea.CONTAINS: now addAll + */ + XYZSolid solid = + XYZSolidFactory.makeXYZSolid( + PlanetModel.WGS84, + -0.7731590077686981, + 1.0011188539924791, + 0.9519964046486451, + 1.0011188539924791, + 
-0.9977622932859775, + 0.9977599768255027); // Verify that the point is within it assertTrue(solid.isWithin(surfaceCheck)); // Check the (surface) relationship @@ -429,7 +487,5 @@ public class TestGeoPath extends LuceneTestCase { // If point is within solid, it must be within shape assertTrue(path.isWithin(surfaceCheck)); } - } - } diff --git a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoPoint.java b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoPoint.java index d55f1b08207..326b0051156 100644 --- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoPoint.java +++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoPoint.java @@ -16,16 +16,14 @@ */ package org.apache.lucene.spatial3d.geom; +import static com.carrotsearch.randomizedtesting.RandomizedTest.randomFloat; + import org.apache.lucene.util.LuceneTestCase; import org.junit.Test; -import static com.carrotsearch.randomizedtesting.RandomizedTest.randomFloat; - -/** - * Test basic GeoPoint functionality. - */ +/** Test basic GeoPoint functionality. */ public class TestGeoPoint extends LuceneTestCase { - static final double DEGREES_TO_RADIANS = Math.PI / 180; + static final double DEGREES_TO_RADIANS = Math.PI / 180; @Test public void testConversion() { @@ -38,12 +36,17 @@ public class TestGeoPoint extends LuceneTestCase { for (int i = 0; i < times; i++) { final double pLat = (randomFloat() * 180.0 - 90.0) * DEGREES_TO_RADIANS; final double pLon = (randomFloat() * 360.0 - 180.0) * DEGREES_TO_RADIANS; - testPointRoundTrip(PlanetModel.SPHERE, pLat, pLon, 1e-6);//1e-6 since there's a square root in there (Karl says) + testPointRoundTrip( + PlanetModel.SPHERE, + pLat, + pLon, + 1e-6); // 1e-6 since there's a square root in there (Karl says) testPointRoundTrip(PlanetModel.WGS84, pLat, pLon, 1e-6); } } - protected void testPointRoundTrip(PlanetModel planetModel, double pLat, double pLon, double epsilon) { + protected void testPointRoundTrip( + PlanetModel planetModel, double pLat, double pLon, double epsilon) { final GeoPoint p1 = new GeoPoint(planetModel, pLat, pLon); // In order to force the reverse conversion, we have to construct a geopoint from just x,y,z final GeoPoint p2 = new GeoPoint(p1.x, p1.y, p1.z); @@ -65,23 +68,35 @@ public class TestGeoPoint extends LuceneTestCase { final GeoPoint p2 = new GeoPoint(PlanetModel.SPHERE, p2Lat, p2Lon); final double arcDistance = p1.arcDistance(p2); // Compute ellipsoid distance; it should agree for a sphere - final double surfaceDistance = PlanetModel.SPHERE.surfaceDistance(p1,p2); + final double surfaceDistance = PlanetModel.SPHERE.surfaceDistance(p1, p2); assertEquals(arcDistance, surfaceDistance, 1e-6); } // Now try some WGS84 points (taken randomly and compared against a known-good implementation) - assertEquals(1.1444648695765323, PlanetModel.WGS84.surfaceDistance( - new GeoPoint(PlanetModel.WGS84, 0.038203808753702884, -0.6701260455506466), - new GeoPoint(PlanetModel.WGS84, -0.8453720422675458, 0.1737353153814496)), 1e-6); - assertEquals(1.4345148695890722, PlanetModel.WGS84.surfaceDistance( - new GeoPoint(PlanetModel.WGS84, 0.5220926323378574, 0.6758041581907408), - new GeoPoint(PlanetModel.WGS84, -0.8453720422675458, 0.1737353153814496)), 1e-6); - assertEquals(2.32418144616446, PlanetModel.WGS84.surfaceDistance( - new GeoPoint(PlanetModel.WGS84, 0.09541335760967473, 1.2091829760623236), - new GeoPoint(PlanetModel.WGS84, -0.8501591797459979, -2.3044806381627594)), 1e-6); - assertEquals(2.018421047005435, 
PlanetModel.WGS84.surfaceDistance(
- new GeoPoint(PlanetModel.WGS84, 0.3402853531962009, -0.43544195327249957),
- new GeoPoint(PlanetModel.WGS84, -0.8501591797459979, -2.3044806381627594)), 1e-6);
+ assertEquals(
+ 1.1444648695765323,
+ PlanetModel.WGS84.surfaceDistance(
+ new GeoPoint(PlanetModel.WGS84, 0.038203808753702884, -0.6701260455506466),
+ new GeoPoint(PlanetModel.WGS84, -0.8453720422675458, 0.1737353153814496)),
+ 1e-6);
+ assertEquals(
+ 1.4345148695890722,
+ PlanetModel.WGS84.surfaceDistance(
+ new GeoPoint(PlanetModel.WGS84, 0.5220926323378574, 0.6758041581907408),
+ new GeoPoint(PlanetModel.WGS84, -0.8453720422675458, 0.1737353153814496)),
+ 1e-6);
+ assertEquals(
+ 2.32418144616446,
+ PlanetModel.WGS84.surfaceDistance(
+ new GeoPoint(PlanetModel.WGS84, 0.09541335760967473, 1.2091829760623236),
+ new GeoPoint(PlanetModel.WGS84, -0.8501591797459979, -2.3044806381627594)),
+ 1e-6);
+ assertEquals(
+ 2.018421047005435,
+ PlanetModel.WGS84.surfaceDistance(
+ new GeoPoint(PlanetModel.WGS84, 0.3402853531962009, -0.43544195327249957),
+ new GeoPoint(PlanetModel.WGS84, -0.8501591797459979, -2.3044806381627594)),
+ 1e-6);
}
@Test
@@ -102,7 +117,7 @@ public class TestGeoPoint extends LuceneTestCase {
}
}
}
-
+
@Test(expected = IllegalArgumentException.class)
public void testBadLatLon() {
new GeoPoint(PlanetModel.SPHERE, 50.0, 32.2);
diff --git a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoPolygon.java b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoPolygon.java
index 0598abc9df9..95164910fc3 100755
--- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoPolygon.java
+++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoPolygon.java
@@ -17,12 +17,10 @@ package org.apache.lucene.spatial3d.geom;
import java.util.ArrayList;
-import java.util.List;
import java.util.BitSet;
import java.util.Collections;
-
+import java.util.List;
import org.apache.lucene.util.LuceneTestCase;
-
import org.junit.Test;
public class TestGeoPolygon extends LuceneTestCase {
@@ -34,7 +32,7 @@ public class TestGeoPolygon extends LuceneTestCase {
final GeoPoint point3 = new GeoPoint(PlanetModel.WGS84, 0.0, 0.0);
final GeoPoint point4 = new GeoPoint(PlanetModel.WGS84, Math.PI * 0.5, 0.0);
final GeoPoint point5 = new GeoPoint(PlanetModel.WGS84, 1.0, 0.0);
-
+
// First: duplicate points in the middle
{
final List<GeoPoint> originalPoints = new ArrayList<>();
originalPoints.add(point1);
originalPoints.add(point2);
originalPoints.add(point2);
originalPoints.add(point3);
- final List<GeoPoint> filteredPoints = GeoPolygonFactory.filterEdges(GeoPolygonFactory.filterPoints(originalPoints), 0.0);
+ final List<GeoPoint> filteredPoints =
+     GeoPolygonFactory.filterEdges(GeoPolygonFactory.filterPoints(originalPoints), 0.0);
assertEquals(3, filteredPoints.size());
assertEquals(point1, filteredPoints.get(0));
assertEquals(point2, filteredPoints.get(1));
@@ -55,7 +54,8 @@ public class TestGeoPolygon extends LuceneTestCase {
originalPoints.add(point1);
originalPoints.add(point3);
originalPoints.add(point2);
- final List<GeoPoint> filteredPoints = GeoPolygonFactory.filterEdges(GeoPolygonFactory.filterPoints(originalPoints), 0.0);
+ final List<GeoPoint> filteredPoints =
+     GeoPolygonFactory.filterEdges(GeoPolygonFactory.filterPoints(originalPoints), 0.0);
assertEquals(3, filteredPoints.size());
assertEquals(point2, filteredPoints.get(0));
assertEquals(point1, filteredPoints.get(1));
@@ -69,7 +69,8 @@ public class TestGeoPolygon extends LuceneTestCase {
originalPoints.add(point3);
originalPoints.add(point4);
originalPoints.add(point5);
- final List<GeoPoint> filteredPoints = GeoPolygonFactory.filterEdges(GeoPolygonFactory.filterPoints(originalPoints), 0.0);
+ final List<GeoPoint> filteredPoints =
+     GeoPolygonFactory.filterEdges(GeoPolygonFactory.filterPoints(originalPoints), 0.0);
assertEquals(3, filteredPoints.size());
assertEquals(point1, filteredPoints.get(0));
assertEquals(point3, filteredPoints.get(1));
@@ -82,18 +83,18 @@ public class TestGeoPolygon extends LuceneTestCase {
originalPoints.add(point1);
originalPoints.add(point3);
originalPoints.add(point4);
- final List<GeoPoint> filteredPoints = GeoPolygonFactory.filterEdges(GeoPolygonFactory.filterPoints(originalPoints), 0.0);
+ final List<GeoPoint> filteredPoints =
+     GeoPolygonFactory.filterEdges(GeoPolygonFactory.filterPoints(originalPoints), 0.0);
assertEquals(3, filteredPoints.size());
assertEquals(point5, filteredPoints.get(0));
assertEquals(point1, filteredPoints.get(1));
assertEquals(point3, filteredPoints.get(2));
}
-
}
@Test
public void testPolygonPointFiltering2() {
- //all coplanar
+ // all coplanar
GeoPoint point1 = new GeoPoint(PlanetModel.SPHERE, 1.1264101919629863, -0.9108307879480759);
GeoPoint point2 = new GeoPoint(PlanetModel.SPHERE, 1.1264147298190414, -0.9108309624810013);
GeoPoint point3 = new GeoPoint(PlanetModel.SPHERE, 1.1264056541069312, -0.9108306134151508);
@@ -101,11 +102,11 @@ public class TestGeoPolygon extends LuceneTestCase {
originalPoints.add(point1);
originalPoints.add(point2);
originalPoints.add(point3);
- final List<GeoPoint> filteredPoints = GeoPolygonFactory.filterEdges(GeoPolygonFactory.filterPoints(originalPoints), 0.0);
+ final List<GeoPoint> filteredPoints =
+     GeoPolygonFactory.filterEdges(GeoPolygonFactory.filterPoints(originalPoints), 0.0);
assertEquals(null, filteredPoints);
}
-
@Test
public void testPolygonClockwise() {
GeoPolygon c;
GeoPoint gp;
List<GeoPoint> points;
List<GeoPolygonFactory.PolygonDescription> shapes;
- // Points go counterclockwise, so
points = new ArrayList<GeoPoint>();
points.add(new GeoPoint(PlanetModel.SPHERE, -0.1, -0.5));
points.add(new GeoPoint(PlanetModel.SPHERE, 0.0, -0.6));
@@ -122,39 +123,38 @@ public class TestGeoPolygon extends LuceneTestCase {
GeoPolygonFactory.PolygonDescription pd = new GeoPolygonFactory.PolygonDescription(points);
c = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, pd);
- //System.out.println(zScaling);
-
+ // System.out.println(zScaling);
+
// Middle point should NOT be within!!
gp = new GeoPoint(PlanetModel.SPHERE, 0.0, -0.5);
assertTrue(!c.isWithin(gp));
shapes = new ArrayList<>();
shapes.add(pd);
-
+
c = GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.SPHERE, shapes);
assertTrue(!c.isWithin(gp));
-
+
// Now, go clockwise
points = new ArrayList<GeoPoint>();
points.add(new GeoPoint(PlanetModel.SPHERE, 0.0, -0.4));
points.add(new GeoPoint(PlanetModel.SPHERE, 0.1, -0.5));
- points.add(new GeoPoint(PlanetModel.SPHERE, 0.0, -0.6));
+ points.add(new GeoPoint(PlanetModel.SPHERE, 0.0, -0.6));
points.add(new GeoPoint(PlanetModel.SPHERE, -0.1, -0.5));
pd = new GeoPolygonFactory.PolygonDescription(points);
c = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, pd);
- //System.out.println(zScaling);
-
+ // System.out.println(zScaling);
+
// Middle point should be within!!
gp = new GeoPoint(PlanetModel.SPHERE, 0.0, -0.5);
assertTrue(c.isWithin(gp));
shapes = new ArrayList<>();
shapes.add(pd);
-
+
c = GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.SPHERE, shapes);
assertTrue(c.isWithin(gp));
-
}
@Test
@@ -164,7 +164,7 @@ public class TestGeoPolygon extends LuceneTestCase {
List<GeoPolygonFactory.PolygonDescription> shapes;
XYZBounds xyzBounds;
XYZSolid xyzSolid;
-
+
points = new ArrayList<GeoPoint>();
points.add(new GeoPoint(PlanetModel.SPHERE, 0.0, -0.4));
points.add(new GeoPoint(PlanetModel.SPHERE, 0.1, -0.5));
@@ -172,37 +172,84 @@ public class TestGeoPolygon extends LuceneTestCase {
points.add(new GeoPoint(PlanetModel.SPHERE, -0.1, -0.5));
GeoPolygonFactory.PolygonDescription pd = new GeoPolygonFactory.PolygonDescription(points);
-
+
c = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, pd);
xyzBounds = new XYZBounds();
c.getBounds(xyzBounds);
- xyzSolid = XYZSolidFactory.makeXYZSolid(PlanetModel.SPHERE, xyzBounds.getMinimumX(), xyzBounds.getMaximumX(), xyzBounds.getMinimumY(), xyzBounds.getMaximumY(), xyzBounds.getMinimumZ(), xyzBounds.getMaximumZ());
+ xyzSolid =
+     XYZSolidFactory.makeXYZSolid(
+         PlanetModel.SPHERE,
+         xyzBounds.getMinimumX(),
+         xyzBounds.getMaximumX(),
+         xyzBounds.getMinimumY(),
+         xyzBounds.getMaximumY(),
+         xyzBounds.getMinimumZ(),
+         xyzBounds.getMaximumZ());
assertEquals(GeoArea.WITHIN, xyzSolid.getRelationship(c));
- xyzSolid = XYZSolidFactory.makeXYZSolid(PlanetModel.SPHERE, xyzBounds.getMinimumY(), xyzBounds.getMaximumY(), xyzBounds.getMinimumZ(), xyzBounds.getMaximumZ(), xyzBounds.getMinimumX(), xyzBounds.getMaximumX());
+ xyzSolid =
+     XYZSolidFactory.makeXYZSolid(
+         PlanetModel.SPHERE,
+         xyzBounds.getMinimumY(),
+         xyzBounds.getMaximumY(),
+         xyzBounds.getMinimumZ(),
+         xyzBounds.getMaximumZ(),
+         xyzBounds.getMinimumX(),
+         xyzBounds.getMaximumX());
assertEquals(GeoArea.DISJOINT, xyzSolid.getRelationship(c));
shapes = new ArrayList<>();
shapes.add(pd);
-
+
c = GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.SPHERE, shapes);
// Same bounds should work
- xyzSolid = XYZSolidFactory.makeXYZSolid(PlanetModel.SPHERE, xyzBounds.getMinimumX(), xyzBounds.getMaximumX(), xyzBounds.getMinimumY(), xyzBounds.getMaximumY(), xyzBounds.getMinimumZ(), xyzBounds.getMaximumZ());
+ xyzSolid =
+     XYZSolidFactory.makeXYZSolid(
+         PlanetModel.SPHERE,
+         xyzBounds.getMinimumX(),
+         xyzBounds.getMaximumX(),
+         xyzBounds.getMinimumY(),
+         xyzBounds.getMaximumY(),
+         xyzBounds.getMinimumZ(),
+         xyzBounds.getMaximumZ());
assertEquals(GeoArea.WITHIN, xyzSolid.getRelationship(c));
- xyzSolid = XYZSolidFactory.makeXYZSolid(PlanetModel.SPHERE, xyzBounds.getMinimumY(), xyzBounds.getMaximumY(), xyzBounds.getMinimumZ(), xyzBounds.getMaximumZ(), xyzBounds.getMinimumX(), xyzBounds.getMaximumX());
+ xyzSolid =
+     XYZSolidFactory.makeXYZSolid(
+         PlanetModel.SPHERE,
+         xyzBounds.getMinimumY(),
+         xyzBounds.getMaximumY(),
+         xyzBounds.getMinimumZ(),
+         xyzBounds.getMaximumZ(),
+         xyzBounds.getMinimumX(),
+         xyzBounds.getMaximumX());
assertEquals(GeoArea.DISJOINT, xyzSolid.getRelationship(c));
// Bounds we obtain from the large polygon also should work.
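    // Editorial sketch (an illustration added in review, not part of the original patch): the
    // hunks above all exercise one "bounds sandwich" idiom, so a minimal form may help. `c` is
    // the test's own polygon; `sketchBounds` and `sketchSolid` are illustrative names, and the
    // WITHIN expectation mirrors the assertions above.
    XYZBounds sketchBounds = new XYZBounds();
    c.getBounds(sketchBounds); // tightest axis-aligned XYZ box around the shape
    XYZSolid sketchSolid =
        XYZSolidFactory.makeXYZSolid(
            PlanetModel.SPHERE,
            sketchBounds.getMinimumX(),
            sketchBounds.getMaximumX(),
            sketchBounds.getMinimumY(),
            sketchBounds.getMaximumY(),
            sketchBounds.getMinimumZ(),
            sketchBounds.getMaximumZ());
    assertEquals(GeoArea.WITHIN, sketchSolid.getRelationship(c)); // a shape lies within its own bounds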
xyzBounds = new XYZBounds();
c.getBounds(xyzBounds);
- xyzSolid = XYZSolidFactory.makeXYZSolid(PlanetModel.SPHERE, xyzBounds.getMinimumX(), xyzBounds.getMaximumX(), xyzBounds.getMinimumY(), xyzBounds.getMaximumY(), xyzBounds.getMinimumZ(), xyzBounds.getMaximumZ());
+ xyzSolid =
+     XYZSolidFactory.makeXYZSolid(
+         PlanetModel.SPHERE,
+         xyzBounds.getMinimumX(),
+         xyzBounds.getMaximumX(),
+         xyzBounds.getMinimumY(),
+         xyzBounds.getMaximumY(),
+         xyzBounds.getMinimumZ(),
+         xyzBounds.getMaximumZ());
assertEquals(GeoArea.WITHIN, xyzSolid.getRelationship(c));
- xyzSolid = XYZSolidFactory.makeXYZSolid(PlanetModel.SPHERE, xyzBounds.getMinimumY(), xyzBounds.getMaximumY(), xyzBounds.getMinimumZ(), xyzBounds.getMaximumZ(), xyzBounds.getMinimumX(), xyzBounds.getMaximumX());
+ xyzSolid =
+     XYZSolidFactory.makeXYZSolid(
+         PlanetModel.SPHERE,
+         xyzBounds.getMinimumY(),
+         xyzBounds.getMaximumY(),
+         xyzBounds.getMinimumZ(),
+         xyzBounds.getMaximumZ(),
+         xyzBounds.getMinimumX(),
+         xyzBounds.getMaximumX());
assertEquals(GeoArea.DISJOINT, xyzSolid.getRelationship(c));
-
}
-
+
@Test
public void testPolygonPointWithin() {
GeoPolygon c;
@@ -249,10 +296,10 @@ public class TestGeoPolygon extends LuceneTestCase {
// Now, same thing for large polygon
shapes = new ArrayList<>();
shapes.add(pd);
-
+
c = GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.SPHERE, shapes);
- //System.out.println("Large polygon = "+zScaling);
-
+ // System.out.println("Large polygon = "+zScaling);
+
// Sample some points within
gp = new GeoPoint(PlanetModel.SPHERE, 0.0, -0.45);
assertTrue(c.isWithin(gp));
@@ -291,14 +338,14 @@ public class TestGeoPolygon extends LuceneTestCase {
points.add(new GeoPoint(PlanetModel.SPHERE, -0.1, -0.7));
points.add(new GeoPoint(PlanetModel.SPHERE, -0.01, -0.6));
points.add(new GeoPoint(PlanetModel.SPHERE, -0.1, -0.5));
-
+
pd = new GeoPolygonFactory.PolygonDescription(points);
- /*
- System.out.println("Points: ");
- for (GeoPoint p : points) {
- System.out.println(" "+p);
- }
- */
+ /*
+ System.out.println("Points: ");
+ for (GeoPoint p : points) {
+ System.out.println(" "+p);
+ }
+ */
c = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, pd);
// Sample some points within
@@ -332,7 +379,7 @@ public class TestGeoPolygon extends LuceneTestCase {
// Now, same thing for large polygon
shapes = new ArrayList<>();
shapes.add(pd);
-
+
c = GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.SPHERE, shapes);
// Sample some points within
gp = new GeoPoint(PlanetModel.SPHERE, 0.0, -0.5);
@@ -361,7 +408,6 @@ public class TestGeoPolygon extends LuceneTestCase {
assertFalse(c.isWithin(gp));
gp = new GeoPoint(PlanetModel.SPHERE, 0.0, Math.PI);
assertFalse(c.isWithin(gp));
-
}
@Test
@@ -372,24 +418,31 @@ public class TestGeoPolygon extends LuceneTestCase {
XYZBounds xyzb;
GeoPoint point;
GeoArea area;
-
+
// BKD failure
points = new ArrayList<GeoPoint>();
points.add(new GeoPoint(PlanetModel.WGS84, -0.36716183577912814, 1.4836349969188696));
points.add(new GeoPoint(PlanetModel.WGS84, 0.7846038240742979, -0.02743348424931823));
points.add(new GeoPoint(PlanetModel.WGS84, -0.7376479402362607, -0.5072961758807019));
points.add(new GeoPoint(PlanetModel.WGS84, -0.3760415907667887, 1.4970455334565513));
-
+
c = GeoPolygonFactory.makeGeoPolygon(PlanetModel.WGS84, points);
-
+
point = new GeoPoint(PlanetModel.WGS84, -0.01580760332365284, -0.03956004622490505);
assertTrue(c.isWithin(point));
xyzb = new XYZBounds();
c.getBounds(xyzb);
- area = GeoAreaFactory.makeGeoArea(PlanetModel.WGS84,
- xyzb.getMinimumX(), xyzb.getMaximumX(), xyzb.getMinimumY(),
xyzb.getMaximumY(), xyzb.getMinimumZ(), xyzb.getMaximumZ());
+ area =
+     GeoAreaFactory.makeGeoArea(
+         PlanetModel.WGS84,
+         xyzb.getMinimumX(),
+         xyzb.getMaximumX(),
+         xyzb.getMinimumY(),
+         xyzb.getMaximumY(),
+         xyzb.getMinimumZ(),
+         xyzb.getMaximumZ());
assertTrue(area.isWithin(point));
-
+
points = new ArrayList<GeoPoint>();
points.add(new GeoPoint(PlanetModel.SPHERE, 0.0, -0.4));
points.add(new GeoPoint(PlanetModel.SPHERE, 0.1, -0.5));
@@ -418,7 +471,7 @@ public class TestGeoPolygon extends LuceneTestCase {
GeoPoint point1;
GeoPoint point2;
GeoArea area;
-
+
// Build the polygon
points = new ArrayList<>();
points.add(new GeoPoint(PlanetModel.WGS84, 0.7769776943105245, -2.157536559188766));
@@ -427,35 +480,44 @@ public class TestGeoPolygon extends LuceneTestCase {
points.add(new GeoPoint(PlanetModel.WGS84, -1.4459804612164617, -1.2970934639728127));
c = GeoPolygonFactory.makeGeoPolygon(PlanetModel.WGS84, points);
// GeoCompositeMembershipShape: {[GeoConvexPolygon: {planetmodel=PlanetModel.WGS84, points=
- // [[lat=0.17644522781457245, lon=2.4225312555674967],
- // [lat=-1.4459804612164617, lon=-1.2970934639728127],
- // [lat=0.7769776943105245, lon=-2.157536559188766]]},
+ // [[lat=0.17644522781457245, lon=2.4225312555674967],
+ // [lat=-1.4459804612164617, lon=-1.2970934639728127],
+ // [lat=0.7769776943105245, lon=-2.157536559188766]]},
// GeoConcavePolygon: {planetmodel=PlanetModel.WGS84, points=
// [[lat=-0.9796549195552824, lon=-0.25078026625235256],
- // [lat=0.17644522781457245, lon=2.4225312555674967],
+ // [lat=0.17644522781457245, lon=2.4225312555674967],
// [lat=0.7769776943105245, lon=-2.157536559188766]]}]}
point1 = new GeoPoint(PlanetModel.WGS84, -1.2013743680763862, 0.48458963747230094);
point2 = new GeoPoint(0.3189285805649921, 0.16790264636909197, -0.9308557496413026);
-
+
assertTrue(c.isWithin(point1));
assertTrue(c.isWithin(point2));
-
+
// Now try bounds
xyzb = new XYZBounds();
c.getBounds(xyzb);
- area = GeoAreaFactory.makeGeoArea(PlanetModel.WGS84,
- xyzb.getMinimumX(), xyzb.getMaximumX(), xyzb.getMinimumY(), xyzb.getMaximumY(), xyzb.getMinimumZ(), xyzb.getMaximumZ());
-
+ area =
+     GeoAreaFactory.makeGeoArea(
+         PlanetModel.WGS84,
+         xyzb.getMinimumX(),
+         xyzb.getMaximumX(),
+         xyzb.getMinimumY(),
+         xyzb.getMaximumY(),
+         xyzb.getMinimumZ(),
+         xyzb.getMaximumZ());
+
assertTrue(area.isWithin(point1));
assertTrue(area.isWithin(point2));
}
-
+
@Test
public void testGeoPolygonBoundsCase2() {
- // [junit4] 1> TEST: iter=23 shape=GeoCompositeMembershipShape: {[GeoConvexPolygon: {planetmodel=PlanetModel(xyScaling=0.7563871189161702 zScaling=1.2436128810838298), points=
+ // [junit4] 1> TEST: iter=23 shape=GeoCompositeMembershipShape: {[GeoConvexPolygon:
+ // {planetmodel=PlanetModel(xyScaling=0.7563871189161702 zScaling=1.2436128810838298), points=
// [[lat=0.014071770744627236, lon=0.011030818292803128],
// [lat=0.006772117088906782, lon=-0.0012531892445234592],
- // [lat=0.0022201615609504792, lon=0.005941293187389326]]}, GeoConcavePolygon: {planetmodel=PlanetModel(xyScaling=0.7563871189161702 zScaling=1.2436128810838298), points=
+ // [lat=0.0022201615609504792, lon=0.005941293187389326]]}, GeoConcavePolygon:
+ // {planetmodel=PlanetModel(xyScaling=0.7563871189161702 zScaling=1.2436128810838298), points=
// [[lat=-0.005507100238396111, lon=-0.008487706131259667],
// [lat=0.014071770744627236, lon=0.011030818292803128],
// [lat=0.0022201615609504792, lon=0.005941293187389326]]}]}
@@ -476,19 +538,28 @@ public class TestGeoPolygon extends LuceneTestCase {
BitSet p2bits = new BitSet();
p2bits.set(1, true); c.addShape(new GeoConcavePolygon(pm, points2, p2bits, false)); - //System.out.println(zScaling); - + // System.out.println(zScaling); + // [junit4] 1> point=[lat=0.003540694517552105, lon=-9.99517927901697E-4] - // [junit4] 1> quantized=[X=0.7563849869428783, Y=-7.560204674780763E-4, Z=0.0026781405884151086] + // [junit4] 1> quantized=[X=0.7563849869428783, Y=-7.560204674780763E-4, + // Z=0.0026781405884151086] GeoPoint point = new GeoPoint(pm, 0.003540694517552105, -9.99517927901697E-4); - GeoPoint pointQuantized = new GeoPoint(0.7563849869428783, -7.560204674780763E-4, 0.0026781405884151086); - + GeoPoint pointQuantized = + new GeoPoint(0.7563849869428783, -7.560204674780763E-4, 0.0026781405884151086); + // Now try bounds XYZBounds xyzb = new XYZBounds(); c.getBounds(xyzb); - GeoArea area = GeoAreaFactory.makeGeoArea(pm, - xyzb.getMinimumX(), xyzb.getMaximumX(), xyzb.getMinimumY(), xyzb.getMaximumY(), xyzb.getMinimumZ(), xyzb.getMaximumZ()); - + GeoArea area = + GeoAreaFactory.makeGeoArea( + pm, + xyzb.getMinimumX(), + xyzb.getMaximumX(), + xyzb.getMinimumY(), + xyzb.getMaximumY(), + xyzb.getMinimumZ(), + xyzb.getMaximumZ()); + assertTrue(c.isWithin(point)); assertTrue(c.isWithin(pointQuantized)); // This fails!! @@ -499,26 +570,26 @@ public class TestGeoPolygon extends LuceneTestCase { @Test public void testGeoConcaveRelationshipCase1() { /* - [junit4] 1> doc=906 matched but should not - [junit4] 1> point=[lat=-0.9825762558001477, lon=2.4832136904725273] - [junit4] 1> quantized=[X=-0.4505446160475436, Y=0.34850109186970535, Z=-0.8539966368663765] + [junit4] 1> doc=906 matched but should not + [junit4] 1> point=[lat=-0.9825762558001477, lon=2.4832136904725273] + [junit4] 1> quantized=[X=-0.4505446160475436, Y=0.34850109186970535, Z=-0.8539966368663765] -doc=906 added here: + doc=906 added here: - [junit4] 1> cycle: cell=107836 parentCellID=107835 x: -1147288468 TO -742350917, y: -1609508490 TO 1609508490, z: -2147483647 TO 2147483647, splits: 3 queue.size()=1 - [junit4] 1> minx=-0.6107484000858642 maxx=-0.39518364125756916 miny=-0.8568069517709872 maxy=0.8568069517709872 minz=-1.1431930485939341 maxz=1.1431930485939341 - [junit4] 1> GeoArea.CONTAINS: now addAll + [junit4] 1> cycle: cell=107836 parentCellID=107835 x: -1147288468 TO -742350917, y: -1609508490 TO 1609508490, z: -2147483647 TO 2147483647, splits: 3 queue.size()=1 + [junit4] 1> minx=-0.6107484000858642 maxx=-0.39518364125756916 miny=-0.8568069517709872 maxy=0.8568069517709872 minz=-1.1431930485939341 maxz=1.1431930485939341 + [junit4] 1> GeoArea.CONTAINS: now addAll -shape: - [junit4] 1> TEST: iter=18 shape=GeoCompositeMembershipShape: {[GeoConvexPolygon: { - planetmodel=PlanetModel(xyScaling=0.8568069516722363 zScaling=1.1431930483277637), points= - [[lat=1.1577814487635816, lon=1.6283601832010004], - [lat=0.6664570999069251, lon=2.0855825542851574], - [lat=-0.23953537010974632, lon=1.8498724094352876]]}, GeoConcavePolygon: {planetmodel=PlanetModel(xyScaling=0.8568069516722363 zScaling=1.1431930483277637), points= - [[lat=1.1577814487635816, lon=1.6283601832010004], - [lat=-0.23953537010974632, lon=1.8498724094352876], - [lat=-1.1766904875978805, lon=-2.1346828411344436]]}]} - */ + shape: + [junit4] 1> TEST: iter=18 shape=GeoCompositeMembershipShape: {[GeoConvexPolygon: { + planetmodel=PlanetModel(xyScaling=0.8568069516722363 zScaling=1.1431930483277637), points= + [[lat=1.1577814487635816, lon=1.6283601832010004], + [lat=0.6664570999069251, lon=2.0855825542851574], + [lat=-0.23953537010974632, 
lon=1.8498724094352876]]}, GeoConcavePolygon: {planetmodel=PlanetModel(xyScaling=0.8568069516722363 zScaling=1.1431930483277637), points= + [[lat=1.1577814487635816, lon=1.6283601832010004], + [lat=-0.23953537010974632, lon=1.8498724094352876], + [lat=-1.1766904875978805, lon=-2.1346828411344436]]}]} + */ PlanetModel pm = new PlanetModel(0.8568069516722363, 1.1431930483277637); // Build the polygon GeoCompositeMembershipShape c = new GeoCompositeMembershipShape(pm); @@ -535,44 +606,52 @@ shape: BitSet p2bits = new BitSet(); p2bits.set(1, true); c.addShape(new GeoConcavePolygon(pm, points2, p2bits, false)); - //System.out.println(zScaling); - + // System.out.println(zScaling); + GeoPoint point = new GeoPoint(pm, -0.9825762558001477, 2.4832136904725273); - GeoPoint quantizedPoint = new GeoPoint(-0.4505446160475436, 0.34850109186970535, -0.8539966368663765); - - GeoArea xyzSolid = GeoAreaFactory.makeGeoArea(pm, - -0.6107484000858642, -0.39518364125756916, -0.8568069517709872, 0.8568069517709872, -1.1431930485939341, 1.1431930485939341); - //System.out.println("relationship = "+xyzSolid.getRelationship(zScaling)); + GeoPoint quantizedPoint = + new GeoPoint(-0.4505446160475436, 0.34850109186970535, -0.8539966368663765); + + GeoArea xyzSolid = + GeoAreaFactory.makeGeoArea( + pm, + -0.6107484000858642, + -0.39518364125756916, + -0.8568069517709872, + 0.8568069517709872, + -1.1431930485939341, + 1.1431930485939341); + // System.out.println("relationship = "+xyzSolid.getRelationship(zScaling)); assertTrue(xyzSolid.getRelationship(c) == GeoArea.OVERLAPS); } - + @Test public void testPolygonFactoryCase1() { /* - [junit4] 1> Initial points: - [junit4] 1> [X=-0.17279348371564082, Y=0.24422965662722748, Z=0.9521675605930696] - [junit4] 1> [X=-0.6385022730019092, Y=-0.6294493901210775, Z=0.4438687423720006] - [junit4] 1> [X=-0.9519561011293354, Y=-0.05324061687857965, Z=-0.30423702782227385] - [junit4] 1> [X=-0.30329807815178533, Y=-0.9447434167936289, Z=0.13262941042055737] - [junit4] 1> [X=-0.5367607140926697, Y=0.8179452639396644, Z=0.21163783898691005] - [junit4] 1> [X=0.39285411191111597, Y=0.6369575362013932, Z=0.6627439307500357] - [junit4] 1> [X=-0.44715655239362595, Y=0.8332957749253644, Z=0.3273923501593971] - [junit4] 1> [X=0.33024322515264537, Y=0.6945246730529289, Z=0.6387986432043298] - [junit4] 1> [X=-0.1699323603224724, Y=0.8516746480592872, Z=0.4963385521664198] - [junit4] 1> [X=0.2654788898359613, Y=0.7380222309164597, Z=0.6200740473100581] - [junit4] 1> For start plane, the following points are in/out: - [junit4] 1> [X=-0.17279348371564082, Y=0.24422965662722748, Z=0.9521675605930696] is: in - [junit4] 1> [X=-0.6385022730019092, Y=-0.6294493901210775, Z=0.4438687423720006] is: in - [junit4] 1> [X=-0.9519561011293354, Y=-0.05324061687857965, Z=-0.30423702782227385] is: out - [junit4] 1> [X=-0.30329807815178533, Y=-0.9447434167936289, Z=0.13262941042055737] is: in - [junit4] 1> [X=-0.5367607140926697, Y=0.8179452639396644, Z=0.21163783898691005] is: out - [junit4] 1> [X=0.39285411191111597, Y=0.6369575362013932, Z=0.6627439307500357] is: in - [junit4] 1> [X=-0.44715655239362595, Y=0.8332957749253644, Z=0.3273923501593971] is: out - [junit4] 1> [X=0.33024322515264537, Y=0.6945246730529289, Z=0.6387986432043298] is: in - [junit4] 1> [X=-0.1699323603224724, Y=0.8516746480592872, Z=0.4963385521664198] is: out - [junit4] 1> [X=0.2654788898359613, Y=0.7380222309164597, Z=0.6200740473100581] is: out - */ - + [junit4] 1> Initial points: + [junit4] 1> [X=-0.17279348371564082, 
Y=0.24422965662722748, Z=0.9521675605930696]
+ [junit4] 1> [X=-0.6385022730019092, Y=-0.6294493901210775, Z=0.4438687423720006]
+ [junit4] 1> [X=-0.9519561011293354, Y=-0.05324061687857965, Z=-0.30423702782227385]
+ [junit4] 1> [X=-0.30329807815178533, Y=-0.9447434167936289, Z=0.13262941042055737]
+ [junit4] 1> [X=-0.5367607140926697, Y=0.8179452639396644, Z=0.21163783898691005]
+ [junit4] 1> [X=0.39285411191111597, Y=0.6369575362013932, Z=0.6627439307500357]
+ [junit4] 1> [X=-0.44715655239362595, Y=0.8332957749253644, Z=0.3273923501593971]
+ [junit4] 1> [X=0.33024322515264537, Y=0.6945246730529289, Z=0.6387986432043298]
+ [junit4] 1> [X=-0.1699323603224724, Y=0.8516746480592872, Z=0.4963385521664198]
+ [junit4] 1> [X=0.2654788898359613, Y=0.7380222309164597, Z=0.6200740473100581]
+ [junit4] 1> For start plane, the following points are in/out:
+ [junit4] 1> [X=-0.17279348371564082, Y=0.24422965662722748, Z=0.9521675605930696] is: in
+ [junit4] 1> [X=-0.6385022730019092, Y=-0.6294493901210775, Z=0.4438687423720006] is: in
+ [junit4] 1> [X=-0.9519561011293354, Y=-0.05324061687857965, Z=-0.30423702782227385] is: out
+ [junit4] 1> [X=-0.30329807815178533, Y=-0.9447434167936289, Z=0.13262941042055737] is: in
+ [junit4] 1> [X=-0.5367607140926697, Y=0.8179452639396644, Z=0.21163783898691005] is: out
+ [junit4] 1> [X=0.39285411191111597, Y=0.6369575362013932, Z=0.6627439307500357] is: in
+ [junit4] 1> [X=-0.44715655239362595, Y=0.8332957749253644, Z=0.3273923501593971] is: out
+ [junit4] 1> [X=0.33024322515264537, Y=0.6945246730529289, Z=0.6387986432043298] is: in
+ [junit4] 1> [X=-0.1699323603224724, Y=0.8516746480592872, Z=0.4963385521664198] is: out
+ [junit4] 1> [X=0.2654788898359613, Y=0.7380222309164597, Z=0.6200740473100581] is: out
+ */
+
final List<GeoPoint> points = new ArrayList<>();
points.add(new GeoPoint(0.17279348371564082, 0.24422965662722748, 0.9521675605930696));
points.add(new GeoPoint(-0.6385022730019092, -0.6294493901210775, 0.4438687423720006));
@@ -597,16 +676,16 @@ shape:
@Test
public void testPolygonFactoryCase2() {
/*
- [[lat=-0.48522750470337056, lon=-1.7370471071224087([X=-0.14644023172524287, Y=-0.8727091042681705, Z=-0.4665895520487907])],
- [lat=-0.4252164254406539, lon=-1.0929282311747601([X=0.41916238097763436, Y=-0.8093435958043177, Z=-0.4127428785664968])],
- [lat=0.2055150822737076, lon=0.8094775925193464([X=0.6760197133035871, Y=0.7093859395658346, Z=0.20427109186920892])],
- [lat=-0.504360159046884, lon=-1.27628468850318([X=0.25421329462858633, Y=-0.8380671569889917, Z=-0.4834077932502288])],
- [lat=-0.11994023948700858, lon=0.07857194136150605([X=0.9908123546871113, Y=0.07801065055912473, Z=-0.11978097184039621])],
- [lat=0.39346633764155237, lon=1.306697331415816([X=0.24124272064589647, Y=0.8921189226448045, Z=0.3836311592666308])],
- [lat=-0.07741593942416389, lon=0.5334693210962216([X=0.8594122640512101, Y=0.50755758923985, Z=-0.07742360418968308])],
- [lat=0.4654236264787552, lon=1.3013260557429494([X=0.2380080413677112, Y=0.8617612419312584, Z=0.4489988990508502])],
- [lat=-1.2964641581620537, lon=-1.487600369139357([X=0.022467282495493006, Y=-0.26942922375508405, Z=-0.960688317984634])]]
- */
+ [[lat=-0.48522750470337056, lon=-1.7370471071224087([X=-0.14644023172524287, Y=-0.8727091042681705, Z=-0.4665895520487907])],
+ [lat=-0.4252164254406539, lon=-1.0929282311747601([X=0.41916238097763436, Y=-0.8093435958043177, Z=-0.4127428785664968])],
+ [lat=0.2055150822737076, lon=0.8094775925193464([X=0.6760197133035871, Y=0.7093859395658346, Z=0.20427109186920892])],
+
[lat=-0.504360159046884, lon=-1.27628468850318([X=0.25421329462858633, Y=-0.8380671569889917, Z=-0.4834077932502288])],
+ [lat=-0.11994023948700858, lon=0.07857194136150605([X=0.9908123546871113, Y=0.07801065055912473, Z=-0.11978097184039621])],
+ [lat=0.39346633764155237, lon=1.306697331415816([X=0.24124272064589647, Y=0.8921189226448045, Z=0.3836311592666308])],
+ [lat=-0.07741593942416389, lon=0.5334693210962216([X=0.8594122640512101, Y=0.50755758923985, Z=-0.07742360418968308])],
+ [lat=0.4654236264787552, lon=1.3013260557429494([X=0.2380080413677112, Y=0.8617612419312584, Z=0.4489988990508502])],
+ [lat=-1.2964641581620537, lon=-1.487600369139357([X=0.022467282495493006, Y=-0.26942922375508405, Z=-0.960688317984634])]]
+ */
final List<GeoPoint> points = new ArrayList<>();
points.add(new GeoPoint(PlanetModel.WGS84, -0.48522750470337056, -1.7370471071224087));
points.add(new GeoPoint(PlanetModel.WGS84, -0.4252164254406539, -1.0929282311747601));
@@ -617,7 +696,7 @@ shape:
points.add(new GeoPoint(PlanetModel.WGS84, -0.07741593942416389, 0.5334693210962216));
points.add(new GeoPoint(PlanetModel.WGS84, 0.4654236264787552, 1.3013260557429494));
points.add(new GeoPoint(PlanetModel.WGS84, -1.2964641581620537, -1.487600369139357));
-
+
boolean illegalArgumentException = false;
try {
final GeoPolygon p = GeoPolygonFactory.makeGeoPolygon(PlanetModel.WGS84, points, null);
@@ -626,21 +705,21 @@ shape:
}
assertTrue(illegalArgumentException);
}
-
+
@Test
public void testPolygonFactoryCase3() throws Exception {
/*
- This one failed to be detected as convex:
+ This one failed to be detected as convex:
- [junit4] 1> convex part = GeoConvexPolygon: {planetmodel=PlanetModel.WGS84, points=
- [[lat=0.39346633764155237, lon=1.306697331415816([X=0.24124272064589647, Y=0.8921189226448045, Z=0.3836311592666308])],
- [lat=-0.4252164254406539, lon=-1.0929282311747601([X=0.41916238097763436, Y=-0.8093435958043177, Z=-0.4127428785664968])],
- [lat=0.4654236264787552, lon=1.3013260557429494([X=0.2380080413677112, Y=0.8617612419312584, Z=0.4489988990508502])]], internalEdges={0, 1, 2}}
- */
+ [junit4] 1> convex part = GeoConvexPolygon: {planetmodel=PlanetModel.WGS84, points=
+ [[lat=0.39346633764155237, lon=1.306697331415816([X=0.24124272064589647, Y=0.8921189226448045, Z=0.3836311592666308])],
+ [lat=-0.4252164254406539, lon=-1.0929282311747601([X=0.41916238097763436, Y=-0.8093435958043177, Z=-0.4127428785664968])],
+ [lat=0.4654236264787552, lon=1.3013260557429494([X=0.2380080413677112, Y=0.8617612419312584, Z=0.4489988990508502])]], internalEdges={0, 1, 2}}
+ */
final GeoPoint p3 = new GeoPoint(PlanetModel.WGS84, 0.39346633764155237, 1.306697331415816);
final GeoPoint p2 = new GeoPoint(PlanetModel.WGS84, -0.4252164254406539, -1.0929282311747601);
final GeoPoint p1 = new GeoPoint(PlanetModel.WGS84, 0.4654236264787552, 1.3013260557429494);
-
+
final List<GeoPoint> points = new ArrayList<>();
points.add(p3);
points.add(p2);
@@ -649,57 +728,70 @@ shape:
final BitSet internal = new BitSet();
final GeoCompositePolygon rval = new GeoCompositePolygon(PlanetModel.WGS84);
final GeoPolygonFactory.MutableBoolean mutableBoolean = new GeoPolygonFactory.MutableBoolean();
-
- boolean result = GeoPolygonFactory.buildPolygonShape(rval, mutableBoolean, PlanetModel.WGS84, points, internal, 0, 1,
- new SidedPlane(p1, p3, p2), new ArrayList<GeoPolygon>(), null);
-
+
+ boolean result =
+     GeoPolygonFactory.buildPolygonShape(
+         rval,
+         mutableBoolean,
+         PlanetModel.WGS84,
+         points,
+         internal,
+         0,
+         1,
+         new SidedPlane(p1, p3, p2),
+         new ArrayList<GeoPolygon>(),
+         null);
+
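    // Editorial sketch (an illustration added in review, not part of the original patch):
    // buildPolygonShape is the low-level worker being probed here; the other tests in this file
    // reach it through the factory method instead. Minimal form, reusing p1..p3 from above
    // (`simplePoints` and `simplePolygon` are illustrative names):
    final List<GeoPoint> simplePoints = new ArrayList<>();
    simplePoints.add(p1);
    simplePoints.add(p2);
    simplePoints.add(p3);
    final GeoPolygon simplePolygon =
        GeoPolygonFactory.makeGeoPolygon(PlanetModel.WGS84, simplePoints);
    // The factory decides internally whether convex and concave pieces are needed.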
assertFalse(mutableBoolean.value);
-
}
-
+
@Test
public void testPolygonFactoryCase4() {
- // [[lat=0.897812132711355, lon=0.0025364171887532795([X=0.6227358672251874, Y=0.0015795213449218714, Z=0.7812318690127594])],
- // [lat=0.897812132711355, lon=0.0025363997354607595([X=0.6227358672527552, Y=0.001579510476130618, Z=0.7812318690127594])],
- // [lat=0.8978120628981849, lon=0.0025362601091206([X=0.6227359221556139, Y=0.0015794236644894651, Z=0.7812318257158789])]]
-
+ // [[lat=0.897812132711355, lon=0.0025364171887532795([X=0.6227358672251874,
+ // Y=0.0015795213449218714, Z=0.7812318690127594])],
+ // [lat=0.897812132711355, lon=0.0025363997354607595([X=0.6227358672527552,
+ // Y=0.001579510476130618, Z=0.7812318690127594])],
+ // [lat=0.8978120628981849, lon=0.0025362601091206([X=0.6227359221556139,
+ // Y=0.0015794236644894651, Z=0.7812318257158789])]]
+
final GeoPoint p1 = new GeoPoint(PlanetModel.WGS84, 0.897812132711355, 0.0025364171887532795);
final GeoPoint p2 = new GeoPoint(PlanetModel.WGS84, 0.897812132711355, 0.0025363997354607595);
final GeoPoint p3 = new GeoPoint(PlanetModel.WGS84, 0.8978120628981849, 0.0025362601091206);
-
+
final List<GeoPoint> points = new ArrayList<>();
points.add(p1);
points.add(p2);
points.add(p3);
-
+
final List<GeoPolygonFactory.PolygonDescription> shapeList = new ArrayList<>();
- final GeoPolygonFactory.PolygonDescription desc = new GeoPolygonFactory.PolygonDescription(points, new ArrayList<GeoPolygonFactory.PolygonDescription>());
-
+ final GeoPolygonFactory.PolygonDescription desc =
+     new GeoPolygonFactory.PolygonDescription(
+         points, new ArrayList<GeoPolygonFactory.PolygonDescription>());
+
shapeList.add(desc);
-
+
GeoPolygon p = GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.WGS84, shapeList);
-
}
-
+
@Test
public void testPolygonFactoryCase5() {
/*
- [junit4] 1> points=[[lat=0.0425265613312593, lon=0.0([X=1.0002076326868337, Y=0.0, Z=0.042561051669501374])],
- [lat=0.8894380320379947, lon=-2.8993466885897496([X=-0.6109015457368775, Y=-0.1509528453728308, Z=0.7760109675775679])],
- [lat=-0.8298163536994994, lon=-0.1462586594666574([X=0.6673285226073522, Y=-0.09830454048435874, Z=-0.7372817203741138])],
- [lat=0.0, lon=-1.7156310907312492E-12([X=1.0011188539924791, Y=-1.7175506314267352E-12, Z=0.0])],
- [lat=-0.7766317703682181, lon=3.141592653589793([X=-0.7128972529667801, Y=8.730473389667082E-17, Z=-0.7005064828988063])]]
+ [junit4] 1> points=[[lat=0.0425265613312593, lon=0.0([X=1.0002076326868337, Y=0.0, Z=0.042561051669501374])],
+ [lat=0.8894380320379947, lon=-2.8993466885897496([X=-0.6109015457368775, Y=-0.1509528453728308, Z=0.7760109675775679])],
+ [lat=-0.8298163536994994, lon=-0.1462586594666574([X=0.6673285226073522, Y=-0.09830454048435874, Z=-0.7372817203741138])],
+ [lat=0.0, lon=-1.7156310907312492E-12([X=1.0011188539924791, Y=-1.7175506314267352E-12, Z=0.0])],
+ [lat=-0.7766317703682181, lon=3.141592653589793([X=-0.7128972529667801, Y=8.730473389667082E-17, Z=-0.7005064828988063])]]
- {[GeoConvexPolygon: {planetmodel=PlanetModel.WGS84, points=
- [[lat=0.0425265613312593, lon=0.0([X=1.0002076326868337, Y=0.0, Z=0.042561051669501374])],
- [lat=0.8894380320379947, lon=-2.8993466885897496([X=-0.6109015457368775, Y=-0.1509528453728308, Z=0.7760109675775679])],
- [lat=-0.8298163536994994, lon=-0.1462586594666574([X=0.6673285226073522, Y=-0.09830454048435874, Z=-0.7372817203741138])],
- [lat=0.0, lon=-1.7156310907312492E-12([X=1.0011188539924791, Y=-1.7175506314267352E-12, Z=0.0])]], internalEdges={3}},
- GeoConvexPolygon: {planetmodel=PlanetModel.WGS84, points=
- [[lat=0.0425265613312593, lon=0.0([X=1.0002076326868337, Y=0.0, Z=0.042561051669501374])],
-
[lat=0.0, lon=-1.7156310907312492E-12([X=1.0011188539924791, Y=-1.7175506314267352E-12, Z=0.0])],
- [lat=-0.7766317703682181, lon=3.141592653589793([X=-0.7128972529667801, Y=8.730473389667082E-17, Z=-0.7005064828988063])]], internalEdges={0}}]}
- */
+ {[GeoConvexPolygon: {planetmodel=PlanetModel.WGS84, points=
+ [[lat=0.0425265613312593, lon=0.0([X=1.0002076326868337, Y=0.0, Z=0.042561051669501374])],
+ [lat=0.8894380320379947, lon=-2.8993466885897496([X=-0.6109015457368775, Y=-0.1509528453728308, Z=0.7760109675775679])],
+ [lat=-0.8298163536994994, lon=-0.1462586594666574([X=0.6673285226073522, Y=-0.09830454048435874, Z=-0.7372817203741138])],
+ [lat=0.0, lon=-1.7156310907312492E-12([X=1.0011188539924791, Y=-1.7175506314267352E-12, Z=0.0])]], internalEdges={3}},
+ GeoConvexPolygon: {planetmodel=PlanetModel.WGS84, points=
+ [[lat=0.0425265613312593, lon=0.0([X=1.0002076326868337, Y=0.0, Z=0.042561051669501374])],
+ [lat=0.0, lon=-1.7156310907312492E-12([X=1.0011188539924791, Y=-1.7175506314267352E-12, Z=0.0])],
+ [lat=-0.7766317703682181, lon=3.141592653589793([X=-0.7128972529667801, Y=8.730473389667082E-17, Z=-0.7005064828988063])]], internalEdges={0}}]}
+ */
final GeoPoint p1 = new GeoPoint(PlanetModel.WGS84, 0.0425265613312593, 0.0);
final GeoPoint p2 = new GeoPoint(PlanetModel.WGS84, 0.8894380320379947, -2.8993466885897496);
final GeoPoint p3 = new GeoPoint(PlanetModel.WGS84, -0.8298163536994994, -0.1462586594666574);
@@ -712,24 +804,31 @@ shape:
polyList.add(p3);
polyList.add(p4);
polyList.add(p5);
-
+
GeoPolygon p = GeoPolygonFactory.makeGeoPolygon(PlanetModel.WGS84, polyList);
- //System.out.println("p = "+p);
+ // System.out.println("p = "+p);
XYZBounds bounds = new XYZBounds();
p.getBounds(bounds);
- XYZSolid solid = XYZSolidFactory.makeXYZSolid(PlanetModel.WGS84, bounds.getMinimumX(), bounds.getMaximumX(),
- bounds.getMinimumY(), bounds.getMaximumY(),
- bounds.getMinimumZ(), bounds.getMaximumZ());
+ XYZSolid solid =
+     XYZSolidFactory.makeXYZSolid(
+         PlanetModel.WGS84,
+         bounds.getMinimumX(),
+         bounds.getMaximumX(),
+         bounds.getMinimumY(),
+         bounds.getMaximumY(),
+         bounds.getMinimumZ(),
+         bounds.getMaximumZ());
+
+ // final List<GeoPoint> p1List = new ArrayList<>();
+ // p1List.add(p1);
+ // p1List.add(p2);
+ // p1List.add(p3);
+ // p1List.add(p4);
+ // final BitSet p1Internal = new BitSet();
+ // final GeoConvexPolygon poly1 = new GeoConvexPolygon(PlanetModel.WGS84, p1List, p1Internal,
+ // false);
- //final List<GeoPoint> p1List = new ArrayList<>();
- //p1List.add(p1);
- //p1List.add(p2);
- //p1List.add(p3);
- //p1List.add(p4);
- //final BitSet p1Internal = new BitSet();
- //final GeoConvexPolygon poly1 = new GeoConvexPolygon(PlanetModel.WGS84, p1List, p1Internal, false);
-
/*
final List<GeoPoint> p2List = new ArrayList<>();
p2List.add(p1);
@@ -738,13 +837,14 @@ shape:
final BitSet p2Internal = new BitSet();
final GeoConvexPolygon poly2 = new GeoConvexPolygon(PlanetModel.WGS84, p2List, p2Internal, false);
*/
-
- //XYZBounds bounds1 = new XYZBounds();
- //poly1.getBounds(bounds1);
- //XYZSolid solid1 = XYZSolidFactory.makeXYZSolid(PlanetModel.WGS84, bounds1.getMinimumX(), bounds1.getMaximumX(),
+
+ // XYZBounds bounds1 = new XYZBounds();
+ // poly1.getBounds(bounds1);
+ // XYZSolid solid1 = XYZSolidFactory.makeXYZSolid(PlanetModel.WGS84, bounds1.getMinimumX(),
+ // bounds1.getMaximumX(),
// bounds1.getMinimumY(), bounds1.getMaximumY(),
// bounds1.getMinimumZ(), bounds1.getMaximumZ());
-
+
/*
XYZBounds bounds2 = new XYZBounds();
poly2.getBounds(bounds2);
@@ -752,116 +852,119 @@ shape:
bounds2.getMinimumY(),
bounds2.getMaximumY(), bounds2.getMinimumZ(), bounds2.getMaximumZ()); */ - + final GeoPoint point = new GeoPoint(PlanetModel.WGS84, -0.41518838180529244, 3.141592653589793); - final GeoPoint encodedPoint = new GeoPoint(-0.9155623168963972, 2.3309121299774915E-10, -0.40359240449795253); - - assertTrue(p.isWithin(point)?solid.isWithin(point):true); - + final GeoPoint encodedPoint = + new GeoPoint(-0.9155623168963972, 2.3309121299774915E-10, -0.40359240449795253); + + assertTrue(p.isWithin(point) ? solid.isWithin(point) : true); } - + @Test public void testLargePolygonFailureCase1() { /* - [junit4] > shape=GeoComplexPolygon: {planetmodel=PlanetModel.WGS84, number of shapes=1, address=65f193fc, - testPoint=[lat=1.3005550159098878, lon=-2.4043250791032897([X=-0.1972404544647752, Y=-0.17911237095124333, Z=0.9617794725902562])], - testPointInSet=false, - shapes={ - {[lat=0.972005250702484, lon=-1.9776473855435277([X=-0.22278290030997686, Y=-0.5170266140533727, Z=0.8250470449472769])], - [lat=0.5530477484903267, lon=2.5300578442038137([X=-0.6968439858923609, Y=0.4886310878468911, Z=0.5253825248638686])], - [lat=1.5185372097372358, lon=-0.33848566616392867([X=0.04916162127975167, Y=-0.01730656055596007, Z=0.9964092501726799])]}} - [junit4] > bounds=XYZBounds: [xmin=-1.0011188544924792 xmax=0.04916162177975167 ymin=-1.0011188544924792 ymax=1.0011188544924792 zmin=-5.0E-10 zmax=0.99766957331525] - [junit4] > world bounds=( minX=-1.0011188539924791 maxX=1.0011188539924791 minY=-1.0011188539924791 maxY=1.0011188539924791 minZ=-0.9977622920221051 maxZ=0.9977622920221051 - [junit4] > quantized point=[X=0.32866145093230836, Y=0.21519085912590594, Z=0.9177348472123349] within shape? true within bounds? false - [junit4] > unquantized point=[lat=1.166339260547107, lon=0.5797066870374205([X=0.3286614507856878, Y=0.21519085911319938, Z=0.9177348470779726])] within shape? true within bounds? 
false - [junit4] > docID=10 deleted?=false - [junit4] > query=PointInGeo3DShapeQuery: field=point: Shape: GeoComplexPolygon: {planetmodel=PlanetModel.WGS84, number of shapes=1, address=65f193fc, testPoint=[lat=1.3005550159098878, lon=-2.4043250791032897([X=-0.1972404544647752, Y=-0.17911237095124333, Z=0.9617794725902562])], testPointInSet=false, shapes={ {[lat=0.972005250702484, lon=-1.9776473855435277([X=-0.22278290030997686, Y=-0.5170266140533727, Z=0.8250470449472769])], [lat=0.5530477484903267, lon=2.5300578442038137([X=-0.6968439858923609, Y=0.4886310878468911, Z=0.5253825248638686])], [lat=1.5185372097372358, lon=-0.33848566616392867([X=0.04916162127975167, Y=-0.01730656055596007, Z=0.9964092501726799])]}} - [junit4] > explanation: - [junit4] > target is in leaf _0(7.0.0):c13 of full reader StandardDirectoryReader(segments:3:nrt _0(7.0.0):c13) - [junit4] > full BKD path to target doc: - [junit4] > Cell(x=-0.9060562472023252 TO 1.0010658113048514 y=-0.5681445384324596 TO 0.7613281936331098 z=-0.43144274682272304 TO 0.9977622920582089); Shape relationship = OVERLAPS; Quantized point within cell = true; Unquantized point within cell = true - [junit4] > on cell Cell(x=-0.9060562472023252 TO 1.0010658113048514 y=-0.5681445384324596 TO 0.7613281936331098 z=-0.43144274682272304 TO 0.9977622920582089); Shape relationship = OVERLAPS; Quantized point within cell = true; Unquantized point within cell = true, wrapped visitor returned CELL_CROSSES_QUERY - [junit4] > leaf visit docID=10 x=0.32866145093230836 y=0.21519085912590594 z=0.9177348472123349 - */ - final GeoPoint testPoint = new GeoPoint(PlanetModel.WGS84, 1.3005550159098878, -2.4043250791032897); + [junit4] > shape=GeoComplexPolygon: {planetmodel=PlanetModel.WGS84, number of shapes=1, address=65f193fc, + testPoint=[lat=1.3005550159098878, lon=-2.4043250791032897([X=-0.1972404544647752, Y=-0.17911237095124333, Z=0.9617794725902562])], + testPointInSet=false, + shapes={ + {[lat=0.972005250702484, lon=-1.9776473855435277([X=-0.22278290030997686, Y=-0.5170266140533727, Z=0.8250470449472769])], + [lat=0.5530477484903267, lon=2.5300578442038137([X=-0.6968439858923609, Y=0.4886310878468911, Z=0.5253825248638686])], + [lat=1.5185372097372358, lon=-0.33848566616392867([X=0.04916162127975167, Y=-0.01730656055596007, Z=0.9964092501726799])]}} + [junit4] > bounds=XYZBounds: [xmin=-1.0011188544924792 xmax=0.04916162177975167 ymin=-1.0011188544924792 ymax=1.0011188544924792 zmin=-5.0E-10 zmax=0.99766957331525] + [junit4] > world bounds=( minX=-1.0011188539924791 maxX=1.0011188539924791 minY=-1.0011188539924791 maxY=1.0011188539924791 minZ=-0.9977622920221051 maxZ=0.9977622920221051 + [junit4] > quantized point=[X=0.32866145093230836, Y=0.21519085912590594, Z=0.9177348472123349] within shape? true within bounds? false + [junit4] > unquantized point=[lat=1.166339260547107, lon=0.5797066870374205([X=0.3286614507856878, Y=0.21519085911319938, Z=0.9177348470779726])] within shape? true within bounds? 
false + [junit4] > docID=10 deleted?=false + [junit4] > query=PointInGeo3DShapeQuery: field=point: Shape: GeoComplexPolygon: {planetmodel=PlanetModel.WGS84, number of shapes=1, address=65f193fc, testPoint=[lat=1.3005550159098878, lon=-2.4043250791032897([X=-0.1972404544647752, Y=-0.17911237095124333, Z=0.9617794725902562])], testPointInSet=false, shapes={ {[lat=0.972005250702484, lon=-1.9776473855435277([X=-0.22278290030997686, Y=-0.5170266140533727, Z=0.8250470449472769])], [lat=0.5530477484903267, lon=2.5300578442038137([X=-0.6968439858923609, Y=0.4886310878468911, Z=0.5253825248638686])], [lat=1.5185372097372358, lon=-0.33848566616392867([X=0.04916162127975167, Y=-0.01730656055596007, Z=0.9964092501726799])]}} + [junit4] > explanation: + [junit4] > target is in leaf _0(7.0.0):c13 of full reader StandardDirectoryReader(segments:3:nrt _0(7.0.0):c13) + [junit4] > full BKD path to target doc: + [junit4] > Cell(x=-0.9060562472023252 TO 1.0010658113048514 y=-0.5681445384324596 TO 0.7613281936331098 z=-0.43144274682272304 TO 0.9977622920582089); Shape relationship = OVERLAPS; Quantized point within cell = true; Unquantized point within cell = true + [junit4] > on cell Cell(x=-0.9060562472023252 TO 1.0010658113048514 y=-0.5681445384324596 TO 0.7613281936331098 z=-0.43144274682272304 TO 0.9977622920582089); Shape relationship = OVERLAPS; Quantized point within cell = true; Unquantized point within cell = true, wrapped visitor returned CELL_CROSSES_QUERY + [junit4] > leaf visit docID=10 x=0.32866145093230836 y=0.21519085912590594 z=0.9177348472123349 + */ + final GeoPoint testPoint = + new GeoPoint(PlanetModel.WGS84, 1.3005550159098878, -2.4043250791032897); final boolean testPointInSet = false; final List pointList = new ArrayList<>(); pointList.add(new GeoPoint(PlanetModel.WGS84, 0.972005250702484, -1.9776473855435277)); pointList.add(new GeoPoint(PlanetModel.WGS84, 0.5530477484903267, 2.5300578442038137)); pointList.add(new GeoPoint(PlanetModel.WGS84, 1.5185372097372358, -0.33848566616392867)); - + final GeoPolygon pSanity = GeoPolygonFactory.makeGeoPolygon(PlanetModel.WGS84, pointList); - + assertTrue(pSanity.isWithin(testPoint) == testPointInSet); - + final List> shapeList = new ArrayList<>(); shapeList.add(pointList); - final GeoPolygon p = new GeoComplexPolygon(PlanetModel.WGS84, shapeList, testPoint, testPointInSet); - + final GeoPolygon p = + new GeoComplexPolygon(PlanetModel.WGS84, shapeList, testPoint, testPointInSet); + final GeoPoint intersectionPoint = new GeoPoint(0.26643017529034996, 0.0, 0.9617794725902564); assertTrue(pSanity.isWithin(intersectionPoint) == p.isWithin(intersectionPoint)); assertTrue(p.isWithin(intersectionPoint)); - + final GeoPoint maxXPoint = new GeoPoint(PlanetModel.WGS84, 0.0, 0.0); - + assertTrue(pSanity.isWithin(maxXPoint) == p.isWithin(maxXPoint)); - - final GeoPoint checkPoint = new GeoPoint(PlanetModel.WGS84, 1.166339260547107, 0.5797066870374205); - + + final GeoPoint checkPoint = + new GeoPoint(PlanetModel.WGS84, 1.166339260547107, 0.5797066870374205); + // Given the choice of test point, does this all make sense? 
assertTrue(pSanity.isWithin(checkPoint) == p.isWithin(checkPoint)); - + final XYZBounds referenceBounds = new XYZBounds(); pSanity.getBounds(referenceBounds); - + final XYZBounds actualBounds = new XYZBounds(); p.getBounds(actualBounds); - + assertEquals(referenceBounds.getMinimumX(), actualBounds.getMinimumX(), 0.0000001); assertEquals(referenceBounds.getMaximumX(), actualBounds.getMaximumX(), 0.0000001); assertEquals(referenceBounds.getMinimumY(), actualBounds.getMinimumY(), 0.0000001); assertEquals(referenceBounds.getMaximumY(), actualBounds.getMaximumY(), 0.0000001); assertEquals(referenceBounds.getMinimumZ(), actualBounds.getMinimumZ(), 0.0000001); assertEquals(referenceBounds.getMaximumZ(), actualBounds.getMaximumZ(), 0.0000001); - } - + @Test public void testLargePolygonFailureCase2() { /* - [junit4] > Throwable #1: java.lang.AssertionError: FAIL: id=2 should have matched but did not - [junit4] > shape=GeoComplexPolygon: {planetmodel=PlanetModel.WGS84, number of shapes=1, address=6eccd33b, - testPoint=[lat=0.03170690566178683, lon=1.0862414976732029([X=0.46609969117964495, Y=0.8854242006628827, Z=0.0317369552646047])], - testPointInSet=false, - shapes={ { - [lat=1.0774842300167298, lon=-0.11534121538553185([X=0.46969930266058374, Y=-0.054417217622152375, Z=0.8794587218580684])], - [lat=0.05101544777239065, lon=1.031558236908661([X=0.5133835679471972, Y=0.8579350866926241, Z=0.051049928818862174])], - [lat=-0.011222928649880962, lon=1.5851249038356199([X=-0.01434320835886277, Y=1.0009526216234983, Z=-0.011235244842183226])], - [lat=-0.02571365137215876, lon=0.5627875521419741([X=0.8464356149277266, Y=0.5339650936800929, Z=-0.025739527171261035])], - [lat=0.03833766792865358, lon=1.0082901344798614([X=0.5335096521470836, Y=0.8462411929752105, Z=0.03837097111317845])], - [lat=0.1719054969347345, lon=0.9024290407832926([X=0.6111941952395734, Y=0.7740553755547761, Z=0.17123457719021212])], - [lat=0.08180947807010808, lon=1.0107147265848113([X=0.5300590148023426, Y=0.8453039531721928, Z=0.08180784289673602])]}} - [junit4] > bounds=XYZBounds: [xmin=-1.0011188544924792 xmax=1.0011188544924792 - ymin=-1.0011188544924792 ymax=1.0011188544924792 - zmin=-0.025739527671261034 zmax=0.9977622925221051] - [junit4] > world bounds=( minX=-1.0011188539924791 maxX=1.0011188539924791 minY=-1.0011188539924791 maxY=1.0011188539924791 minZ=-0.9977622920221051 maxZ=0.9977622920221051 - [junit4] > quantized point=[X=-0.477874179571219, Y=0.5908091335156603, Z=-0.6495967142221521] within shape? true within bounds? false - [junit4] > unquantized point=[lat=-0.7073124559987376, lon=2.2509085326629887([X=-0.47787417938801546, Y=0.5908091336704123, Z=-0.6495967140640758])] within shape? true within bounds? 
false - [junit4] > docID=2 deleted?=false - [junit4] > query=PointInGeo3DShapeQuery: field=point: Shape: GeoComplexPolygon: {planetmodel=PlanetModel.WGS84, number of shapes=1, address=6eccd33b, testPoint=[lat=0.03170690566178683, lon=1.0862414976732029([X=0.46609969117964495, Y=0.8854242006628827, Z=0.0317369552646047])], testPointInSet=false, shapes={ {[lat=1.0774842300167298, lon=-0.11534121538553185([X=0.46969930266058374, Y=-0.054417217622152375, Z=0.8794587218580684])], [lat=0.05101544777239065, lon=1.031558236908661([X=0.5133835679471972, Y=0.8579350866926241, Z=0.051049928818862174])], [lat=-0.011222928649880962, lon=1.5851249038356199([X=-0.01434320835886277, Y=1.0009526216234983, Z=-0.011235244842183226])], [lat=-0.02571365137215876, lon=0.5627875521419741([X=0.8464356149277266, Y=0.5339650936800929, Z=-0.025739527171261035])], [lat=0.03833766792865358, lon=1.0082901344798614([X=0.5335096521470836, Y=0.8462411929752105, Z=0.03837097111317845])], [lat=0.1719054969347345, lon=0.9024290407832926([X=0.6111941952395734, Y=0.7740553755547761, Z=0.17123457719021212])], [lat=0.08180947807010808, lon=1.0107147265848113([X=0.5300590148023426, Y=0.8453039531721928, Z=0.08180784289673602])]}} - [junit4] > explanation: - [junit4] > target is in leaf _0(7.0.0):C11 of full reader StandardDirectoryReader(segments:3:nrt _0(7.0.0):C11) - [junit4] > full BKD path to target doc: - [junit4] > Cell(x=-0.8906255176936849 TO 1.0005089994430834 y=-0.6808995306272861 TO 0.9675171153117977 z=-0.997762292058209 TO 0.9939318087373729); Shape relationship = OVERLAPS; Quantized point within cell = true; Unquantized point within cell = true - [junit4] > on cell Cell(x=-0.8906255176936849 TO 1.0005089994430834 y=-0.6808995306272861 TO 0.9675171153117977 z=-0.997762292058209 TO 0.9939318087373729); Shape relationship = OVERLAPS; Quantized point within cell = true; Unquantized point within cell = true, wrapped visitor returned CELL_CROSSES_QUERY - [junit4] > leaf visit docID=2 x=-0.477874179571219 y=0.5908091335156603 z=-0.6495967142221521 - */ - final GeoPoint testPoint = new GeoPoint(PlanetModel.WGS84, 0.03170690566178683, 1.0862414976732029); + [junit4] > Throwable #1: java.lang.AssertionError: FAIL: id=2 should have matched but did not + [junit4] > shape=GeoComplexPolygon: {planetmodel=PlanetModel.WGS84, number of shapes=1, address=6eccd33b, + testPoint=[lat=0.03170690566178683, lon=1.0862414976732029([X=0.46609969117964495, Y=0.8854242006628827, Z=0.0317369552646047])], + testPointInSet=false, + shapes={ { + [lat=1.0774842300167298, lon=-0.11534121538553185([X=0.46969930266058374, Y=-0.054417217622152375, Z=0.8794587218580684])], + [lat=0.05101544777239065, lon=1.031558236908661([X=0.5133835679471972, Y=0.8579350866926241, Z=0.051049928818862174])], + [lat=-0.011222928649880962, lon=1.5851249038356199([X=-0.01434320835886277, Y=1.0009526216234983, Z=-0.011235244842183226])], + [lat=-0.02571365137215876, lon=0.5627875521419741([X=0.8464356149277266, Y=0.5339650936800929, Z=-0.025739527171261035])], + [lat=0.03833766792865358, lon=1.0082901344798614([X=0.5335096521470836, Y=0.8462411929752105, Z=0.03837097111317845])], + [lat=0.1719054969347345, lon=0.9024290407832926([X=0.6111941952395734, Y=0.7740553755547761, Z=0.17123457719021212])], + [lat=0.08180947807010808, lon=1.0107147265848113([X=0.5300590148023426, Y=0.8453039531721928, Z=0.08180784289673602])]}} + [junit4] > bounds=XYZBounds: [xmin=-1.0011188544924792 xmax=1.0011188544924792 + ymin=-1.0011188544924792 ymax=1.0011188544924792 + 
zmin=-0.025739527671261034 zmax=0.9977622925221051]
+ [junit4] > world bounds=( minX=-1.0011188539924791 maxX=1.0011188539924791 minY=-1.0011188539924791 maxY=1.0011188539924791 minZ=-0.9977622920221051 maxZ=0.9977622920221051
+ [junit4] > quantized point=[X=-0.477874179571219, Y=0.5908091335156603, Z=-0.6495967142221521] within shape? true within bounds? false
+ [junit4] > unquantized point=[lat=-0.7073124559987376, lon=2.2509085326629887([X=-0.47787417938801546, Y=0.5908091336704123, Z=-0.6495967140640758])] within shape? true within bounds? false
+ [junit4] > docID=2 deleted?=false
+ [junit4] > query=PointInGeo3DShapeQuery: field=point: Shape: GeoComplexPolygon: {planetmodel=PlanetModel.WGS84, number of shapes=1, address=6eccd33b, testPoint=[lat=0.03170690566178683, lon=1.0862414976732029([X=0.46609969117964495, Y=0.8854242006628827, Z=0.0317369552646047])], testPointInSet=false, shapes={ {[lat=1.0774842300167298, lon=-0.11534121538553185([X=0.46969930266058374, Y=-0.054417217622152375, Z=0.8794587218580684])], [lat=0.05101544777239065, lon=1.031558236908661([X=0.5133835679471972, Y=0.8579350866926241, Z=0.051049928818862174])], [lat=-0.011222928649880962, lon=1.5851249038356199([X=-0.01434320835886277, Y=1.0009526216234983, Z=-0.011235244842183226])], [lat=-0.02571365137215876, lon=0.5627875521419741([X=0.8464356149277266, Y=0.5339650936800929, Z=-0.025739527171261035])], [lat=0.03833766792865358, lon=1.0082901344798614([X=0.5335096521470836, Y=0.8462411929752105, Z=0.03837097111317845])], [lat=0.1719054969347345, lon=0.9024290407832926([X=0.6111941952395734, Y=0.7740553755547761, Z=0.17123457719021212])], [lat=0.08180947807010808, lon=1.0107147265848113([X=0.5300590148023426, Y=0.8453039531721928, Z=0.08180784289673602])]}}
+ [junit4] > explanation:
+ [junit4] > target is in leaf _0(7.0.0):C11 of full reader StandardDirectoryReader(segments:3:nrt _0(7.0.0):C11)
+ [junit4] > full BKD path to target doc:
+ [junit4] > Cell(x=-0.8906255176936849 TO 1.0005089994430834 y=-0.6808995306272861 TO 0.9675171153117977 z=-0.997762292058209 TO 0.9939318087373729); Shape relationship = OVERLAPS; Quantized point within cell = true; Unquantized point within cell = true
+ [junit4] > on cell Cell(x=-0.8906255176936849 TO 1.0005089994430834 y=-0.6808995306272861 TO 0.9675171153117977 z=-0.997762292058209 TO 0.9939318087373729); Shape relationship = OVERLAPS; Quantized point within cell = true; Unquantized point within cell = true, wrapped visitor returned CELL_CROSSES_QUERY
+ [junit4] > leaf visit docID=2 x=-0.477874179571219 y=0.5908091335156603 z=-0.6495967142221521
+ */
+ final GeoPoint testPoint =
+     new GeoPoint(PlanetModel.WGS84, 0.03170690566178683, 1.0862414976732029);
final boolean testPointInSet = false;
final List<GeoPoint> pointList = new ArrayList<>();
- // If the 1.07748... line is at the top, the bounds are correct and the test succeeds.
// If this line is at the bottom, though, the bounds are wrong and the test fails.
- //pointList.add(new GeoPoint(PlanetModel.WGS84, 1.0774842300167298, -0.11534121538553185));
+ // pointList.add(new GeoPoint(PlanetModel.WGS84, 1.0774842300167298, -0.11534121538553185));
 pointList.add(new GeoPoint(PlanetModel.WGS84, 0.05101544777239065, 1.031558236908661));
 pointList.add(new GeoPoint(PlanetModel.WGS84, -0.011222928649880962, 1.5851249038356199));
 pointList.add(new GeoPoint(PlanetModel.WGS84, -0.02571365137215876, 0.5627875521419741));
@@ -869,35 +972,36 @@ shape:
 pointList.add(new GeoPoint(PlanetModel.WGS84, 0.1719054969347345, 0.9024290407832926));
 pointList.add(new GeoPoint(PlanetModel.WGS84, 0.08180947807010808, 1.0107147265848113));
 pointList.add(new GeoPoint(PlanetModel.WGS84, 1.0774842300167298, -0.11534121538553185));
-
+
 final GeoPolygon pSanity = GeoPolygonFactory.makeGeoPolygon(PlanetModel.WGS84, pointList);
-
+
 assertTrue(pSanity.isWithin(testPoint) == testPointInSet);
-
+
 final List<List<GeoPoint>> shapeList = new ArrayList<>();
 shapeList.add(pointList);
- final GeoPolygon p = new GeoComplexPolygon(PlanetModel.WGS84, shapeList, testPoint, testPointInSet);
-
- //System.err.println(p);
+ final GeoPolygon p =
+ new GeoComplexPolygon(PlanetModel.WGS84, shapeList, testPoint, testPointInSet);
+
+ // System.err.println(p);
 /*
- [junit4] 2> GeoComplexPolygon: {planetmodel=PlanetModel.WGS84, number of shapes=1, address=dcf3e99,
- testPoint=[lat=0.03170690566178683, lon=1.0862414976732029([X=0.46609969117964506, Y=0.8854242006628825, Z=0.0317369552646047])],
- testPointInSet=false,
- shapes={ {
- [lat=1.0774842300167298, lon=-0.11534121538553185([X=0.46969930266058374, Y=-0.054417217622152375, Z=0.8794587218580684])],
- [lat=0.05101544777239065, lon=1.031558236908661([X=0.5133835679471972, Y=0.8579350866926241, Z=0.051049928818862174])],
- [lat=-0.011222928649880962, lon=1.5851249038356199([X=-0.01434320835886277, Y=1.0009526216234983, Z=-0.011235244842183226])],
- [lat=-0.02571365137215876, lon=0.5627875521419741([X=0.8464356149277266, Y=0.5339650936800929, Z=-0.025739527171261035])],
- [lat=0.03833766792865358, lon=1.0082901344798614([X=0.5335096521470836, Y=0.8462411929752105, Z=0.03837097111317845])],
- [lat=0.1719054969347345, lon=0.9024290407832926([X=0.6111941952395734, Y=0.7740553755547761, Z=0.17123457719021212])]}}
- [lat=0.08180947807010808, lon=1.0107147265848113([X=0.5300590148023426, Y=0.8453039531721928, Z=0.08180784289673602])],
- */
+ [junit4] 2> GeoComplexPolygon: {planetmodel=PlanetModel.WGS84, number of shapes=1, address=dcf3e99,
+ testPoint=[lat=0.03170690566178683, lon=1.0862414976732029([X=0.46609969117964506, Y=0.8854242006628825, Z=0.0317369552646047])],
+ testPointInSet=false,
+ shapes={ {
+ [lat=1.0774842300167298, lon=-0.11534121538553185([X=0.46969930266058374, Y=-0.054417217622152375, Z=0.8794587218580684])],
+ [lat=0.05101544777239065, lon=1.031558236908661([X=0.5133835679471972, Y=0.8579350866926241, Z=0.051049928818862174])],
+ [lat=-0.011222928649880962, lon=1.5851249038356199([X=-0.01434320835886277, Y=1.0009526216234983, Z=-0.011235244842183226])],
+ [lat=-0.02571365137215876, lon=0.5627875521419741([X=0.8464356149277266, Y=0.5339650936800929, Z=-0.025739527171261035])],
+ [lat=0.03833766792865358, lon=1.0082901344798614([X=0.5335096521470836, Y=0.8462411929752105, Z=0.03837097111317845])],
+ [lat=0.1719054969347345, lon=0.9024290407832926([X=0.6111941952395734, Y=0.7740553755547761, Z=0.17123457719021212])]}}
+ [lat=0.08180947807010808, lon=1.0107147265848113([X=0.5300590148023426, Y=0.8453039531721928, Z=0.08180784289673602])],
+ */
 final
XYZBounds referenceBounds = new XYZBounds(); pSanity.getBounds(referenceBounds); - + final XYZBounds actualBounds = new XYZBounds(); p.getBounds(actualBounds); - + assertEquals(referenceBounds.getMinimumX(), actualBounds.getMinimumX(), 0.0000001); assertEquals(referenceBounds.getMaximumX(), actualBounds.getMaximumX(), 0.0000001); assertEquals(referenceBounds.getMinimumY(), actualBounds.getMinimumY(), 0.0000001); @@ -905,18 +1009,23 @@ shape: assertEquals(referenceBounds.getMinimumZ(), actualBounds.getMinimumZ(), 0.0000001); assertEquals(referenceBounds.getMaximumZ(), actualBounds.getMaximumZ(), 0.0000001); - final XYZSolid solid = XYZSolidFactory.makeXYZSolid(PlanetModel.WGS84, - actualBounds.getMinimumX(), actualBounds.getMaximumX(), - actualBounds.getMinimumY(), actualBounds.getMaximumY(), - actualBounds.getMinimumZ(), actualBounds.getMaximumZ()); + final XYZSolid solid = + XYZSolidFactory.makeXYZSolid( + PlanetModel.WGS84, + actualBounds.getMinimumX(), + actualBounds.getMaximumX(), + actualBounds.getMinimumY(), + actualBounds.getMaximumY(), + actualBounds.getMinimumZ(), + actualBounds.getMaximumZ()); + + final GeoPoint checkPoint = + new GeoPoint(PlanetModel.WGS84, -0.7073124559987376, 2.2509085326629887); - final GeoPoint checkPoint = new GeoPoint(PlanetModel.WGS84, -0.7073124559987376, 2.2509085326629887); - // Given the choice of test point, does this all make sense? assertTrue(pSanity.isWithin(checkPoint) == p.isWithin(checkPoint)); assertTrue(p.isWithin(checkPoint)); assertTrue(solid.isWithin(checkPoint)); - } @Test @@ -928,7 +1037,7 @@ shape: poly2List.add(new GeoPoint(PlanetModel.WGS84, -0.5703530503197992, -3.141592653589793)); final BitSet poly2Bitset = new BitSet(); poly2Bitset.set(1); - + boolean result; try { final GeoConvexPolygon poly2 = new GeoConvexPolygon(PlanetModel.WGS84, poly2List); @@ -936,44 +1045,45 @@ shape: } catch (IllegalArgumentException e) { result = false; } - + assertTrue(!result); } @Test public void testPolygonFailureCase2() { /* - [junit4] 1> shape=GeoCompositeMembershipShape: {[GeoConvexPolygon: {planetmodel=PlanetModel.WGS84, points=[ - [lat=1.079437865394857, lon=-1.720224083538152E-11([X=0.47111944719262044, Y=-8.104310192839264E-12, Z=0.8803759987367299])], - [lat=-1.5707963267948966, lon=0.017453291479645996([X=6.108601474971234E-17, Y=1.066260290095308E-18, Z=-0.997762292022105])], - [lat=0.017453291479645996, lon=2.4457272005608357E-47([X=1.0009653513901666, Y=2.448088186713865E-47, Z=0.01747191415779267])]], internalEdges={2}}, - GeoConvexPolygon: {planetmodel=PlanetModel.WGS84, points=[ - [lat=1.079437865394857, lon=-1.720224083538152E-11([X=0.47111944719262044, Y=-8.104310192839264E-12, Z=0.8803759987367299])], - [lat=0.017453291479645996, lon=2.4457272005608357E-47([X=1.0009653513901666, Y=2.448088186713865E-47, Z=0.01747191415779267])], - [lat=0.0884233366943164, lon=0.4323234231678824([X=0.9054355304510789, Y=0.4178006803188124, Z=0.08840463683725623])]], internalEdges={0}}]} - */ + [junit4] 1> shape=GeoCompositeMembershipShape: {[GeoConvexPolygon: {planetmodel=PlanetModel.WGS84, points=[ + [lat=1.079437865394857, lon=-1.720224083538152E-11([X=0.47111944719262044, Y=-8.104310192839264E-12, Z=0.8803759987367299])], + [lat=-1.5707963267948966, lon=0.017453291479645996([X=6.108601474971234E-17, Y=1.066260290095308E-18, Z=-0.997762292022105])], + [lat=0.017453291479645996, lon=2.4457272005608357E-47([X=1.0009653513901666, Y=2.448088186713865E-47, Z=0.01747191415779267])]], internalEdges={2}}, + GeoConvexPolygon: {planetmodel=PlanetModel.WGS84, 
points=[ + [lat=1.079437865394857, lon=-1.720224083538152E-11([X=0.47111944719262044, Y=-8.104310192839264E-12, Z=0.8803759987367299])], + [lat=0.017453291479645996, lon=2.4457272005608357E-47([X=1.0009653513901666, Y=2.448088186713865E-47, Z=0.01747191415779267])], + [lat=0.0884233366943164, lon=0.4323234231678824([X=0.9054355304510789, Y=0.4178006803188124, Z=0.08840463683725623])]], internalEdges={0}}]} + */ final List poly1List = new ArrayList<>(); poly1List.add(new GeoPoint(PlanetModel.WGS84, 1.079437865394857, -1.720224083538152E-11)); poly1List.add(new GeoPoint(PlanetModel.WGS84, -1.5707963267948966, 0.017453291479645996)); poly1List.add(new GeoPoint(PlanetModel.WGS84, 0.017453291479645996, 2.4457272005608357E-47)); - final GeoPolygonFactory.PolygonDescription pd = new GeoPolygonFactory.PolygonDescription(poly1List); - + final GeoPolygonFactory.PolygonDescription pd = + new GeoPolygonFactory.PolygonDescription(poly1List); + final GeoPolygon poly1 = GeoPolygonFactory.makeGeoPolygon(PlanetModel.WGS84, pd); - + /* - [junit4] 1> unquantized=[lat=-1.5316724989005415, lon=3.141592653589793([X=-0.03902652216795768, Y=4.779370545484258E-18, Z=-0.9970038705813589])] - [junit4] 1> quantized=[X=-0.03902652216283731, Y=2.3309121299774915E-10, Z=-0.9970038706538652] - */ - + [junit4] 1> unquantized=[lat=-1.5316724989005415, lon=3.141592653589793([X=-0.03902652216795768, Y=4.779370545484258E-18, Z=-0.9970038705813589])] + [junit4] 1> quantized=[X=-0.03902652216283731, Y=2.3309121299774915E-10, Z=-0.9970038706538652] + */ + final GeoPoint point = new GeoPoint(PlanetModel.WGS84, -1.5316724989005415, 3.141592653589793); final XYZBounds actualBounds1 = new XYZBounds(); poly1.getBounds(actualBounds1); - + final XYZSolid solid = XYZSolidFactory.makeXYZSolid(PlanetModel.WGS84, actualBounds1); - assertTrue(poly1.isWithin(point)?solid.isWithin(point):true); + assertTrue(poly1.isWithin(point) ? solid.isWithin(point) : true); } @Test @@ -983,9 +1093,11 @@ shape: points.add(new GeoPoint(PlanetModel.SPHERE, 0.0, -0.6)); points.add(new GeoPoint(PlanetModel.SPHERE, 0.1, -0.5)); points.add(new GeoPoint(PlanetModel.SPHERE, 0.0, -0.4)); - GeoPolygon polygon = ((GeoCompositePolygon)GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points)).getShape(0); - GeoPolygon polygonConcave = GeoPolygonFactory.makeGeoConcavePolygon(PlanetModel.SPHERE,points); - assertEquals(polygon,polygonConcave); + GeoPolygon polygon = + ((GeoCompositePolygon) GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points)) + .getShape(0); + GeoPolygon polygonConcave = GeoPolygonFactory.makeGeoConcavePolygon(PlanetModel.SPHERE, points); + assertEquals(polygon, polygonConcave); } @Test @@ -1000,17 +1112,22 @@ shape: hole_points.add(new GeoPoint(PlanetModel.SPHERE, 0.0, -0.6)); hole_points.add(new GeoPoint(PlanetModel.SPHERE, 0.1, -0.5)); hole_points.add(new GeoPoint(PlanetModel.SPHERE, 0.0, -0.4)); - - GeoPolygonFactory.PolygonDescription holeDescription = new GeoPolygonFactory.PolygonDescription(hole_points); + + GeoPolygonFactory.PolygonDescription holeDescription = + new GeoPolygonFactory.PolygonDescription(hole_points); List holes = new ArrayList<>(1); holes.add(holeDescription); - GeoPolygonFactory.PolygonDescription polygonDescription = new GeoPolygonFactory.PolygonDescription(points, holes); - - // Create two polygons -- one simple, the other complex. Both have holes. Compare their behavior. 
- GeoPolygon holeSimplePolygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE,polygonDescription); + GeoPolygonFactory.PolygonDescription polygonDescription = + new GeoPolygonFactory.PolygonDescription(points, holes); + + // Create two polygons -- one simple, the other complex. Both have holes. Compare their + // behavior. + GeoPolygon holeSimplePolygon = + GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, polygonDescription); List polys = new ArrayList<>(1); polys.add(polygonDescription); - GeoPolygon holeComplexPolygon = GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.SPHERE,polys); + GeoPolygon holeComplexPolygon = + GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.SPHERE, polys); // Sample some nearby points outside GeoPoint gp; @@ -1029,7 +1146,6 @@ shape: assertEquals(holeSimplePolygon.isWithin(gp), holeComplexPolygon.isWithin(gp)); gp = new GeoPoint(PlanetModel.SPHERE, 0.0, Math.PI); assertEquals(holeSimplePolygon.isWithin(gp), holeComplexPolygon.isWithin(gp)); - } @Test @@ -1039,9 +1155,11 @@ shape: points.add(new GeoPoint(PlanetModel.SPHERE, 0.0, 0.5)); points.add(new GeoPoint(PlanetModel.SPHERE, 0.5, 0.5)); points.add(new GeoPoint(PlanetModel.SPHERE, 0.5, 0)); - GeoPolygon polygon = ((GeoCompositePolygon)GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points)).getShape(0); - GeoPolygon polygon2 = GeoPolygonFactory.makeGeoConvexPolygon(PlanetModel.SPHERE,points); - assertEquals(polygon,polygon2); + GeoPolygon polygon = + ((GeoCompositePolygon) GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points)) + .getShape(0); + GeoPolygon polygon2 = GeoPolygonFactory.makeGeoConvexPolygon(PlanetModel.SPHERE, points); + assertEquals(polygon, polygon2); } @Test @@ -1056,149 +1174,339 @@ shape: hole_points.add(new GeoPoint(PlanetModel.SPHERE, 0.0, -0.6)); hole_points.add(new GeoPoint(PlanetModel.SPHERE, 0.1, -0.5)); hole_points.add(new GeoPoint(PlanetModel.SPHERE, 0.0, -0.4)); - GeoPolygon hole = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE,hole_points); + GeoPolygon hole = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, hole_points); - GeoPolygon polygon = ((GeoCompositePolygon)GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points,Collections.singletonList(hole))).getShape(0); - GeoPolygon polygon2 = GeoPolygonFactory.makeGeoConvexPolygon(PlanetModel.SPHERE,points,Collections.singletonList(hole)); - assertEquals(polygon,polygon2); + GeoPolygon polygon = + ((GeoCompositePolygon) + GeoPolygonFactory.makeGeoPolygon( + PlanetModel.SPHERE, points, Collections.singletonList(hole))) + .getShape(0); + GeoPolygon polygon2 = + GeoPolygonFactory.makeGeoConvexPolygon( + PlanetModel.SPHERE, points, Collections.singletonList(hole)); + assertEquals(polygon, polygon2); } @Test public void testLUCENE8133() { - GeoPoint point1 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-23.434456), Geo3DUtil.fromDegrees(14.459204)); - GeoPoint point2 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-23.43394), Geo3DUtil.fromDegrees(14.459206)); - GeoPoint check = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-23.434067), Geo3DUtil.fromDegrees(14.458927)); + GeoPoint point1 = + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-23.434456), + Geo3DUtil.fromDegrees(14.459204)); + GeoPoint point2 = + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-23.43394), Geo3DUtil.fromDegrees(14.459206)); + GeoPoint check = + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-23.434067), + Geo3DUtil.fromDegrees(14.458927)); if 
(!point1.isIdentical(point2) && !check.isIdentical(point1) && !check.isIdentical(point2)) { SidedPlane plane = new SidedPlane(check, point1, point2); assertTrue(plane.isWithin(check)); assertTrue(plane.isWithin(point1)); assertTrue(plane.isWithin(point2)); - //POLYGON((14.459204 -23.434456, 14.459206 -23.43394,14.458647 -23.434196, 14.458646 -23.434452,14.459204 -23.434456)) + // POLYGON((14.459204 -23.434456, 14.459206 -23.43394,14.458647 -23.434196, 14.458646 + // -23.434452,14.459204 -23.434456)) List points = new ArrayList<>(); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-23.434456), Geo3DUtil.fromDegrees(14.459204))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees( -23.43394), Geo3DUtil.fromDegrees(14.459206))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-23.434196), Geo3DUtil.fromDegrees(14.458647))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-23.434452), Geo3DUtil.fromDegrees(14.458646))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-23.434456), + Geo3DUtil.fromDegrees(14.459204))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-23.43394), + Geo3DUtil.fromDegrees(14.459206))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-23.434196), + Geo3DUtil.fromDegrees(14.458647))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-23.434452), + Geo3DUtil.fromDegrees(14.458646))); GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points); } } @Test public void testLUCENE8140() throws Exception { - //POINT(15.426026 68.35078) is coplanar - //"POLYGON((15.426411 68.35069,15.4261 68.35078,15.426026 68.35078,15.425868 68.35078,15.425745 68.350746,15.426411 68.35069))"; + // POINT(15.426026 68.35078) is coplanar + // "POLYGON((15.426411 68.35069,15.4261 68.35078,15.426026 68.35078,15.425868 68.35078,15.425745 + // 68.350746,15.426411 68.35069))"; List points = new ArrayList<>(); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(68.35069), Geo3DUtil.fromDegrees(15.426411))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(68.35078), Geo3DUtil.fromDegrees(15.4261))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(68.35078), Geo3DUtil.fromDegrees(15.426026))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(68.35078), Geo3DUtil.fromDegrees(15.425868))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(68.350746), Geo3DUtil.fromDegrees(15.426411))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(68.35069), Geo3DUtil.fromDegrees(15.426411))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(68.35078), Geo3DUtil.fromDegrees(15.4261))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(68.35078), Geo3DUtil.fromDegrees(15.426026))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(68.35078), Geo3DUtil.fromDegrees(15.425868))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(68.350746), + Geo3DUtil.fromDegrees(15.426411))); assertTrue(GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points) != null); } - @Test public void testLUCENE8211() { - //We need to handle the situation where the check point is parallel to - //the test point. + // We need to handle the situation where the check point is parallel to + // the test point. 
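[Editor's sketch; not part of the commit.] "Parallel" here means the two position vectors are scalar multiples of one another: (x, y, z) and (-x, -y, -z) describe the same line through the planet's center but opposite surface points, so a membership test must not confuse them. A tiny illustration using the same createSurfacePoint call the test below exercises (coordinates illustrative):

    import org.apache.lucene.spatial3d.geom.GeoPoint;
    import org.apache.lucene.spatial3d.geom.PlanetModel;

    public class ParallelPointSketch {
      public static void main(String[] args) {
        final GeoPoint inside = new GeoPoint(PlanetModel.SPHERE, 0.5, 0.5);
        // Same direction as 'inside': projects back onto 'inside' itself.
        final GeoPoint same = PlanetModel.SPHERE.createSurfacePoint(inside.x, inside.y, inside.z);
        // Opposite direction: the antipode, which must not be reported as within.
        final GeoPoint anti = PlanetModel.SPHERE.createSurfacePoint(-inside.x, -inside.y, -inside.z);
        System.out.println(same + " vs antipode " + anti);
      }
    }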
List points = new ArrayList<>(); points.add(new GeoPoint(PlanetModel.SPHERE, 0, 0)); points.add(new GeoPoint(PlanetModel.SPHERE, 0, 1)); points.add(new GeoPoint(PlanetModel.SPHERE, 1, 1)); - points.add(new GeoPoint(PlanetModel.SPHERE,1, 0)); + points.add(new GeoPoint(PlanetModel.SPHERE, 1, 0)); GeoPoint testPoint = new GeoPoint(PlanetModel.SPHERE, 0.5, 0.5); final List> pointsList = new ArrayList<>(); pointsList.add(points); GeoPolygon polygon = new GeoComplexPolygon(PlanetModel.SPHERE, pointsList, testPoint, true); - assertTrue(polygon.isWithin(PlanetModel.SPHERE.createSurfacePoint(testPoint.x, testPoint.y, testPoint.z))); - assertFalse(polygon.isWithin(PlanetModel.SPHERE.createSurfacePoint(-testPoint.x, -testPoint.y, -testPoint.z))); - //special cases - assertFalse(polygon.isWithin(PlanetModel.SPHERE.createSurfacePoint(testPoint.x, -testPoint.y, -testPoint.z))); - assertFalse(polygon.isWithin(PlanetModel.SPHERE.createSurfacePoint(-testPoint.x, testPoint.y, -testPoint.z))); - assertFalse(polygon.isWithin(PlanetModel.SPHERE.createSurfacePoint(-testPoint.x, -testPoint.y, testPoint.z))); + assertTrue( + polygon.isWithin( + PlanetModel.SPHERE.createSurfacePoint(testPoint.x, testPoint.y, testPoint.z))); + assertFalse( + polygon.isWithin( + PlanetModel.SPHERE.createSurfacePoint(-testPoint.x, -testPoint.y, -testPoint.z))); + // special cases + assertFalse( + polygon.isWithin( + PlanetModel.SPHERE.createSurfacePoint(testPoint.x, -testPoint.y, -testPoint.z))); + assertFalse( + polygon.isWithin( + PlanetModel.SPHERE.createSurfacePoint(-testPoint.x, testPoint.y, -testPoint.z))); + assertFalse( + polygon.isWithin( + PlanetModel.SPHERE.createSurfacePoint(-testPoint.x, -testPoint.y, testPoint.z))); } @Test public void testCoplanarityTileConvex() throws Exception { - // This test has been disabled because it is possible that the polygon specified actually intersects itself. - //POLYGON((24.39398 65.77519,24.3941 65.77498,24.394024 65.77497,24.393976 65.77495,24.393963 65.77493,24.394068 65.774925,24.394156 65.77495,24.394201 65.77495,24.394234 65.77496,24.394266 65.77498,24.394318 65.77498,24.39434 65.774956,24.394377 65.77495,24.394451 65.77494,24.394476 65.77495,24.394457 65.77498,24.39398 65.77519))" + // This test has been disabled because it is possible that the polygon specified actually + // intersects itself. 
+ // POLYGON((24.39398 65.77519,24.3941 65.77498,24.394024 65.77497,24.393976 65.77495,24.393963 + // 65.77493,24.394068 65.774925,24.394156 65.77495,24.394201 65.77495,24.394234 + // 65.77496,24.394266 65.77498,24.394318 65.77498,24.39434 65.774956,24.394377 + // 65.77495,24.394451 65.77494,24.394476 65.77495,24.394457 65.77498,24.39398 65.77519))" List points = new ArrayList<>(); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(65.77519), Geo3DUtil.fromDegrees(24.39398))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(65.77498), Geo3DUtil.fromDegrees(24.3941))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(65.77497), Geo3DUtil.fromDegrees(24.394024))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(65.77495), Geo3DUtil.fromDegrees(24.393976))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(65.77493), Geo3DUtil.fromDegrees(24.393963))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(65.774925), Geo3DUtil.fromDegrees(24.394068))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(65.77495), Geo3DUtil.fromDegrees(24.394156))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(65.77495), Geo3DUtil.fromDegrees(24.394201))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(65.77496), Geo3DUtil.fromDegrees(24.394234))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(65.77498), Geo3DUtil.fromDegrees(24.394266))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(65.77498), Geo3DUtil.fromDegrees(24.394318))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(65.774956), Geo3DUtil.fromDegrees(24.39434))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(65.77495), Geo3DUtil.fromDegrees(24.394377))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(65.77494), Geo3DUtil.fromDegrees(24.394451))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(65.77495), Geo3DUtil.fromDegrees(24.394476))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(65.77498), Geo3DUtil.fromDegrees(24.394457))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(65.77519), Geo3DUtil.fromDegrees(24.39398))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(65.77498), Geo3DUtil.fromDegrees(24.3941))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(65.77497), Geo3DUtil.fromDegrees(24.394024))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(65.77495), Geo3DUtil.fromDegrees(24.393976))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(65.77493), Geo3DUtil.fromDegrees(24.393963))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(65.774925), + Geo3DUtil.fromDegrees(24.394068))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(65.77495), Geo3DUtil.fromDegrees(24.394156))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(65.77495), Geo3DUtil.fromDegrees(24.394201))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(65.77496), Geo3DUtil.fromDegrees(24.394234))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(65.77498), Geo3DUtil.fromDegrees(24.394266))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(65.77498), 
Geo3DUtil.fromDegrees(24.394318))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(65.774956), Geo3DUtil.fromDegrees(24.39434))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(65.77495), Geo3DUtil.fromDegrees(24.394377))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(65.77494), Geo3DUtil.fromDegrees(24.394451))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(65.77495), Geo3DUtil.fromDegrees(24.394476))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(65.77498), Geo3DUtil.fromDegrees(24.394457))); GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points); assertTrue(polygon != null); } @Test public void testCoplanarityConcave() throws Exception { - //POLYGON((-52.18851 64.53777,-52.18853 64.53828,-52.18675 64.53829,-52.18676 64.53855,-52.18736 64.53855,-52.18737 64.53881,-52.18677 64.53881,-52.18683 64.54009,-52.18919 64.53981,-52.18916 64.53905,-52.19093 64.53878,-52.19148 64.53775,-52.18851 64.53777)) + // POLYGON((-52.18851 64.53777,-52.18853 64.53828,-52.18675 64.53829,-52.18676 + // 64.53855,-52.18736 64.53855,-52.18737 64.53881,-52.18677 64.53881,-52.18683 + // 64.54009,-52.18919 64.53981,-52.18916 64.53905,-52.19093 64.53878,-52.19148 + // 64.53775,-52.18851 64.53777)) List points = new ArrayList<>(); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(64.53777), Geo3DUtil.fromDegrees(-52.18851))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(64.53828), Geo3DUtil.fromDegrees(-52.18853))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(64.53829), Geo3DUtil.fromDegrees(-52.18675))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(64.53855), Geo3DUtil.fromDegrees(-52.18676))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(64.53855), Geo3DUtil.fromDegrees(-52.18736))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(64.53881), Geo3DUtil.fromDegrees(-52.18737))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(64.53881), Geo3DUtil.fromDegrees(-52.18677))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(64.54009), Geo3DUtil.fromDegrees(-52.18683))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(64.53981), Geo3DUtil.fromDegrees(-52.18919))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(64.53905), Geo3DUtil.fromDegrees(-52.18916))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(64.53878), Geo3DUtil.fromDegrees(-52.19093))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(64.53775), Geo3DUtil.fromDegrees(-52.19148))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(64.53777), Geo3DUtil.fromDegrees(-52.18851))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(64.53828), Geo3DUtil.fromDegrees(-52.18853))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(64.53829), Geo3DUtil.fromDegrees(-52.18675))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(64.53855), Geo3DUtil.fromDegrees(-52.18676))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(64.53855), Geo3DUtil.fromDegrees(-52.18736))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(64.53881), Geo3DUtil.fromDegrees(-52.18737))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, 
Geo3DUtil.fromDegrees(64.53881), Geo3DUtil.fromDegrees(-52.18677))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(64.54009), Geo3DUtil.fromDegrees(-52.18683))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(64.53981), Geo3DUtil.fromDegrees(-52.18919))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(64.53905), Geo3DUtil.fromDegrees(-52.18916))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(64.53878), Geo3DUtil.fromDegrees(-52.19093))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(64.53775), Geo3DUtil.fromDegrees(-52.19148))); GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points); Collections.reverse(points); - polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points); + polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points); } @Test public void testCoplanarityConvex2() throws Exception { - //POLYGON((-3.488658 50.45564,-3.4898987 50.455627,-3.489865 50.455585,-3.489833 50.45551,-3.489808 50.455433,-3.489806 50.455406,-3.4898643 50.45525,-3.4892037 50.455162,-3.4891756 50.455166,-3.4891088 50.455147,-3.4890108 50.455166,-3.4889853 50.455166,-3.48895 50.45516,-3.488912 50.455166,-3.4889014 50.455177,-3.488893 50.455185,-3.488927 50.45523,-3.4890666 50.455456,-3.48905 50.455467,-3.488658 50.45564)) + // POLYGON((-3.488658 50.45564,-3.4898987 50.455627,-3.489865 50.455585,-3.489833 + // 50.45551,-3.489808 50.455433,-3.489806 50.455406,-3.4898643 50.45525,-3.4892037 + // 50.455162,-3.4891756 50.455166,-3.4891088 50.455147,-3.4890108 50.455166,-3.4889853 + // 50.455166,-3.48895 50.45516,-3.488912 50.455166,-3.4889014 50.455177,-3.488893 + // 50.455185,-3.488927 50.45523,-3.4890666 50.455456,-3.48905 50.455467,-3.488658 50.45564)) List points = new ArrayList<>(); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(50.45564), Geo3DUtil.fromDegrees(-3.488658))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(50.455627), Geo3DUtil.fromDegrees(-3.4898987))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(50.455585), Geo3DUtil.fromDegrees(-3.489865))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(50.45551), Geo3DUtil.fromDegrees(-3.489833))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(50.455433), Geo3DUtil.fromDegrees(-3.489808))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(50.455406), Geo3DUtil.fromDegrees(-3.489806))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(50.45525), Geo3DUtil.fromDegrees(-3.4898643))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(50.455162), Geo3DUtil.fromDegrees(-3.4892037))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(50.455166), Geo3DUtil.fromDegrees(-3.4891756))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(50.455147), Geo3DUtil.fromDegrees(-3.4891088))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(50.455166), Geo3DUtil.fromDegrees(-3.4890108))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(50.455166), Geo3DUtil.fromDegrees(-3.4889853))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(50.45516), Geo3DUtil.fromDegrees(-3.48895))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(50.455166), Geo3DUtil.fromDegrees(-3.488912))); - points.add(new 
GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(50.455177), Geo3DUtil.fromDegrees(-3.4889014))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(50.455185), Geo3DUtil.fromDegrees( -3.488893))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(50.45523), Geo3DUtil.fromDegrees(-3.488927))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(50.455456), Geo3DUtil.fromDegrees(-3.4890666))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(50.455467), Geo3DUtil.fromDegrees( -3.48905))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(50.45564), Geo3DUtil.fromDegrees(-3.488658))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(50.455627), + Geo3DUtil.fromDegrees(-3.4898987))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(50.455585), + Geo3DUtil.fromDegrees(-3.489865))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(50.45551), Geo3DUtil.fromDegrees(-3.489833))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(50.455433), + Geo3DUtil.fromDegrees(-3.489808))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(50.455406), + Geo3DUtil.fromDegrees(-3.489806))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(50.45525), + Geo3DUtil.fromDegrees(-3.4898643))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(50.455162), + Geo3DUtil.fromDegrees(-3.4892037))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(50.455166), + Geo3DUtil.fromDegrees(-3.4891756))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(50.455147), + Geo3DUtil.fromDegrees(-3.4891088))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(50.455166), + Geo3DUtil.fromDegrees(-3.4890108))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(50.455166), + Geo3DUtil.fromDegrees(-3.4889853))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(50.45516), Geo3DUtil.fromDegrees(-3.48895))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(50.455166), + Geo3DUtil.fromDegrees(-3.488912))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(50.455177), + Geo3DUtil.fromDegrees(-3.4889014))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(50.455185), + Geo3DUtil.fromDegrees(-3.488893))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(50.45523), Geo3DUtil.fromDegrees(-3.488927))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(50.455456), + Geo3DUtil.fromDegrees(-3.4890666))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(50.455467), Geo3DUtil.fromDegrees(-3.48905))); GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points); Collections.reverse(points); - polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points); + polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points); } - + /* - [lat=-0.63542308910253, lon=0.9853722928232957([X=0.4446759777403525, Y=0.6707549854468698, Z=-0.5934780737681111])], - [lat=0.0, lon=0.0([X=1.0011188539924791, Y=0.0, Z=0.0])], - [lat=0.45435018176633574, lon=3.141592653589793([X=-0.8989684544372841, Y=1.1009188402610632E-16, Z=0.4390846549572752])], - [lat=-0.375870856827283, 
lon=2.9129132647718414([X=-0.9065744420970767, Y=0.21100590938346708, Z=-0.36732668582405886])], + [lat=-0.63542308910253, lon=0.9853722928232957([X=0.4446759777403525, Y=0.6707549854468698, Z=-0.5934780737681111])], + [lat=0.0, lon=0.0([X=1.0011188539924791, Y=0.0, Z=0.0])], + [lat=0.45435018176633574, lon=3.141592653589793([X=-0.8989684544372841, Y=1.1009188402610632E-16, Z=0.4390846549572752])], + [lat=-0.375870856827283, lon=2.9129132647718414([X=-0.9065744420970767, Y=0.21100590938346708, Z=-0.36732668582405886])], [lat=-1.2205765069413237, lon=3.141592653589793([X=-0.3424714964202101, Y=4.194066218902145E-17, Z=-0.9375649457139603])]}} - + [junit4] 1> unquantized=[lat=-3.1780051348770987E-74, lon=-3.032608859187692([X=-0.9951793580358298, Y=-0.1088898762907205, Z=-3.181560858610375E-74])] [junit4] 1> quantized=[X=-0.9951793580415914, Y=-0.10888987641797832, Z=-2.3309121299774915E-10] */ @@ -1211,32 +1519,34 @@ shape: points.add(new GeoPoint(PlanetModel.WGS84, -0.375870856827283, 2.9129132647718414)); points.add(new GeoPoint(PlanetModel.WGS84, -1.2205765069413237, 3.141592653589793)); GeoPolygonFactory.PolygonDescription pd = new GeoPolygonFactory.PolygonDescription(points); - + /* for (int i = 0; i < points.size(); i++) { System.out.println("Point "+i+": "+points.get(i)); } */ - final GeoPoint unquantized = new GeoPoint(PlanetModel.WGS84, -3.1780051348770987E-74, -3.032608859187692); - //final GeoPoint quantized = new GeoPoint(-0.9951793580415914, -0.10888987641797832, -2.3309121299774915E-10); - - // Construct a standard polygon first to see what that does. This winds up being a large polygon under the covers. + final GeoPoint unquantized = + new GeoPoint(PlanetModel.WGS84, -3.1780051348770987E-74, -3.032608859187692); + // final GeoPoint quantized = new GeoPoint(-0.9951793580415914, -0.10888987641797832, + // -2.3309121299774915E-10); + + // Construct a standard polygon first to see what that does. This winds up being a large + // polygon under the covers. GeoPolygon standard = GeoPolygonFactory.makeGeoPolygon(PlanetModel.WGS84, pd); - + // This should be in-set too, but isn't!! assertTrue(standard.isWithin(PlanetModel.WGS84.MIN_X_POLE)); - + final XYZBounds standardBounds = new XYZBounds(); standard.getBounds(standardBounds); final XYZSolid standardSolid = XYZSolidFactory.makeXYZSolid(PlanetModel.WGS84, standardBounds); // If within shape, should be within bounds - //assertTrue(standard.isWithin(quantized)?standardSolid.isWithin(quantized):true); - assertTrue(standard.isWithin(unquantized)?standardSolid.isWithin(unquantized):true); - + // assertTrue(standard.isWithin(quantized)?standardSolid.isWithin(quantized):true); + assertTrue(standard.isWithin(unquantized) ? 
standardSolid.isWithin(unquantized) : true); } - + /* [junit4] 1> doc=754 is contained by shape but is outside the returned XYZBounds [junit4] 1> unquantized=[lat=2.4043303687704734E-204, lon=3.1342447995980507([X=-1.0010918284309325, Y=0.007356008974104805, Z=2.4070204634028112E-204])] @@ -1245,13 +1555,13 @@ shape: [junit4] 1> doc=3728 is contained by shape but is outside the returned XYZBounds [junit4] 1> unquantized=[lat=2.4457272005608357E-47, lon=-3.1404077424936307([X=-1.001118151199965, Y=-0.0011862365610909341, Z=2.448463612203698E-47])] [junit4] 1> quantized=[X=-1.0011181510675629, Y=-0.001186236379718708, Z=2.3309121299774915E-10] - - [junit4] 1> shape=GeoComplexPolygon: {planetmodel=PlanetModel.WGS84, number of shapes=1, address=7969cab3, + + [junit4] 1> shape=GeoComplexPolygon: {planetmodel=PlanetModel.WGS84, number of shapes=1, address=7969cab3, testPoint=[X=-0.07416172733314662, Y=0.5686488061136892, Z=0.8178445379402641], testPointInSet=true, shapes={ { - [lat=-1.5707963267948966, lon=-1.0755217966112058([X=2.903696886845155E-17, Y=-5.375400029710238E-17, Z=-0.997762292022105])], - [lat=-1.327365682666958, lon=-2.9674513704178316([X=-0.23690293696956322, Y=-0.04167672037374933, Z=-0.9685334156912658])], - [lat=0.32288591161895097, lon=3.141592653589793([X=-0.9490627533610154, Y=1.1622666630935417E-16, Z=0.3175519551883462])], - [lat=0.0, lon=0.0([X=1.0011188539924791, Y=0.0, Z=0.0])], + [lat=-1.5707963267948966, lon=-1.0755217966112058([X=2.903696886845155E-17, Y=-5.375400029710238E-17, Z=-0.997762292022105])], + [lat=-1.327365682666958, lon=-2.9674513704178316([X=-0.23690293696956322, Y=-0.04167672037374933, Z=-0.9685334156912658])], + [lat=0.32288591161895097, lon=3.141592653589793([X=-0.9490627533610154, Y=1.1622666630935417E-16, Z=0.3175519551883462])], + [lat=0.0, lon=0.0([X=1.0011188539924791, Y=0.0, Z=0.0])], [lat=0.2839194570254642, lon=-1.2434404554202965([X=0.30893121415043073, Y=-0.9097632721627391, Z=0.2803596238536593])]}} */ @Test @@ -1263,128 +1573,145 @@ shape: points.add(new GeoPoint(PlanetModel.WGS84, 0.0, 0.0)); points.add(new GeoPoint(PlanetModel.WGS84, 0.2839194570254642, -1.2434404554202965)); GeoPolygonFactory.PolygonDescription pd = new GeoPolygonFactory.PolygonDescription(points); - - final GeoPoint unquantized = new GeoPoint(PlanetModel.WGS84, 2.4457272005608357E-47, -3.1404077424936307); - final GeoPoint quantized = new GeoPoint(-1.0011181510675629, -0.001186236379718708, 2.3309121299774915E-10); - + + final GeoPoint unquantized = + new GeoPoint(PlanetModel.WGS84, 2.4457272005608357E-47, -3.1404077424936307); + final GeoPoint quantized = + new GeoPoint(-1.0011181510675629, -0.001186236379718708, 2.3309121299774915E-10); + // Is the north pole in set, or out of set? final GeoPoint northPole = new GeoPoint(PlanetModel.WGS84, Math.PI * 0.5, 0.0); final GeoPoint negativeX = new GeoPoint(PlanetModel.WGS84, 0.0, Math.PI); final GeoPoint negativeY = new GeoPoint(PlanetModel.WGS84, 0.0, -Math.PI * 0.5); final GeoPoint positiveY = new GeoPoint(PlanetModel.WGS84, 0.0, Math.PI * 0.5); - final GeoPoint testPoint = new GeoPoint(-0.074161727332972, 0.5686488061123504, 0.8178445379383386); + final GeoPoint testPoint = + new GeoPoint(-0.074161727332972, 0.5686488061123504, 0.8178445379383386); - // Construct a standard polygon first to see what that does. This winds up being a large polygon under the covers. + // Construct a standard polygon first to see what that does. This winds up being a large + // polygon under the covers. 
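[Editor's sketch; not part of the commit.] For reference, the two construction paths these tests keep comparing, side by side. Both factory methods appear verbatim in this file; the square below is borrowed from its simpler tests:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;
    import org.apache.lucene.spatial3d.geom.*;

    public class StandardVsLargePolygonSketch {
      public static void main(String[] args) {
        final List<GeoPoint> points = new ArrayList<>();
        points.add(new GeoPoint(PlanetModel.WGS84, -0.5, -0.5));
        points.add(new GeoPoint(PlanetModel.WGS84, -0.5, 0.5));
        points.add(new GeoPoint(PlanetModel.WGS84, 0.5, 0.5));
        points.add(new GeoPoint(PlanetModel.WGS84, 0.5, -0.5));
        final GeoPolygonFactory.PolygonDescription pd =
            new GeoPolygonFactory.PolygonDescription(points);
        // "Standard" path: may wind up as a composite or a large polygon internally.
        final GeoPolygon standard = GeoPolygonFactory.makeGeoPolygon(PlanetModel.WGS84, pd);
        // Explicit "large" (complex) path.
        final GeoPolygon large =
            GeoPolygonFactory.makeLargeGeoPolygon(
                PlanetModel.WGS84, Collections.singletonList(pd));
        // The tests assert point-membership parity between the two implementations.
        final GeoPoint probe = new GeoPoint(PlanetModel.WGS84, 0.0, 0.0);
        System.out.println(standard.isWithin(probe) == large.isWithin(probe)); // expected: true
      }
    }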
GeoPolygon standard = GeoPolygonFactory.makeGeoPolygon(PlanetModel.WGS84, pd); - + // This should be true, by inspection, but is false. That's the cause for the failure. assertTrue(standard.isWithin(negativeX)); - + assertTrue(standard.isWithin(testPoint)); - + // This is in-set because it's on an edge assertTrue(standard.isWithin(northPole)); - + // This is in-set assertTrue(standard.isWithin(positiveY)); - final XYZBounds standardBounds = new XYZBounds(); standard.getBounds(standardBounds); final XYZSolid standardSolid = XYZSolidFactory.makeXYZSolid(PlanetModel.WGS84, standardBounds); // If within shape, should be within bounds - assertTrue(standard.isWithin(unquantized)?standardSolid.isWithin(unquantized):true); - assertTrue(standard.isWithin(quantized)?standardSolid.isWithin(quantized):true); - + assertTrue(standard.isWithin(unquantized) ? standardSolid.isWithin(unquantized) : true); + assertTrue(standard.isWithin(quantized) ? standardSolid.isWithin(quantized) : true); } - + @Test public void testLUCENE7642() { // Construct XYZ solid - final XYZSolid solid = XYZSolidFactory.makeXYZSolid(PlanetModel.WGS84, - 0.1845405855034623, 0.2730694323646922, - -1.398547277986495E-9, 0.020766291030223535, - 0.7703937553371503, 0.9977622932859774); - + final XYZSolid solid = + XYZSolidFactory.makeXYZSolid( + PlanetModel.WGS84, + 0.1845405855034623, + 0.2730694323646922, + -1.398547277986495E-9, + 0.020766291030223535, + 0.7703937553371503, + 0.9977622932859774); + /* - [junit4] 1> individual planes - [junit4] 1> notableMinXPoints=[ - [X=0.1845405855034623, Y=-1.398547277986495E-9, Z=0.9806642352600131], - [X=0.1845405855034623, Y=0.020766291030223535, Z=0.9804458120424796]] - notableMaxXPoints=[ - [X=0.2730694323646922, Y=-1.398547277986495E-9, Z=0.959928047174481], - [X=0.2730694323646922, Y=0.020766291030223535, Z=0.9597049045335464]] - notableMinYPoints=[ - [X=0.1845405855034623, Y=-1.398547277986495E-9, Z=0.9806642352600131], - [X=0.2730694323646922, Y=-1.398547277986495E-9, Z=0.959928047174481]] - notableMaxYPoints=[ - [X=0.1845405855034623, Y=0.020766291030223535, Z=0.9804458120424796], - [X=0.2730694323646922, Y=0.020766291030223535, Z=0.9597049045335464]] - notableMinZPoints=[] - notableMaxZPoints=[] - - [junit4] 1> All edge points=[ - [X=0.1845405855034623, Y=-1.398547277986495E-9, Z=0.9806642352600131], - [X=0.1845405855034623, Y=0.020766291030223535, Z=0.9804458120424796], - [X=0.2730694323646922, Y=-1.398547277986495E-9, Z=0.959928047174481], - [X=0.2730694323646922, Y=0.020766291030223535, Z=0.9597049045335464]] + [junit4] 1> individual planes + [junit4] 1> notableMinXPoints=[ + [X=0.1845405855034623, Y=-1.398547277986495E-9, Z=0.9806642352600131], + [X=0.1845405855034623, Y=0.020766291030223535, Z=0.9804458120424796]] + notableMaxXPoints=[ + [X=0.2730694323646922, Y=-1.398547277986495E-9, Z=0.959928047174481], + [X=0.2730694323646922, Y=0.020766291030223535, Z=0.9597049045335464]] + notableMinYPoints=[ + [X=0.1845405855034623, Y=-1.398547277986495E-9, Z=0.9806642352600131], + [X=0.2730694323646922, Y=-1.398547277986495E-9, Z=0.959928047174481]] + notableMaxYPoints=[ + [X=0.1845405855034623, Y=0.020766291030223535, Z=0.9804458120424796], + [X=0.2730694323646922, Y=0.020766291030223535, Z=0.9597049045335464]] + notableMinZPoints=[] + notableMaxZPoints=[] - */ + [junit4] 1> All edge points=[ + [X=0.1845405855034623, Y=-1.398547277986495E-9, Z=0.9806642352600131], + [X=0.1845405855034623, Y=0.020766291030223535, Z=0.9804458120424796], + [X=0.2730694323646922, Y=-1.398547277986495E-9, 
Z=0.959928047174481], + [X=0.2730694323646922, Y=0.020766291030223535, Z=0.9597049045335464]] - final GeoPoint edge1 = new GeoPoint(0.1845405855034623, -1.398547277986495E-9, 0.9806642352600131); - final GeoPoint edge2 = new GeoPoint(0.1845405855034623, 0.020766291030223535, 0.9804458120424796); - final GeoPoint edge3 = new GeoPoint(0.2730694323646922, -1.398547277986495E-9, 0.959928047174481); - final GeoPoint edge4 = new GeoPoint(0.2730694323646922, 0.020766291030223535, 0.9597049045335464); - - // The above says that none of these intersect the surface: minZmaxX, minZminX, minZmaxY, minZminY, or + */ + + final GeoPoint edge1 = + new GeoPoint(0.1845405855034623, -1.398547277986495E-9, 0.9806642352600131); + final GeoPoint edge2 = + new GeoPoint(0.1845405855034623, 0.020766291030223535, 0.9804458120424796); + final GeoPoint edge3 = + new GeoPoint(0.2730694323646922, -1.398547277986495E-9, 0.959928047174481); + final GeoPoint edge4 = + new GeoPoint(0.2730694323646922, 0.020766291030223535, 0.9597049045335464); + + // The above says that none of these intersect the surface: minZmaxX, minZminX, minZmaxY, + // minZminY, or // maxZmaxX, maxZminX, maxZmaxY, maxZminY. - + // So what about minZ and maxZ all by themselves? // - // [junit4] 1> Outside world: minXminYminZ=false minXminYmaxZ=true minXmaxYminZ=false minXmaxYmaxZ=true maxXminYminZ=false + // [junit4] 1> Outside world: minXminYminZ=false minXminYmaxZ=true minXmaxYminZ=false + // minXmaxYmaxZ=true maxXminYminZ=false // maxXminYmaxZ=true maxXmaxYminZ=false maxXmaxYmaxZ=true // - // So the minz plane does not intersect the world because it's all inside. The maxZ plane is all outside but may intersect the world still. + // So the minz plane does not intersect the world because it's all inside. The maxZ plane is + // all outside but may intersect the world still. // But it doesn't because it's too far north. // So it looks like these are our edge points, and they are correct. 
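[Editor's sketch; not part of the commit.] The four edge points above sit on the boundary of the XYZSolid constructed at the start of this test, and Geo3d membership checks are tolerance-inclusive, so each should still report as within the solid. A standalone sketch with the same constructor arguments (the expected output is an assumption based on that tolerance, not a logged result):

    import org.apache.lucene.spatial3d.geom.*;

    public class SolidEdgePointSketch {
      public static void main(String[] args) {
        // Same min/max X, Y, Z arguments as the solid in this test.
        final XYZSolid solid =
            XYZSolidFactory.makeXYZSolid(
                PlanetModel.WGS84,
                0.1845405855034623, 0.2730694323646922,
                -1.398547277986495E-9, 0.020766291030223535,
                0.7703937553371503, 0.9977622932859774);
        // First edge point from the dump above; it lies on the minX and minY planes.
        final GeoPoint edge =
            new GeoPoint(0.1845405855034623, -1.398547277986495E-9, 0.9806642352600131);
        System.out.println(solid.isWithin(edge)); // expected: true (boundary is inclusive)
      }
    }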
- + /* - GeoConvexPolygon: {planetmodel=PlanetModel.WGS84, points=[ - [lat=-1.2267098126036888, lon=3.141592653589793([X=-0.33671029227864785, Y=4.123511816790159E-17, Z=-0.9396354281810864])], - [lat=0.2892272352400239, lon=0.017453291479645996([X=0.9591279281485559, Y=0.01674163926221766, Z=0.28545251693892165])], - [lat=-1.5707963267948966, lon=1.6247683074702402E-201([X=6.109531986173988E-17, Y=9.926573944611206E-218, Z=-0.997762292022105])]], internalEdges={2}}, - GeoConvexPolygon: {planetmodel=PlanetModel.WGS84, points=[ - [lat=-1.2267098126036888, lon=3.141592653589793([X=-0.33671029227864785, Y=4.123511816790159E-17, Z=-0.9396354281810864])], - [lat=-1.5707963267948966, lon=1.6247683074702402E-201([X=6.109531986173988E-17, Y=9.926573944611206E-218, Z=-0.997762292022105])], - [lat=0.6723906085905078, lon=-3.0261581679831E-12([X=0.7821883235431606, Y=-2.367025584191143E-12, Z=0.6227413298552851])]], internalEdges={0}}]} - */ + GeoConvexPolygon: {planetmodel=PlanetModel.WGS84, points=[ + [lat=-1.2267098126036888, lon=3.141592653589793([X=-0.33671029227864785, Y=4.123511816790159E-17, Z=-0.9396354281810864])], + [lat=0.2892272352400239, lon=0.017453291479645996([X=0.9591279281485559, Y=0.01674163926221766, Z=0.28545251693892165])], + [lat=-1.5707963267948966, lon=1.6247683074702402E-201([X=6.109531986173988E-17, Y=9.926573944611206E-218, Z=-0.997762292022105])]], internalEdges={2}}, + GeoConvexPolygon: {planetmodel=PlanetModel.WGS84, points=[ + [lat=-1.2267098126036888, lon=3.141592653589793([X=-0.33671029227864785, Y=4.123511816790159E-17, Z=-0.9396354281810864])], + [lat=-1.5707963267948966, lon=1.6247683074702402E-201([X=6.109531986173988E-17, Y=9.926573944611206E-218, Z=-0.997762292022105])], + [lat=0.6723906085905078, lon=-3.0261581679831E-12([X=0.7821883235431606, Y=-2.367025584191143E-12, Z=0.6227413298552851])]], internalEdges={0}}]} + */ final List points = new ArrayList<>(); points.add(new GeoPoint(PlanetModel.WGS84, -1.2267098126036888, 3.141592653589793)); points.add(new GeoPoint(PlanetModel.WGS84, 0.2892272352400239, 0.017453291479645996)); points.add(new GeoPoint(PlanetModel.WGS84, -1.5707963267948966, 1.6247683074702402E-201)); points.add(new GeoPoint(PlanetModel.WGS84, 0.6723906085905078, -3.0261581679831E-12)); - - final GeoPolygonFactory.PolygonDescription pd = new GeoPolygonFactory.PolygonDescription(points); + + final GeoPolygonFactory.PolygonDescription pd = + new GeoPolygonFactory.PolygonDescription(points); final GeoPolygon shape = GeoPolygonFactory.makeGeoPolygon(PlanetModel.WGS84, pd); final List pdList = new ArrayList<>(1); pdList.add(pd); - final GeoPolygon largeShape = GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel. 
WGS84, pdList); - + final GeoPolygon largeShape = GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.WGS84, pdList); + /* This is the output: - [junit4] 1> shape = GeoCompositePolygon: {[ - GeoConvexPolygon: {planetmodel=PlanetModel.WGS84, points=[ - [lat=-1.2267098126036888, lon=3.141592653589793([X=-0.33671029227864785, Y=4.123511816790159E-17, Z=-0.9396354281810864])], - [lat=0.2892272352400239, lon=0.017453291479645996([X=0.9591279281485559, Y=0.01674163926221766, Z=0.28545251693892165])], - [lat=-1.5707963267948966, lon=1.6247683074702402E-201([X=6.109531986173988E-17, Y=9.926573944611206E-218, Z=-0.997762292022105])]], internalEdges={2}}, - GeoConvexPolygon: {planetmodel=PlanetModel.WGS84, points=[ - [lat=-1.2267098126036888, lon=3.141592653589793([X=-0.33671029227864785, Y=4.123511816790159E-17, Z=-0.9396354281810864])], - [lat=-1.5707963267948966, lon=1.6247683074702402E-201([X=6.109531986173988E-17, Y=9.926573944611206E-218, Z=-0.997762292022105])], - [lat=0.6723906085905078, lon=-3.0261581679831E-12([X=0.7821883235431606, Y=-2.367025584191143E-12, Z=0.6227413298552851])]], internalEdges={0}}]} - */ - - final GeoPoint quantized = new GeoPoint(0.24162356556559528, 2.3309121299774915E-10, 0.9682657049003708); - final GeoPoint unquantized = new GeoPoint(PlanetModel.WGS84, 1.3262481806651818, 2.4457272005608357E-47); + [junit4] 1> shape = GeoCompositePolygon: {[ + GeoConvexPolygon: {planetmodel=PlanetModel.WGS84, points=[ + [lat=-1.2267098126036888, lon=3.141592653589793([X=-0.33671029227864785, Y=4.123511816790159E-17, Z=-0.9396354281810864])], + [lat=0.2892272352400239, lon=0.017453291479645996([X=0.9591279281485559, Y=0.01674163926221766, Z=0.28545251693892165])], + [lat=-1.5707963267948966, lon=1.6247683074702402E-201([X=6.109531986173988E-17, Y=9.926573944611206E-218, Z=-0.997762292022105])]], internalEdges={2}}, + GeoConvexPolygon: {planetmodel=PlanetModel.WGS84, points=[ + [lat=-1.2267098126036888, lon=3.141592653589793([X=-0.33671029227864785, Y=4.123511816790159E-17, Z=-0.9396354281810864])], + [lat=-1.5707963267948966, lon=1.6247683074702402E-201([X=6.109531986173988E-17, Y=9.926573944611206E-218, Z=-0.997762292022105])], + [lat=0.6723906085905078, lon=-3.0261581679831E-12([X=0.7821883235431606, Y=-2.367025584191143E-12, Z=0.6227413298552851])]], internalEdges={0}}]} + */ + + final GeoPoint quantized = + new GeoPoint(0.24162356556559528, 2.3309121299774915E-10, 0.9682657049003708); + final GeoPoint unquantized = + new GeoPoint(PlanetModel.WGS84, 1.3262481806651818, 2.4457272005608357E-47); // This passes; the point is definitely within the solid. assertTrue(solid.isWithin(unquantized)); @@ -1394,28 +1721,30 @@ shape: // This fails because the point is within the shape but apparently shouldn't be. // Instrumenting isWithin finds that the point is on three edge planes somehow: /* - [junit4] 1> localIsWithin start for point [0.2416235655409041,5.90945326539883E-48,0.9682657046994557] - [junit4] 1> For edge [A=-1.224646799147353E-16, B=-1.0, C=-7.498798913309287E-33, D=0.0, side=1.0] the point evaluation is -2.959035261382389E-17 - [junit4] 1> For edge [A=-3.0261581679831E-12, B=-0.9999999999999999, C=-1.8529874570670608E-28, D=0.0, side=1.0] the point evaluation is -7.31191126438807E-13 - [junit4] 1> For edge [A=4.234084035470679E-12, B=1.0, C=-1.5172037954732973E-12, D=0.0, side=1.0] the point evaluation is -4.460019207463956E-13 - */ - // These are too close to parallel. The only solution is to prevent the poly from being created. Let's see if Geo3d thinks they are parallel. 
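[Editor's sketch; not part of the commit.] "On three edge planes somehow" means that substituting the point into each plane equation Ax + By + Cz + D yields a value within rounding error of zero. Assuming Plane.evaluate(Vector) is the call behind the "point evaluation" log lines (an assumption; only the Plane constructor appears in this hunk), with the planes and point taken from the trace:

    import org.apache.lucene.spatial3d.geom.GeoPoint;
    import org.apache.lucene.spatial3d.geom.Plane;

    public class NearParallelPlanesSketch {
      public static void main(String[] args) {
        final Plane p1 = new Plane(-1.224646799147353E-16, -1.0, -7.498798913309287E-33, 0.0);
        final Plane p2 =
            new Plane(-3.0261581679831E-12, -0.9999999999999999, -1.8529874570670608E-28, 0.0);
        final GeoPoint probe =
            new GeoPoint(0.2416235655409041, 5.90945326539883E-48, 0.9682657046994557);
        // Logged values were ~ -2.96E-17 and ~ -7.31E-13: effectively zero for both planes.
        System.out.println(p1.evaluate(probe));
        System.out.println(p2.evaluate(probe));
      }
    }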
- + [junit4] 1> localIsWithin start for point [0.2416235655409041,5.90945326539883E-48,0.9682657046994557] + [junit4] 1> For edge [A=-1.224646799147353E-16, B=-1.0, C=-7.498798913309287E-33, D=0.0, side=1.0] the point evaluation is -2.959035261382389E-17 + [junit4] 1> For edge [A=-3.0261581679831E-12, B=-0.9999999999999999, C=-1.8529874570670608E-28, D=0.0, side=1.0] the point evaluation is -7.31191126438807E-13 + [junit4] 1> For edge [A=4.234084035470679E-12, B=1.0, C=-1.5172037954732973E-12, D=0.0, side=1.0] the point evaluation is -4.460019207463956E-13 + */ + // These are too close to parallel. The only solution is to prevent the poly from being + // created. Let's see if Geo3d thinks they are parallel. + final Plane p1 = new Plane(-1.224646799147353E-16, -1.0, -7.498798913309287E-33, 0.0); - final Plane p2 = new Plane(-3.0261581679831E-12, -0.9999999999999999, -1.8529874570670608E-28, 0.0); + final Plane p2 = + new Plane(-3.0261581679831E-12, -0.9999999999999999, -1.8529874570670608E-28, 0.0); final Plane p3 = new Plane(4.234084035470679E-12, 1.0, -1.5172037954732973E-12, 0.0); - + assertFalse(shape.isWithin(unquantized)); - + // This point is indeed outside the shape but it doesn't matter assertFalse(shape.isWithin(quantized)); - + // Sanity check with different poly implementation assertTrue(shape.isWithin(edge1) == largeShape.isWithin(edge1)); assertTrue(shape.isWithin(edge2) == largeShape.isWithin(edge2)); assertTrue(shape.isWithin(edge3) == largeShape.isWithin(edge3)); assertTrue(shape.isWithin(edge4) == largeShape.isWithin(edge4)); - + // Verify both shapes give the same relationship int intersection = solid.getRelationship(shape); int largeIntersection = solid.getRelationship(largeShape); @@ -1428,17 +1757,22 @@ shape: points.add(new GeoPoint(PlanetModel.SPHERE, -0.5, -0.5)); points.add(new GeoPoint(PlanetModel.SPHERE, -0.5, 0.5)); points.add(new GeoPoint(PlanetModel.SPHERE, 0.5, 0.5)); - points.add(new GeoPoint(PlanetModel.SPHERE,0.5, -0.5)); + points.add(new GeoPoint(PlanetModel.SPHERE, 0.5, -0.5)); GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points); - //Large polygon with arbitrary testPoint in set - GeoPolygon largePolygon = new GeoComplexPolygon(PlanetModel.SPHERE, Collections.singletonList(points), new GeoPoint(PlanetModel.SPHERE, 0.25, 0), true); - //This point is ok + // Large polygon with arbitrary testPoint in set + GeoPolygon largePolygon = + new GeoComplexPolygon( + PlanetModel.SPHERE, + Collections.singletonList(points), + new GeoPoint(PlanetModel.SPHERE, 0.25, 0), + true); + // This point is ok GeoPoint point1 = new GeoPoint(PlanetModel.SPHERE, 0, 1e-8); assertTrue(polygon.isWithin(point1) == largePolygon.isWithin(point1)); - //This point is ok + // This point is ok point1 = new GeoPoint(PlanetModel.SPHERE, 0, 1e-5); assertTrue(polygon.isWithin(point1) == largePolygon.isWithin(point1)); - //Fails here + // Fails here point1 = new GeoPoint(PlanetModel.SPHERE, 0, 1e-7); assertTrue(polygon.isWithin(point1) == largePolygon.isWithin(point1)); } @@ -1449,354 +1783,847 @@ shape: points.add(new GeoPoint(PlanetModel.SPHERE, -0.5, -0.5)); points.add(new GeoPoint(PlanetModel.SPHERE, -0.5, 0.5)); points.add(new GeoPoint(PlanetModel.SPHERE, 0.5, 0.5)); - points.add(new GeoPoint(PlanetModel.SPHERE,0.5, -0.5)); + points.add(new GeoPoint(PlanetModel.SPHERE, 0.5, -0.5)); final GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points); - //Large polygon with test point in (0,0) - final GeoPolygon largePolygon = new 
GeoComplexPolygon(PlanetModel.SPHERE, Collections.singletonList(points), new GeoPoint(PlanetModel.SPHERE, 0.0, 0), true); - //Chooses Plane Z and succeed + // Large polygon with test point in (0,0) + final GeoPolygon largePolygon = + new GeoComplexPolygon( + PlanetModel.SPHERE, + Collections.singletonList(points), + new GeoPoint(PlanetModel.SPHERE, 0.0, 0), + true); + // Chooses Plane Z and succeed final GeoPoint point1 = new GeoPoint(PlanetModel.SPHERE, 0, 1e-5); assertTrue(polygon.isWithin(point1) == largePolygon.isWithin(point1)); - //Numerically identical + // Numerically identical final GeoPoint point2 = new GeoPoint(PlanetModel.SPHERE, 0, 1e-13); assertTrue(polygon.isWithin(point2) == largePolygon.isWithin(point2)); - //Fails here, chooses plane X + // Fails here, chooses plane X final GeoPoint point3 = new GeoPoint(PlanetModel.SPHERE, 0, 1e-6); assertTrue(polygon.isWithin(point3) == largePolygon.isWithin(point3)); } @Test public void testAboveBelowCrossingDifferentEdges() { - //POLYGON((130.846821906638 -5.066128831305991,134.5635278421427 21.75703481126756,156.31803093908155 44.5755831677161,0.0 8.860146581178396E-33,130.846821906638 -5.066128831305991)) + // POLYGON((130.846821906638 -5.066128831305991,134.5635278421427 + // 21.75703481126756,156.31803093908155 44.5755831677161,0.0 + // 8.860146581178396E-33,130.846821906638 -5.066128831305991)) final List points = new ArrayList<>(); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-5.066128831305991), Geo3DUtil.fromDegrees(130.846821906638))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(21.75703481126756), Geo3DUtil.fromDegrees(134.5635278421427))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(44.5755831677161), Geo3DUtil.fromDegrees(156.31803093908155))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(8.860146581178396E-33), Geo3DUtil.fromDegrees(0.0))); - final GeoPolygonFactory.PolygonDescription description = new GeoPolygonFactory.PolygonDescription(points); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-5.066128831305991), + Geo3DUtil.fromDegrees(130.846821906638))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(21.75703481126756), + Geo3DUtil.fromDegrees(134.5635278421427))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(44.5755831677161), + Geo3DUtil.fromDegrees(156.31803093908155))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(8.860146581178396E-33), + Geo3DUtil.fromDegrees(0.0))); + final GeoPolygonFactory.PolygonDescription description = + new GeoPolygonFactory.PolygonDescription(points); final GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, description); - final GeoPolygon largePolygon = GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.SPHERE, Collections.singletonList(description)); - //POINT(-15.37308034708334 1.3353777223310798E-11) - final GeoPoint point1 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(1.3353777223310798E-11), Geo3DUtil.fromDegrees(-15.37308034708334)); + final GeoPolygon largePolygon = + GeoPolygonFactory.makeLargeGeoPolygon( + PlanetModel.SPHERE, Collections.singletonList(description)); + // POINT(-15.37308034708334 1.3353777223310798E-11) + final GeoPoint point1 = + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(1.3353777223310798E-11), + Geo3DUtil.fromDegrees(-15.37308034708334)); assertTrue(polygon.isWithin(point1) == 
largePolygon.isWithin(point1));
}

@Test
public void testBelowCrossingTwiceEdgePoint() {
- //POLYGON((162.9024012378976 -0.17652184258966092,162.56882659034474 -0.009075185910497524,162.52932263918404 1.6235907240799453E-189,162.17731099253956 -0.2154890860855618,162.9024012378976 -0.17652184258966092))
+ // POLYGON((162.9024012378976 -0.17652184258966092,162.56882659034474
+ // -0.009075185910497524,162.52932263918404 1.6235907240799453E-189,162.17731099253956
+ // -0.2154890860855618,162.9024012378976 -0.17652184258966092))
List<GeoPoint> points = new ArrayList<>();
- points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-0.17652184258966092), Geo3DUtil.fromDegrees(162.9024012378976)));
- points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-0.009075185910497524), Geo3DUtil.fromDegrees(162.56882659034474)));
- points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(1.6235907240799453E-189), Geo3DUtil.fromDegrees(162.52932263918404)));
- points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-0.2154890860855618), Geo3DUtil.fromDegrees(162.17731099253956)));
- final GeoPolygonFactory.PolygonDescription description = new GeoPolygonFactory.PolygonDescription(points);
+ points.add(
+ new GeoPoint(
+ PlanetModel.SPHERE,
+ Geo3DUtil.fromDegrees(-0.17652184258966092),
+ Geo3DUtil.fromDegrees(162.9024012378976)));
+ points.add(
+ new GeoPoint(
+ PlanetModel.SPHERE,
+ Geo3DUtil.fromDegrees(-0.009075185910497524),
+ Geo3DUtil.fromDegrees(162.56882659034474)));
+ points.add(
+ new GeoPoint(
+ PlanetModel.SPHERE,
+ Geo3DUtil.fromDegrees(1.6235907240799453E-189),
+ Geo3DUtil.fromDegrees(162.52932263918404)));
+ points.add(
+ new GeoPoint(
+ PlanetModel.SPHERE,
+ Geo3DUtil.fromDegrees(-0.2154890860855618),
+ Geo3DUtil.fromDegrees(162.17731099253956)));
+ final GeoPolygonFactory.PolygonDescription description =
+ new GeoPolygonFactory.PolygonDescription(points);
final GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, description);
- final GeoPolygon largePolygon = GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.SPHERE, Collections.singletonList(description));
- //POINT(91.60559215160585 -6.782152464351765E-11)
- final GeoPoint point = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-6.782152464351765E-11), Geo3DUtil.fromDegrees(91.60559215160585));
+ final GeoPolygon largePolygon =
+ GeoPolygonFactory.makeLargeGeoPolygon(
+ PlanetModel.SPHERE, Collections.singletonList(description));
+ // POINT(91.60559215160585 -6.782152464351765E-11)
+ final GeoPoint point =
+ new GeoPoint(
+ PlanetModel.SPHERE,
+ Geo3DUtil.fromDegrees(-6.782152464351765E-11),
+ Geo3DUtil.fromDegrees(91.60559215160585));
assertTrue(polygon.isWithin(point) == largePolygon.isWithin(point));
}

@Test
public void testLUCENE8245() {
- //POLYGON((-70.19447784626787 -83.117346007187,0.0 2.8E-322,-139.99870438810106 7.994601469571884,-143.14292702670522 -18.500141088122664,-158.7373186858464 -35.42942085357812,-70.19447784626787 -83.117346007187))
+ // POLYGON((-70.19447784626787 -83.117346007187,0.0 2.8E-322,-139.99870438810106
+ // 7.994601469571884,-143.14292702670522 -18.500141088122664,-158.7373186858464
+ // -35.42942085357812,-70.19447784626787 -83.117346007187))
final List<GeoPoint> points = new ArrayList<>();
- points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-83.117346007187), Geo3DUtil.fromDegrees(-70.19447784626787)));
- points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(2.8E-322), Geo3DUtil.fromDegrees(0.0)));
- points.add(new
GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(7.994601469571884), Geo3DUtil.fromDegrees(-139.99870438810106))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-18.500141088122664), Geo3DUtil.fromDegrees(-143.14292702670522))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-35.42942085357812), Geo3DUtil.fromDegrees(-158.7373186858464))); - final GeoPolygonFactory.PolygonDescription description = new GeoPolygonFactory.PolygonDescription(points); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-83.117346007187), + Geo3DUtil.fromDegrees(-70.19447784626787))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(2.8E-322), Geo3DUtil.fromDegrees(0.0))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(7.994601469571884), + Geo3DUtil.fromDegrees(-139.99870438810106))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-18.500141088122664), + Geo3DUtil.fromDegrees(-143.14292702670522))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-35.42942085357812), + Geo3DUtil.fromDegrees(-158.7373186858464))); + final GeoPolygonFactory.PolygonDescription description = + new GeoPolygonFactory.PolygonDescription(points); final GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, description); - final GeoPolygon largePolygon = GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.SPHERE, Collections.singletonList(description)); - //POINT(-1.91633079336513E-11 12.282452091883385) - final GeoPoint point = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(12.282452091883385), Geo3DUtil.fromDegrees(-1.91633079336513E-11)); + final GeoPolygon largePolygon = + GeoPolygonFactory.makeLargeGeoPolygon( + PlanetModel.SPHERE, Collections.singletonList(description)); + // POINT(-1.91633079336513E-11 12.282452091883385) + final GeoPoint point = + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(12.282452091883385), + Geo3DUtil.fromDegrees(-1.91633079336513E-11)); assertTrue(polygon.isWithin(point) == largePolygon.isWithin(point)); } @Test public void testLUCENE8245_case2() { - //POLYGON((5.512285089810178 -26.833721534785912,12.13983320542565 -16.085163683089583,4.868755337835201 -9.167423203860656,0.0 -5.261747514529465,-15.696549288211289 -21.362181191487718,5.512285089810178 -26.833721534785912)) + // POLYGON((5.512285089810178 -26.833721534785912,12.13983320542565 + // -16.085163683089583,4.868755337835201 -9.167423203860656,0.0 + // -5.261747514529465,-15.696549288211289 -21.362181191487718,5.512285089810178 + // -26.833721534785912)) final List points = new ArrayList<>(); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-26.833721534785912), Geo3DUtil.fromDegrees(5.512285089810178))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-16.085163683089583), Geo3DUtil.fromDegrees(12.13983320542565))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-9.167423203860656), Geo3DUtil.fromDegrees(4.868755337835201))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-5.261747514529465), Geo3DUtil.fromDegrees(0.0))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-21.362181191487718), Geo3DUtil.fromDegrees(-15.696549288211289))); - final GeoPolygonFactory.PolygonDescription description = new GeoPolygonFactory.PolygonDescription(points); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + 
Geo3DUtil.fromDegrees(-26.833721534785912), + Geo3DUtil.fromDegrees(5.512285089810178))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-16.085163683089583), + Geo3DUtil.fromDegrees(12.13983320542565))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-9.167423203860656), + Geo3DUtil.fromDegrees(4.868755337835201))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-5.261747514529465), + Geo3DUtil.fromDegrees(0.0))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-21.362181191487718), + Geo3DUtil.fromDegrees(-15.696549288211289))); + final GeoPolygonFactory.PolygonDescription description = + new GeoPolygonFactory.PolygonDescription(points); final GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, description); - final GeoPolygon largePolygon = GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.SPHERE, Collections.singletonList(description)); - //POINT(-6.994273817216168E-11 -1.6915596606526662E-292) - final GeoPoint point = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-1.6915596606526662E-292), Geo3DUtil.fromDegrees(-6.994273817216168E-11)); + final GeoPolygon largePolygon = + GeoPolygonFactory.makeLargeGeoPolygon( + PlanetModel.SPHERE, Collections.singletonList(description)); + // POINT(-6.994273817216168E-11 -1.6915596606526662E-292) + final GeoPoint point = + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-1.6915596606526662E-292), + Geo3DUtil.fromDegrees(-6.994273817216168E-11)); assertTrue(polygon.isWithin(point) == largePolygon.isWithin(point)); } @Test public void testLUCENE8245_case3() { - //POLYGON((144.76249846857021 8.828705232593283,166.00162989841027 -8.5E-322,157.03429484830787 64.92565566857392,108.64696979831984 39.10241638996957,102.54234512410089 20.471658760034586,144.76249846857021 8.828705232593283)) + // POLYGON((144.76249846857021 8.828705232593283,166.00162989841027 -8.5E-322,157.03429484830787 + // 64.92565566857392,108.64696979831984 39.10241638996957,102.54234512410089 + // 20.471658760034586,144.76249846857021 8.828705232593283)) final List points = new ArrayList<>(); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(8.828705232593283), Geo3DUtil.fromDegrees(144.76249846857021))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-8.5E-322), Geo3DUtil.fromDegrees(166.00162989841027))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(64.92565566857392), Geo3DUtil.fromDegrees(157.03429484830787))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(39.10241638996957), Geo3DUtil.fromDegrees(108.64696979831984))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(20.471658760034586), Geo3DUtil.fromDegrees(102.54234512410089))); - final GeoPolygonFactory.PolygonDescription description = new GeoPolygonFactory.PolygonDescription(points); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(8.828705232593283), + Geo3DUtil.fromDegrees(144.76249846857021))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-8.5E-322), + Geo3DUtil.fromDegrees(166.00162989841027))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(64.92565566857392), + Geo3DUtil.fromDegrees(157.03429484830787))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(39.10241638996957), + Geo3DUtil.fromDegrees(108.64696979831984))); + points.add( + new GeoPoint( + 
PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(20.471658760034586), + Geo3DUtil.fromDegrees(102.54234512410089))); + final GeoPolygonFactory.PolygonDescription description = + new GeoPolygonFactory.PolygonDescription(points); final GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, description); - final GeoPolygon largePolygon = GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.SPHERE, Collections.singletonList(description)); - //POINT(179.9999999999998 7.627654408067997E-11) - final GeoPoint point = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(7.627654408067997E-11), Geo3DUtil.fromDegrees(179.9999999999998)); + final GeoPolygon largePolygon = + GeoPolygonFactory.makeLargeGeoPolygon( + PlanetModel.SPHERE, Collections.singletonList(description)); + // POINT(179.9999999999998 7.627654408067997E-11) + final GeoPoint point = + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(7.627654408067997E-11), + Geo3DUtil.fromDegrees(179.9999999999998)); assertTrue(polygon.isWithin(point) == largePolygon.isWithin(point)); } @Test public void testLUCENE8245_case4() { - //POLYGON((-3.728795716978514 -10.354090605548162,-137.97868338527985 0.05602723926521642,-113.87317441507611 -76.2471400450585,-162.64032677742279 -89.9999999991684,179.9999999999998 -89.99999999999997,-3.728795716978514 -10.354090605548162)) + // POLYGON((-3.728795716978514 -10.354090605548162,-137.97868338527985 + // 0.05602723926521642,-113.87317441507611 -76.2471400450585,-162.64032677742279 + // -89.9999999991684,179.9999999999998 -89.99999999999997,-3.728795716978514 + // -10.354090605548162)) final List points = new ArrayList<>(); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-10.354090605548162), Geo3DUtil.fromDegrees(-3.728795716978514))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(0.05602723926521642), Geo3DUtil.fromDegrees(-137.97868338527985))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-76.2471400450585), Geo3DUtil.fromDegrees(-113.87317441507611))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-89.9999999991684), Geo3DUtil.fromDegrees(-162.64032677742279))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-89.99999999999997), Geo3DUtil.fromDegrees(179.9999999999998))); - final GeoPolygonFactory.PolygonDescription description = new GeoPolygonFactory.PolygonDescription(points); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-10.354090605548162), + Geo3DUtil.fromDegrees(-3.728795716978514))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(0.05602723926521642), + Geo3DUtil.fromDegrees(-137.97868338527985))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-76.2471400450585), + Geo3DUtil.fromDegrees(-113.87317441507611))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-89.9999999991684), + Geo3DUtil.fromDegrees(-162.64032677742279))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-89.99999999999997), + Geo3DUtil.fromDegrees(179.9999999999998))); + final GeoPolygonFactory.PolygonDescription description = + new GeoPolygonFactory.PolygonDescription(points); final GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, description); - final GeoPolygon largePolygon = GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.SPHERE, Collections.singletonList(description)); - //POINT(-1.2862855990004445E-10 -39.178517830976105) - 
final GeoPoint point = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-39.178517830976105), Geo3DUtil.fromDegrees(-1.2862855990004445E-10)); + final GeoPolygon largePolygon = + GeoPolygonFactory.makeLargeGeoPolygon( + PlanetModel.SPHERE, Collections.singletonList(description)); + // POINT(-1.2862855990004445E-10 -39.178517830976105) + final GeoPoint point = + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-39.178517830976105), + Geo3DUtil.fromDegrees(-1.2862855990004445E-10)); assertTrue(polygon.isWithin(point) == largePolygon.isWithin(point)); } - + @Test public void testLUCENE8251() { - //POLYGON((135.63207358036593 -51.43541696593334,113.00782694696038 -58.984559858566556,0.0 -3.68E-321,-66.33598777585381 -7.382056816201731,135.63207358036593 -51.43541696593334)) + // POLYGON((135.63207358036593 -51.43541696593334,113.00782694696038 -58.984559858566556,0.0 + // -3.68E-321,-66.33598777585381 -7.382056816201731,135.63207358036593 -51.43541696593334)) final List points = new ArrayList<>(); - points.add(new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(-51.43541696593334), Geo3DUtil.fromDegrees(135.63207358036593))); - points.add(new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(-58.984559858566556), Geo3DUtil.fromDegrees(113.00782694696038))); - points.add(new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(-3.68E-321), Geo3DUtil.fromDegrees(0.0))); - points.add(new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(-7.382056816201731), Geo3DUtil.fromDegrees(-66.33598777585381))); - points.add(new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(-51.43541696593334), Geo3DUtil.fromDegrees(135.63207358036593))); - final GeoPolygonFactory.PolygonDescription description = new GeoPolygonFactory.PolygonDescription(points); + points.add( + new GeoPoint( + PlanetModel.WGS84, + Geo3DUtil.fromDegrees(-51.43541696593334), + Geo3DUtil.fromDegrees(135.63207358036593))); + points.add( + new GeoPoint( + PlanetModel.WGS84, + Geo3DUtil.fromDegrees(-58.984559858566556), + Geo3DUtil.fromDegrees(113.00782694696038))); + points.add( + new GeoPoint( + PlanetModel.WGS84, Geo3DUtil.fromDegrees(-3.68E-321), Geo3DUtil.fromDegrees(0.0))); + points.add( + new GeoPoint( + PlanetModel.WGS84, + Geo3DUtil.fromDegrees(-7.382056816201731), + Geo3DUtil.fromDegrees(-66.33598777585381))); + points.add( + new GeoPoint( + PlanetModel.WGS84, + Geo3DUtil.fromDegrees(-51.43541696593334), + Geo3DUtil.fromDegrees(135.63207358036593))); + final GeoPolygonFactory.PolygonDescription description = + new GeoPolygonFactory.PolygonDescription(points); final GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.WGS84, description); - final GeoPolygon largePolygon = GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.WGS84, Collections.singletonList(description)); + final GeoPolygon largePolygon = + GeoPolygonFactory.makeLargeGeoPolygon( + PlanetModel.WGS84, Collections.singletonList(description)); - //POINT(0.005183505059185348 1.98E-321) - final GeoPoint point = new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(1.98E-321), Geo3DUtil.fromDegrees(0.005183505059185348)); + // POINT(0.005183505059185348 1.98E-321) + final GeoPoint point = + new GeoPoint( + PlanetModel.WGS84, + Geo3DUtil.fromDegrees(1.98E-321), + Geo3DUtil.fromDegrees(0.005183505059185348)); assertTrue(polygon.isWithin(point) == largePolygon.isWithin(point)); } @Test public void testLUCENE8257() { - //POLYGON((12.9610296281349 -8.35317290232106,15.448601008878832 -3.990004427754539,22.375905319231205 
0.2308875600810982,-13.473550791109867 30.10483127471788,-17.854443360411242 33.07441476406424,-3.928621142543736E-11 4.688559453373203E-11,0.0 -5.546974900361278E-104,12.9610296281349 -8.35317290232106)) + // POLYGON((12.9610296281349 -8.35317290232106,15.448601008878832 + // -3.990004427754539,22.375905319231205 0.2308875600810982,-13.473550791109867 + // 30.10483127471788,-17.854443360411242 33.07441476406424,-3.928621142543736E-11 + // 4.688559453373203E-11,0.0 -5.546974900361278E-104,12.9610296281349 -8.35317290232106)) final List points = new ArrayList<>(); - points.add(new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(-8.35317290232106), Geo3DUtil.fromDegrees(12.9610296281349))); - points.add(new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(-3.990004427754539), Geo3DUtil.fromDegrees(15.448601008878832))); - points.add(new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(0.2308875600810982), Geo3DUtil.fromDegrees(22.375905319231205))); - points.add(new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(30.10483127471788), Geo3DUtil.fromDegrees(-13.473550791109867))); - points.add(new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(33.07441476406424), Geo3DUtil.fromDegrees(-17.854443360411242))); - points.add(new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(4.688559453373203E-11), Geo3DUtil.fromDegrees(-3.928621142543736E-11))); - points.add(new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(-5.546974900361278E-104), Geo3DUtil.fromDegrees(0.0))); - final GeoPolygonFactory.PolygonDescription description = new GeoPolygonFactory.PolygonDescription(points); + points.add( + new GeoPoint( + PlanetModel.WGS84, + Geo3DUtil.fromDegrees(-8.35317290232106), + Geo3DUtil.fromDegrees(12.9610296281349))); + points.add( + new GeoPoint( + PlanetModel.WGS84, + Geo3DUtil.fromDegrees(-3.990004427754539), + Geo3DUtil.fromDegrees(15.448601008878832))); + points.add( + new GeoPoint( + PlanetModel.WGS84, + Geo3DUtil.fromDegrees(0.2308875600810982), + Geo3DUtil.fromDegrees(22.375905319231205))); + points.add( + new GeoPoint( + PlanetModel.WGS84, + Geo3DUtil.fromDegrees(30.10483127471788), + Geo3DUtil.fromDegrees(-13.473550791109867))); + points.add( + new GeoPoint( + PlanetModel.WGS84, + Geo3DUtil.fromDegrees(33.07441476406424), + Geo3DUtil.fromDegrees(-17.854443360411242))); + points.add( + new GeoPoint( + PlanetModel.WGS84, + Geo3DUtil.fromDegrees(4.688559453373203E-11), + Geo3DUtil.fromDegrees(-3.928621142543736E-11))); + points.add( + new GeoPoint( + PlanetModel.WGS84, + Geo3DUtil.fromDegrees(-5.546974900361278E-104), + Geo3DUtil.fromDegrees(0.0))); + final GeoPolygonFactory.PolygonDescription description = + new GeoPolygonFactory.PolygonDescription(points); final GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.WGS84, description); - final GeoPolygon largePolygon = GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.WGS84, Collections.singletonList(description)); + final GeoPolygon largePolygon = + GeoPolygonFactory.makeLargeGeoPolygon( + PlanetModel.WGS84, Collections.singletonList(description)); - //POINT(-179.99999999999997 -9.638811778842766E-12) - final GeoPoint point = new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(-9.638811778842766E-12), Geo3DUtil.fromDegrees(-179.99999999999997)); + // POINT(-179.99999999999997 -9.638811778842766E-12) + final GeoPoint point = + new GeoPoint( + PlanetModel.WGS84, + Geo3DUtil.fromDegrees(-9.638811778842766E-12), + Geo3DUtil.fromDegrees(-179.99999999999997)); assertTrue(polygon.isWithin(point) == 
largePolygon.isWithin(point)); } @Test public void testLUCENE8258() { - //POLYGON((0.004541088101890366 2.457524007073783E-4,0.003771467014711204 0.0011493732122651466,0.003975546116981415 0.002208372357731988,0.0010780690991920934 0.0014120274287707404,0.0 2.8E-322,7.486881020702663E-4 -3.4191957123300967E-4,-8.981008225032098E-4 -0.0032334745041058812,0.004541088101890366 2.457524007073783E-4)) + // POLYGON((0.004541088101890366 2.457524007073783E-4,0.003771467014711204 + // 0.0011493732122651466,0.003975546116981415 0.002208372357731988,0.0010780690991920934 + // 0.0014120274287707404,0.0 2.8E-322,7.486881020702663E-4 + // -3.4191957123300967E-4,-8.981008225032098E-4 -0.0032334745041058812,0.004541088101890366 + // 2.457524007073783E-4)) final List points = new ArrayList<>(); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(2.457524007073783E-4), Geo3DUtil.fromDegrees(0.004541088101890366))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(0.0011493732122651466), Geo3DUtil.fromDegrees(0.003771467014711204))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(0.002208372357731988), Geo3DUtil.fromDegrees(0.003975546116981415))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(0.0014120274287707404), Geo3DUtil.fromDegrees(0.0010780690991920934))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(2.8E-322), Geo3DUtil.fromDegrees(0.0))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-3.4191957123300967E-4), Geo3DUtil.fromDegrees(7.486881020702663E-4))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-0.0032334745041058812), Geo3DUtil.fromDegrees(-8.981008225032098E-4))); - final GeoPolygonFactory.PolygonDescription description = new GeoPolygonFactory.PolygonDescription(points); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(2.457524007073783E-4), + Geo3DUtil.fromDegrees(0.004541088101890366))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(0.0011493732122651466), + Geo3DUtil.fromDegrees(0.003771467014711204))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(0.002208372357731988), + Geo3DUtil.fromDegrees(0.003975546116981415))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(0.0014120274287707404), + Geo3DUtil.fromDegrees(0.0010780690991920934))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(2.8E-322), Geo3DUtil.fromDegrees(0.0))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-3.4191957123300967E-4), + Geo3DUtil.fromDegrees(7.486881020702663E-4))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-0.0032334745041058812), + Geo3DUtil.fromDegrees(-8.981008225032098E-4))); + final GeoPolygonFactory.PolygonDescription description = + new GeoPolygonFactory.PolygonDescription(points); final GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, description); - final GeoPolygon largePolygon = GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.SPHERE, Collections.singletonList(description)); + final GeoPolygon largePolygon = + GeoPolygonFactory.makeLargeGeoPolygon( + PlanetModel.SPHERE, Collections.singletonList(description)); - //POINT(1.413E-321 2.104316138623836E-4) - final GeoPoint point = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(2.104316138623836E-4), Geo3DUtil.fromDegrees(1.413E-321)); + // POINT(1.413E-321 
2.104316138623836E-4) + final GeoPoint point = + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(2.104316138623836E-4), + Geo3DUtil.fromDegrees(1.413E-321)); assertTrue(polygon.isWithin(point) == largePolygon.isWithin(point)); } @Test public void testLUCENE8266_case1() { - //POLYGON((-6.35093158794635E-11 -4.965517818537545E-11,0.0 3.113E-321,-60.23538585411111 18.46706692248612, 162.37100340450482 -25.988383239097754,-6.35093158794635E-11 -4.965517818537545E-11)) + // POLYGON((-6.35093158794635E-11 -4.965517818537545E-11,0.0 3.113E-321,-60.23538585411111 + // 18.46706692248612, 162.37100340450482 -25.988383239097754,-6.35093158794635E-11 + // -4.965517818537545E-11)) final List points = new ArrayList<>(); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-4.965517818537545E-11), Geo3DUtil.fromDegrees(-6.35093158794635E-11))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(3.113E-321), Geo3DUtil.fromDegrees(0.0))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(18.46706692248612), Geo3DUtil.fromDegrees(-60.23538585411111))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-25.988383239097754), Geo3DUtil.fromDegrees(162.37100340450482))); - final GeoPolygonFactory.PolygonDescription description = new GeoPolygonFactory.PolygonDescription(points); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-4.965517818537545E-11), + Geo3DUtil.fromDegrees(-6.35093158794635E-11))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(3.113E-321), Geo3DUtil.fromDegrees(0.0))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(18.46706692248612), + Geo3DUtil.fromDegrees(-60.23538585411111))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-25.988383239097754), + Geo3DUtil.fromDegrees(162.37100340450482))); + final GeoPolygonFactory.PolygonDescription description = + new GeoPolygonFactory.PolygonDescription(points); final GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, description); - final GeoPolygon largePolygon = GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.SPHERE, Collections.singletonList(description)); + final GeoPolygon largePolygon = + GeoPolygonFactory.makeLargeGeoPolygon( + PlanetModel.SPHERE, Collections.singletonList(description)); - //POINT(-179.99999999999974 2.4432260684194717E-11) - final GeoPoint point = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(2.4432260684194717E-11), Geo3DUtil.fromDegrees(-179.99999999999974)); + // POINT(-179.99999999999974 2.4432260684194717E-11) + final GeoPoint point = + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(2.4432260684194717E-11), + Geo3DUtil.fromDegrees(-179.99999999999974)); assertFalse(polygon.isWithin(point)); assertFalse(largePolygon.isWithin(point)); } @Test public void testLUCENE8266_case2() { - //POLYGON((7.885596306952593 -42.25131029665893,1.5412637897085604 -6.829581354691802,34.03338913004999 27.583811665797796,0.0 5.7E-322,-8.854664233194431E-12 7.132883127401669E-11,-40.20723013296905 15.679563923063258,7.885596306952593 -42.25131029665893)) + // POLYGON((7.885596306952593 -42.25131029665893,1.5412637897085604 + // -6.829581354691802,34.03338913004999 27.583811665797796,0.0 5.7E-322,-8.854664233194431E-12 + // 7.132883127401669E-11,-40.20723013296905 15.679563923063258,7.885596306952593 + // -42.25131029665893)) final List points = new ArrayList<>(); - points.add(new GeoPoint(PlanetModel.WGS84, 
Geo3DUtil.fromDegrees(-42.25131029665893), Geo3DUtil.fromDegrees(7.885596306952593)));
- points.add(new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(-6.829581354691802), Geo3DUtil.fromDegrees(1.5412637897085604)));
- points.add(new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(27.583811665797796), Geo3DUtil.fromDegrees(34.03338913004999)));
- points.add(new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(5.7E-322), Geo3DUtil.fromDegrees(0.0)));
- points.add(new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(7.132883127401669E-11), Geo3DUtil.fromDegrees( -8.854664233194431E-12)));
- points.add(new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(15.679563923063258), Geo3DUtil.fromDegrees(-40.20723013296905)));
- final GeoPolygonFactory.PolygonDescription description = new GeoPolygonFactory.PolygonDescription(points);
+ points.add(
+ new GeoPoint(
+ PlanetModel.WGS84,
+ Geo3DUtil.fromDegrees(-42.25131029665893),
+ Geo3DUtil.fromDegrees(7.885596306952593)));
+ points.add(
+ new GeoPoint(
+ PlanetModel.WGS84,
+ Geo3DUtil.fromDegrees(-6.829581354691802),
+ Geo3DUtil.fromDegrees(1.5412637897085604)));
+ points.add(
+ new GeoPoint(
+ PlanetModel.WGS84,
+ Geo3DUtil.fromDegrees(27.583811665797796),
+ Geo3DUtil.fromDegrees(34.03338913004999)));
+ points.add(
+ new GeoPoint(
+ PlanetModel.WGS84, Geo3DUtil.fromDegrees(5.7E-322), Geo3DUtil.fromDegrees(0.0)));
+ points.add(
+ new GeoPoint(
+ PlanetModel.WGS84,
+ Geo3DUtil.fromDegrees(7.132883127401669E-11),
+ Geo3DUtil.fromDegrees(-8.854664233194431E-12)));
+ points.add(
+ new GeoPoint(
+ PlanetModel.WGS84,
+ Geo3DUtil.fromDegrees(15.679563923063258),
+ Geo3DUtil.fromDegrees(-40.20723013296905)));
+ final GeoPolygonFactory.PolygonDescription description =
+ new GeoPolygonFactory.PolygonDescription(points);
final GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.WGS84, description);
- final GeoPolygon largePolygon = GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.WGS84, Collections.singletonList(description));
+ final GeoPolygon largePolygon =
+ GeoPolygonFactory.makeLargeGeoPolygon(
+ PlanetModel.WGS84, Collections.singletonList(description));
- //POINT(-179.99999999999983 -8.474427850967216E-12)
- final GeoPoint point = new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(-8.474427850967216E-12), Geo3DUtil.fromDegrees(-179.99999999999983));
+ // POINT(-179.99999999999983 -8.474427850967216E-12)
+ final GeoPoint point =
+ new GeoPoint(
+ PlanetModel.WGS84,
+ Geo3DUtil.fromDegrees(-8.474427850967216E-12),
+ Geo3DUtil.fromDegrees(-179.99999999999983));
assertFalse(polygon.isWithin(point));
assertFalse(largePolygon.isWithin(point));
}

@Test
public void testLUCENE8266_case3() {
- //POLYGON((-98.38897266664411 7.286530349760722,-169.07259176302364 -7.410435277740526,8E-123,-179.9999999999438 -1.298973436027626E-10,66.2759716901292 -52.84327866278771,-98.38897266664411 7.286530349760722))
+ // POLYGON((-98.38897266664411 7.286530349760722,-169.07259176302364
+ // -7.410435277740526,8E-123,-179.9999999999438 -1.298973436027626E-10,66.2759716901292
+ // -52.84327866278771,-98.38897266664411 7.286530349760722))
final List<GeoPoint> points = new ArrayList<>();
- points.add(new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(7.286530349760722), Geo3DUtil.fromDegrees(-98.38897266664411)));
- points.add(new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(-7.410435277740526), Geo3DUtil.fromDegrees(-169.07259176302364)));
- points.add(new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(-8.136646215781618E-123),
Geo3DUtil.fromDegrees(-180.0))); - points.add(new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(-1.298973436027626E-10), Geo3DUtil.fromDegrees(-179.9999999999438))); - points.add(new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(-52.84327866278771), Geo3DUtil.fromDegrees(66.2759716901292))); - final GeoPolygonFactory.PolygonDescription description = new GeoPolygonFactory.PolygonDescription(points); + points.add( + new GeoPoint( + PlanetModel.WGS84, + Geo3DUtil.fromDegrees(7.286530349760722), + Geo3DUtil.fromDegrees(-98.38897266664411))); + points.add( + new GeoPoint( + PlanetModel.WGS84, + Geo3DUtil.fromDegrees(-7.410435277740526), + Geo3DUtil.fromDegrees(-169.07259176302364))); + points.add( + new GeoPoint( + PlanetModel.WGS84, + Geo3DUtil.fromDegrees(-8.136646215781618E-123), + Geo3DUtil.fromDegrees(-180.0))); + points.add( + new GeoPoint( + PlanetModel.WGS84, + Geo3DUtil.fromDegrees(-1.298973436027626E-10), + Geo3DUtil.fromDegrees(-179.9999999999438))); + points.add( + new GeoPoint( + PlanetModel.WGS84, + Geo3DUtil.fromDegrees(-52.84327866278771), + Geo3DUtil.fromDegrees(66.2759716901292))); + final GeoPolygonFactory.PolygonDescription description = + new GeoPolygonFactory.PolygonDescription(points); final GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.WGS84, description); - final GeoPolygon largePolygon = GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.WGS84, Collections.singletonList(description)); + final GeoPolygon largePolygon = + GeoPolygonFactory.makeLargeGeoPolygon( + PlanetModel.WGS84, Collections.singletonList(description)); - //POINT(3.4279315107728157E-122 2.694960611439045E-11) - final GeoPoint point = new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(2.694960611439045E-11), Geo3DUtil.fromDegrees(3.4279315107728157E-122)); + // POINT(3.4279315107728157E-122 2.694960611439045E-11) + final GeoPoint point = + new GeoPoint( + PlanetModel.WGS84, + Geo3DUtil.fromDegrees(2.694960611439045E-11), + Geo3DUtil.fromDegrees(3.4279315107728157E-122)); assertFalse(polygon.isWithin(point)); assertFalse(largePolygon.isWithin(point)); } @Test public void testLUCENE8276_case1() { - //POLYGON((1.0517792672527197E-4 -1.592702733911458E-5,1.0324192726355287E-4 2.5741558803919037E-5,7.879018764391666E-5 7.192932029677136E-5,0.0 9.400459451570553E-24,3.50020551583809E-5 -6.508699856255637E-5,1.0517792672527197E-4 -1.592702733911458E-5)) + // POLYGON((1.0517792672527197E-4 -1.592702733911458E-5,1.0324192726355287E-4 + // 2.5741558803919037E-5,7.879018764391666E-5 7.192932029677136E-5,0.0 + // 9.400459451570553E-24,3.50020551583809E-5 -6.508699856255637E-5,1.0517792672527197E-4 + // -1.592702733911458E-5)) final List points = new ArrayList<>(); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-1.592702733911458E-5), Geo3DUtil.fromDegrees(1.0517792672527197E-4))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(2.5741558803919037E-5), Geo3DUtil.fromDegrees(1.0324192726355287E-4))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(7.192932029677136E-5), Geo3DUtil.fromDegrees(7.879018764391666E-5))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(9.400459451570553E-24), Geo3DUtil.fromDegrees(0.0))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-6.508699856255637E-5), Geo3DUtil.fromDegrees(3.50020551583809E-5))); - final GeoPolygonFactory.PolygonDescription description = new GeoPolygonFactory.PolygonDescription(points); + points.add( + new GeoPoint( + PlanetModel.SPHERE, 
+ Geo3DUtil.fromDegrees(-1.592702733911458E-5), + Geo3DUtil.fromDegrees(1.0517792672527197E-4))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(2.5741558803919037E-5), + Geo3DUtil.fromDegrees(1.0324192726355287E-4))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(7.192932029677136E-5), + Geo3DUtil.fromDegrees(7.879018764391666E-5))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(9.400459451570553E-24), + Geo3DUtil.fromDegrees(0.0))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-6.508699856255637E-5), + Geo3DUtil.fromDegrees(3.50020551583809E-5))); + final GeoPolygonFactory.PolygonDescription description = + new GeoPolygonFactory.PolygonDescription(points); final GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, description); - final GeoPolygon largePolygon = GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.SPHERE, Collections.singletonList(description)); + final GeoPolygon largePolygon = + GeoPolygonFactory.makeLargeGeoPolygon( + PlanetModel.SPHERE, Collections.singletonList(description)); - //POINT(-1.13E-321 2.83E-321) - final GeoPoint point = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-1.13E-321), Geo3DUtil.fromDegrees(-1.13E-321)); + // POINT(-1.13E-321 2.83E-321) + final GeoPoint point = + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-1.13E-321), + Geo3DUtil.fromDegrees(-1.13E-321)); assertTrue(polygon.isWithin(point) == largePolygon.isWithin(point)); } @Test public void testLUCENE8276_case2() { - //POLYGON((0.05925400271049228 -0.08922986460239596,0.07309863706879852 -0.07813330646578831,0.07411491387725304 -0.07715685640120272,0.0 -2.8E-322,-0.005013788374470427 0.06774540608427036,-0.09349862417147398 0.051577774969906794,-0.10359306491815146 -0.02537375818592368,0.05925400271049228 -0.08922986460239596)) + // POLYGON((0.05925400271049228 -0.08922986460239596,0.07309863706879852 + // -0.07813330646578831,0.07411491387725304 -0.07715685640120272,0.0 + // -2.8E-322,-0.005013788374470427 0.06774540608427036,-0.09349862417147398 + // 0.051577774969906794,-0.10359306491815146 -0.02537375818592368,0.05925400271049228 + // -0.08922986460239596)) final List points = new ArrayList<>(); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-0.08922986460239596), Geo3DUtil.fromDegrees(0.05925400271049228))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-0.07813330646578831), Geo3DUtil.fromDegrees(0.07309863706879852))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-0.07715685640120272), Geo3DUtil.fromDegrees(0.07411491387725304))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-2.8E-322), Geo3DUtil.fromDegrees(0.0))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(0.06774540608427036), Geo3DUtil.fromDegrees(-0.005013788374470427))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(0.051577774969906794), Geo3DUtil.fromDegrees(-0.09349862417147398))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-0.02537375818592368), Geo3DUtil.fromDegrees(-0.10359306491815146))); - final GeoPolygonFactory.PolygonDescription description = new GeoPolygonFactory.PolygonDescription(points); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-0.08922986460239596), + Geo3DUtil.fromDegrees(0.05925400271049228))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + 
Geo3DUtil.fromDegrees(-0.07813330646578831), + Geo3DUtil.fromDegrees(0.07309863706879852))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-0.07715685640120272), + Geo3DUtil.fromDegrees(0.07411491387725304))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-2.8E-322), Geo3DUtil.fromDegrees(0.0))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(0.06774540608427036), + Geo3DUtil.fromDegrees(-0.005013788374470427))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(0.051577774969906794), + Geo3DUtil.fromDegrees(-0.09349862417147398))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-0.02537375818592368), + Geo3DUtil.fromDegrees(-0.10359306491815146))); + final GeoPolygonFactory.PolygonDescription description = + new GeoPolygonFactory.PolygonDescription(points); final GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, description); - final GeoPolygon largePolygon = GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.SPHERE, Collections.singletonList(description)); + final GeoPolygon largePolygon = + GeoPolygonFactory.makeLargeGeoPolygon( + PlanetModel.SPHERE, Collections.singletonList(description)); - //POINT(9.020991048228685E-4 -2.5357127427108625E-98) - final GeoPoint point = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-2.5357127427108625E-98), Geo3DUtil.fromDegrees(9.020991048228685E-4)); + // POINT(9.020991048228685E-4 -2.5357127427108625E-98) + final GeoPoint point = + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-2.5357127427108625E-98), + Geo3DUtil.fromDegrees(9.020991048228685E-4)); assertTrue(polygon.isWithin(point) == largePolygon.isWithin(point)); } @Test - //@AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/LUCENE-8276") + // @AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/LUCENE-8276") public void testLUCENE8276_case3() { - //POLYGON((2.693381024483753E-4 -0.001073608118084019,1.5848404608659423E-4 -2.6378130512803985E-4,8.981079660799132E-4 -6.4697719116416E-4,-7.934854852157693E-5 4.193687767358618E-4,0.0 8.013660459916381E-131,-3.968797970346633E-4 3.2057826073172334E-4,2.693381024483753E-4 -0.001073608118084019)) + // POLYGON((2.693381024483753E-4 -0.001073608118084019,1.5848404608659423E-4 + // -2.6378130512803985E-4,8.981079660799132E-4 -6.4697719116416E-4,-7.934854852157693E-5 + // 4.193687767358618E-4,0.0 8.013660459916381E-131,-3.968797970346633E-4 + // 3.2057826073172334E-4,2.693381024483753E-4 -0.001073608118084019)) final List points = new ArrayList<>(); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-0.001073608118084019), Geo3DUtil.fromDegrees(2.693381024483753E-4))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-2.6378130512803985E-4), Geo3DUtil.fromDegrees(1.5848404608659423E-4))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-6.4697719116416E-4), Geo3DUtil.fromDegrees(8.981079660799132E-4))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(4.193687767358618E-4), Geo3DUtil.fromDegrees(-7.934854852157693E-5))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(8.013660459916381E-131), Geo3DUtil.fromDegrees(0.0))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(3.2057826073172334E-4), Geo3DUtil.fromDegrees(-3.968797970346633E-4))); - final GeoPolygonFactory.PolygonDescription description = new GeoPolygonFactory.PolygonDescription(points); + 
points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-0.001073608118084019), + Geo3DUtil.fromDegrees(2.693381024483753E-4))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-2.6378130512803985E-4), + Geo3DUtil.fromDegrees(1.5848404608659423E-4))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-6.4697719116416E-4), + Geo3DUtil.fromDegrees(8.981079660799132E-4))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(4.193687767358618E-4), + Geo3DUtil.fromDegrees(-7.934854852157693E-5))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(8.013660459916381E-131), + Geo3DUtil.fromDegrees(0.0))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(3.2057826073172334E-4), + Geo3DUtil.fromDegrees(-3.968797970346633E-4))); + final GeoPolygonFactory.PolygonDescription description = + new GeoPolygonFactory.PolygonDescription(points); final GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, description); - final GeoPolygon largePolygon = GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.SPHERE, Collections.singletonList(description)); + final GeoPolygon largePolygon = + GeoPolygonFactory.makeLargeGeoPolygon( + PlanetModel.SPHERE, Collections.singletonList(description)); - //POINT(-2.394808631784144E-4 5.7E-322) - final GeoPoint point = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(5.7E-322), Geo3DUtil.fromDegrees(-2.394808631784144E-4)); + // POINT(-2.394808631784144E-4 5.7E-322) + final GeoPoint point = + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(5.7E-322), + Geo3DUtil.fromDegrees(-2.394808631784144E-4)); assertTrue(polygon.isWithin(point) == largePolygon.isWithin(point)); } - + @Test - //@AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/LUCENE-8281") + // @AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/LUCENE-8281") public void testLUCENE8281() { /* - [junit4] > Standard polygon: GeoCompositePolygon: {[GeoConvexPolygon: {planetmodel=PlanetModel.WGS84, points=[[lat=-3.89514302068452E-6, lon=6.597839410815709E-6([X=1.0011188539630433, Y=6.605221429683868E-6, Z=-3.89950111699443E-6])], [lat=-2.8213942160840002E-6, lon=1.608008770581648E-5([X=1.0011188538590383, Y=1.60980789753873E-5, Z=-2.8245509442632E-6])], [lat=3.8977187534179774E-6, lon=1.9713406091526053E-5([X=1.0011188537902969, Y=1.973546251320774E-5, Z=3.902079731596721E-6])], [lat=1.980614928404974E-5, lon=4.069266235973146E-6([X=1.0011188537865057, Y=4.07381914993205E-6, Z=1.982830947192924E-5])], [lat=7.4E-323, lon=0.0([X=1.0011188539924791, Y=0.0, Z=7.4E-323])]], internalEdges={4}}, GeoConvexPolygon: {planetmodel=PlanetModel.WGS84, points=[[lat=-3.89514302068452E-6, lon=6.597839410815709E-6([X=1.0011188539630433, Y=6.605221429683868E-6, Z=-3.89950111699443E-6])], [lat=7.4E-323, lon=0.0([X=1.0011188539924791, Y=0.0, Z=7.4E-323])], [lat=-1.261719663233924E-5, lon=-1.5701544210600105E-5([X=1.001118853788849, Y=-1.5719111944122703E-5, Z=-1.2631313432823314E-5])]], internalEdges={0}}]} - [junit4] > Large polygon: GeoComplexPolygon: {planetmodel=PlanetModel.WGS84, number of shapes=1, address=d8738cf, testPoint=[lat=7.28355694648262E-7, lon=5.126509206005681E-6([X=1.0011188539790565, Y=5.13224502127445E-6, Z=7.291706183250984E-7])], testPointInSet=true, shapes={ {[lat=-1.261719663233924E-5, lon=-1.5701544210600105E-5([X=1.001118853788849, Y=-1.5719111944122703E-5, Z=-1.2631313432823314E-5])], [lat=-3.89514302068452E-6, 
lon=6.597839410815709E-6([X=1.0011188539630433, Y=6.605221429683868E-6, Z=-3.89950111699443E-6])], [lat=-2.8213942160840002E-6, lon=1.608008770581648E-5([X=1.0011188538590383, Y=1.60980789753873E-5, Z=-2.8245509442632E-6])], [lat=3.8977187534179774E-6, lon=1.9713406091526053E-5([X=1.0011188537902969, Y=1.973546251320774E-5, Z=3.902079731596721E-6])], [lat=1.980614928404974E-5, lon=4.069266235973146E-6([X=1.0011188537865057, Y=4.07381914993205E-6, Z=1.982830947192924E-5])], [lat=7.4E-323, lon=0.0([X=1.0011188539924791, Y=0.0, Z=7.4E-323])]}} - [junit4] > Point: [lat=4.983019447098944E-6, lon=-3.0E-323([X=1.0011188539799663, Y=-3.0E-323, Z=4.98859471828087E-6])] - [junit4] > WKT: POLYGON(( - 3.7802835214482185E-4 -2.2317525568506174E-4, - 9.213211597434869E-4 -1.6165398092423463E-4, - 0.0011294949688719308 2.233228342998425E-4, - 2.3315178103634778E-4 0.0011348087623821073, - 0.0 4.244E-321, - -8.996322151054578E-4 -7.22912116319714E-4, - 3.7802835214482185E-4 -2.2317525568506174E-4)) - [junit4] > WKT: POINT(-1.7E-321 2.855059835503825E-4) - */ + [junit4] > Standard polygon: GeoCompositePolygon: {[GeoConvexPolygon: {planetmodel=PlanetModel.WGS84, points=[[lat=-3.89514302068452E-6, lon=6.597839410815709E-6([X=1.0011188539630433, Y=6.605221429683868E-6, Z=-3.89950111699443E-6])], [lat=-2.8213942160840002E-6, lon=1.608008770581648E-5([X=1.0011188538590383, Y=1.60980789753873E-5, Z=-2.8245509442632E-6])], [lat=3.8977187534179774E-6, lon=1.9713406091526053E-5([X=1.0011188537902969, Y=1.973546251320774E-5, Z=3.902079731596721E-6])], [lat=1.980614928404974E-5, lon=4.069266235973146E-6([X=1.0011188537865057, Y=4.07381914993205E-6, Z=1.982830947192924E-5])], [lat=7.4E-323, lon=0.0([X=1.0011188539924791, Y=0.0, Z=7.4E-323])]], internalEdges={4}}, GeoConvexPolygon: {planetmodel=PlanetModel.WGS84, points=[[lat=-3.89514302068452E-6, lon=6.597839410815709E-6([X=1.0011188539630433, Y=6.605221429683868E-6, Z=-3.89950111699443E-6])], [lat=7.4E-323, lon=0.0([X=1.0011188539924791, Y=0.0, Z=7.4E-323])], [lat=-1.261719663233924E-5, lon=-1.5701544210600105E-5([X=1.001118853788849, Y=-1.5719111944122703E-5, Z=-1.2631313432823314E-5])]], internalEdges={0}}]} + [junit4] > Large polygon: GeoComplexPolygon: {planetmodel=PlanetModel.WGS84, number of shapes=1, address=d8738cf, testPoint=[lat=7.28355694648262E-7, lon=5.126509206005681E-6([X=1.0011188539790565, Y=5.13224502127445E-6, Z=7.291706183250984E-7])], testPointInSet=true, shapes={ {[lat=-1.261719663233924E-5, lon=-1.5701544210600105E-5([X=1.001118853788849, Y=-1.5719111944122703E-5, Z=-1.2631313432823314E-5])], [lat=-3.89514302068452E-6, lon=6.597839410815709E-6([X=1.0011188539630433, Y=6.605221429683868E-6, Z=-3.89950111699443E-6])], [lat=-2.8213942160840002E-6, lon=1.608008770581648E-5([X=1.0011188538590383, Y=1.60980789753873E-5, Z=-2.8245509442632E-6])], [lat=3.8977187534179774E-6, lon=1.9713406091526053E-5([X=1.0011188537902969, Y=1.973546251320774E-5, Z=3.902079731596721E-6])], [lat=1.980614928404974E-5, lon=4.069266235973146E-6([X=1.0011188537865057, Y=4.07381914993205E-6, Z=1.982830947192924E-5])], [lat=7.4E-323, lon=0.0([X=1.0011188539924791, Y=0.0, Z=7.4E-323])]}} + [junit4] > Point: [lat=4.983019447098944E-6, lon=-3.0E-323([X=1.0011188539799663, Y=-3.0E-323, Z=4.98859471828087E-6])] + [junit4] > WKT: POLYGON(( + 3.7802835214482185E-4 -2.2317525568506174E-4, + 9.213211597434869E-4 -1.6165398092423463E-4, + 0.0011294949688719308 2.233228342998425E-4, + 2.3315178103634778E-4 0.0011348087623821073, + 0.0 4.244E-321, + -8.996322151054578E-4 
-7.22912116319714E-4, + 3.7802835214482185E-4 -2.2317525568506174E-4)) + [junit4] > WKT: POINT(-1.7E-321 2.855059835503825E-4) + */ final List points = new ArrayList<>(); - points.add(new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(-2.2317525568506174E-4), Geo3DUtil.fromDegrees(3.7802835214482185E-4))); - points.add(new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(-1.6165398092423463E-4), Geo3DUtil.fromDegrees(9.213211597434869E-4))); - points.add(new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(2.233228342998425E-4), Geo3DUtil.fromDegrees(0.0011294949688719308))); - points.add(new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(0.0011348087623821073), Geo3DUtil.fromDegrees(2.3315178103634778E-4))); - points.add(new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(4.244E-321), Geo3DUtil.fromDegrees(0.0))); - points.add(new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(-7.22912116319714E-4), Geo3DUtil.fromDegrees(-8.996322151054578E-4))); - points.add(new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(-2.2317525568506174E-4), Geo3DUtil.fromDegrees(3.7802835214482185E-4))); - final GeoPolygonFactory.PolygonDescription description = new GeoPolygonFactory.PolygonDescription(points); + points.add( + new GeoPoint( + PlanetModel.WGS84, + Geo3DUtil.fromDegrees(-2.2317525568506174E-4), + Geo3DUtil.fromDegrees(3.7802835214482185E-4))); + points.add( + new GeoPoint( + PlanetModel.WGS84, + Geo3DUtil.fromDegrees(-1.6165398092423463E-4), + Geo3DUtil.fromDegrees(9.213211597434869E-4))); + points.add( + new GeoPoint( + PlanetModel.WGS84, + Geo3DUtil.fromDegrees(2.233228342998425E-4), + Geo3DUtil.fromDegrees(0.0011294949688719308))); + points.add( + new GeoPoint( + PlanetModel.WGS84, + Geo3DUtil.fromDegrees(0.0011348087623821073), + Geo3DUtil.fromDegrees(2.3315178103634778E-4))); + points.add( + new GeoPoint( + PlanetModel.WGS84, Geo3DUtil.fromDegrees(4.244E-321), Geo3DUtil.fromDegrees(0.0))); + points.add( + new GeoPoint( + PlanetModel.WGS84, + Geo3DUtil.fromDegrees(-7.22912116319714E-4), + Geo3DUtil.fromDegrees(-8.996322151054578E-4))); + points.add( + new GeoPoint( + PlanetModel.WGS84, + Geo3DUtil.fromDegrees(-2.2317525568506174E-4), + Geo3DUtil.fromDegrees(3.7802835214482185E-4))); + final GeoPolygonFactory.PolygonDescription description = + new GeoPolygonFactory.PolygonDescription(points); final GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.WGS84, description); - final GeoPolygon largePolygon = GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.WGS84, Collections.singletonList(description)); + final GeoPolygon largePolygon = + GeoPolygonFactory.makeLargeGeoPolygon( + PlanetModel.WGS84, Collections.singletonList(description)); - //POINT(-2.394808631784144E-4 5.7E-322) - final GeoPoint point = new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(2.855059835503825E-4), Geo3DUtil.fromDegrees(-1.7E-321)); + // POINT(-2.394808631784144E-4 5.7E-322) + final GeoPoint point = + new GeoPoint( + PlanetModel.WGS84, + Geo3DUtil.fromDegrees(2.855059835503825E-4), + Geo3DUtil.fromDegrees(-1.7E-321)); assertTrue(polygon.isWithin(point) == largePolygon.isWithin(point)); - } - + @Test - //@AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/LUCENE-8280") + // @AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/LUCENE-8280") public void testLUCENE8280() { /* - [junit4] 1> unquantized=[lat=0.16367268756896675, lon=-3.141592653589793([X=-0.9876510422569805, Y=-1.2095236875745584E-16, Z=0.16311061810965483])] - [junit4] 1> quantized=[X=-0.9876510423773649, 
Y=-2.3309121299774915E-10, Z=0.16311061829120332] - [junit4] 1> shape=GeoComplexPolygon: {planetmodel=PlanetModel.WGS84, number of shapes=1, address=7fb785c7, - testPoint=[lat=-1.3164421003439726, lon=-0.3852878798825553([X=0.23270178206383424, Y=-0.09437388649617809, Z=-0.9658649833483698])], testPointInSet=true, - shapes={ { - [lat=-0.914670478121684, lon=2.4457272005608357E-47([X=0.609446252447186, Y=1.4905392768899487E-47, Z=-0.7915752112532345])], - [lat=-0.737919215699403, lon=-1.0814374159521924([X=0.34764272191418555, Y=-0.6527705659008658, Z=-0.6724777381306498])], - [lat=-0.2581712131420987, lon=-3.141592653589793([X=-0.9677277372221494, Y=-1.1851246758352164E-16, Z=-0.2555423342455023])], - [lat=-0.40516490647074055, lon=2.4457272005608357E-47([X=0.919584346757591, Y=2.2490524500750083E-47, Z=-0.39440489992508504])], - [lat=2.4457272005608357E-47, lon=-0.6244585784444767([X=0.8121874885299789, Y=-0.5853122613567737, Z=2.448463612203698E-47])]}} - [junit4] 1> bounds=XYZBounds: [xmin=-1.0011188549924792 xmax=1.0011188549924792 ymin=-0.6616249691360604 ymax=1.0E-9 zmin=-0.9977622930221051 zmax=1.0E-9] - */ + [junit4] 1> unquantized=[lat=0.16367268756896675, lon=-3.141592653589793([X=-0.9876510422569805, Y=-1.2095236875745584E-16, Z=0.16311061810965483])] + [junit4] 1> quantized=[X=-0.9876510423773649, Y=-2.3309121299774915E-10, Z=0.16311061829120332] + [junit4] 1> shape=GeoComplexPolygon: {planetmodel=PlanetModel.WGS84, number of shapes=1, address=7fb785c7, + testPoint=[lat=-1.3164421003439726, lon=-0.3852878798825553([X=0.23270178206383424, Y=-0.09437388649617809, Z=-0.9658649833483698])], testPointInSet=true, + shapes={ { + [lat=-0.914670478121684, lon=2.4457272005608357E-47([X=0.609446252447186, Y=1.4905392768899487E-47, Z=-0.7915752112532345])], + [lat=-0.737919215699403, lon=-1.0814374159521924([X=0.34764272191418555, Y=-0.6527705659008658, Z=-0.6724777381306498])], + [lat=-0.2581712131420987, lon=-3.141592653589793([X=-0.9677277372221494, Y=-1.1851246758352164E-16, Z=-0.2555423342455023])], + [lat=-0.40516490647074055, lon=2.4457272005608357E-47([X=0.919584346757591, Y=2.2490524500750083E-47, Z=-0.39440489992508504])], + [lat=2.4457272005608357E-47, lon=-0.6244585784444767([X=0.8121874885299789, Y=-0.5853122613567737, Z=2.448463612203698E-47])]}} + [junit4] 1> bounds=XYZBounds: [xmin=-1.0011188549924792 xmax=1.0011188549924792 ymin=-0.6616249691360604 ymax=1.0E-9 zmin=-0.9977622930221051 zmax=1.0E-9] + */ final List points = new ArrayList<>(); points.add(new GeoPoint(PlanetModel.WGS84, -0.914670478121684, 2.4457272005608357E-47)); points.add(new GeoPoint(PlanetModel.WGS84, -0.737919215699403, -1.0814374159521924)); points.add(new GeoPoint(PlanetModel.WGS84, -0.2581712131420987, -3.141592653589793)); points.add(new GeoPoint(PlanetModel.WGS84, -0.40516490647074055, 2.4457272005608357E-47)); points.add(new GeoPoint(PlanetModel.WGS84, 2.4457272005608357E-47, -0.6244585784444767)); - final GeoPolygonFactory.PolygonDescription description = new GeoPolygonFactory.PolygonDescription(points); - // I think this polygon may cross itself around lat=-0.91, lon=0. If so, this is an invalid test. - final GeoPolygon largePolygon = GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.WGS84, Collections.singletonList(description)); + final GeoPolygonFactory.PolygonDescription description = + new GeoPolygonFactory.PolygonDescription(points); + // I think this polygon may cross itself around lat=-0.91, lon=0. If so, this is an invalid + // test. 
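
The pattern this whole family of regression tests exercises is worth one explicit illustration. Each case builds the same ring twice: GeoPolygonFactory.makeGeoPolygon returns the standard implementation (a GeoCompositePolygon of GeoConvexPolygon pieces in the dumps above), while makeLargeGeoPolygon returns a GeoComplexPolygon that resolves isWithin() by counting edge crossings relative to a test point whose in-set status is known. The test then asserts that both implementations classify the same probe point identically. Below is a minimal, self-contained sketch of that check, not code from this patch: the class and helper names are hypothetical, and the ring and probe point are borrowed from the square-polygon cases earlier in this file.

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;
    import org.apache.lucene.spatial3d.geom.GeoPoint;
    import org.apache.lucene.spatial3d.geom.GeoPolygon;
    import org.apache.lucene.spatial3d.geom.GeoPolygonFactory;
    import org.apache.lucene.spatial3d.geom.PlanetModel;

    public class PolygonAgreementSketch {
      // Hypothetical helper: build both implementations from one description
      // and report whether they agree on the sample point.
      static boolean implementationsAgree(List<GeoPoint> ring, GeoPoint sample) {
        final GeoPolygonFactory.PolygonDescription description =
            new GeoPolygonFactory.PolygonDescription(ring);
        // Standard implementation: composite of convex/concave sub-polygons.
        final GeoPolygon standard =
            GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, description);
        // "Large" implementation: GeoComplexPolygon, which answers isWithin()
        // by crossing-counting from an internally chosen in-set test point.
        final GeoPolygon large =
            GeoPolygonFactory.makeLargeGeoPolygon(
                PlanetModel.SPHERE, Collections.singletonList(description));
        return standard.isWithin(sample) == large.isWithin(sample);
      }

      public static void main(String[] args) {
        // The square ring used by several tests above (lat/lon in radians).
        final List<GeoPoint> ring =
            Arrays.asList(
                new GeoPoint(PlanetModel.SPHERE, -0.5, -0.5),
                new GeoPoint(PlanetModel.SPHERE, -0.5, 0.5),
                new GeoPoint(PlanetModel.SPHERE, 0.5, 0.5),
                new GeoPoint(PlanetModel.SPHERE, 0.5, -0.5));
        // A probe point just off the boundary, the kind these regressions target.
        System.out.println(
            implementationsAgree(ring, new GeoPoint(PlanetModel.SPHERE, 0, 1e-7)));
      }
    }

When the two implementations disagree on such a probe point, the failing geometry gets captured as one of the LUCENE-82xx cases in this file.
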
+ final GeoPolygon largePolygon = + GeoPolygonFactory.makeLargeGeoPolygon( + PlanetModel.WGS84, Collections.singletonList(description)); final GeoPoint point = new GeoPoint(PlanetModel.WGS84, 0.16367268756896675, -3.141592653589793); assertFalse(largePolygon.isWithin(point)); @@ -1804,129 +2631,251 @@ shape: /* Confirmed that bounds is OK final XYZBounds xyzBounds = new XYZBounds(); largePolygon.getBounds(xyzBounds); - + System.out.println("North pole is within? "+largePolygon.isWithin(PlanetModel.WGS84.NORTH_POLE)); System.out.println("South pole is within? "+largePolygon.isWithin(PlanetModel.WGS84.SOUTH_POLE)); - + final XYZSolid xyzSolid = XYZSolidFactory.makeXYZSolid(PlanetModel.WGS84, xyzBounds); // Failure is due either to bounds computation or multiple points having their in-set status wrongly assessed. // Probably it is the former because there are more than a dozen points that otherwise fail to be correct. assertTrue(largePolygon.isWithin(point)?xyzSolid.isWithin(point):true); */ - + } - + @Test - //@AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/LUCENE-8337") + // @AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/LUCENE-8337") public void testLUCENE8337() { /* - {planetmodel=PlanetModel.WGS84, number of shapes=1, address=c865f21d, - testPoint=[lat=2.114284741800425E-5, lon=-3.141516973708951([X=-1.0011188509002849, Y=-7.57645554894811E-5, Z=2.1166503175641402E-5])], testPointInSet=true, shapes={ -{[lat=1.4379241972924144E-5, lon=-3.141520309370815([X=-1.0011188512685139, Y=-7.24251615257059E-5, Z=1.4395330244708275E-5])], -[lat=-1.858900171939205E-5, lon=-3.1415059739464217([X=-1.001118850057461, Y=-8.677662511280753E-5, Z=-1.860980009708855E-5])], -[lat=2.7071641284581073E-5, lon=-3.141469177092562([X=-1.001118845991408, Y=-1.2361464904363391E-4, Z=2.7101930495137982E-5])], -[lat=8.285235549000288E-5, lon=-3.1414967545451287([X=-1.0011188459297669, Y=-9.600634121467467E-5, Z=8.29450550819143E-5])], -[lat=-8.956596846349593E-303, lon=-3.1415926535897922([X=-1.0011188539924791, Y=-1.0117738616818362E-15, Z=-8.966617970490158E-303])]}} + {planetmodel=PlanetModel.WGS84, number of shapes=1, address=c865f21d, + testPoint=[lat=2.114284741800425E-5, lon=-3.141516973708951([X=-1.0011188509002849, Y=-7.57645554894811E-5, Z=2.1166503175641402E-5])], testPointInSet=true, shapes={ + {[lat=1.4379241972924144E-5, lon=-3.141520309370815([X=-1.0011188512685139, Y=-7.24251615257059E-5, Z=1.4395330244708275E-5])], + [lat=-1.858900171939205E-5, lon=-3.1415059739464217([X=-1.001118850057461, Y=-8.677662511280753E-5, Z=-1.860980009708855E-5])], + [lat=2.7071641284581073E-5, lon=-3.141469177092562([X=-1.001118845991408, Y=-1.2361464904363391E-4, Z=2.7101930495137982E-5])], + [lat=8.285235549000288E-5, lon=-3.1414967545451287([X=-1.0011188459297669, Y=-9.600634121467467E-5, Z=8.29450550819143E-5])], + [lat=-8.956596846349593E-303, lon=-3.1415926535897922([X=-1.0011188539924791, Y=-1.0117738616818362E-15, Z=-8.966617970490158E-303])]}} + + [junit4] > Point: [lat=-6.499661194605612E-10, lon=-2.0286460544410216([X=-0.4425148814082194, Y=-0.8980086522698344, Z=-6.506933366482957E-10])] + */ - [junit4] > Point: [lat=-6.499661194605612E-10, lon=-2.0286460544410216([X=-0.4425148814082194, Y=-0.8980086522698344, Z=-6.506933366482957E-10])] - */ - final List points = new ArrayList<>(); points.add(new GeoPoint(PlanetModel.WGS84, 1.4379241972924144E-5, -3.141520309370815)); points.add(new GeoPoint(PlanetModel.WGS84, -1.858900171939205E-5, -3.1415059739464217)); points.add(new 
GeoPoint(PlanetModel.WGS84, 2.7071641284581073E-5, -3.141469177092562)); points.add(new GeoPoint(PlanetModel.WGS84, 8.285235549000288E-5, -3.1414967545451287)); points.add(new GeoPoint(PlanetModel.WGS84, -8.956596846349593E-303, -3.1415926535897922)); - final GeoPolygonFactory.PolygonDescription description = new GeoPolygonFactory.PolygonDescription(points); + final GeoPolygonFactory.PolygonDescription description = + new GeoPolygonFactory.PolygonDescription(points); + + final GeoPolygon largePolygon = + GeoPolygonFactory.makeLargeGeoPolygon( + PlanetModel.WGS84, Collections.singletonList(description)); + final GeoPolygon smallPolygon = + GeoPolygonFactory.makeGeoPolygon(PlanetModel.WGS84, description); + + final GeoPoint thePoint = + new GeoPoint(PlanetModel.WGS84, -6.499661194605612E-10, -2.0286460544410216); + + System.out.println("large inset: " + largePolygon.isWithin(thePoint)); - final GeoPolygon largePolygon = GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.WGS84, Collections.singletonList(description)); - final GeoPolygon smallPolygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.WGS84, description); - - final GeoPoint thePoint = new GeoPoint(PlanetModel.WGS84, -6.499661194605612E-10, -2.0286460544410216); - - System.out.println("large inset: "+largePolygon.isWithin(thePoint)); - assertTrue(largePolygon.isWithin(thePoint) == smallPolygon.isWithin(thePoint)); - } @Test public void testLUCENE8444() { - //POLYGON((0.0 -67.68132244526963,-1.2477695347678826E-95 -88.11137674490907, 1.7059188343238906E-9 7.009654350320916,0.0 -67.68132244526963)) + // POLYGON((0.0 -67.68132244526963,-1.2477695347678826E-95 -88.11137674490907, + // 1.7059188343238906E-9 7.009654350320916,0.0 -67.68132244526963)) final List points = new ArrayList<>(); - points.add(new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(-67.68132244526963), Geo3DUtil.fromDegrees(0.0))); - points.add(new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(-88.11137674490907), Geo3DUtil.fromDegrees(-1.2477695347678826E-95))); - points.add(new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(7.009654350320916), Geo3DUtil.fromDegrees(1.7059188343238906E-9))); + points.add( + new GeoPoint( + PlanetModel.WGS84, + Geo3DUtil.fromDegrees(-67.68132244526963), + Geo3DUtil.fromDegrees(0.0))); + points.add( + new GeoPoint( + PlanetModel.WGS84, + Geo3DUtil.fromDegrees(-88.11137674490907), + Geo3DUtil.fromDegrees(-1.2477695347678826E-95))); + points.add( + new GeoPoint( + PlanetModel.WGS84, + Geo3DUtil.fromDegrees(7.009654350320916), + Geo3DUtil.fromDegrees(1.7059188343238906E-9))); - final GeoPolygonFactory.PolygonDescription description = new GeoPolygonFactory.PolygonDescription(points); + final GeoPolygonFactory.PolygonDescription description = + new GeoPolygonFactory.PolygonDescription(points); final GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.WGS84, description); - final GeoPolygon largePolygon = GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.WGS84, Collections.singletonList(description)); + final GeoPolygon largePolygon = + GeoPolygonFactory.makeLargeGeoPolygon( + PlanetModel.WGS84, Collections.singletonList(description)); - //POINT(180.0 -61.73229670194638) - final GeoPoint point = new GeoPoint(PlanetModel.WGS84, Geo3DUtil.fromDegrees(-61.73229670194638), Geo3DUtil.fromDegrees(180.0)); - //assertTrue(polygon.isWithin(point) == largePolygon.isWithin(point)); + // POINT(180.0 -61.73229670194638) + final GeoPoint point = + new GeoPoint( + PlanetModel.WGS84, + Geo3DUtil.fromDegrees(-61.73229670194638), + 
Geo3DUtil.fromDegrees(180.0)); + // assertTrue(polygon.isWithin(point) == largePolygon.isWithin(point)); assertTrue(false == largePolygon.isWithin(point)); } @Test public void testLUCENE8445() { - //POLYGON((32.18017946378854 -17.397683785381247,49.51018758330871 -9.870219317504647,58.77903721991479 33.90553510354402,2.640604559432277 9.363173880050821,3.1673235739886286E-10 8.853669066894417E-11,0.0 -5.7E-322,4.820339742500488E-5 5.99784517213369E-7,32.18017946378854 -17.397683785381247)) + // POLYGON((32.18017946378854 -17.397683785381247,49.51018758330871 + // -9.870219317504647,58.77903721991479 33.90553510354402,2.640604559432277 + // 9.363173880050821,3.1673235739886286E-10 8.853669066894417E-11,0.0 + // -5.7E-322,4.820339742500488E-5 5.99784517213369E-7,32.18017946378854 -17.397683785381247)) final List points = new ArrayList<>(); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-17.397683785381247), Geo3DUtil.fromDegrees(32.18017946378854))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-9.870219317504647), Geo3DUtil.fromDegrees(49.51018758330871))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(33.90553510354402), Geo3DUtil.fromDegrees(58.77903721991479))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(9.363173880050821), Geo3DUtil.fromDegrees(2.640604559432277))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(8.853669066894417E-11), Geo3DUtil.fromDegrees(3.1673235739886286E-10))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-5.7E-322), Geo3DUtil.fromDegrees(0.0))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(5.99784517213369E-7), Geo3DUtil.fromDegrees(4.820339742500488E-5))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-17.397683785381247), + Geo3DUtil.fromDegrees(32.18017946378854))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-9.870219317504647), + Geo3DUtil.fromDegrees(49.51018758330871))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(33.90553510354402), + Geo3DUtil.fromDegrees(58.77903721991479))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(9.363173880050821), + Geo3DUtil.fromDegrees(2.640604559432277))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(8.853669066894417E-11), + Geo3DUtil.fromDegrees(3.1673235739886286E-10))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-5.7E-322), Geo3DUtil.fromDegrees(0.0))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(5.99784517213369E-7), + Geo3DUtil.fromDegrees(4.820339742500488E-5))); - final GeoPolygonFactory.PolygonDescription description = new GeoPolygonFactory.PolygonDescription(points); + final GeoPolygonFactory.PolygonDescription description = + new GeoPolygonFactory.PolygonDescription(points); final GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, description); - final GeoPolygon largePolygon = GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.SPHERE, Collections.singletonList(description)); + final GeoPolygon largePolygon = + GeoPolygonFactory.makeLargeGeoPolygon( + PlanetModel.SPHERE, Collections.singletonList(description)); - //POINT(179.99999999999983 -5.021400461974724E-11) - final GeoPoint point = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-5.021400461974724E-11), Geo3DUtil.fromDegrees(179.99999999999983)); + // 
POINT(179.99999999999983 -5.021400461974724E-11) + final GeoPoint point = + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-5.021400461974724E-11), + Geo3DUtil.fromDegrees(179.99999999999983)); assertTrue(polygon.isWithin(point) == largePolygon.isWithin(point)); } @Test - //@AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/LUCENE-8451") + // @AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/LUCENE-8451") public void testLUCENE8451() { - //POLYGON((-2.5185339401969213 -24.093993739745027,0.0 8.828539494442529E-27,5.495998489568957E-11 -8.321407453133E-11,2.7174659198424288E-11 1.0260761462208114E-10,88.32137548549387 16.934529875343244,-87.97237709688223 39.919704493657484,-88.0876897472551 34.91204903885665,-2.5185339401969213 -24.093993739745027)) + // POLYGON((-2.5185339401969213 -24.093993739745027,0.0 + // 8.828539494442529E-27,5.495998489568957E-11 -8.321407453133E-11,2.7174659198424288E-11 + // 1.0260761462208114E-10,88.32137548549387 16.934529875343244,-87.97237709688223 + // 39.919704493657484,-88.0876897472551 34.91204903885665,-2.5185339401969213 + // -24.093993739745027)) final List points = new ArrayList<>(); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-24.093993739745027), Geo3DUtil.fromDegrees(-2.5185339401969213))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(8.828539494442529E-27), Geo3DUtil.fromDegrees(0.0))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-8.321407453133E-11), Geo3DUtil.fromDegrees(5.495998489568957E-11))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(1.0260761462208114E-10), Geo3DUtil.fromDegrees(2.7174659198424288E-11))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(16.934529875343244), Geo3DUtil.fromDegrees(88.32137548549387))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(39.919704493657484), Geo3DUtil.fromDegrees(-87.97237709688223))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(34.91204903885665), Geo3DUtil.fromDegrees(-88.0876897472551))); - final GeoPolygonFactory.PolygonDescription description = new GeoPolygonFactory.PolygonDescription(points); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-24.093993739745027), + Geo3DUtil.fromDegrees(-2.5185339401969213))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(8.828539494442529E-27), + Geo3DUtil.fromDegrees(0.0))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-8.321407453133E-11), + Geo3DUtil.fromDegrees(5.495998489568957E-11))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(1.0260761462208114E-10), + Geo3DUtil.fromDegrees(2.7174659198424288E-11))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(16.934529875343244), + Geo3DUtil.fromDegrees(88.32137548549387))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(39.919704493657484), + Geo3DUtil.fromDegrees(-87.97237709688223))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(34.91204903885665), + Geo3DUtil.fromDegrees(-88.0876897472551))); + final GeoPolygonFactory.PolygonDescription description = + new GeoPolygonFactory.PolygonDescription(points); final GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, description); - final GeoPolygon largePolygon = GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.SPHERE, 
Collections.singletonList(description)); + final GeoPolygon largePolygon = + GeoPolygonFactory.makeLargeGeoPolygon( + PlanetModel.SPHERE, Collections.singletonList(description)); - //POINT(179.99999999999983 -5.021400461974724E-11) - final GeoPoint point = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-5.021400461974724E-11), Geo3DUtil.fromDegrees(179.99999999999983)); + // POINT(179.99999999999983 -5.021400461974724E-11) + final GeoPoint point = + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-5.021400461974724E-11), + Geo3DUtil.fromDegrees(179.99999999999983)); assertTrue(polygon.isWithin(point) == largePolygon.isWithin(point)); - } - - //@AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/LUCENE-8512") + + // @AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/LUCENE-8512") public void testLUCENE8512() { - //POLYGON((35.4190030282028 -67.85799140154762,35.420218772379776 -67.85786846162631,35.42021877254679 -67.85786846168897,35.420218772734266 -67.85786846168025,35.4190030282028 -67.85799140154762)) + // POLYGON((35.4190030282028 -67.85799140154762,35.420218772379776 + // -67.85786846162631,35.42021877254679 -67.85786846168897,35.420218772734266 + // -67.85786846168025,35.4190030282028 -67.85799140154762)) final List points = new ArrayList<>(); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-67.85799140154762), Geo3DUtil.fromDegrees(35.4190030282028))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-67.85786846162631), Geo3DUtil.fromDegrees(35.420218772379776))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-67.85786846168897), Geo3DUtil.fromDegrees(35.42021877254679))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-67.85786846168025), Geo3DUtil.fromDegrees(35.420218772734266))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-67.85799140154762), + Geo3DUtil.fromDegrees(35.4190030282028))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-67.85786846162631), + Geo3DUtil.fromDegrees(35.420218772379776))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-67.85786846168897), + Geo3DUtil.fromDegrees(35.42021877254679))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-67.85786846168025), + Geo3DUtil.fromDegrees(35.420218772734266))); - final GeoPolygonFactory.PolygonDescription description = new GeoPolygonFactory.PolygonDescription(points); + final GeoPolygonFactory.PolygonDescription description = + new GeoPolygonFactory.PolygonDescription(points); final GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, description); - final GeoPolygon largePolygon = GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.SPHERE, Collections.singletonList(description)); + final GeoPolygon largePolygon = + GeoPolygonFactory.makeLargeGeoPolygon( + PlanetModel.SPHERE, Collections.singletonList(description)); - //POINT(179.99999999999983 -5.021400461974724E-11) - final GeoPoint point = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-5.021400461974724E-11), Geo3DUtil.fromDegrees(179.99999999999983)); + // POINT(179.99999999999983 -5.021400461974724E-11) + final GeoPoint point = + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-5.021400461974724E-11), + Geo3DUtil.fromDegrees(179.99999999999983)); assertTrue(polygon.isWithin(point) == largePolygon.isWithin(point)); - } } diff --git 
a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestPlane.java b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestPlane.java index 3d02f20e57f..93d518404c2 100644 --- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestPlane.java +++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestPlane.java @@ -16,15 +16,13 @@ */ package org.apache.lucene.spatial3d.geom; -import org.junit.Test; - +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; -import static org.junit.Assert.assertEquals; -/** - * Test basic plane functionality. - */ +import org.junit.Test; + +/** Test basic plane functionality. */ public class TestPlane { @Test @@ -45,9 +43,9 @@ public class TestPlane { @Test public void testIdenticalVector() { - final Vector v1 = new Vector(1, 0 , 0); - final Vector v2 = new Vector(1, 0 , 0); - final Vector v3 = new Vector(-1, 0 , 0); + final Vector v1 = new Vector(1, 0, 0); + final Vector v2 = new Vector(1, 0, 0); + final Vector v3 = new Vector(-1, 0, 0); assertTrue(v1.isNumericallyIdentical(v2)); assertFalse(v1.isNumericallyIdentical(v3)); } @@ -57,19 +55,22 @@ public class TestPlane { // [X=0.35168818443386646, Y=-0.19637966197066342, Z=0.9152870857244183], // [X=0.5003343189532654, Y=0.522128543226148, Z=0.6906861469771293], - final GeoPoint start = new GeoPoint(0.35168818443386646, -0.19637966197066342, 0.9152870857244183); + final GeoPoint start = + new GeoPoint(0.35168818443386646, -0.19637966197066342, 0.9152870857244183); final GeoPoint end = new GeoPoint(0.5003343189532654, 0.522128543226148, 0.6906861469771293); - // [A=-0.6135342247741855, B=0.21504338363863665, C=0.28188192383666794, D=0.0, side=-1.0] internal? false; + // [A=-0.6135342247741855, B=0.21504338363863665, C=0.28188192383666794, D=0.0, side=-1.0] + // internal? 
false; final Plane p = new Plane(-0.6135342247741855, 0.21504338363863665, 0.28188192383666794, 0.0); - final GeoPoint[] points = p.interpolate(PlanetModel.SPHERE, start, end, new double[]{0.25, 0.50, 0.75}); + final GeoPoint[] points = + p.interpolate(PlanetModel.SPHERE, start, end, new double[] {0.25, 0.50, 0.75}); for (GeoPoint point : points) { assertTrue(p.evaluateIsZero(point)); } } - + @Test public void testFindArcPoints() { // Create two points @@ -87,6 +88,4 @@ public class TestPlane { assertEquals(0.20, p1.arcDistance(newPoints[0]), 1e-6); assertEquals(0.20, p1.arcDistance(newPoints[1]), 1e-6); } - } - diff --git a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestRandomBinaryCodec.java b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestRandomBinaryCodec.java index 3169c3b967c..0bb14c5866f 100644 --- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestRandomBinaryCodec.java +++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestRandomBinaryCodec.java @@ -17,21 +17,18 @@ package org.apache.lucene.spatial3d.geom; +import com.carrotsearch.randomizedtesting.annotations.Repeat; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; - -import com.carrotsearch.randomizedtesting.annotations.Repeat; import org.junit.Test; -/** - * Test to check Serialization - */ +/** Test to check Serialization */ public class TestRandomBinaryCodec extends RandomGeo3dShapeGenerator { @Test @Repeat(iterations = 10) - public void testRandomPointCodec() throws IOException{ + public void testRandomPointCodec() throws IOException { PlanetModel planetModel = randomPlanetModel(); GeoPoint shape = randomGeoPoint(planetModel); ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); @@ -43,7 +40,7 @@ public class TestRandomBinaryCodec extends RandomGeo3dShapeGenerator { @Test @Repeat(iterations = 100) - public void testRandomPlanetObjectCodec() throws IOException{ + public void testRandomPlanetObjectCodec() throws IOException { PlanetModel planetModel = randomPlanetModel(); int type = randomShapeType(); GeoShape shape = randomGeoShape(type, planetModel); @@ -56,7 +53,7 @@ public class TestRandomBinaryCodec extends RandomGeo3dShapeGenerator { @Test @Repeat(iterations = 100) - public void testRandomShapeCodec() throws IOException{ + public void testRandomShapeCodec() throws IOException { PlanetModel planetModel = randomPlanetModel(); int type = randomShapeType(); GeoShape shape = randomGeoShape(type, planetModel); diff --git a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestRandomGeoPolygon.java b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestRandomGeoPolygon.java index 6d34a1bea46..74fcfee158e 100644 --- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestRandomGeoPolygon.java +++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestRandomGeoPolygon.java @@ -16,16 +16,13 @@ */ package org.apache.lucene.spatial3d.geom; +import com.carrotsearch.randomizedtesting.generators.BiasedNumbers; import java.util.ArrayList; import java.util.Collections; import java.util.List; - -import com.carrotsearch.randomizedtesting.generators.BiasedNumbers; import org.junit.Test; -/** - * Random test for polygons. - */ +/** Random test for polygons. 
*/ public class TestRandomGeoPolygon extends RandomGeo3dShapeGenerator { @Test @@ -33,12 +30,13 @@ public class TestRandomGeoPolygon extends RandomGeo3dShapeGenerator { final PlanetModel planetModel = randomPlanetModel(); final GeoPoint startPoint = randomGeoPoint(planetModel); double d = random().nextDouble(); - final double distanceSmall = d * 1e-9 + Vector.MINIMUM_ANGULAR_RESOLUTION; - final double distanceBig = d * 1e-7 + Vector.MINIMUM_ANGULAR_RESOLUTION ; - final double bearing = random().nextDouble() * Math.PI; - GeoPoint point1 = planetModel.surfacePointOnBearing(startPoint, distanceSmall, bearing*1.001); + final double distanceSmall = d * 1e-9 + Vector.MINIMUM_ANGULAR_RESOLUTION; + final double distanceBig = d * 1e-7 + Vector.MINIMUM_ANGULAR_RESOLUTION; + final double bearing = random().nextDouble() * Math.PI; + GeoPoint point1 = planetModel.surfacePointOnBearing(startPoint, distanceSmall, bearing * 1.001); GeoPoint point2 = planetModel.surfacePointOnBearing(startPoint, distanceBig, bearing); - GeoPoint point3 = planetModel.surfacePointOnBearing(startPoint, distanceBig, bearing - 0.5 * Math.PI); + GeoPoint point3 = + planetModel.surfacePointOnBearing(startPoint, distanceBig, bearing - 0.5 * Math.PI); List points = new ArrayList<>(); points.add(startPoint); points.add(point1); @@ -47,8 +45,7 @@ public class TestRandomGeoPolygon extends RandomGeo3dShapeGenerator { try { GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(planetModel, points); assertTrue(polygon != null); - } - catch(Exception e) { + } catch (Exception e) { fail(points.toString()); } } @@ -65,53 +62,59 @@ public class TestRandomGeoPolygon extends RandomGeo3dShapeGenerator { points.add(point4); try { GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points); - } - catch(Exception e) { + } catch (Exception e) { fail(points.toString()); } } @Test public void testCoplanarityTilePolygon() { - //POLYGON((-90.55764 -0.34907,-90.55751 -0.34868,-90.55777 -0.34842,-90.55815 -0.34766,-90.55943 -0.34842, -90.55918 -0.34842,-90.55764 -0.34907)) + // POLYGON((-90.55764 -0.34907,-90.55751 -0.34868,-90.55777 -0.34842,-90.55815 + // -0.34766,-90.55943 -0.34842, -90.55918 -0.34842,-90.55764 -0.34907)) List points = new ArrayList<>(); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-0.34907), Geo3DUtil.fromDegrees(-90.55764))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-0.34868), Geo3DUtil.fromDegrees(-90.55751))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-0.34842), Geo3DUtil.fromDegrees(-90.55777))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-0.34766), Geo3DUtil.fromDegrees(-90.55815))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-0.34842), Geo3DUtil.fromDegrees(-90.55943))); - points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-0.34842), Geo3DUtil.fromDegrees(-90.55918))); - GeoCompositePolygon polygon = (GeoCompositePolygon)GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-0.34907), Geo3DUtil.fromDegrees(-90.55764))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-0.34868), Geo3DUtil.fromDegrees(-90.55751))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-0.34842), Geo3DUtil.fromDegrees(-90.55777))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-0.34766), Geo3DUtil.fromDegrees(-90.55815))); + 
points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-0.34842), Geo3DUtil.fromDegrees(-90.55943))); + points.add( + new GeoPoint( + PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-0.34842), Geo3DUtil.fromDegrees(-90.55918))); + GeoCompositePolygon polygon = + (GeoCompositePolygon) GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points); assertTrue(polygon.size() == 3); } - /** - * Test comparing different polygon (Big) technologies using random - * biased doubles. - */ + /** Test comparing different polygon (Big) technologies using random biased doubles. */ @Test - //@AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/LUCENE-8281") + // @AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/LUCENE-8281") public void testCompareBigPolygons() { testComparePolygons(Math.PI); } - /** - * Test comparing different polygon (Small) technologies using random - * biased doubles. - */ + /** Test comparing different polygon (Small) technologies using random biased doubles. */ @Test - //@AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/LUCENE-8281") + // @AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/LUCENE-8281") public void testCompareSmallPolygons() { testComparePolygons(1e-4 * Math.PI); } - private void testComparePolygons(double limitDistance) { final PlanetModel planetModel = randomPlanetModel(); - //Create polygon points using a reference point and a maximum distance to the point + // Create polygon points using a reference point and a maximum distance to the point final GeoPoint referencePoint; if (random().nextBoolean()) { - referencePoint = getBiasedPoint(planetModel); + referencePoint = getBiasedPoint(planetModel); } else { referencePoint = randomGeoPoint(planetModel); } @@ -123,18 +126,22 @@ public class TestRandomGeoPolygon extends RandomGeo3dShapeGenerator { do { final List points = new ArrayList<>(n); double maxDistance = random().nextDouble() * limitDistance; - //if distance is too small we can fail - //building the polygon. + // if distance is too small we can fail + // building the polygon. 
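The comment above belongs to the retry loop that follows: vertices are scattered around the reference point by a random arc distance and bearing, and the radius is re-drawn while it is too small to build a valid polygon. Stripped of the duplicate and coplanarity rejection done in the real loop, the generation step amounts to the sketch below; the helper name and the explicit Random parameter are mine, the test itself uses the randomized-testing framework's random():

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Random;
    import org.apache.lucene.spatial3d.geom.GeoPoint;
    import org.apache.lucene.spatial3d.geom.PlanetModel;

    final class RandomVertexSketch {
      /** Scatters n vertices around referencePoint by random arc distance and bearing. */
      static List<GeoPoint> randomVertices(
          PlanetModel planetModel,
          GeoPoint referencePoint,
          int n,
          double limitDistance,
          Random random) {
        double maxDistance = random.nextDouble() * limitDistance;
        // A radius that is too small tends to break polygon construction, hence the floor.
        while (maxDistance < 1e-7) {
          maxDistance = random.nextDouble() * limitDistance;
        }
        List<GeoPoint> points = new ArrayList<>(n);
        for (int i = 0; i < n; i++) {
          double distance = random.nextDouble() * maxDistance;
          double bearing = random.nextDouble() * 2 * Math.PI;
          // Walk 'distance' radians from the reference point along 'bearing'.
          points.add(planetModel.surfacePointOnBearing(referencePoint, distance, bearing));
        }
        return points;
      }
    }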
while (maxDistance < 1e-7) { maxDistance = random().nextDouble() * limitDistance; } for (int i = 0; i < n; i++) { while (true) { - final double distance = BiasedNumbers.randomDoubleBetween(random(), 0, maxDistance);// random().nextDouble() * maxDistance; + final double distance = + BiasedNumbers.randomDoubleBetween( + random(), 0, maxDistance); // random().nextDouble() * maxDistance; final double bearing = random().nextDouble() * 2 * Math.PI; final GeoPoint p = planetModel.surfacePointOnBearing(referencePoint, distance, bearing); if (!contains(p, points)) { - if (points.size() > 1 && Plane.arePointsCoplanar(points.get(points.size() - 1), points.get(points.size() - 2), p)) { + if (points.size() > 1 + && Plane.arePointsCoplanar( + points.get(points.size() - 1), points.get(points.size() - 2), p)) { continue; } points.add(p); @@ -142,17 +149,19 @@ public class TestRandomGeoPolygon extends RandomGeo3dShapeGenerator { } } } - //order points so we don't get crossing edges + // order points so we don't get crossing edges orderedPoints = orderPoints(points); if (random().nextBoolean() && random().nextBoolean()) { Collections.reverse(orderedPoints); } - final GeoPolygonFactory.PolygonDescription polygonDescription = new GeoPolygonFactory.PolygonDescription(orderedPoints); + final GeoPolygonFactory.PolygonDescription polygonDescription = + new GeoPolygonFactory.PolygonDescription(orderedPoints); try { polygon = GeoPolygonFactory.makeGeoPolygon(planetModel, polygonDescription); } catch (Exception e) { - final StringBuilder buffer = new StringBuilder("Polygon failed to build with an exception:\n"); + final StringBuilder buffer = + new StringBuilder("Polygon failed to build with an exception:\n"); buffer.append(points.toString() + "\n"); buffer.append("WKT:" + getWKT(orderedPoints)); buffer.append(e.toString()); @@ -165,9 +174,12 @@ public class TestRandomGeoPolygon extends RandomGeo3dShapeGenerator { fail(buffer.toString()); } try { - largePolygon = GeoPolygonFactory.makeLargeGeoPolygon(planetModel, Collections.singletonList(polygonDescription)); + largePolygon = + GeoPolygonFactory.makeLargeGeoPolygon( + planetModel, Collections.singletonList(polygonDescription)); } catch (Exception e) { - final StringBuilder buffer = new StringBuilder("Large polygon failed to build with an exception:\n"); + final StringBuilder buffer = + new StringBuilder("Large polygon failed to build with an exception:\n"); buffer.append(points.toString() + "\n"); buffer.append("WKT:" + getWKT(orderedPoints)); buffer.append(e.toString()); @@ -179,17 +191,23 @@ public class TestRandomGeoPolygon extends RandomGeo3dShapeGenerator { buffer.append("WKT:" + getWKT(orderedPoints)); fail(buffer.toString()); } - } while(polygon.getClass().equals(largePolygon.getClass())); - //Some of these do not work but it seems it s from the way the point is created - //GeoPoint centerOfMass = getCenterOfMass(planetModel, orderedPoints); - //checkPoint(polygon, largePolygon, centerOfMass, orderedPoints); - //checkPoint(polygon, largePolygon, new GeoPoint(-centerOfMass.x, -centerOfMass.y, -centerOfMass.z), orderedPoints); - //checkPoint(polygon, largePolygon, new GeoPoint(centerOfMass.x, -centerOfMass.y, -centerOfMass.z), orderedPoints); - //checkPoint(polygon, largePolygon, new GeoPoint(centerOfMass.x, centerOfMass.y, -centerOfMass.z), orderedPoints); - //checkPoint(polygon, largePolygon, new GeoPoint(-centerOfMass.x, -centerOfMass.y, centerOfMass.z), orderedPoints); - //checkPoint(polygon, largePolygon, new GeoPoint(-centerOfMass.x, centerOfMass.y, 
-centerOfMass.z), orderedPoints);
-    //checkPoint(polygon, largePolygon, new GeoPoint(centerOfMass.x, -centerOfMass.y, centerOfMass.z), orderedPoints);
-    for(int i = 0; i < 100000; i++) {
+    } while (polygon.getClass().equals(largePolygon.getClass()));
+    // Some of these do not work but it seems it is from the way the point is created
+    // GeoPoint centerOfMass = getCenterOfMass(planetModel, orderedPoints);
+    // checkPoint(polygon, largePolygon, centerOfMass, orderedPoints);
+    // checkPoint(polygon, largePolygon, new GeoPoint(-centerOfMass.x, -centerOfMass.y,
+    // -centerOfMass.z), orderedPoints);
+    // checkPoint(polygon, largePolygon, new GeoPoint(centerOfMass.x, -centerOfMass.y,
+    // -centerOfMass.z), orderedPoints);
+    // checkPoint(polygon, largePolygon, new GeoPoint(centerOfMass.x, centerOfMass.y,
+    // -centerOfMass.z), orderedPoints);
+    // checkPoint(polygon, largePolygon, new GeoPoint(-centerOfMass.x, -centerOfMass.y,
+    // centerOfMass.z), orderedPoints);
+    // checkPoint(polygon, largePolygon, new GeoPoint(-centerOfMass.x, centerOfMass.y,
+    // -centerOfMass.z), orderedPoints);
+    // checkPoint(polygon, largePolygon, new GeoPoint(centerOfMass.x, -centerOfMass.y,
+    // centerOfMass.z), orderedPoints);
+    for (int i = 0; i < 100000; i++) {
       final GeoPoint point;
       if (random().nextBoolean()) {
         point = getBiasedPoint(planetModel);
@@ -200,25 +218,34 @@
     }
   }
 
-  private void checkPoint(final GeoPolygon polygon, final GeoPolygon largePolygon, final GeoPoint point, final List<GeoPoint> orderedPoints) {
+  private void checkPoint(
+      final GeoPolygon polygon,
+      final GeoPolygon largePolygon,
+      final GeoPoint point,
+      final List<GeoPoint> orderedPoints) {
     final boolean withIn1 = polygon.isWithin(point);
     final boolean withIn2 = largePolygon.isWithin(point);
     StringBuilder buffer = new StringBuilder();
     if (withIn1 != withIn2) {
-      //NOTE: Standard and large polygon are mathematically slightly different
-      //close to the edges (due to bounding planes). Nothing we can do about that
-      //so we filter the differences.
+      // NOTE: Standard and large polygon are mathematically slightly different
+      // close to the edges (due to bounding planes). Nothing we can do about that
+      // so we filter the differences.
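The filtering described by this NOTE is exactly the distance check that follows. Factored into a helper (the name is hypothetical, the calls are the ones used below), it reads:

    import org.apache.lucene.spatial3d.geom.DistanceStyle;
    import org.apache.lucene.spatial3d.geom.GeoPoint;
    import org.apache.lucene.spatial3d.geom.GeoPolygon;

    final class BoundaryFilterSketch {
      // computeOutsideDistance returns 0 for a point the shape considers inside, so a
      // disagreement where both distances are 0 means the point sits numerically on the
      // boundary of both implementations, and the mismatch is tolerated.
      static boolean isBoundaryArtifact(
          GeoPolygon polygon, GeoPolygon largePolygon, GeoPoint point) {
        double d1 = polygon.computeOutsideDistance(DistanceStyle.ARC, point);
        double d2 = largePolygon.computeOutsideDistance(DistanceStyle.ARC, point);
        return d1 == 0 && d2 == 0;
      }
    }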
final double d1 = polygon.computeOutsideDistance(DistanceStyle.ARC, point); - final double d2 = largePolygon.computeOutsideDistance(DistanceStyle.ARC, point); + final double d2 = largePolygon.computeOutsideDistance(DistanceStyle.ARC, point); if (d1 == 0 && d2 == 0) { return; } - buffer = buffer.append("\nStandard polygon: " + polygon.toString() +"\n"); - buffer = buffer.append("\nLarge polygon: " + largePolygon.toString() +"\n"); - buffer = buffer.append("\nPoint: " + point.toString() +"\n"); + buffer = buffer.append("\nStandard polygon: " + polygon.toString() + "\n"); + buffer = buffer.append("\nLarge polygon: " + largePolygon.toString() + "\n"); + buffer = buffer.append("\nPoint: " + point.toString() + "\n"); buffer.append("\nWKT: " + getWKT(orderedPoints)); - buffer.append("\nWKT: POINT(" + Geo3DUtil.toDegrees(point.getLongitude()) + " " + Geo3DUtil.toDegrees(point.getLatitude()) + ")\n"); - buffer.append("normal polygon: " +withIn1 + "\n"); + buffer.append( + "\nWKT: POINT(" + + Geo3DUtil.toDegrees(point.getLongitude()) + + " " + + Geo3DUtil.toDegrees(point.getLatitude()) + + ")\n"); + buffer.append("normal polygon: " + withIn1 + "\n"); buffer.append("large polygon: " + withIn2 + "\n"); } assertTrue(buffer.toString(), withIn1 == withIn2); @@ -239,9 +266,17 @@ public class TestRandomGeoPolygon extends RandomGeo3dShapeGenerator { private String getWKT(List points) { StringBuffer buffer = new StringBuffer("POLYGON(("); for (GeoPoint point : points) { - buffer.append(Geo3DUtil.toDegrees(point.getLongitude()) + " " + Geo3DUtil.toDegrees(point.getLatitude()) + ","); + buffer.append( + Geo3DUtil.toDegrees(point.getLongitude()) + + " " + + Geo3DUtil.toDegrees(point.getLatitude()) + + ","); } - buffer.append(Geo3DUtil.toDegrees(points.get(0).getLongitude()) + " " + Geo3DUtil.toDegrees(points.get(0).getLatitude()) + "))\n"); + buffer.append( + Geo3DUtil.toDegrees(points.get(0).getLongitude()) + + " " + + Geo3DUtil.toDegrees(points.get(0).getLatitude()) + + "))\n"); return buffer.toString(); } @@ -258,7 +293,7 @@ public class TestRandomGeoPolygon extends RandomGeo3dShapeGenerator { double x = 0; double y = 0; double z = 0; - //get center of mass + // get center of mass for (final GeoPoint point : points) { x += point.x; y += point.y; @@ -267,5 +302,4 @@ public class TestRandomGeoPolygon extends RandomGeo3dShapeGenerator { // Normalization is not needed because createSurfacePoint does the scaling anyway. return planetModel.createSurfacePoint(x, y, z); } - } diff --git a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestRandomGeoShapeRelationship.java b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestRandomGeoShapeRelationship.java index b4a5fcec72d..2698a875021 100644 --- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestRandomGeoShapeRelationship.java +++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestRandomGeoShapeRelationship.java @@ -1,34 +1,30 @@ /* -* Licensed to the Apache Software Foundation (ASF) under one or more -* contributor license agreements. See the NOTICE file distributed with -* this work for additional information regarding copyright ownership. -* The ASF licenses this file to You under the Apache License, Version 2.0 -* (the "License"); you may not use this file except in compliance with -* the License. 
You may obtain a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.lucene.spatial3d.geom;
 
 import org.junit.Test;
 
-/**
- * Random test to check relationship between GeoAreaShapes and GeoShapes.
- */
+/** Random test to check relationship between GeoAreaShapes and GeoShapes. */
 public class TestRandomGeoShapeRelationship extends RandomGeo3dShapeGenerator {
 
   /**
-   * Test for WITHIN points. We build a WITHIN shape with respect the geoAreaShape
-   * and create a point WITHIN the shape. The resulting shape should be WITHIN
-   * the original shape.
-   *
+   * Test for WITHIN points. We build a WITHIN shape with respect to the geoAreaShape and create a
+   * point WITHIN the shape. The resulting point should be WITHIN the original shape.
    */
   @Test
   public void testRandomPointWithin() {
@@ -44,7 +40,7 @@ public class TestRandomGeoShapeRelationship extends RandomGeo3dShapeGenerator {
       shape = randomGeoAreaShape(shapeType, planetModel);
       Constraints constraints = getEmptyConstraint();
       constraints.put(shape, GeoArea.WITHIN);
-      GeoAreaShape reference =  randomGeoAreaShape(referenceShapeType, planetModel, constraints);
+      GeoAreaShape reference = randomGeoAreaShape(referenceShapeType, planetModel, constraints);
       if (reference != null) {
         constraints = new Constraints();
         constraints.put(reference, GeoArea.WITHIN);
@@ -58,10 +54,8 @@
   }
 
   /**
-   * Test for NOT WITHIN points. We build a DIJOINT shape with respect the geoAreaShape
-   * and create a point WITHIN that shape. The resulting shape should not be WITHIN
-   * the original shape.
-   *
+   * Test for NOT WITHIN points. We build a DISJOINT shape with respect to the geoAreaShape and
+   * create a point WITHIN that shape. The resulting point should not be WITHIN the original shape.
   */
  public void testRandomPointNotWithin() {
    int referenceShapeType = CONVEX_POLYGON;
@@ -73,7 +67,7 @@
      shape = randomGeoAreaShape(shapeType, planetModel);
      Constraints constraints = getEmptyConstraint();
      constraints.put(shape, GeoArea.DISJOINT);
-      GeoAreaShape reference =  randomGeoAreaShape(referenceShapeType, planetModel, constraints);
+      GeoAreaShape reference = randomGeoAreaShape(referenceShapeType, planetModel, constraints);
      if (reference != null) {
        constraints = new Constraints();
        constraints.put(reference, GeoArea.WITHIN);
@@ -87,11 +81,10 @@
  }

  /**
-   * Test for disjoint shapes. We build a DISJOINT shape with respect the geoAreaShape
-   * and create shapes WITHIN that shapes. The resulting shape should be DISJOINT
-   * to the geoAreaShape.
+   * Test for disjoint shapes. We build a DISJOINT shape with respect to the geoAreaShape and
+   * create shapes WITHIN that shape. The resulting shape should be DISJOINT from the geoAreaShape.
    *
-   * Note that both shapes cannot be concave.
+   * <p>Note that both shapes cannot be concave.
   */
  @Test
  public void testRandomDisjoint() {
@@ -119,32 +112,31 @@
    int rel = geoAreaShape.getRelationship(shape);
    assertEquals(b.toString(), GeoArea.DISJOINT, rel);
    if (shape instanceof GeoArea) {
-      rel = ((GeoArea)shape).getRelationship(geoAreaShape);
+      rel = ((GeoArea) shape).getRelationship(geoAreaShape);
      assertEquals(b.toString(), GeoArea.DISJOINT, rel);
    }
  }

  /**
-   * Test for within shapes. We build a shape WITHIN the geoAreaShape and create
-   * shapes WITHIN that shape. The resulting shape should be WITHIN
-   * to the geoAreaShape.
+   * Test for within shapes. We build a shape WITHIN the geoAreaShape and create shapes WITHIN that
+   * shape. The resulting shape should be WITHIN the geoAreaShape.
    *
-   * Note that if the geoAreaShape is not concave the other shape must be not concave.
+   * <p>Note that if the geoAreaShape is not concave the other shape must not be concave.
   */
  @Test
  public void testRandomWithIn() {
    PlanetModel planetModel = randomPlanetModel();
    int geoAreaShapeType = randomGeoAreaShapeType();
-    //shapes cannot be point or line -- no area!
-    while(geoAreaShapeType == POINT || geoAreaShapeType == LINE) {
+    // shapes cannot be point or line -- no area!
+    while (geoAreaShapeType == POINT || geoAreaShapeType == LINE) {
      geoAreaShapeType = randomGeoAreaShapeType();
    }
-    int shapeType = LINE;//randomShapeType();
+    int shapeType = LINE; // randomShapeType();
    int referenceShapeType = CONVEX_SIMPLE_POLYGON;
-    if (!isConcave(geoAreaShapeType)){
-      shapeType =randomConvexShapeType();
+    if (!isConcave(geoAreaShapeType)) {
+      shapeType = randomConvexShapeType();
    }
-    if(isConcave(shapeType)){//both concave
+    if (isConcave(shapeType)) { // both concave
      referenceShapeType = CONCAVE_SIMPLE_POLYGON;
    }
    GeoShape shape = null;
@@ -166,38 +158,36 @@
    int rel = geoAreaShape.getRelationship(shape);
    assertEquals(b.toString(), GeoArea.WITHIN, rel);
    if (shape instanceof GeoArea) {
-      rel = ((GeoArea)shape).getRelationship(geoAreaShape);
+      rel = ((GeoArea) shape).getRelationship(geoAreaShape);
      assertEquals(b.toString(), GeoArea.CONTAINS, rel);
    }
  }
-
  /**
-   * Test for contains shapes. We build a shape containing the geoAreaShape and create
-   * shapes WITHIN that shape. The resulting shape should CONTAIN
-   * the geoAreaShape.
-   *
-   * Note that if the geoAreaShape is concave the other shape must be concave.
-   * If shape is concave, the shape for reference should be concave as well.
+   * Test for contains shapes. We build a shape containing the geoAreaShape and create shapes WITHIN
+   * that shape. The resulting shape should CONTAIN the geoAreaShape.
    *
+   * <p>Note that if the geoAreaShape is concave the other shape must be concave. If shape is
+   * concave, the shape for reference should be concave as well.
   */
  // TODO: this test seems to hit pathological cases that cause it to run for many minutes?!
-  @Test @Nightly
+  @Test
+  @Nightly
  public void testRandomContains() {
    int referenceShapeType = CONVEX_SIMPLE_POLYGON;
    PlanetModel planetModel = randomPlanetModel();
    int geoAreaShapeType = randomGeoAreaShapeType();
-    while (geoAreaShapeType == COLLECTION ){
+    while (geoAreaShapeType == COLLECTION) {
      geoAreaShapeType = randomGeoAreaShapeType();
    }
    int shapeType = randomShapeType();
    while (shapeType == POINT || shapeType == LINE) {
      shapeType = randomShapeType();
    }
-    if (isConcave(geoAreaShapeType)){
+    if (isConcave(geoAreaShapeType)) {
      shapeType = randomConcaveShapeType();
    }
-    if (isConcave(shapeType)){
+    if (isConcave(shapeType)) {
      referenceShapeType = CONCAVE_SIMPLE_POLYGON;
    }
    GeoShape shape = null;
@@ -206,7 +196,8 @@
      geoAreaShape = randomGeoAreaShape(geoAreaShapeType, planetModel);
      Constraints constraints = getEmptyConstraint();
      constraints.put(geoAreaShape, GeoArea.CONTAINS);
-      GeoPolygon reference =(GeoPolygon)randomGeoAreaShape(referenceShapeType, planetModel, constraints);
+      GeoPolygon reference =
+          (GeoPolygon) randomGeoAreaShape(referenceShapeType, planetModel, constraints);
      if (reference != null) {
        constraints = getEmptyConstraint();
        constraints.put(reference, GeoArea.CONTAINS);
@@ -219,16 +210,15 @@
    int rel = geoAreaShape.getRelationship(shape);
    assertEquals(b.toString(), GeoArea.CONTAINS, rel);
    if (shape instanceof GeoArea) {
-      rel = ((GeoArea)shape).getRelationship(geoAreaShape);
+      rel = ((GeoArea) shape).getRelationship(geoAreaShape);
      assertEquals(b.toString(), GeoArea.WITHIN, rel);
    }
  }

  /**
-   * Test for overlapping shapes. We build a shape that contains part of the
-   * geoAreaShape, is disjoint to other part and contains a disjoint shape. We create
-   * shapes according the criteria. The resulting shape should OVERLAP
-   * the geoAreaShape.
+   * Test for overlapping shapes. We build a shape that contains part of the geoAreaShape, is
+   * disjoint from the other part, and contains a disjoint shape. We create shapes according to
+   * the criteria. The resulting shape should OVERLAP the geoAreaShape.
*/ @Test public void testRandomOverlaps() { @@ -246,16 +236,16 @@ public class TestRandomGeoShapeRelationship extends RandomGeo3dShapeGenerator { while (shape == null) { geoAreaShape = randomGeoAreaShape(geoAreaShapeType, planetModel); Constraints constraints = getEmptyConstraint(); - constraints.put(geoAreaShape,GeoArea.WITHIN); + constraints.put(geoAreaShape, GeoArea.WITHIN); GeoAreaShape reference1 = randomGeoAreaShape(CONVEX_SIMPLE_POLYGON, planetModel, constraints); - if (reference1 == null){ + if (reference1 == null) { continue; } constraints = getEmptyConstraint(); constraints.put(geoAreaShape, GeoArea.WITHIN); constraints.put(reference1, GeoArea.DISJOINT); GeoAreaShape reference2 = randomGeoAreaShape(CONVEX_SIMPLE_POLYGON, planetModel, constraints); - if (reference2 == null){ + if (reference2 == null) { continue; } constraints = getEmptyConstraint(); @@ -275,7 +265,7 @@ public class TestRandomGeoShapeRelationship extends RandomGeo3dShapeGenerator { int rel = geoAreaShape.getRelationship(shape); assertEquals(b.toString(), GeoArea.OVERLAPS, rel); if (shape instanceof GeoArea) { - rel = ((GeoArea)shape).getRelationship(geoAreaShape); + rel = ((GeoArea) shape).getRelationship(geoAreaShape); assertEquals(b.toString(), GeoArea.OVERLAPS, rel); } } diff --git a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestRandomPlane.java b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestRandomPlane.java index ce48250cce8..87ff11e2686 100644 --- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestRandomPlane.java +++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestRandomPlane.java @@ -17,15 +17,12 @@ package org.apache.lucene.spatial3d.geom; +import com.carrotsearch.randomizedtesting.annotations.Repeat; import java.util.ArrayList; import java.util.List; - -import com.carrotsearch.randomizedtesting.annotations.Repeat; import org.junit.Test; -/** - * Random test for planes. - */ +/** Random test for planes. 
*/ public class TestRandomPlane extends RandomGeo3dShapeGenerator { @Test @@ -33,10 +30,12 @@ public class TestRandomPlane extends RandomGeo3dShapeGenerator { public void testPlaneAccuracy() { PlanetModel planetModel = randomPlanetModel(); GeoPoint point1 = randomGeoPoint(planetModel); - for (int i= 0; i < 1000; i++) { - double dist = random().nextDouble() * Vector.MINIMUM_ANGULAR_RESOLUTION + Vector.MINIMUM_ANGULAR_RESOLUTION; + for (int i = 0; i < 1000; i++) { + double dist = + random().nextDouble() * Vector.MINIMUM_ANGULAR_RESOLUTION + + Vector.MINIMUM_ANGULAR_RESOLUTION; double bearing = random().nextDouble() * 2 * Math.PI; - GeoPoint point2 = planetModel.surfacePointOnBearing(point1, dist, bearing ); + GeoPoint point2 = planetModel.surfacePointOnBearing(point1, dist, bearing); GeoPoint check = randomGeoPoint(planetModel); if (!point1.isNumericallyIdentical(point2)) { SidedPlane plane = new SidedPlane(check, point1, point2); @@ -44,47 +43,56 @@ public class TestRandomPlane extends RandomGeo3dShapeGenerator { assertTrue(msg, plane.isWithin(check)); assertTrue(msg, plane.isWithin(point2)); assertTrue(msg, plane.isWithin(point1)); - } - else { + } else { assertFalse("numerically identical", true); } } } - + @Test @Repeat(iterations = 10) public void testPlaneThreePointsAccuracy() { PlanetModel planetModel = randomPlanetModel(); - for (int i= 0; i < 1000; i++) { + for (int i = 0; i < 1000; i++) { GeoPoint point1 = randomGeoPoint(planetModel); double dist = random().nextDouble() * Math.PI - Vector.MINIMUM_ANGULAR_RESOLUTION; double bearing = random().nextDouble() * 2 * Math.PI; - GeoPoint point2 = planetModel.surfacePointOnBearing(point1, dist, bearing ); - dist = random().nextDouble() * Vector.MINIMUM_ANGULAR_RESOLUTION + Vector.MINIMUM_ANGULAR_RESOLUTION; + GeoPoint point2 = planetModel.surfacePointOnBearing(point1, dist, bearing); + dist = + random().nextDouble() * Vector.MINIMUM_ANGULAR_RESOLUTION + + Vector.MINIMUM_ANGULAR_RESOLUTION; bearing = random().nextDouble() * 2 * Math.PI; - GeoPoint point3 = planetModel.surfacePointOnBearing(point1, dist, bearing ); + GeoPoint point3 = planetModel.surfacePointOnBearing(point1, dist, bearing); GeoPoint check = randomGeoPoint(planetModel); - SidedPlane plane = SidedPlane.constructNormalizedThreePointSidedPlane(check, point1, point2, point3); - String msg = planetModel + " point 1: " + point1 + ", point 2: " + point2 + ", point 3: " + point3 + " , check: " + check; + SidedPlane plane = + SidedPlane.constructNormalizedThreePointSidedPlane(check, point1, point2, point3); + String msg = + planetModel + + " point 1: " + + point1 + + ", point 2: " + + point2 + + ", point 3: " + + point3 + + " , check: " + + check; if (plane == null) { fail(msg); } // This is not expected - //assertTrue(plane.evaluate(check) + " " + msg, plane.isWithin(check)); - assertTrue(plane.evaluate(point1) + " " +msg, plane.isWithin(point1)); - assertTrue(plane.evaluate(point2) + " " +msg, plane.isWithin(point2)); - assertTrue(plane.evaluate(point3) + " " +msg, plane.isWithin(point3)); + // assertTrue(plane.evaluate(check) + " " + msg, plane.isWithin(check)); + assertTrue(plane.evaluate(point1) + " " + msg, plane.isWithin(point1)); + assertTrue(plane.evaluate(point2) + " " + msg, plane.isWithin(point2)); + assertTrue(plane.evaluate(point3) + " " + msg, plane.isWithin(point3)); } } - - @Test @Repeat(iterations = 10) public void testPolygonAccuracy() { PlanetModel planetModel = randomPlanetModel(); GeoPoint point1 = randomGeoPoint(planetModel); - for (int i= 0; i < 1000; i++) { + 
for (int i = 0; i < 1000; i++) {
      double dist = random().nextDouble() * 1e-6 + Vector.MINIMUM_ANGULAR_RESOLUTION;
      GeoPoint point2 = planetModel.surfacePointOnBearing(point1, dist, 0);
      GeoPoint point3 = planetModel.surfacePointOnBearing(point1, dist, 0.5 * Math.PI);
@@ -94,8 +102,6 @@
      points.add(point2);
      points.add(point3);
      GeoPolygonFactory.makeGeoPolygon(planetModel, points);
-
    }
  }
-
 }
diff --git a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestSimpleGeoPolygonRelationships.java b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestSimpleGeoPolygonRelationships.java
index 53a32f15445..ad2c0ac29d2 100644
--- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestSimpleGeoPolygonRelationships.java
+++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestSimpleGeoPolygonRelationships.java
@@ -1,554 +1,576 @@
 /*
-* Licensed to the Apache Software Foundation (ASF) under one or more
-* contributor license agreements. See the NOTICE file distributed with
-* this work for additional information regarding copyright ownership.
-* The ASF licenses this file to You under the Apache License, Version 2.0
-* (the "License"); you may not use this file except in compliance with
-* the License. You may obtain a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.lucene.spatial3d.geom;
 
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
-import org.junit.Test;
-
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import org.junit.Test;
+
 /**
- * Check relationship between polygon and GeoShapes of basic polygons. Normally we construct
- * the convex, concave counterpart and the convex polygon as a complex polygon.
+ * Check relationships between a polygon and the GeoShapes of basic polygons. Normally we
+ * construct the convex polygon, its concave counterpart, and the convex polygon built as a
+ * complex polygon.
 */
 public class TestSimpleGeoPolygonRelationships {
 
-  /**
-   * Test with two shapes with no crossing edges and no points in common in convex case.
-   */
+  /** Test with two shapes with no crossing edges and no points in common in convex case.
*/ @Test public void testGeoSimplePolygon1() { - //POLYGON ((19.845091 -60.452631, 20.119948 -61.655652, 23.207901 -61.453298, 22.820804 -60.257713, 19.845091 -60.452631)) disjoint - GeoPolygon originalConvexPol = buildConvexGeoPolygon(19.84509, -60.452631, - 20.119948, -61.655652, - 23.207901, -61.453298, - 22.820804, -60.257713); + // POLYGON ((19.845091 -60.452631, 20.119948 -61.655652, 23.207901 -61.453298, 22.820804 + // -60.257713, 19.845091 -60.452631)) disjoint + GeoPolygon originalConvexPol = + buildConvexGeoPolygon( + 19.84509, + -60.452631, + 20.119948, + -61.655652, + 23.207901, + -61.453298, + 22.820804, + -60.257713); - GeoPolygon originalConcavePol = buildConcaveGeoPolygon(19.84509, -60.452631, - 20.119948, -61.655652, - 23.207901, -61.453298, - 22.820804, -60.257713); + GeoPolygon originalConcavePol = + buildConcaveGeoPolygon( + 19.84509, + -60.452631, + 20.119948, + -61.655652, + 23.207901, + -61.453298, + 22.820804, + -60.257713); - GeoPolygon originalComplexPol = buildComplexGeoPolygon(19.84509, -60.452631, - 20.119948, -61.655652, - 23.207901, -61.453298, - 22.820804, -60.257713); + GeoPolygon originalComplexPol = + buildComplexGeoPolygon( + 19.84509, + -60.452631, + 20.119948, + -61.655652, + 23.207901, + -61.453298, + 22.820804, + -60.257713); - GeoPolygon polConvex = buildConvexGeoPolygon(20.0, -60.4, - 20.1, -60.4, - 20.1, -60.3, - 20.0, -60.3); + GeoPolygon polConvex = + buildConvexGeoPolygon(20.0, -60.4, 20.1, -60.4, 20.1, -60.3, 20.0, -60.3); - GeoPolygon polConcave = buildConcaveGeoPolygon(20.0, -60.4, - 20.1, -60.4, - 20.1, -60.3, - 20.0, -60.3); + GeoPolygon polConcave = + buildConcaveGeoPolygon(20.0, -60.4, 20.1, -60.4, 20.1, -60.3, 20.0, -60.3); - - //Convex + // Convex int rel = originalConvexPol.getRelationship(polConvex); - assertEquals(GeoArea.DISJOINT, rel); + assertEquals(GeoArea.DISJOINT, rel); rel = polConvex.getRelationship(originalConvexPol); - assertEquals(GeoArea.DISJOINT, rel); + assertEquals(GeoArea.DISJOINT, rel); rel = originalConvexPol.getRelationship(polConcave); - assertEquals(GeoArea.CONTAINS, rel); + assertEquals(GeoArea.CONTAINS, rel); rel = polConcave.getRelationship(originalConvexPol); - assertEquals(GeoArea.WITHIN, rel);//Check + assertEquals(GeoArea.WITHIN, rel); // Check - //Concave + // Concave rel = originalConcavePol.getRelationship(polConvex); - assertEquals(GeoArea.WITHIN, rel); + assertEquals(GeoArea.WITHIN, rel); rel = polConvex.getRelationship(originalConcavePol); - assertEquals(GeoArea.CONTAINS, rel); + assertEquals(GeoArea.CONTAINS, rel); rel = originalConcavePol.getRelationship(polConcave); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = polConcave.getRelationship(originalConcavePol); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); - //Complex + // Complex rel = originalComplexPol.getRelationship(polConvex); - assertEquals(GeoArea.DISJOINT, rel); + assertEquals(GeoArea.DISJOINT, rel); rel = polConvex.getRelationship(originalComplexPol); - assertEquals(GeoArea.DISJOINT, rel); + assertEquals(GeoArea.DISJOINT, rel); rel = originalComplexPol.getRelationship(polConcave); - assertEquals(GeoArea.CONTAINS, rel); + assertEquals(GeoArea.CONTAINS, rel); rel = polConcave.getRelationship(originalComplexPol); - assertEquals(GeoArea.WITHIN, rel); + assertEquals(GeoArea.WITHIN, rel); } - - /** - * Test with two shapes with crossing edges and some points inside in convex case. - */ + /** Test with two shapes with crossing edges and some points inside in convex case. 
*/ @Test public void testGeoSimplePolygon2() { - //POLYGON ((19.845091 -60.452631, 20.119948 -61.655652, 23.207901 -61.453298, 22.820804 -60.257713, 19.845091 -60.452631)) disjoint - GeoPolygon originalConvexPol = buildConvexGeoPolygon(19.84509, -60.452631, - 20.119948, -61.655652, - 23.207901, -61.453298, - 22.820804, -60.257713); + // POLYGON ((19.845091 -60.452631, 20.119948 -61.655652, 23.207901 -61.453298, 22.820804 + // -60.257713, 19.845091 -60.452631)) disjoint + GeoPolygon originalConvexPol = + buildConvexGeoPolygon( + 19.84509, + -60.452631, + 20.119948, + -61.655652, + 23.207901, + -61.453298, + 22.820804, + -60.257713); - GeoPolygon originalConcavePol = buildConcaveGeoPolygon(19.84509, -60.452631, - 20.119948, -61.655652, - 23.207901, -61.453298, - 22.820804, -60.257713); + GeoPolygon originalConcavePol = + buildConcaveGeoPolygon( + 19.84509, + -60.452631, + 20.119948, + -61.655652, + 23.207901, + -61.453298, + 22.820804, + -60.257713); - GeoPolygon originalComplexPol = buildComplexGeoPolygon(19.84509, -60.452631, - 20.119948, -61.655652, - 23.207901, -61.453298, - 22.820804, -60.257713); + GeoPolygon originalComplexPol = + buildComplexGeoPolygon( + 19.84509, + -60.452631, + 20.119948, + -61.655652, + 23.207901, + -61.453298, + 22.820804, + -60.257713); - //POLYGON ((20.0 -60.4, 23.1 -60.4, 23.1 -60.3, 20.0 -60.3,20.0 -60.4)) - GeoPolygon polConvex = buildConvexGeoPolygon(20.0, -60.4, - 23.1, -60.4, - 23.1, -60.3, - 20.0, -60.3); + // POLYGON ((20.0 -60.4, 23.1 -60.4, 23.1 -60.3, 20.0 -60.3,20.0 -60.4)) + GeoPolygon polConvex = + buildConvexGeoPolygon(20.0, -60.4, 23.1, -60.4, 23.1, -60.3, 20.0, -60.3); - GeoPolygon polConcave = buildConcaveGeoPolygon(20.0, -60.4, - 23.1, -60.4, - 23.1, -60.3, - 20.0, -60.3); + GeoPolygon polConcave = + buildConcaveGeoPolygon(20.0, -60.4, 23.1, -60.4, 23.1, -60.3, 20.0, -60.3); - //Convex + // Convex int rel = originalConvexPol.getRelationship(polConvex); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = polConvex.getRelationship(originalConvexPol); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = originalConvexPol.getRelationship(polConcave); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = polConcave.getRelationship(originalConvexPol); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); - //Concave + // Concave rel = originalConcavePol.getRelationship(polConcave); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = polConcave.getRelationship(originalConcavePol); - assertEquals(GeoArea.OVERLAPS, rel); - + assertEquals(GeoArea.OVERLAPS, rel); rel = originalConcavePol.getRelationship(polConvex); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = polConvex.getRelationship(originalConcavePol); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); - //Complex + // Complex rel = originalComplexPol.getRelationship(polConcave); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = polConcave.getRelationship(originalComplexPol); - assertEquals(GeoArea.OVERLAPS, rel); - + assertEquals(GeoArea.OVERLAPS, rel); rel = originalComplexPol.getRelationship(polConvex); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = polConvex.getRelationship(originalComplexPol); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); } - /** - * Test with two shapes with no 
crossing edges and all points inside in convex case. - */ + /** Test with two shapes with no crossing edges and all points inside in convex case. */ @Test public void testGeoSimplePolygon3() { - //POLYGON ((19.845091 -60.452631, 20.119948 -61.655652, 23.207901 -61.453298, 22.820804 -60.257713, 19.845091 -60.452631)) disjoint - GeoPolygon originalConvexPol = buildConvexGeoPolygon(19.84509, -60.452631, - 20.119948, -61.655652, - 23.207901, -61.453298, - 22.820804, -60.257713); + // POLYGON ((19.845091 -60.452631, 20.119948 -61.655652, 23.207901 -61.453298, 22.820804 + // -60.257713, 19.845091 -60.452631)) disjoint + GeoPolygon originalConvexPol = + buildConvexGeoPolygon( + 19.84509, + -60.452631, + 20.119948, + -61.655652, + 23.207901, + -61.453298, + 22.820804, + -60.257713); - GeoPolygon originalConcavePol = buildConcaveGeoPolygon(19.84509, -60.452631, - 20.119948, -61.655652, - 23.207901, -61.453298, - 22.820804, -60.257713); + GeoPolygon originalConcavePol = + buildConcaveGeoPolygon( + 19.84509, + -60.452631, + 20.119948, + -61.655652, + 23.207901, + -61.453298, + 22.820804, + -60.257713); - GeoPolygon originalComplexPol = buildComplexGeoPolygon(19.84509, -60.452631, - 20.119948, -61.655652, - 23.207901, -61.453298, - 22.820804, -60.257713); + GeoPolygon originalComplexPol = + buildComplexGeoPolygon( + 19.84509, + -60.452631, + 20.119948, + -61.655652, + 23.207901, + -61.453298, + 22.820804, + -60.257713); - //POLYGON ((20.0 -61.1, 20.1 -61.1, 20.1 -60.5, 20.0 -60.5,20.0 -61.1)) - GeoPolygon polConvex = buildConvexGeoPolygon(20.0, -61.1, - 20.1, -61.1, - 20.1, -60.5, - 20.0, -60.5); + // POLYGON ((20.0 -61.1, 20.1 -61.1, 20.1 -60.5, 20.0 -60.5,20.0 -61.1)) + GeoPolygon polConvex = + buildConvexGeoPolygon(20.0, -61.1, 20.1, -61.1, 20.1, -60.5, 20.0, -60.5); - GeoPolygon polConcave = buildConcaveGeoPolygon(20.0, -61.1, - 20.1, -61.1, - 20.1, -60.5, - 20.0, -60.5); + GeoPolygon polConcave = + buildConcaveGeoPolygon(20.0, -61.1, 20.1, -61.1, 20.1, -60.5, 20.0, -60.5); - //Convex + // Convex int rel = originalConvexPol.getRelationship(polConvex); - assertEquals(GeoArea.WITHIN, rel); + assertEquals(GeoArea.WITHIN, rel); rel = polConvex.getRelationship(originalConvexPol); - assertEquals(GeoArea.CONTAINS, rel); + assertEquals(GeoArea.CONTAINS, rel); rel = originalConvexPol.getRelationship(polConcave); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = polConcave.getRelationship(originalConvexPol); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); - //Concave + // Concave rel = originalConcavePol.getRelationship(polConcave); - assertEquals(GeoArea.CONTAINS, rel); + assertEquals(GeoArea.CONTAINS, rel); rel = polConcave.getRelationship(originalConcavePol); - assertEquals(GeoArea.WITHIN, rel);//check + assertEquals(GeoArea.WITHIN, rel); // check rel = originalConcavePol.getRelationship(polConvex); - assertEquals(GeoArea.DISJOINT, rel); + assertEquals(GeoArea.DISJOINT, rel); rel = polConvex.getRelationship(originalConcavePol); - assertEquals(GeoArea.DISJOINT, rel); + assertEquals(GeoArea.DISJOINT, rel); - //Complex + // Complex rel = originalComplexPol.getRelationship(polConvex); - assertEquals(GeoArea.WITHIN, rel); + assertEquals(GeoArea.WITHIN, rel); rel = polConvex.getRelationship(originalComplexPol); - assertEquals(GeoArea.CONTAINS, rel); + assertEquals(GeoArea.CONTAINS, rel); rel = originalComplexPol.getRelationship(polConcave); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = 
polConcave.getRelationship(originalComplexPol); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); } - /** - * Test with two shapes with crossing edges and no points inside in convex case. - */ + /** Test with two shapes with crossing edges and no points inside in convex case. */ @Test public void testGeoSimplePolygon4() { - //POLYGON ((19.845091 -60.452631, 20.119948 -61.655652, 23.207901 -61.453298, 22.820804 -60.257713, 19.845091 -60.452631)) disjoint - GeoPolygon originalConvexPol = buildConvexGeoPolygon(19.84509, -60.452631, - 20.119948, -61.655652, - 23.207901, -61.453298, - 22.820804, -60.257713); + // POLYGON ((19.845091 -60.452631, 20.119948 -61.655652, 23.207901 -61.453298, 22.820804 + // -60.257713, 19.845091 -60.452631)) disjoint + GeoPolygon originalConvexPol = + buildConvexGeoPolygon( + 19.84509, + -60.452631, + 20.119948, + -61.655652, + 23.207901, + -61.453298, + 22.820804, + -60.257713); - GeoPolygon originalConcavePol = buildConcaveGeoPolygon(19.84509, -60.452631, - 20.119948, -61.655652, - 23.207901, -61.453298, - 22.820804, -60.257713); + GeoPolygon originalConcavePol = + buildConcaveGeoPolygon( + 19.84509, + -60.452631, + 20.119948, + -61.655652, + 23.207901, + -61.453298, + 22.820804, + -60.257713); - GeoPolygon originalComplexPol = buildComplexGeoPolygon(19.84509, -60.452631, - 20.119948, -61.655652, - 23.207901, -61.453298, - 22.820804, -60.257713); + GeoPolygon originalComplexPol = + buildComplexGeoPolygon( + 19.84509, + -60.452631, + 20.119948, + -61.655652, + 23.207901, + -61.453298, + 22.820804, + -60.257713); - //POLYGON ((20.0 -62.4, 20.1 -62.4, 20.1 -60.3, 20.0 -60.3,20.0 -62.4)) intersects no points inside - GeoPolygon polConvex = buildConvexGeoPolygon(20.0, -62.4, - 20.1, -62.4, - 20.1, -60.3, - 20.0, -60.3); + // POLYGON ((20.0 -62.4, 20.1 -62.4, 20.1 -60.3, 20.0 -60.3,20.0 -62.4)) intersects no points + // inside + GeoPolygon polConvex = + buildConvexGeoPolygon(20.0, -62.4, 20.1, -62.4, 20.1, -60.3, 20.0, -60.3); - GeoPolygon polConcave = buildConcaveGeoPolygon(20.0, -62.4, - 20.1, -62.4, - 20.1, -60.3, - 20.0, -60.3); + GeoPolygon polConcave = + buildConcaveGeoPolygon(20.0, -62.4, 20.1, -62.4, 20.1, -60.3, 20.0, -60.3); - //Convex + // Convex int rel = originalConvexPol.getRelationship(polConvex); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = polConvex.getRelationship(originalConvexPol); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = originalConvexPol.getRelationship(polConcave); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = polConcave.getRelationship(originalConvexPol); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); - //concave + // concave rel = originalConcavePol.getRelationship(polConcave); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = polConcave.getRelationship(originalConcavePol); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = originalConcavePol.getRelationship(polConvex); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = polConvex.getRelationship(originalConcavePol); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); - //Complex + // Complex rel = originalComplexPol.getRelationship(polConvex); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = polConvex.getRelationship(originalComplexPol); - 
assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = originalComplexPol.getRelationship(polConcave); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = polConcave.getRelationship(originalComplexPol); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); } - /** - * Test with two shapes with no crossing edges and polygon in hole in convex case. - */ + /** Test with two shapes with no crossing edges and polygon in hole in convex case. */ @Test public void testGeoSimplePolygonWithHole1() { - //POLYGON((-135 -31, -135 -30, -137 -30, -137 -31, -135 -31),(-135.5 -30.7, -135.5 -30.4, -136.5 -30.4, -136.5 -30.7, -135.5 -30.7)) - GeoPolygon hole = buildConcaveGeoPolygon(-135.5, -30.7, - -135.5, -30.4, - -136.5, -30.4, - -136.5, -30.7); - GeoPolygon originalConvexPol = buildConvexGeoPolygonWithHole(-135, -31, - -135, -30, - -137, -30, - -137, -31, hole); + // POLYGON((-135 -31, -135 -30, -137 -30, -137 -31, -135 -31),(-135.5 -30.7, -135.5 -30.4, + // -136.5 -30.4, -136.5 -30.7, -135.5 -30.7)) + GeoPolygon hole = + buildConcaveGeoPolygon(-135.5, -30.7, -135.5, -30.4, -136.5, -30.4, -136.5, -30.7); + GeoPolygon originalConvexPol = + buildConvexGeoPolygonWithHole(-135, -31, -135, -30, -137, -30, -137, -31, hole); - GeoPolygon holeInv = buildConvexGeoPolygon(-135, -31, - -135, -30, - -137, -30, - -137, -31); + GeoPolygon holeInv = buildConvexGeoPolygon(-135, -31, -135, -30, -137, -30, -137, -31); - GeoPolygon originalConvexPolInv = buildConcaveGeoPolygonWithHole(-135.5, -30.7, - -135.5, -30.4, - -136.5, -30.4, - -136.5, -30.7, holeInv); + GeoPolygon originalConvexPolInv = + buildConcaveGeoPolygonWithHole( + -135.5, -30.7, -135.5, -30.4, -136.5, -30.4, -136.5, -30.7, holeInv); - //POLYGON((-135.7 -30.6, -135.7 -30.45, -136 -30.45, -136 -30.6, -135.7 -30.6)) in the hole - GeoPolygon polConvex = buildConvexGeoPolygon(-135.7, -30.6, - -135.7, -30.45, - -136, -30.45, - -136, -30.6); + // POLYGON((-135.7 -30.6, -135.7 -30.45, -136 -30.45, -136 -30.6, -135.7 -30.6)) in the hole + GeoPolygon polConvex = + buildConvexGeoPolygon(-135.7, -30.6, -135.7, -30.45, -136, -30.45, -136, -30.6); - GeoPolygon polConcave = buildConcaveGeoPolygon(-135.7, -30.6, - -135.7, -30.45, - -136, -30.45, - -136, -30.6); + GeoPolygon polConcave = + buildConcaveGeoPolygon(-135.7, -30.6, -135.7, -30.45, -136, -30.45, -136, -30.6); int rel = originalConvexPol.getRelationship(polConvex); - assertEquals(GeoArea.DISJOINT, rel); + assertEquals(GeoArea.DISJOINT, rel); rel = polConvex.getRelationship(originalConvexPol); - assertEquals(GeoArea.DISJOINT, rel); + assertEquals(GeoArea.DISJOINT, rel); rel = originalConvexPol.getRelationship(polConcave); - assertEquals(GeoArea.CONTAINS, rel); + assertEquals(GeoArea.CONTAINS, rel); rel = polConcave.getRelationship(originalConvexPol); - assertEquals(GeoArea.WITHIN, rel); + assertEquals(GeoArea.WITHIN, rel); rel = originalConvexPolInv.getRelationship(polConvex); - assertEquals(GeoArea.DISJOINT, rel); + assertEquals(GeoArea.DISJOINT, rel); rel = polConvex.getRelationship(originalConvexPolInv); - assertEquals(GeoArea.DISJOINT, rel); + assertEquals(GeoArea.DISJOINT, rel); rel = originalConvexPolInv.getRelationship(polConcave); - assertEquals(GeoArea.CONTAINS, rel); + assertEquals(GeoArea.CONTAINS, rel); rel = polConcave.getRelationship(originalConvexPolInv); - assertEquals(GeoArea.WITHIN, rel); + assertEquals(GeoArea.WITHIN, rel); } - /** - * Test with two shapes with crossing edges in hole and some points inside in 
convex case. - */ + /** Test with two shapes with crossing edges in hole and some points inside in convex case. */ @Test public void testGeoSimplePolygonWithHole2() { - //POLYGON((-135 -31, -135 -30, -137 -30, -137 -31, -135 -31),(-135.5 -30.7, -135.5 -30.4, -136.5 -30.4, -136.5 -30.7, -135.5 -30.7)) - GeoPolygon hole = buildConcaveGeoPolygon(-135.5, -30.7, - -135.5, -30.4, - -136.5, -30.4, - -136.5, -30.7); - GeoPolygon originalConvexPol = buildConvexGeoPolygonWithHole(-135, -31, - -135, -30, - -137, -30, - -137, -31, hole); + // POLYGON((-135 -31, -135 -30, -137 -30, -137 -31, -135 -31),(-135.5 -30.7, -135.5 -30.4, + // -136.5 -30.4, -136.5 -30.7, -135.5 -30.7)) + GeoPolygon hole = + buildConcaveGeoPolygon(-135.5, -30.7, -135.5, -30.4, -136.5, -30.4, -136.5, -30.7); + GeoPolygon originalConvexPol = + buildConvexGeoPolygonWithHole(-135, -31, -135, -30, -137, -30, -137, -31, hole); - GeoPolygon holeInv = buildConvexGeoPolygon(-135, -31, - -135, -30, - -137, -30, - -137, -31); + GeoPolygon holeInv = buildConvexGeoPolygon(-135, -31, -135, -30, -137, -30, -137, -31); - GeoPolygon originalConvexPolInv = buildConcaveGeoPolygonWithHole(-135.5, -30.7, - -135.5, -30.4, - -136.5, -30.4, - -136.5, -30.7, holeInv); + GeoPolygon originalConvexPolInv = + buildConcaveGeoPolygonWithHole( + -135.5, -30.7, -135.5, -30.4, -136.5, -30.4, -136.5, -30.7, holeInv); - //POLYGON((-135.5 -31.2, -135.5 -30.8, -136 -30.8, -136 -31.2, -135.5 -31.2)) intersects the hole - GeoPolygon polConvex = buildConvexGeoPolygon(-135.5, -30.2, - -135.5, -30.8, - -136, -30.8, - -136, -30.2); + // POLYGON((-135.5 -31.2, -135.5 -30.8, -136 -30.8, -136 -31.2, -135.5 -31.2)) intersects the + // hole + GeoPolygon polConvex = + buildConvexGeoPolygon(-135.5, -30.2, -135.5, -30.8, -136, -30.8, -136, -30.2); - GeoPolygon polConcave = buildConcaveGeoPolygon(-135.5, -30.2, - -135.5, -30.8, - -136, -30.8, - -136, -30.2); + GeoPolygon polConcave = + buildConcaveGeoPolygon(-135.5, -30.2, -135.5, -30.8, -136, -30.8, -136, -30.2); int rel = originalConvexPol.getRelationship(polConvex); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = polConvex.getRelationship(originalConvexPol); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = originalConvexPol.getRelationship(polConcave); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = polConcave.getRelationship(originalConvexPol); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = originalConvexPolInv.getRelationship(polConvex); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = polConvex.getRelationship(originalConvexPolInv); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = originalConvexPolInv.getRelationship(polConcave); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = polConcave.getRelationship(originalConvexPolInv); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); } - /** - * Test with two shapes with crossing edges and some points inside in convex case. - */ + /** Test with two shapes with crossing edges and some points inside in convex case. 
*/ @Test public void testGeoSimplePolygonWithHole3() { - //POLYGON((-135 -31, -135 -30, -137 -30, -137 -31, -135 -31),(-135.5 -30.7, -135.5 -30.4, -136.5 -30.4, -136.5 -30.7, -135.5 -30.7)) - GeoPolygon hole = buildConcaveGeoPolygon(-135.5, -30.7, - -135.5, -30.4, - -136.5, -30.4, - -136.5, -30.7); - GeoPolygon originalConvexPol = buildConvexGeoPolygonWithHole(-135, -31, - -135, -30, - -137, -30, - -137, -31, hole); + // POLYGON((-135 -31, -135 -30, -137 -30, -137 -31, -135 -31),(-135.5 -30.7, -135.5 -30.4, + // -136.5 -30.4, -136.5 -30.7, -135.5 -30.7)) + GeoPolygon hole = + buildConcaveGeoPolygon(-135.5, -30.7, -135.5, -30.4, -136.5, -30.4, -136.5, -30.7); + GeoPolygon originalConvexPol = + buildConvexGeoPolygonWithHole(-135, -31, -135, -30, -137, -30, -137, -31, hole); - GeoPolygon holeInv = buildConvexGeoPolygon(-135, -31, - -135, -30, - -137, -30, - -137, -31); + GeoPolygon holeInv = buildConvexGeoPolygon(-135, -31, -135, -30, -137, -30, -137, -31); - GeoPolygon originalConvexPolInv = buildConcaveGeoPolygonWithHole(-135.5, -30.7, - -135.5, -30.4, - -136.5, -30.4, - -136.5, -30.7, holeInv); + GeoPolygon originalConvexPolInv = + buildConcaveGeoPolygonWithHole( + -135.5, -30.7, -135.5, -30.4, -136.5, -30.4, -136.5, -30.7, holeInv); - //POLYGON((-135.2 -30.8, -135.2 -30.2, -136.8 -30.2, -136.8 -30.8, -135.2 -30.8)) inside the polygon covering the hole - GeoPolygon polConvex = buildConvexGeoPolygon(-135.2, -30.8, - -135.2, -30.3, - -136.8, -30.2, - -136.8, -30.8); + // POLYGON((-135.2 -30.8, -135.2 -30.2, -136.8 -30.2, -136.8 -30.8, -135.2 -30.8)) inside the + // polygon covering the hole + GeoPolygon polConvex = + buildConvexGeoPolygon(-135.2, -30.8, -135.2, -30.3, -136.8, -30.2, -136.8, -30.8); - GeoPolygon polConcave = buildConcaveGeoPolygon(-135.2, -30.8, - -135.2, -30.3, - -136.8, -30.2, - -136.8, -30.8); + GeoPolygon polConcave = + buildConcaveGeoPolygon(-135.2, -30.8, -135.2, -30.3, -136.8, -30.2, -136.8, -30.8); int rel = originalConvexPol.getRelationship(polConvex); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = polConvex.getRelationship(originalConvexPol); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = originalConvexPol.getRelationship(polConcave); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = polConcave.getRelationship(originalConvexPol); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = originalConvexPolInv.getRelationship(polConvex); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = polConvex.getRelationship(originalConvexPolInv); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = originalConvexPolInv.getRelationship(polConcave); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = polConcave.getRelationship(originalConvexPolInv); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); } - /** - * Test with two shapes with no crossing edges and all points inside in convex case. - */ + /** Test with two shapes with no crossing edges and all points inside in convex case. 
*/ @Test public void testGeoSimplePolygonWithHole4() { - //POLYGON((-135 -31, -135 -30, -137 -30, -137 -31, -135 -31),(-135.5 -30.7, -135.5 -30.4, -136.5 -30.4, -136.5 -30.7, -135.5 -30.7)) - GeoPolygon hole = buildConcaveGeoPolygon(-135.5, -30.7, - -135.5, -30.4, - -136.5, -30.4, - -136.5, -30.7); - GeoPolygon originalConvexPol = buildConvexGeoPolygonWithHole(-135, -31, - -135, -30, - -137, -30, - -137, -31, hole); + // POLYGON((-135 -31, -135 -30, -137 -30, -137 -31, -135 -31),(-135.5 -30.7, -135.5 -30.4, + // -136.5 -30.4, -136.5 -30.7, -135.5 -30.7)) + GeoPolygon hole = + buildConcaveGeoPolygon(-135.5, -30.7, -135.5, -30.4, -136.5, -30.4, -136.5, -30.7); + GeoPolygon originalConvexPol = + buildConvexGeoPolygonWithHole(-135, -31, -135, -30, -137, -30, -137, -31, hole); - GeoPolygon holeInv = buildConvexGeoPolygon(-135, -31, - -135, -30, - -137, -30, - -137, -31); + GeoPolygon holeInv = buildConvexGeoPolygon(-135, -31, -135, -30, -137, -30, -137, -31); - GeoPolygon originalConvexPolInv = buildConcaveGeoPolygonWithHole(-135.5, -30.7, - -135.5, -30.4, - -136.5, -30.4, - -136.5, -30.7, holeInv); + GeoPolygon originalConvexPolInv = + buildConcaveGeoPolygonWithHole( + -135.5, -30.7, -135.5, -30.4, -136.5, -30.4, -136.5, -30.7, holeInv); // POLYGON((-135.7 -30.3, -135.7 -30.2, -136 -30.2, -136 -30.3, -135.7 -30.3))inside the polygon - GeoPolygon polConvex = buildConvexGeoPolygon(-135.7, -30.3, - -135.7, -30.2, - -136, -30.2, - -136, -30.3); + GeoPolygon polConvex = + buildConvexGeoPolygon(-135.7, -30.3, -135.7, -30.2, -136, -30.2, -136, -30.3); - GeoPolygon polConcave = buildConcaveGeoPolygon(-135.7, -30.3, - -135.7, -30.2, - -136, -30.2, - -136, -30.3); + GeoPolygon polConcave = + buildConcaveGeoPolygon(-135.7, -30.3, -135.7, -30.2, -136, -30.2, -136, -30.3); int rel = originalConvexPol.getRelationship(polConvex); - assertEquals(GeoArea.WITHIN, rel); + assertEquals(GeoArea.WITHIN, rel); rel = polConvex.getRelationship(originalConvexPol); - assertEquals(GeoArea.CONTAINS, rel); + assertEquals(GeoArea.CONTAINS, rel); rel = originalConvexPol.getRelationship(polConcave); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = polConcave.getRelationship(originalConvexPol); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = originalConvexPolInv.getRelationship(polConvex); - assertEquals(GeoArea.WITHIN, rel); + assertEquals(GeoArea.WITHIN, rel); rel = polConvex.getRelationship(originalConvexPolInv); - assertEquals(GeoArea.CONTAINS, rel); + assertEquals(GeoArea.CONTAINS, rel); rel = originalConvexPolInv.getRelationship(polConcave); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = polConcave.getRelationship(originalConvexPolInv); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); } @Test public void testGeoSimplePolygonWithCircle() { - //POLYGON ((19.845091 -60.452631, 20.119948 -61.655652, 23.207901 -61.453298, 22.820804 -60.257713, 19.845091 -60.452631)) disjoint - GeoPolygon originalConvexPol = buildConvexGeoPolygon(19.84509, -60.452631, - 20.119948, -61.655652, - 23.207901, -61.453298, - 22.820804, -60.257713); + // POLYGON ((19.845091 -60.452631, 20.119948 -61.655652, 23.207901 -61.453298, 22.820804 + // -60.257713, 19.845091 -60.452631)) disjoint + GeoPolygon originalConvexPol = + buildConvexGeoPolygon( + 19.84509, + -60.452631, + 20.119948, + -61.655652, + 23.207901, + -61.453298, + 22.820804, + -60.257713); - GeoPolygon originalConcavePol = buildConcaveGeoPolygon(19.84509, 
-60.452631, - 20.119948, -61.655652, - 23.207901, -61.453298, - 22.820804, -60.257713); + GeoPolygon originalConcavePol = + buildConcaveGeoPolygon( + 19.84509, + -60.452631, + 20.119948, + -61.655652, + 23.207901, + -61.453298, + 22.820804, + -60.257713); - GeoPolygon originalComplexPol = buildComplexGeoPolygon(19.84509, -60.452631, - 20.119948, -61.655652, - 23.207901, -61.453298, - 22.820804, -60.257713); + GeoPolygon originalComplexPol = + buildComplexGeoPolygon( + 19.84509, + -60.452631, + 20.119948, + -61.655652, + 23.207901, + -61.453298, + 22.820804, + -60.257713); - GeoCircle outCircle = GeoCircleFactory.makeGeoCircle(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-70), Geo3DUtil.fromDegrees(23), Geo3DUtil.fromDegrees(1)); + GeoCircle outCircle = + GeoCircleFactory.makeGeoCircle( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-70), + Geo3DUtil.fromDegrees(23), + Geo3DUtil.fromDegrees(1)); int rel = originalConvexPol.getRelationship(outCircle); assertEquals(GeoArea.DISJOINT, rel); rel = originalConcavePol.getRelationship(outCircle); @@ -556,7 +578,12 @@ public class TestSimpleGeoPolygonRelationships { rel = originalComplexPol.getRelationship(outCircle); assertEquals(GeoArea.DISJOINT, rel); - GeoCircle overlapCircle = GeoCircleFactory.makeGeoCircle(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-61.5), Geo3DUtil.fromDegrees(20), Geo3DUtil.fromDegrees(1)); + GeoCircle overlapCircle = + GeoCircleFactory.makeGeoCircle( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-61.5), + Geo3DUtil.fromDegrees(20), + Geo3DUtil.fromDegrees(1)); rel = originalConvexPol.getRelationship(overlapCircle); assertEquals(GeoArea.OVERLAPS, rel); rel = originalConcavePol.getRelationship(overlapCircle); @@ -564,7 +591,12 @@ public class TestSimpleGeoPolygonRelationships { rel = originalComplexPol.getRelationship(overlapCircle); assertEquals(GeoArea.OVERLAPS, rel); - GeoCircle inCircle = GeoCircleFactory.makeGeoCircle(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-61), Geo3DUtil.fromDegrees(21), Geo3DUtil.fromDegrees(0.1)); + GeoCircle inCircle = + GeoCircleFactory.makeGeoCircle( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-61), + Geo3DUtil.fromDegrees(21), + Geo3DUtil.fromDegrees(0.1)); rel = originalConvexPol.getRelationship(inCircle); assertEquals(GeoArea.WITHIN, rel); rel = originalConcavePol.getRelationship(inCircle); @@ -572,7 +604,12 @@ public class TestSimpleGeoPolygonRelationships { rel = originalComplexPol.getRelationship(inCircle); assertEquals(GeoArea.WITHIN, rel); - GeoCircle onCircle = GeoCircleFactory.makeGeoCircle(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-61), Geo3DUtil.fromDegrees(21), Geo3DUtil.fromDegrees(10.)); + GeoCircle onCircle = + GeoCircleFactory.makeGeoCircle( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-61), + Geo3DUtil.fromDegrees(21), + Geo3DUtil.fromDegrees(10.)); rel = originalConvexPol.getRelationship(onCircle); assertEquals(GeoArea.CONTAINS, rel); rel = originalConcavePol.getRelationship(onCircle); @@ -583,26 +620,48 @@ public class TestSimpleGeoPolygonRelationships { @Test public void testGeoSimplePolygonWithBBox() { - //POLYGON ((19.845091 -60.452631, 20.119948 -61.655652, 23.207901 -61.453298, 22.820804 -60.257713, 19.845091 -60.452631)) disjoint - GeoPolygon originalConvexPol = buildConvexGeoPolygon(19.84509, -60.452631, - 20.119948, -61.655652, - 23.207901, -61.453298, - 22.820804, -60.257713); + // POLYGON ((19.845091 -60.452631, 20.119948 -61.655652, 23.207901 -61.453298, 22.820804 + // -60.257713, 19.845091 -60.452631)) disjoint + GeoPolygon originalConvexPol = + 
buildConvexGeoPolygon( + 19.84509, + -60.452631, + 20.119948, + -61.655652, + 23.207901, + -61.453298, + 22.820804, + -60.257713); - GeoPolygon originalConcavePol = buildConcaveGeoPolygon(19.84509, -60.452631, - 20.119948, -61.655652, - 23.207901, -61.453298, - 22.820804, -60.257713); + GeoPolygon originalConcavePol = + buildConcaveGeoPolygon( + 19.84509, + -60.452631, + 20.119948, + -61.655652, + 23.207901, + -61.453298, + 22.820804, + -60.257713); - GeoPolygon originalComplexPol = buildComplexGeoPolygon(19.84509, -60.452631, - 20.119948, -61.655652, - 23.207901, -61.453298, - 22.820804, -60.257713); + GeoPolygon originalComplexPol = + buildComplexGeoPolygon( + 19.84509, + -60.452631, + 20.119948, + -61.655652, + 23.207901, + -61.453298, + 22.820804, + -60.257713); - GeoBBox outRectangle = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-69), - Geo3DUtil.fromDegrees(-70), - Geo3DUtil.fromDegrees(22), - Geo3DUtil.fromDegrees(23)); + GeoBBox outRectangle = + GeoBBoxFactory.makeGeoBBox( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-69), + Geo3DUtil.fromDegrees(-70), + Geo3DUtil.fromDegrees(22), + Geo3DUtil.fromDegrees(23)); int rel = originalConvexPol.getRelationship(outRectangle); assertEquals(GeoArea.DISJOINT, rel); rel = outRectangle.getRelationship(originalConvexPol); @@ -612,10 +671,13 @@ public class TestSimpleGeoPolygonRelationships { rel = originalComplexPol.getRelationship(outRectangle); assertEquals(GeoArea.DISJOINT, rel); - GeoBBox overlapRectangle = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-61), - Geo3DUtil.fromDegrees(-62), - Geo3DUtil.fromDegrees(22), - Geo3DUtil.fromDegrees(23)); + GeoBBox overlapRectangle = + GeoBBoxFactory.makeGeoBBox( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-61), + Geo3DUtil.fromDegrees(-62), + Geo3DUtil.fromDegrees(22), + Geo3DUtil.fromDegrees(23)); rel = originalConvexPol.getRelationship(overlapRectangle); assertEquals(GeoArea.OVERLAPS, rel); rel = overlapRectangle.getRelationship(originalConvexPol); @@ -625,10 +687,13 @@ public class TestSimpleGeoPolygonRelationships { rel = originalComplexPol.getRelationship(overlapRectangle); assertEquals(GeoArea.OVERLAPS, rel); - GeoBBox inRectangle = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-61), - Geo3DUtil.fromDegrees(-61.1), - Geo3DUtil.fromDegrees(22.5), - Geo3DUtil.fromDegrees(23)); + GeoBBox inRectangle = + GeoBBoxFactory.makeGeoBBox( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-61), + Geo3DUtil.fromDegrees(-61.1), + Geo3DUtil.fromDegrees(22.5), + Geo3DUtil.fromDegrees(23)); rel = originalConvexPol.getRelationship(inRectangle); assertEquals(GeoArea.WITHIN, rel); rel = inRectangle.getRelationship(originalConvexPol); @@ -638,10 +703,13 @@ public class TestSimpleGeoPolygonRelationships { rel = originalComplexPol.getRelationship(inRectangle); assertEquals(GeoArea.WITHIN, rel); - GeoBBox onRectangle = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-59), - Geo3DUtil.fromDegrees(-64.1), - Geo3DUtil.fromDegrees(18.5), - Geo3DUtil.fromDegrees(27)); + GeoBBox onRectangle = + GeoBBoxFactory.makeGeoBBox( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-59), + Geo3DUtil.fromDegrees(-64.1), + Geo3DUtil.fromDegrees(18.5), + Geo3DUtil.fromDegrees(27)); rel = originalConvexPol.getRelationship(onRectangle); assertEquals(GeoArea.CONTAINS, rel); rel = onRectangle.getRelationship(originalConvexPol); @@ -650,124 +718,117 @@ public class TestSimpleGeoPolygonRelationships { assertEquals(GeoArea.OVERLAPS, rel); rel = 
originalComplexPol.getRelationship(onRectangle); assertEquals(GeoArea.CONTAINS, rel); - } @Test public void testGeoSimplePolygonWithComposite() { GeoShape shape = getCompositeShape(); - //POLYGON((-145.8555 -5.13, -145.8540 -5.13, -145.8540 -5.12, -145.8555 -5.12, -145.8555 -5.13)) - GeoPolygon polConvex = buildConvexGeoPolygon(-145.8555, -5.13, - -145.8540, -5.13, - -145.8540, -5.12, - -145.8555, -5.12); + // POLYGON((-145.8555 -5.13, -145.8540 -5.13, -145.8540 -5.12, -145.8555 -5.12, -145.8555 + // -5.13)) + GeoPolygon polConvex = + buildConvexGeoPolygon( + -145.8555, -5.13, -145.8540, -5.13, -145.8540, -5.12, -145.8555, -5.12); - GeoPolygon polConcave = buildConcaveGeoPolygon(-145.8555, -5.13, - -145.8540, -5.13, - -145.8540, -5.12, - -145.8555, -5.12); + GeoPolygon polConcave = + buildConcaveGeoPolygon( + -145.8555, -5.13, -145.8540, -5.13, -145.8540, -5.12, -145.8555, -5.12); int rel = polConvex.getRelationship(shape); - assertEquals(GeoArea.DISJOINT, rel); + assertEquals(GeoArea.DISJOINT, rel); rel = polConcave.getRelationship(shape); - assertEquals(GeoArea.WITHIN, rel); + assertEquals(GeoArea.WITHIN, rel); - //POLYGON((-145.8555 -5.13, -145.85 -5.13, -145.85 -5.12, -145.8555 -5.12, -145.8555 -5.13)) - polConvex = buildConvexGeoPolygon(-145.8555, -5.13, - -145.85, -5.13, - -145.85, -5.12, - -145.8555, -5.12); + // POLYGON((-145.8555 -5.13, -145.85 -5.13, -145.85 -5.12, -145.8555 -5.12, -145.8555 -5.13)) + polConvex = + buildConvexGeoPolygon(-145.8555, -5.13, -145.85, -5.13, -145.85, -5.12, -145.8555, -5.12); - polConcave = buildConcaveGeoPolygon(-145.8555, -5.13, - -145.85, -5.13, - -145.85, -5.12, - -145.8555, -5.12); + polConcave = + buildConcaveGeoPolygon(-145.8555, -5.13, -145.85, -5.13, -145.85, -5.12, -145.8555, -5.12); rel = polConvex.getRelationship(shape); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = polConcave.getRelationship(shape); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); - //POLYGON((-146 -5.18, -145.854 -5.18, -145.854 -5.11, -146 -5.11, -146 -5.18)) - //Case overlaping on of the shapes - polConvex = buildConvexGeoPolygon(-146, -5.18, - -145.854, -5.18, - -145.854, -5.11, - -146, -5.11); + // POLYGON((-146 -5.18, -145.854 -5.18, -145.854 -5.11, -146 -5.11, -146 -5.18)) + // Case overlapping one of the shapes + polConvex = buildConvexGeoPolygon(-146, -5.18, -145.854, -5.18, -145.854, -5.11, -146, -5.11); - polConcave = buildConcaveGeoPolygon(-146, -5.18, - -145.854, -5.18, - -145.854, -5.11, - -146, -5.11); + polConcave = buildConcaveGeoPolygon(-146, -5.18, -145.854, -5.18, -145.854, -5.11, -146, -5.11); rel = polConvex.getRelationship(shape); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); rel = polConcave.getRelationship(shape); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); - //POLYGON((-145.88 -5.13, -145.87 -5.13, -145.87 -5.12, -145.88 -5.12, -145.88 -5.13)) - polConvex = buildConvexGeoPolygon(-145.88, -5.13, - -145.87, -5.13, - -145.87, -5.12, - -145.88, -5.12); + // POLYGON((-145.88 -5.13, -145.87 -5.13, -145.87 -5.12, -145.88 -5.12, -145.88 -5.13)) + polConvex = + buildConvexGeoPolygon(-145.88, -5.13, -145.87, -5.13, -145.87, -5.12, -145.88, -5.12); - polConcave = buildConcaveGeoPolygon(-145.88, -5.13, - -145.87, -5.13, - -145.87, -5.12, - -145.88, -5.12); + polConcave = + buildConcaveGeoPolygon(-145.88, -5.13, -145.87, -5.13, -145.87, -5.12, -145.88, -5.12); rel = polConvex.getRelationship(shape); -
assertEquals(GeoArea.CONTAINS, rel); + assertEquals(GeoArea.CONTAINS, rel); rel = polConcave.getRelationship(shape); - assertEquals(GeoArea.OVERLAPS, rel); + assertEquals(GeoArea.OVERLAPS, rel); } @Test - public void testDegeneratedPointIntersectShape(){ - GeoBBox bBox1 = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, 1,0,0,1); - GeoBBox bBox2 = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, 1,1,1,1); + public void testDegeneratedPointIntersectShape() { + GeoBBox bBox1 = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, 1, 0, 0, 1); + GeoBBox bBox2 = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, 1, 1, 1, 1); int rel = bBox1.getRelationship(bBox2); - //OVERLAPS instead of WITHIN. In this case the degenerated point lies on the edge of the shape. - //intersects() returns true for one plane of the BBox and hence method return OVERLAPS. - assertEquals(GeoArea.OVERLAPS, rel); + // OVERLAPS instead of WITHIN. In this case the degenerated point lies on the edge of the shape. + // intersects() returns true for one plane of the BBox and hence the method returns OVERLAPS. + assertEquals(GeoArea.OVERLAPS, rel); rel = bBox2.getRelationship(bBox1); // The degenerated point cannot determine whether it is on the edge. It uses WITHIN, which is true, // and therefore CONTAINS - assertEquals(GeoArea.CONTAINS, rel); + assertEquals(GeoArea.CONTAINS, rel); } @Test - public void testDegeneratedPointInPole(){ - GeoBBox bBox1 = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, Math.PI*0.5, Math.PI*0.5, 0, 0); - GeoPoint point = new GeoPoint(PlanetModel.SPHERE, Math.PI*0.5, Math.PI); - System.out.println("bbox1 = "+bBox1+"; point = "+point); + public void testDegeneratedPointInPole() { + GeoBBox bBox1 = + GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, Math.PI * 0.5, Math.PI * 0.5, 0, 0); + GeoPoint point = new GeoPoint(PlanetModel.SPHERE, Math.PI * 0.5, Math.PI); + System.out.println("bbox1 = " + bBox1 + "; point = " + point); assertTrue(bBox1.isWithin(point)); } @Test - public void testDegeneratePathShape(){ + public void testDegeneratePathShape() { GeoPoint point1 = new GeoPoint(PlanetModel.SPHERE, 0, 0); GeoPoint point2 = new GeoPoint(PlanetModel.SPHERE, 0, 1); GeoPoint[] pointPath1 = new GeoPoint[] {point1, point2}; GeoPath path1 = GeoPathFactory.makeGeoPath(PlanetModel.SPHERE, 0, pointPath1); GeoPath path2 = GeoPathFactory.makeGeoPath(PlanetModel.SPHERE, 1, pointPath1); int rel = path1.getRelationship(path2); - //if an end point is inside the shape it will always return intersects - assertEquals(GeoArea.CONTAINS, rel); //should be contains? + // if an end point is inside the shape it will always return intersects + assertEquals(GeoArea.CONTAINS, rel); // should be contains?
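    // (Editorial aside, illustrative only, not part of this patch.) A reading that makes these
    // assertions consistent: a.getRelationship(b) describes the argument b relative to the
    // receiver a, so GeoArea.CONTAINS means "b contains a" and GeoArea.WITHIN means "b lies
    // within a". The zero-cutoff path above therefore answers CONTAINS (the wider path contains
    // it), while the wider path just below answers WITHIN (the thin path lies within it).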
rel = path2.getRelationship(path1); assertEquals(GeoArea.WITHIN, rel); } - - private GeoPolygon buildConvexGeoPolygon(double lon1, double lat1, - double lon2, double lat2, - double lon3, double lat3, - double lon4, double lat4) { - GeoPoint point1 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat1), Geo3DUtil.fromDegrees(lon1)); - GeoPoint point2 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat2), Geo3DUtil.fromDegrees(lon2)); - GeoPoint point3 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat3), Geo3DUtil.fromDegrees(lon3)); - GeoPoint point4 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat4), Geo3DUtil.fromDegrees(lon4)); + private GeoPolygon buildConvexGeoPolygon( + double lon1, + double lat1, + double lon2, + double lat2, + double lon3, + double lat3, + double lon4, + double lat4) { + GeoPoint point1 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat1), Geo3DUtil.fromDegrees(lon1)); + GeoPoint point2 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat2), Geo3DUtil.fromDegrees(lon2)); + GeoPoint point3 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat3), Geo3DUtil.fromDegrees(lon3)); + GeoPoint point4 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat4), Geo3DUtil.fromDegrees(lon4)); final List<GeoPoint> points = new ArrayList<>(); points.add(point1); points.add(point2); @@ -776,14 +837,23 @@ public class TestSimpleGeoPolygonRelationships { return GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points); } - private GeoPolygon buildConcaveGeoPolygon(double lon1, double lat1, - double lon2, double lat2, - double lon3, double lat3, - double lon4, double lat4) { - GeoPoint point1 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat1), Geo3DUtil.fromDegrees(lon1)); - GeoPoint point2 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat2), Geo3DUtil.fromDegrees(lon2)); - GeoPoint point3 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat3), Geo3DUtil.fromDegrees(lon3)); - GeoPoint point4 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat4), Geo3DUtil.fromDegrees(lon4)); + private GeoPolygon buildConcaveGeoPolygon( + double lon1, + double lat1, + double lon2, + double lat2, + double lon3, + double lat3, + double lon4, + double lat4) { + GeoPoint point1 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat1), Geo3DUtil.fromDegrees(lon1)); + GeoPoint point2 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat2), Geo3DUtil.fromDegrees(lon2)); + GeoPoint point3 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat3), Geo3DUtil.fromDegrees(lon3)); + GeoPoint point4 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat4), Geo3DUtil.fromDegrees(lon4)); final List<GeoPoint> points = new ArrayList<>(); points.add(point1); points.add(point2); @@ -792,14 +862,23 @@ public class TestSimpleGeoPolygonRelationships { return GeoPolygonFactory.makeGeoConcavePolygon(PlanetModel.SPHERE, points); } - private GeoPolygon buildComplexGeoPolygon(double lon1, double lat1, - double lon2, double lat2, - double lon3, double lat3, - double lon4, double lat4) { - GeoPoint point1 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat1), Geo3DUtil.fromDegrees(lon1)); - GeoPoint point2 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat2), Geo3DUtil.fromDegrees(lon2)); - GeoPoint point3 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat3), Geo3DUtil.fromDegrees(lon3)); - GeoPoint point4 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat4),
Geo3DUtil.fromDegrees(lon4)); + private GeoPolygon buildComplexGeoPolygon( + double lon1, + double lat1, + double lon2, + double lat2, + double lon3, + double lat3, + double lon4, + double lat4) { + GeoPoint point1 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat1), Geo3DUtil.fromDegrees(lon1)); + GeoPoint point2 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat2), Geo3DUtil.fromDegrees(lon2)); + GeoPoint point3 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat3), Geo3DUtil.fromDegrees(lon3)); + GeoPoint point4 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat4), Geo3DUtil.fromDegrees(lon4)); final List<GeoPoint> points = new ArrayList<>(); points.add(point1); points.add(point2); @@ -809,58 +888,112 @@ public class TestSimpleGeoPolygonRelationships { return GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.SPHERE, Collections.singletonList(pd)); } - private GeoPolygon buildConvexGeoPolygonWithHole(double lon1, double lat1, - double lon2, double lat2, - double lon3, double lat3, - double lon4, double lat4, - GeoPolygon hole) { - GeoPoint point1 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat1), Geo3DUtil.fromDegrees(lon1)); - GeoPoint point2 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat2), Geo3DUtil.fromDegrees(lon2)); - GeoPoint point3 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat3), Geo3DUtil.fromDegrees(lon3)); - GeoPoint point4 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat4), Geo3DUtil.fromDegrees(lon4)); + private GeoPolygon buildConvexGeoPolygonWithHole( + double lon1, + double lat1, + double lon2, + double lat2, + double lon3, + double lat3, + double lon4, + double lat4, + GeoPolygon hole) { + GeoPoint point1 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat1), Geo3DUtil.fromDegrees(lon1)); + GeoPoint point2 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat2), Geo3DUtil.fromDegrees(lon2)); + GeoPoint point3 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat3), Geo3DUtil.fromDegrees(lon3)); + GeoPoint point4 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat4), Geo3DUtil.fromDegrees(lon4)); final List<GeoPoint> points = new ArrayList<>(); points.add(point1); points.add(point2); points.add(point3); points.add(point4); - //return new GeoConvexPolygon(PlanetModel.SPHERE,points, Collections.singletonList(hole)); - return GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points, Collections.singletonList(hole)); + // return new GeoConvexPolygon(PlanetModel.SPHERE,points, Collections.singletonList(hole)); + return GeoPolygonFactory.makeGeoPolygon( + PlanetModel.SPHERE, points, Collections.singletonList(hole)); } - private GeoPolygon buildConcaveGeoPolygonWithHole(double lon1, double lat1, - double lon2, double lat2, - double lon3, double lat3, - double lon4, double lat4, - GeoPolygon hole) { - GeoPoint point1 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat1), Geo3DUtil.fromDegrees(lon1)); - GeoPoint point2 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat2), Geo3DUtil.fromDegrees(lon2)); - GeoPoint point3 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat3), Geo3DUtil.fromDegrees(lon3)); - GeoPoint point4 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat4), Geo3DUtil.fromDegrees(lon4)); + private GeoPolygon buildConcaveGeoPolygonWithHole( + double lon1, + double lat1, + double lon2, + double lat2, + double lon3, + double lat3, + double lon4, + double lat4, + GeoPolygon hole) { + GeoPoint point1 = + new
GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat1), Geo3DUtil.fromDegrees(lon1)); + GeoPoint point2 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat2), Geo3DUtil.fromDegrees(lon2)); + GeoPoint point3 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat3), Geo3DUtil.fromDegrees(lon3)); + GeoPoint point4 = + new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(lat4), Geo3DUtil.fromDegrees(lon4)); final List<GeoPoint> points = new ArrayList<>(); points.add(point1); points.add(point2); points.add(point3); points.add(point4); - return GeoPolygonFactory.makeGeoConcavePolygon(PlanetModel.SPHERE, points, Collections.singletonList(hole)); + return GeoPolygonFactory.makeGeoConcavePolygon( + PlanetModel.SPHERE, points, Collections.singletonList(hole)); } - private GeoShape getCompositeShape(){ - //MULTIPOLYGON(((-145.790967486 -5.17543698881, -145.790854979 -5.11348060995, -145.853073512 -5.11339421216, -145.853192037 -5.17535061936, -145.790967486 -5.17543698881)), - //((-145.8563923 -5.17527125408, -145.856222168 -5.11332154814, -145.918433943 -5.11317773171, -145.918610092 -5.17512738429, -145.8563923 -5.17527125408))) - GeoPoint point1 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-5.17543698881), Geo3DUtil.fromDegrees(-145.790967486)); - GeoPoint point2 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-5.11348060995), Geo3DUtil.fromDegrees(-145.790854979)); - GeoPoint point3 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-5.11339421216), Geo3DUtil.fromDegrees(-145.853073512)); - GeoPoint point4 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-5.17535061936), Geo3DUtil.fromDegrees(-145.853192037)); + private GeoShape getCompositeShape() { + // MULTIPOLYGON(((-145.790967486 -5.17543698881, -145.790854979 -5.11348060995, -145.853073512 + // -5.11339421216, -145.853192037 -5.17535061936, -145.790967486 -5.17543698881)), + // ((-145.8563923 -5.17527125408, -145.856222168 -5.11332154814, -145.918433943 -5.11317773171, + // -145.918610092 -5.17512738429, -145.8563923 -5.17527125408))) + GeoPoint point1 = + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-5.17543698881), + Geo3DUtil.fromDegrees(-145.790967486)); + GeoPoint point2 = + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-5.11348060995), + Geo3DUtil.fromDegrees(-145.790854979)); + GeoPoint point3 = + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-5.11339421216), + Geo3DUtil.fromDegrees(-145.853073512)); + GeoPoint point4 = + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-5.17535061936), + Geo3DUtil.fromDegrees(-145.853192037)); final List<GeoPoint> points1 = new ArrayList<>(); points1.add(point1); points1.add(point2); points1.add(point3); points1.add(point4); - GeoPolygon pol1 = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE,points1); - GeoPoint point5 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-5.17527125408), Geo3DUtil.fromDegrees(-145.8563923)); - GeoPoint point6 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-5.11332154814), Geo3DUtil.fromDegrees(-145.856222168)); - GeoPoint point7 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-5.11317773171), Geo3DUtil.fromDegrees(-145.918433943)); - GeoPoint point8 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-5.17512738429), Geo3DUtil.fromDegrees(-145.918610092)); + GeoPolygon pol1 = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points1); + GeoPoint point5 = + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-5.17527125408), +
Geo3DUtil.fromDegrees(-145.8563923)); + GeoPoint point6 = + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-5.11332154814), + Geo3DUtil.fromDegrees(-145.856222168)); + GeoPoint point7 = + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-5.11317773171), + Geo3DUtil.fromDegrees(-145.918433943)); + GeoPoint point8 = + new GeoPoint( + PlanetModel.SPHERE, + Geo3DUtil.fromDegrees(-5.17512738429), + Geo3DUtil.fromDegrees(-145.918610092)); final List<GeoPoint> points2 = new ArrayList<>(); points2.add(point5); points2.add(point6); diff --git a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestXYZSolid.java b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestXYZSolid.java index b0feebc534f..6cf813b4c5b 100644 --- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestXYZSolid.java +++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestXYZSolid.java @@ -40,7 +40,7 @@ public class TestXYZSolid extends LuceneTestCase { // Some things should be disjoint... shape = new GeoStandardCircle(PlanetModel.SPHERE, 0.0, 0.0, 0.1); assertEquals(GeoArea.DISJOINT, s.getRelationship(shape)); - // And, some things should be within... + // And, some things should be within... shape = new GeoStandardCircle(PlanetModel.SPHERE, 0.0, Math.PI, 0.1); assertEquals(GeoArea.WITHIN, s.getRelationship(shape)); // And, some things should overlap. @@ -50,7 +50,7 @@ public class TestXYZSolid extends LuceneTestCase { // Partial world should be contained by GeoWorld object... shape = new GeoWorld(PlanetModel.SPHERE); assertEquals(GeoArea.CONTAINS, s.getRelationship(shape)); - + // Something inside the world s = new StandardXYZSolid(PlanetModel.SPHERE, -0.1, 0.1, -0.1, 0.1, -0.1, 0.1); // All shapes should be disjoint @@ -58,14 +58,13 @@ public class TestXYZSolid extends LuceneTestCase { assertEquals(GeoArea.DISJOINT, s.getRelationship(shape)); shape = new GeoWorld(PlanetModel.SPHERE); assertEquals(GeoArea.DISJOINT, s.getRelationship(shape)); - } @Test public void testDegenerateRelationships() { GeoArea solid; GeoShape shape; - + // Basic test of the factory method - non-degenerate solid = GeoAreaFactory.makeGeoArea(PlanetModel.SPHERE, -2.0, 2.0, -2.0, 2.0, -2.0, 2.0); // Any shape, except whole world, should be within. @@ -125,7 +124,7 @@ public class TestXYZSolid extends LuceneTestCase { assertEquals(GeoArea.OVERLAPS, solid.getRelationship(shape)); shape = new GeoStandardCircle(PlanetModel.SPHERE, -Math.PI * 0.5, 0.0, 0.1); assertEquals(GeoArea.OVERLAPS, solid.getRelationship(shape)); - + // Build a shape degenerate in (x,z), which has no points on sphere solid = GeoAreaFactory.makeGeoArea(PlanetModel.SPHERE, 0.0, 0.0, -0.1, 0.1, 0.0, 0.0); // disjoint with everything? @@ -159,7 +158,7 @@ public class TestXYZSolid extends LuceneTestCase { assertEquals(GeoArea.OVERLAPS, solid.getRelationship(shape)); // MHL for y-z check - + // Build a shape that is degenerate in x, which has zero points intersecting sphere solid = GeoAreaFactory.makeGeoArea(PlanetModel.SPHERE, 0.0, 0.0, -0.1, 0.1, -0.1, 0.1); // disjoint with everything?
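A later hunk in this file touches testLUCENE8457, whose comment states the invariant being exercised: a point within a shape must also be within the XYZSolid built from that shape's bounds. The following is a minimal, self-contained sketch of that invariant (not part of this patch); the class name and the bounding-box coordinates are made-up illustrative values, while the factory calls (GeoBBoxFactory, XYZBounds, XYZSolidFactory) are the ones the test itself uses.

import org.apache.lucene.spatial3d.geom.*;

public class BoundsEnclosureSketch {
  public static void main(String[] args) {
    // Any GeoShape works here; this bbox's corners are arbitrary example values.
    GeoShape shape =
        GeoBBoxFactory.makeGeoBBox(PlanetModel.WGS84, Math.PI * 0.25, -Math.PI * 0.25, -0.5, 0.5);
    // Derive the shape's (x, y, z) bounds and wrap them in a solid.
    XYZBounds bounds = new XYZBounds();
    shape.getBounds(bounds);
    XYZSolid solid = XYZSolidFactory.makeXYZSolid(PlanetModel.WGS84, bounds);
    // Invariant: a point within the shape must be within the enclosing solid.
    GeoPoint point = new GeoPoint(PlanetModel.WGS84, 0.0, 0.0); // (lat, lon) in radians
    if (shape.isWithin(point)) {
      assert solid.isWithin(point) : "bounds-derived solid must enclose the shape";
    }
  }
}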
@@ -168,7 +167,8 @@ public class TestXYZSolid extends LuceneTestCase { shape = new GeoWorld(PlanetModel.SPHERE); assertEquals(GeoArea.DISJOINT, solid.getRelationship(shape)); - // Build a shape that is degenerate in x, which has zero points intersecting sphere, second variation + // Build a shape that is degenerate in x, which has zero points intersecting sphere, second + // variation solid = GeoAreaFactory.makeGeoArea(PlanetModel.SPHERE, 0.0, 0.0, -0.1, 0.1, 1.1, 1.2); // disjoint with everything? shape = new GeoStandardCircle(PlanetModel.SPHERE, 0.0, 0.0, 0.1); @@ -214,26 +214,26 @@ public class TestXYZSolid extends LuceneTestCase { // MHL for degenerate Y // MHL for degenerate Z - + } @Test - //@AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/LUCENE-8457") + // @AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/LUCENE-8457") public void testLUCENE8457() { - GeoShape shape = GeoBBoxFactory.makeGeoBBox(PlanetModel.WGS84, Math.PI, 1.2487354264870392, 0.0, 3.5181789305199657E-12); - //System.out.println("shape = "+shape); + GeoShape shape = + GeoBBoxFactory.makeGeoBBox( + PlanetModel.WGS84, Math.PI, 1.2487354264870392, 0.0, 3.5181789305199657E-12); + // System.out.println("shape = "+shape); XYZBounds bounds = new XYZBounds(); shape.getBounds(bounds); XYZSolid solid = XYZSolidFactory.makeXYZSolid(PlanetModel.WGS84, bounds); - //System.out.println("solid = "+solid); + // System.out.println("solid = "+solid); GeoPoint point = new GeoPoint(PlanetModel.WGS84, 1.4812439919751819, -3.141592653589793); - //System.out.println("point="+point); - //if the point is within the shape, it must be within the solid + // System.out.println("point="+point); + // if the point is within the shape, it must be within the solid if (shape.isWithin(point)) { assertTrue(solid.isWithin(point)); } - } - } diff --git a/lucene/suggest/src/java/org/apache/lucene/search/spell/CombineSuggestion.java b/lucene/suggest/src/java/org/apache/lucene/search/spell/CombineSuggestion.java index 4a6717755ac..49738b3fde3 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/spell/CombineSuggestion.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/spell/CombineSuggestion.java @@ -16,26 +16,19 @@ */ package org.apache.lucene.search.spell; -/** - *
<p>A suggestion generated by combining one or more original query terms</p> - */ +/** A suggestion generated by combining one or more original query terms */ public class CombineSuggestion { - /** - *
<p>The indexes from the passed-in array of terms used to make this word combination</p> - */ + /** The indexes from the passed-in array of terms used to make this word combination */ public final int[] originalTermIndexes; - /** - *
<p>The word combination suggestion</p> - */ + /** The word combination suggestion */ public final SuggestWord suggestion; - + /** - * Creates a new CombineSuggestion from a suggestion and - * an array of term ids (referencing the indexes to the original terms that - * form this combined suggestion) + * Creates a new CombineSuggestion from a suggestion and an array of term ids + * (referencing the indexes to the original terms that form this combined suggestion) */ - public CombineSuggestion (SuggestWord suggestion, int[] originalTermIndexes) { + public CombineSuggestion(SuggestWord suggestion, int[] originalTermIndexes) { this.suggestion = suggestion; this.originalTermIndexes = originalTermIndexes; - } + } } diff --git a/lucene/suggest/src/java/org/apache/lucene/search/spell/Dictionary.java b/lucene/suggest/src/java/org/apache/lucene/search/spell/Dictionary.java index 5814059df2e..bc3f589ba34 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/spell/Dictionary.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/spell/Dictionary.java @@ -15,20 +15,19 @@ * limitations under the License. */ package org.apache.lucene.search.spell; -import java.io.IOException; +import java.io.IOException; import org.apache.lucene.search.suggest.InputIterator; /** - * A simple interface representing a Dictionary. A Dictionary - * here is a list of entries, where every entry consists of - * term, weight and payload. - * + * A simple interface representing a Dictionary. A Dictionary here is a list of entries, where every + * entry consists of term, weight and payload. */ public interface Dictionary { /** * Returns an iterator over all the entries + * * @return Iterator */ InputIterator getEntryIterator() throws IOException; diff --git a/lucene/suggest/src/java/org/apache/lucene/search/spell/DirectSpellChecker.java b/lucene/suggest/src/java/org/apache/lucene/search/spell/DirectSpellChecker.java index 3800596afd8..10bb1e9d092 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/spell/DirectSpellChecker.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/spell/DirectSpellChecker.java @@ -23,7 +23,6 @@ import java.util.Comparator; import java.util.HashSet; import java.util.Locale; import java.util.PriorityQueue; - import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.MultiTerms; import org.apache.lucene.index.Term; @@ -36,26 +35,25 @@ import org.apache.lucene.util.automaton.LevenshteinAutomata; /** * Simple automaton-based spellchecker. - *
<p> - * Candidates are presented directly from the term dictionary, based on - * Levenshtein distance. This is an alternative to {@link SpellChecker} - * if you are using an edit-distance-like metric such as Levenshtein - * or {@link JaroWinklerDistance}. - * <p> - * A practical benefit of this spellchecker is that it requires no additional - * datastructures (neither in RAM nor on disk) to do its work. - * + * + * <p>Candidates are presented directly from the term dictionary, based on Levenshtein distance. + * This is an alternative to {@link SpellChecker} if you are using an edit-distance-like metric such + * as Levenshtein or {@link JaroWinklerDistance}. + * + * <p>A practical benefit of this spellchecker is that it requires no additional datastructures + * (neither in RAM nor on disk) to do its work. + * + * @see LevenshteinAutomata + * @see FuzzyTermsEnum - * * @lucene.experimental */ public class DirectSpellChecker { - /** The default StringDistance, Damerau-Levenshtein distance implemented internally - * via {@link LevenshteinAutomata}. - *
<p> - * Note: this is the fastest distance metric, because Damerau-Levenshtein is used - * to draw candidates from the term dictionary: this just re-uses the scoring. + /** + * The default StringDistance, Damerau-Levenshtein distance implemented internally via {@link + * LevenshteinAutomata}. + * + * <p>Note: this is the fastest distance metric, because Damerau-Levenshtein is used to draw + * candidates from the term dictionary: this just re-uses the scoring. */ public static final StringDistance INTERNAL_LEVENSHTEIN = new LuceneLevenshteinDistance(); @@ -67,16 +65,19 @@ public class DirectSpellChecker { private int maxInspections = 5; /** minimum accuracy for a term to match */ private float accuracy = SpellChecker.DEFAULT_ACCURACY; - /** value in [0..1] (or absolute number >= 1) representing the minimum - * number of documents (of the total) where a term should appear. */ + /** + * value in [0..1] (or absolute number >= 1) representing the minimum number of documents (of + * the total) where a term should appear. + */ private float thresholdFrequency = 0f; /** minimum length of a query word to return suggestions */ private int minQueryLength = 4; /** maximum length of a query word to return suggestions */ private int maxQueryLength = Integer.MAX_VALUE; - /** value in [0..1] (or absolute number >= 1) representing the maximum - * number of documents (of the total) a query term can appear in to - * be corrected. */ + /** + * value in [0..1] (or absolute number >= 1) representing the maximum number of documents (of + * the total) a query term can appear in to be corrected. + */ private float maxQueryFrequency = 0.01f; /** true if the spellchecker should lowercase terms */ private boolean lowerCaseTerms = true; @@ -88,97 +89,86 @@ public class DirectSpellChecker { /** Creates a DirectSpellChecker with default configuration values */ public DirectSpellChecker() {} - /** Get the maximum number of Levenshtein edit-distances to draw - * candidate terms from. */ + /** Get the maximum number of Levenshtein edit-distances to draw candidate terms from. */ public int getMaxEdits() { return maxEdits; } - /** Sets the maximum number of Levenshtein edit-distances to draw - * candidate terms from. This value can be 1 or 2. The default is 2. - *
<p> - * Note: a large number of spelling errors occur with an edit distance - * of 1, by setting this value to 1 you can increase both performance - * and precision at the cost of recall. + /** + * Sets the maximum number of Levenshtein edit-distances to draw candidate terms from. This value + * can be 1 or 2. The default is 2. + * + * <p>Note: a large number of spelling errors occur with an edit distance of 1, by setting this + * value to 1 you can increase both performance and precision at the cost of recall. */ public void setMaxEdits(int maxEdits) { if (maxEdits < 1 || maxEdits > LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE) throw new UnsupportedOperationException("Invalid maxEdits"); this.maxEdits = maxEdits; } - - /** - * Get the minimal number of characters that must match exactly - */ + + /** Get the minimal number of characters that must match exactly */ public int getMinPrefix() { return minPrefix; } - + /** - * Sets the minimal number of initial characters (default: 1) - * that must match exactly. - *
<p>
    - * This can improve both performance and accuracy of results, - * as misspellings are commonly not the first character. + * Sets the minimal number of initial characters (default: 1) that must match exactly. + * + *
<p>
    This can improve both performance and accuracy of results, as misspellings are commonly not + * the first character. */ public void setMinPrefix(int minPrefix) { this.minPrefix = minPrefix; } - - /** - * Get the maximum number of top-N inspections per suggestion - */ + + /** Get the maximum number of top-N inspections per suggestion */ public int getMaxInspections() { return maxInspections; } /** * Set the maximum number of top-N inspections (default: 5) per suggestion. - *
<p>
    - * Increasing this number can improve the accuracy of results, at the cost - * of performance. + * + *
<p>
    Increasing this number can improve the accuracy of results, at the cost of performance. */ public void setMaxInspections(int maxInspections) { this.maxInspections = maxInspections; } - /** - * Get the minimal accuracy from the StringDistance for a match - */ + /** Get the minimal accuracy from the StringDistance for a match */ public float getAccuracy() { return accuracy; } /** - * Set the minimal accuracy required (default: 0.5f) from a StringDistance - * for a suggestion match. + * Set the minimal accuracy required (default: 0.5f) from a StringDistance for a suggestion match. */ public void setAccuracy(float accuracy) { this.accuracy = accuracy; } - /** - * Get the minimal threshold of documents a term must appear for a match - */ + /** Get the minimal threshold of documents a term must appear for a match */ public float getThresholdFrequency() { return thresholdFrequency; } /** * Set the minimal threshold of documents a term must appear for a match. - *
<p>
    - * This can improve quality by only suggesting high-frequency terms. Note that - * very high values might decrease performance slightly, by forcing the spellchecker - * to draw more candidates from the term dictionary, but a practical value such - * as 1 can be very useful towards improving quality. - *
<p>
    - * This can be specified as a relative percentage of documents such as 0.5f, - * or it can be specified as an absolute whole document frequency, such as 4f. - * Absolute document frequencies may not be fractional. + * + *
<p>
    This can improve quality by only suggesting high-frequency terms. Note that very high values + * might decrease performance slightly, by forcing the spellchecker to draw more candidates from + * the term dictionary, but a practical value such as 1 can be very useful towards + * improving quality. + * + *
<p>
    This can be specified as a relative percentage of documents such as 0.5f, or it can be + * specified as an absolute whole document frequency, such as 4f. Absolute document frequencies + * may not be fractional. */ public void setThresholdFrequency(float thresholdFrequency) { if (thresholdFrequency >= 1f && thresholdFrequency != (int) thresholdFrequency) - throw new IllegalArgumentException("Fractional absolute document frequencies are not allowed"); + throw new IllegalArgumentException( + "Fractional absolute document frequencies are not allowed"); this.thresholdFrequency = thresholdFrequency; } @@ -187,11 +177,10 @@ public class DirectSpellChecker { return minQueryLength; } - /** - * Set the minimum length of a query term (default: 4) needed to return suggestions. - *
<p>
    - * Very short query terms will often cause only bad suggestions with any distance - * metric. + /** + * Set the minimum length of a query term (default: 4) needed to return suggestions. + * + *
<p>
    Very short query terms will often cause only bad suggestions with any distance metric. */ public void setMinQueryLength(int minQueryLength) { if (minQueryLength > this.maxQueryLength) @@ -204,10 +193,10 @@ public class DirectSpellChecker { return maxQueryLength; } - /** - * Set the maximum length of a query term to return suggestions. - *
<p>
    - * Long queries can be expensive to process and/or trigger exceptions. + /** + * Set the maximum length of a query term to return suggestions. + * + *
<p>
    Long queries can be expensive to process and/or trigger exceptions. */ public void setMaxQueryLength(int maxQueryLength) { if (maxQueryLength < this.minQueryLength) @@ -216,28 +205,27 @@ public class DirectSpellChecker { } /** - * Get the maximum threshold of documents a query term can appear in order - * to provide suggestions. + * Get the maximum threshold of documents a query term can appear in order to provide suggestions. */ public float getMaxQueryFrequency() { return maxQueryFrequency; } /** - * Set the maximum threshold (default: 0.01f) of documents a query term can - * appear in order to provide suggestions. - *
<p>
    - * Very high-frequency terms are typically spelled correctly. Additionally, - * this can increase performance as it will do no work for the common case - * of correctly-spelled input terms. - *
<p>
    - * This can be specified as a relative percentage of documents such as 0.5f, - * or it can be specified as an absolute whole document frequency, such as 4f. - * Absolute document frequencies may not be fractional. + * Set the maximum threshold (default: 0.01f) of documents a query term can appear in order to + * provide suggestions. + * + *
<p>
    Very high-frequency terms are typically spelled correctly. Additionally, this can increase + * performance as it will do no work for the common case of correctly-spelled input terms. + * + *
<p>
    This can be specified as a relative percentage of documents such as 0.5f, or it can be + * specified as an absolute whole document frequency, such as 4f. Absolute document frequencies + * may not be fractional. */ public void setMaxQueryFrequency(float maxQueryFrequency) { if (maxQueryFrequency >= 1f && maxQueryFrequency != (int) maxQueryFrequency) - throw new IllegalArgumentException("Fractional absolute document frequencies are not allowed"); + throw new IllegalArgumentException( + "Fractional absolute document frequencies are not allowed"); this.maxQueryFrequency = maxQueryFrequency; } @@ -245,83 +233,73 @@ public class DirectSpellChecker { public boolean getLowerCaseTerms() { return lowerCaseTerms; } - - /** + + /** * True if the spellchecker should lowercase terms (default: true) - *
<p>
    - * This is a convenience method, if your index field has more complicated - * analysis (such as StandardTokenizer removing punctuation), it's probably - * better to turn this off, and instead run your query terms through your - * Analyzer first. - *
<p>
    - * If this option is not on, case differences count as an edit! + * + *
<p>
    This is a convenience method, if your index field has more complicated analysis (such as + * StandardTokenizer removing punctuation), it's probably better to turn this off, and instead run + * your query terms through your Analyzer first. + * + *
<p>
    If this option is not on, case differences count as an edit! */ public void setLowerCaseTerms(boolean lowerCaseTerms) { this.lowerCaseTerms = lowerCaseTerms; } - - /** - * Get the current comparator in use. - */ + + /** Get the current comparator in use. */ public Comparator getComparator() { return comparator; } /** - * Set the comparator for sorting suggestions. - * The default is {@link SuggestWordQueue#DEFAULT_COMPARATOR} + * Set the comparator for sorting suggestions. The default is {@link + * SuggestWordQueue#DEFAULT_COMPARATOR} */ public void setComparator(Comparator comparator) { this.comparator = comparator; } - /** - * Get the string distance metric in use. - */ + /** Get the string distance metric in use. */ public StringDistance getDistance() { return distance; } /** - * Set the string distance metric. - * The default is {@link #INTERNAL_LEVENSHTEIN} - *
<p>
    - * Note: because this spellchecker draws its candidates from the term - * dictionary using Damerau-Levenshtein, it works best with an edit-distance-like - * string metric. If you use a different metric than the default, - * you might want to consider increasing {@link #setMaxInspections(int)} - * to draw more candidates for your metric to rank. + * Set the string distance metric. The default is {@link #INTERNAL_LEVENSHTEIN} + * + *
<p>
    Note: because this spellchecker draws its candidates from the term dictionary using + * Damerau-Levenshtein, it works best with an edit-distance-like string metric. If you use a + * different metric than the default, you might want to consider increasing {@link + * #setMaxInspections(int)} to draw more candidates for your metric to rank. */ public void setDistance(StringDistance distance) { this.distance = distance; } /** - * Calls {@link #suggestSimilar(Term, int, IndexReader, SuggestMode) - * suggestSimilar(term, numSug, ir, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX)} + * Calls {@link #suggestSimilar(Term, int, IndexReader, SuggestMode) suggestSimilar(term, numSug, + * ir, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX)} */ - public SuggestWord[] suggestSimilar(Term term, int numSug, IndexReader ir) - throws IOException { + public SuggestWord[] suggestSimilar(Term term, int numSug, IndexReader ir) throws IOException { return suggestSimilar(term, numSug, ir, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); } - + /** - * Calls {@link #suggestSimilar(Term, int, IndexReader, SuggestMode, float) - * suggestSimilar(term, numSug, ir, suggestMode, this.accuracy)} - * + * Calls {@link #suggestSimilar(Term, int, IndexReader, SuggestMode, float) suggestSimilar(term, + * numSug, ir, suggestMode, this.accuracy)} */ - public SuggestWord[] suggestSimilar(Term term, int numSug, IndexReader ir, - SuggestMode suggestMode) throws IOException { + public SuggestWord[] suggestSimilar( + Term term, int numSug, IndexReader ir, SuggestMode suggestMode) throws IOException { return suggestSimilar(term, numSug, ir, suggestMode, this.accuracy); } - + /** * Suggest similar words. - * - *
<p>
    Unlike {@link SpellChecker}, the similarity used to fetch the most - * relevant terms is an edit distance, therefore typically a low value - * for numSug will work very well. - * + * + *
<p>
    Unlike {@link SpellChecker}, the similarity used to fetch the most relevant terms is an edit + * distance, therefore typically a low value for numSug will work very well. + * * @param term Term you want to spell check on * @param numSug the maximum number of suggested words * @param ir IndexReader to find terms from @@ -330,44 +308,46 @@ public class DirectSpellChecker { * @return sorted list of the suggested words according to the comparator * @throws IOException If there is a low-level I/O error. */ - public SuggestWord[] suggestSimilar(Term term, int numSug, IndexReader ir, - SuggestMode suggestMode, float accuracy) throws IOException { + public SuggestWord[] suggestSimilar( + Term term, int numSug, IndexReader ir, SuggestMode suggestMode, float accuracy) + throws IOException { final CharsRefBuilder spare = new CharsRefBuilder(); String text = term.text(); int textLength = text.codePointCount(0, text.length()); - if (textLength < minQueryLength || textLength > maxQueryLength) + if (textLength < minQueryLength || textLength > maxQueryLength) { return new SuggestWord[0]; - + } + if (lowerCaseTerms) { term = new Term(term.field(), text.toLowerCase(Locale.ROOT)); } - + int docfreq = ir.docFreq(term); - - if (suggestMode==SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX && docfreq > 0) { + + if (suggestMode == SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX && docfreq > 0) { return new SuggestWord[0]; } - + int maxDoc = ir.maxDoc(); - + if (maxQueryFrequency >= 1f && docfreq > maxQueryFrequency) { return new SuggestWord[0]; - } else if (docfreq > (int) Math.ceil(maxQueryFrequency * (float)maxDoc)) { + } else if (docfreq > (int) Math.ceil(maxQueryFrequency * (float) maxDoc)) { return new SuggestWord[0]; } - - if (suggestMode!=SuggestMode.SUGGEST_MORE_POPULAR) docfreq = 0; - + + if (suggestMode != SuggestMode.SUGGEST_MORE_POPULAR) docfreq = 0; + if (thresholdFrequency >= 1f) { docfreq = Math.max(docfreq, (int) thresholdFrequency); } else if (thresholdFrequency > 0f) { - docfreq = Math.max(docfreq, (int)(thresholdFrequency * (float)maxDoc)-1); + docfreq = Math.max(docfreq, (int) (thresholdFrequency * (float) maxDoc) - 1); } - + Collection terms = null; int inspections = numSug * maxInspections; - + // try ed=1 first, in case we get lucky terms = suggestSimilar(term, inspections, ir, docfreq, 1, accuracy, spare); if (maxEdits > 1 && terms.size() < inspections) { @@ -376,9 +356,9 @@ public class DirectSpellChecker { moreTerms.addAll(suggestSimilar(term, inspections, ir, docfreq, maxEdits, accuracy, spare)); terms = moreTerms; } - + // create the suggestword response, sort it, and trim it to size. 
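For orientation, here is a minimal usage sketch of the DirectSpellChecker API reformatted above. The index path and the "contents" field are hypothetical placeholders, and the setter values simply restate defaults and suggestions from the javadoc:

```java
import java.nio.file.Paths;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.spell.DirectSpellChecker;
import org.apache.lucene.search.spell.SuggestMode;
import org.apache.lucene.search.spell.SuggestWord;
import org.apache.lucene.store.FSDirectory;

public class DirectSpellCheckerDemo {
  public static void main(String[] args) throws Exception {
    // Hypothetical index location and field name -- adjust to your own index.
    try (IndexReader reader =
        DirectoryReader.open(FSDirectory.open(Paths.get("/path/to/index")))) {
      DirectSpellChecker checker = new DirectSpellChecker();
      checker.setMaxEdits(2); // candidates within Damerau-Levenshtein distance 2 (the default)
      checker.setAccuracy(0.5f); // minimum similarity a suggestion must reach
      checker.setThresholdFrequency(1f); // absolute df >= 1, as the javadoc above suggests
      SuggestWord[] suggestions =
          checker.suggestSimilar(
              new Term("contents", "lucense"), 5, reader, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX);
      for (SuggestWord suggestion : suggestions) {
        System.out.println(suggestion.string + " score=" + suggestion.score + " df=" + suggestion.freq);
      }
    }
  }
}
```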
- + SuggestWord suggestions[] = new SuggestWord[terms.size()]; int index = suggestions.length - 1; for (ScoreTerm s : terms) { @@ -392,7 +372,7 @@ public class DirectSpellChecker { suggestion.freq = s.docfreq; suggestions[index--] = suggestion; } - + ArrayUtil.timSort(suggestions, Collections.reverseOrder(comparator)); if (numSug < suggestions.length) { SuggestWord trimmed[] = new SuggestWord[numSug]; @@ -408,23 +388,33 @@ public class DirectSpellChecker { * @param term The term to suggest spelling corrections for * @param numSug The maximum number of spelling corrections * @param ir The index reader to fetch the candidate spelling corrections from - * @param docfreq The minimum document frequency a potential suggestion need to have in order to be included + * @param docfreq The minimum document frequency a potential suggestion need to have in order to + * be included * @param editDistance The maximum edit distance candidates are allowed to have - * @param accuracy The minimum accuracy a suggested spelling correction needs to have in order to be included + * @param accuracy The minimum accuracy a suggested spelling correction needs to have in order to + * be included * @param spare a chars scratch * @return a collection of spelling corrections sorted by ScoreTerm's natural order. * @throws IOException If I/O related errors occur */ - protected Collection suggestSimilar(Term term, int numSug, IndexReader ir, int docfreq, int editDistance, - float accuracy, final CharsRefBuilder spare) throws IOException { + protected Collection suggestSimilar( + Term term, + int numSug, + IndexReader ir, + int docfreq, + int editDistance, + float accuracy, + final CharsRefBuilder spare) + throws IOException { Terms terms = MultiTerms.getTerms(ir, term.field()); if (terms == null) { return Collections.emptyList(); } - FuzzyTermsEnum e = new FuzzyTermsEnum(terms, term, editDistance, Math.max(minPrefix, editDistance - 1), true); + FuzzyTermsEnum e = + new FuzzyTermsEnum(terms, term, editDistance, Math.max(minPrefix, editDistance - 1), true); final PriorityQueue stQueue = new PriorityQueue<>(); - + BytesRef queryTerm = new BytesRef(term.text()); BytesRef candidateTerm; ScoreTerm st = new ScoreTerm(); @@ -435,19 +425,19 @@ public class DirectSpellChecker { if (stQueue.size() >= numSug && score <= stQueue.peek().boost) { continue; } - + // ignore exact match of the same term if (queryTerm.bytesEquals(candidateTerm)) { continue; } - + int df = e.docFreq(); - + // check docFreq if required if (df <= docfreq) { continue; } - + final String termAsString; if (distance == INTERNAL_LEVENSHTEIN) { // delay creating strings until the end @@ -457,11 +447,11 @@ public class DirectSpellChecker { termAsString = spare.toString(); score = distance.getDistance(term.text(), termAsString); } - + if (score < accuracy) { continue; } - + // add new entry in PQ st.term = BytesRef.deepCopyOf(candidateTerm); st.boost = score; @@ -471,56 +461,44 @@ public class DirectSpellChecker { stQueue.offer(st); // possibly drop entries from queue st = (stQueue.size() > numSug) ? stQueue.poll() : new ScoreTerm(); - e.setMaxNonCompetitiveBoost((stQueue.size() >= numSug) ? stQueue.peek().boost : Float.NEGATIVE_INFINITY); + e.setMaxNonCompetitiveBoost( + (stQueue.size() >= numSug) ? stQueue.peek().boost : Float.NEGATIVE_INFINITY); } - + return stQueue; } - /** - * Holds a spelling correction for internal usage inside {@link DirectSpellChecker}. - */ + /** Holds a spelling correction for internal usage inside {@link DirectSpellChecker}. 
*/ protected static class ScoreTerm implements Comparable { - /** - * The actual spellcheck correction. - */ + /** The actual spellcheck correction. */ public BytesRef term; - /** - * The boost representing the similarity from the FuzzyTermsEnum (internal similarity score) - */ + /** The boost representing the similarity from the FuzzyTermsEnum (internal similarity score) */ public float boost; - /** - * The df of the spellcheck correction. - */ + /** The df of the spellcheck correction. */ public int docfreq; - /** - * The spellcheck correction represented as string, can be null. - */ + /** The spellcheck correction represented as string, can be null. */ public String termAsString; - /** - * The similarity score. - */ + /** The similarity score. */ public float score; - /** - * Constructor. - */ - public ScoreTerm() { - } + /** Constructor. */ + public ScoreTerm() {} @Override public int compareTo(ScoreTerm other) { - if (term.bytesEquals(other.term)) + if (term.bytesEquals(other.term)) { return 0; // consistent with equals - if (this.boost == other.boost) + } + if (this.boost == other.boost) { return other.term.compareTo(this.term); - else + } else { return Float.compare(this.boost, other.boost); + } } @Override diff --git a/lucene/suggest/src/java/org/apache/lucene/search/spell/HighFrequencyDictionary.java b/lucene/suggest/src/java/org/apache/lucene/search/spell/HighFrequencyDictionary.java index 3be681632ec..7e491ff924a 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/spell/HighFrequencyDictionary.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/spell/HighFrequencyDictionary.java @@ -18,24 +18,22 @@ package org.apache.lucene.search.spell; import java.io.IOException; import java.util.Set; - import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.MultiTerms; -import org.apache.lucene.index.TermsEnum; import org.apache.lucene.index.Terms; +import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.suggest.InputIterator; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; /** - * HighFrequencyDictionary: terms taken from the given field - * of a Lucene index, which appear in a number of documents - * above a given threshold. + * HighFrequencyDictionary: terms taken from the given field of a Lucene index, which appear in a + * number of documents above a given threshold. * - * Threshold is a value in [0..1] representing the minimum - * number of documents (of the total) where a term should appear. - * - * Based on LuceneDictionary. + *
<p>
    Threshold is a value in [0..1] representing the minimum number of documents (of the total) + * where a term should appear. + * + *
<p>
    Based on LuceneDictionary. */ public class HighFrequencyDictionary implements Dictionary { private IndexReader reader; @@ -43,11 +41,10 @@ public class HighFrequencyDictionary implements Dictionary { private float thresh; /** - * Creates a new Dictionary, pulling source terms from - * the specified field in the provided reader. - *
<p>
    - * Terms appearing in less than thresh percentage of documents - * will be excluded. + * Creates a new Dictionary, pulling source terms from the specified field in the + * provided reader. + * + *
<p>
    Terms appearing in less than thresh percentage of documents will be excluded. */ public HighFrequencyDictionary(IndexReader reader, String field, float thresh) { this.reader = reader; @@ -73,13 +70,13 @@ public class HighFrequencyDictionary implements Dictionary { } else { termsEnum = null; } - minNumDocs = (int)(thresh * (float)reader.numDocs()); + minNumDocs = (int) (thresh * (float) reader.numDocs()); } private boolean isFrequent(int freq) { return freq >= minNumDocs; } - + @Override public long weight() { return freq; @@ -89,7 +86,7 @@ public class HighFrequencyDictionary implements Dictionary { public BytesRef next() throws IOException { if (termsEnum != null) { BytesRef next; - while((next = termsEnum.next()) != null) { + while ((next = termsEnum.next()) != null) { if (isFrequent(termsEnum.docFreq())) { freq = termsEnum.docFreq(); spare.copyBytes(next); @@ -97,7 +94,7 @@ public class HighFrequencyDictionary implements Dictionary { } } } - return null; + return null; } @Override diff --git a/lucene/suggest/src/java/org/apache/lucene/search/spell/JaroWinklerDistance.java b/lucene/suggest/src/java/org/apache/lucene/search/spell/JaroWinklerDistance.java index 7f5d1a245f8..ad4a297dab7 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/spell/JaroWinklerDistance.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/spell/JaroWinklerDistance.java @@ -20,16 +20,19 @@ import java.util.Arrays; /** * Similarity measure for short strings such as person names. + * *
<p>
    - * @see http://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance + * + * @see http://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance */ public class JaroWinklerDistance implements StringDistance { private float threshold = 0.7f; - + /** - * Creates a new distance metric with the default threshold - * for the Jaro Winkler bonus (0.7) + * Creates a new distance metric with the default threshold for the Jaro Winkler bonus (0.7) + * * @see #setThreshold(float) */ public JaroWinklerDistance() {} @@ -50,8 +53,9 @@ public class JaroWinklerDistance implements StringDistance { int matches = 0; for (int mi = 0; mi < min.length(); mi++) { char c1 = min.charAt(mi); - for (int xi = Math.max(mi - range, 0), xn = Math.min(mi + range + 1, max - .length()); xi < xn; xi++) { + for (int xi = Math.max(mi - range, 0), xn = Math.min(mi + range + 1, max.length()); + xi < xn; + xi++) { if (!matchFlags[xi] && c1 == max.charAt(xi)) { matchIndexes[mi] = xi; matchFlags[xi] = true; @@ -88,7 +92,7 @@ public class JaroWinklerDistance implements StringDistance { break; } } - return new int[] { matches, transpositions / 2, prefix, max.length() }; + return new int[] {matches, transpositions / 2, prefix, max.length()}; } @Override @@ -99,14 +103,14 @@ public class JaroWinklerDistance implements StringDistance { return 0f; } float j = ((m / s1.length() + m / s2.length() + (m - mtp[1]) / m)) / 3; - float jw = j < getThreshold() ? j : j + Math.min(0.1f, 1f / mtp[3]) * mtp[2] - * (1 - j); + float jw = j < getThreshold() ? j : j + Math.min(0.1f, 1f / mtp[3]) * mtp[2] * (1 - j); return jw; } /** - * Sets the threshold used to determine when Winkler bonus should be used. - * Set to a negative value to get the Jaro distance. + * Sets the threshold used to determine when Winkler bonus should be used. Set to a negative value + * to get the Jaro distance. + * * @param threshold the new value of the threshold */ public void setThreshold(float threshold) { @@ -114,8 +118,9 @@ public class JaroWinklerDistance implements StringDistance { } /** - * Returns the current value of the threshold used for adding the Winkler bonus. - * The default value is 0.7. + * Returns the current value of the threshold used for adding the Winkler bonus. The default value + * is 0.7. + * * @return the current value of the threshold */ public float getThreshold() { @@ -131,15 +136,13 @@ public class JaroWinklerDistance implements StringDistance { public boolean equals(Object obj) { if (this == obj) return true; if (null == obj || getClass() != obj.getClass()) return false; - - JaroWinklerDistance o = (JaroWinklerDistance)obj; - return (Float.floatToIntBits(o.threshold) - == Float.floatToIntBits(this.threshold)); + + JaroWinklerDistance o = (JaroWinklerDistance) obj; + return (Float.floatToIntBits(o.threshold) == Float.floatToIntBits(this.threshold)); } @Override public String toString() { return "jarowinkler(" + threshold + ")"; } - } diff --git a/lucene/suggest/src/java/org/apache/lucene/search/spell/LevenshteinDistance.java b/lucene/suggest/src/java/org/apache/lucene/search/spell/LevenshteinDistance.java index c42aeaf7bec..94e63882acb 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/spell/LevenshteinDistance.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/spell/LevenshteinDistance.java @@ -16,96 +16,91 @@ */ package org.apache.lucene.search.spell; -/** - * Levenshtein edit distance class. - */ +/** Levenshtein edit distance class. 
*/ public final class LevenshteinDistance implements StringDistance { - /** - * Optimized to run a bit faster than the static getDistance(). - * In one benchmark times were 5.3sec using ctr vs 8.5sec w/ static method, thus 37% faster. - */ - public LevenshteinDistance () { + /** + * Optimized to run a bit faster than the static getDistance(). In one benchmark times were 5.3sec + * using ctr vs 8.5sec w/ static method, thus 37% faster. + */ + public LevenshteinDistance() {} + + // ***************************** + // Compute Levenshtein distance: see + // org.apache.commons.lang.StringUtils#getLevenshteinDistance(String, String) + // ***************************** + @Override + public float getDistance(String target, String other) { + char[] sa; + int n; + int p[]; // 'previous' cost array, horizontally + int d[]; // cost array, horizontally + int _d[]; // placeholder to assist in swapping p and d + + /* + The difference between this impl. and the previous is that, rather + than creating and retaining a matrix of size s.length()+1 by t.length()+1, + we maintain two single-dimensional arrays of length s.length()+1. The first, d, + is the 'current working' distance array that maintains the newest distance cost + counts as we iterate through the characters of String s. Each time we increment + the index of String t we are comparing, d is copied to p, the second int[]. Doing so + allows us to retain the previous cost counts as required by the algorithm (taking + the minimum of the cost count to the left, up one, and diagonally up and to the left + of the current cost count being calculated). (Note that the arrays aren't really + copied anymore, just switched...this is clearly much better than cloning an array + or doing a System.arraycopy() each time through the outer loop.) + + Effectively, the difference between the two implementations is this one does not + cause an out of memory condition when calculating the LD over two very large strings. + */ + + sa = target.toCharArray(); + n = sa.length; + p = new int[n + 1]; + d = new int[n + 1]; + + final int m = other.length(); + if (n == 0 || m == 0) { + if (n == m) { + return 1; + } else { + return 0; + } } + // indexes into strings s and t + int i; // iterates through s + int j; // iterates through t - //***************************** - // Compute Levenshtein distance: see org.apache.commons.lang.StringUtils#getLevenshteinDistance(String, String) - //***************************** - @Override - public float getDistance (String target, String other) { - char[] sa; - int n; - int p[]; //'previous' cost array, horizontally - int d[]; // cost array, horizontally - int _d[]; //placeholder to assist in swapping p and d - - /* - The difference between this impl. and the previous is that, rather - than creating and retaining a matrix of size s.length()+1 by t.length()+1, - we maintain two single-dimensional arrays of length s.length()+1. The first, d, - is the 'current working' distance array that maintains the newest distance cost - counts as we iterate through the characters of String s. Each time we increment - the index of String t we are comparing, d is copied to p, the second int[]. Doing so - allows us to retain the previous cost counts as required by the algorithm (taking - the minimum of the cost count to the left, up one, and diagonally up and to the left - of the current cost count being calculated). 
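The block comment that follows describes the rolling two-array optimization; a compact stand-alone sketch of that technique (returning the raw edit distance, without the 0..1 scaling this class applies) could look like this:

```java
// Two-row Levenshtein: keeps only the previous and current cost rows,
// O(n) extra space instead of an (n+1) x (m+1) matrix.
static int editDistance(String s, String t) {
  int n = s.length();
  int m = t.length();
  int[] prev = new int[n + 1]; // cost row for the previous character of t
  int[] cur = new int[n + 1]; // cost row currently being filled in
  for (int i = 0; i <= n; i++) {
    prev[i] = i;
  }
  for (int j = 1; j <= m; j++) {
    cur[0] = j;
    for (int i = 1; i <= n; i++) {
      int cost = s.charAt(i - 1) == t.charAt(j - 1) ? 0 : 1;
      // minimum of cell to the left + 1, above + 1, diagonal + cost
      cur[i] = Math.min(Math.min(cur[i - 1] + 1, prev[i] + 1), prev[i - 1] + cost);
    }
    int[] tmp = prev; // swap the rows instead of copying them
    prev = cur;
    cur = tmp;
  }
  return prev[n];
}
```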
(Note that the arrays aren't really - copied anymore, just switched...this is clearly much better than cloning an array - or doing a System.arraycopy() each time through the outer loop.) + char t_j; // jth character of t - Effectively, the difference between the two implementations is this one does not - cause an out of memory condition when calculating the LD over two very large strings. - */ + int cost; // cost - sa = target.toCharArray(); - n = sa.length; - p = new int[n+1]; - d = new int[n+1]; - - final int m = other.length(); - if (n == 0 || m == 0) { - if (n == m) { - return 1; - } - else { - return 0; - } - } - - - // indexes into strings s and t - int i; // iterates through s - int j; // iterates through t - - char t_j; // jth character of t - - int cost; // cost - - for (i = 0; i<=n; i++) { - p[i] = i; - } - - for (j = 1; j<=m; j++) { - t_j = other.charAt(j-1); - d[0] = j; - - for (i=1; i<=n; i++) { - cost = sa[i-1]==t_j ? 0 : 1; - // minimum of cell to the left+1, to the top+1, diagonally left and up +cost - d[i] = Math.min(Math.min(d[i-1]+1, p[i]+1), p[i-1]+cost); - } - - // copy current distance counts to 'previous row' distance counts - _d = p; - p = d; - d = _d; - } - - // our last action in the above loop was to switch d and p, so p now - // actually has the most recent cost counts - return 1.0f - ((float) p[n] / Math.max(other.length(), sa.length)); + for (i = 0; i <= n; i++) { + p[i] = i; } + for (j = 1; j <= m; j++) { + t_j = other.charAt(j - 1); + d[0] = j; + + for (i = 1; i <= n; i++) { + cost = sa[i - 1] == t_j ? 0 : 1; + // minimum of cell to the left+1, to the top+1, diagonally left and up +cost + d[i] = Math.min(Math.min(d[i - 1] + 1, p[i] + 1), p[i - 1] + cost); + } + + // copy current distance counts to 'previous row' distance counts + _d = p; + p = d; + d = _d; + } + + // our last action in the above loop was to switch d and p, so p now + // actually has the most recent cost counts + return 1.0f - ((float) p[n] / Math.max(other.length(), sa.length)); + } + @Override public int hashCode() { return 163 * getClass().hashCode(); diff --git a/lucene/suggest/src/java/org/apache/lucene/search/spell/LuceneDictionary.java b/lucene/suggest/src/java/org/apache/lucene/search/spell/LuceneDictionary.java index 6da863d5a4d..8e158610382 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/spell/LuceneDictionary.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/spell/LuceneDictionary.java @@ -16,24 +16,20 @@ */ package org.apache.lucene.search.spell; +import java.io.*; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.MultiTerms; -import org.apache.lucene.search.suggest.InputIterator; import org.apache.lucene.index.Terms; +import org.apache.lucene.search.suggest.InputIterator; -import java.io.*; - -/** - * Lucene Dictionary: terms taken from the given field - * of a Lucene index. - */ +/** Lucene Dictionary: terms taken from the given field of a Lucene index. 
*/ public class LuceneDictionary implements Dictionary { private IndexReader reader; private String field; /** - * Creates a new Dictionary, pulling source terms from - * the specified field in the provided reader + * Creates a new Dictionary, pulling source terms from the specified field in the + * provided reader */ public LuceneDictionary(IndexReader reader, String field) { this.reader = reader; diff --git a/lucene/suggest/src/java/org/apache/lucene/search/spell/LuceneLevenshteinDistance.java b/lucene/suggest/src/java/org/apache/lucene/search/spell/LuceneLevenshteinDistance.java index 16ff24dfb01..bd77de71875 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/spell/LuceneLevenshteinDistance.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/spell/LuceneLevenshteinDistance.java @@ -19,29 +19,25 @@ package org.apache.lucene.search.spell; import org.apache.lucene.util.IntsRef; /** - * Damerau-Levenshtein (optimal string alignment) implemented in a consistent - * way as Lucene's FuzzyTermsEnum with the transpositions option enabled. - * - * Notes: - *
<ul>
 - *   <li> This metric treats full unicode codepoints as characters
 - *   <li> This metric scales raw edit distances into a floating point score
 - *        based upon the shortest of the two terms
 - *   <li> Transpositions of two adjacent codepoints are treated as primitive
 - *        edits.
 - *   <li> Edits are applied in parallel: for example, "ab" and "bca" have
 - *        distance 3.
 - * </ul>
    - * - * NOTE: this class is not particularly efficient. It is only intended - * for merging results from multiple DirectSpellCheckers. + * Damerau-Levenshtein (optimal string alignment) implemented in a consistent way as Lucene's + * FuzzyTermsEnum with the transpositions option enabled. + * + *
<p>Notes:
 + *
 + * <ul>
 + *   <li>This metric treats full unicode codepoints as characters
 + *   <li>This metric scales raw edit distances into a floating point score based upon the shortest
 + *       of the two terms
 + *   <li>Transpositions of two adjacent codepoints are treated as primitive edits.
 + *   <li>Edits are applied in parallel: for example, "ab" and "bca" have distance 3.
 + * </ul>
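The properties listed above are easy to sanity-check with a small optimal-string-alignment sketch; in particular, because edits apply in parallel, "ab" vs "bca" comes out as distance 3:

```java
// Optimal string alignment (Damerau-Levenshtein without substring moves).
static int osaDistance(String a, String b) {
  int n = a.length();
  int m = b.length();
  int[][] d = new int[n + 1][m + 1];
  for (int i = 0; i <= n; i++) d[i][0] = i;
  for (int j = 0; j <= m; j++) d[0][j] = j;
  for (int i = 1; i <= n; i++) {
    for (int j = 1; j <= m; j++) {
      int cost = a.charAt(i - 1) == b.charAt(j - 1) ? 0 : 1;
      d[i][j] = Math.min(Math.min(d[i - 1][j] + 1, d[i][j - 1] + 1), d[i - 1][j - 1] + cost);
      if (i > 1 && j > 1
          && a.charAt(i - 1) == b.charAt(j - 2)
          && a.charAt(i - 2) == b.charAt(j - 1)) {
        d[i][j] = Math.min(d[i][j], d[i - 2][j - 2] + cost); // adjacent transposition
      }
    }
  }
  return d[n][m]; // osaDistance("ab", "bca") == 3
}
```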
    + * + * NOTE: this class is not particularly efficient. It is only intended for merging results from + * multiple DirectSpellCheckers. */ public final class LuceneLevenshteinDistance implements StringDistance { - /** - * Creates a new comparator, mimicing the behavior of Lucene's internal - * edit distance. - */ + /** Creates a new comparator, mimicing the behavior of Lucene's internal edit distance. */ public LuceneLevenshteinDistance() {} @Override @@ -51,10 +47,10 @@ public final class LuceneLevenshteinDistance implements StringDistance { int n; int d[][]; // cost array - // NOTE: if we cared, we could 3*m space instead of m*n space, similar to - // what LevenshteinDistance does, except cycling thru a ring of three - // horizontal cost arrays... but this comparator is never actually used by - // DirectSpellChecker, it's only used for merging results from multiple shards + // NOTE: if we cared, we could 3*m space instead of m*n space, similar to + // what LevenshteinDistance does, except cycling thru a ring of three + // horizontal cost arrays... but this comparator is never actually used by + // DirectSpellChecker, it's only used for merging results from multiple shards // in "distributed spellcheck", and it's inefficient in other ways too... // cheaper to do this up front once @@ -62,13 +58,12 @@ public final class LuceneLevenshteinDistance implements StringDistance { otherPoints = toIntsRef(other); n = targetPoints.length; final int m = otherPoints.length; - d = new int[n+1][m+1]; + d = new int[n + 1][m + 1]; if (n == 0 || m == 0) { if (n == m) { return 0; - } - else { + } else { return Math.max(n, m); } } @@ -81,24 +76,27 @@ public final class LuceneLevenshteinDistance implements StringDistance { int cost; // cost - for (i = 0; i<=n; i++) { + for (i = 0; i <= n; i++) { d[i][0] = i; } - for (j = 0; j<=m; j++) { + for (j = 0; j <= m; j++) { d[0][j] = j; } - for (j = 1; j<=m; j++) { - t_j = otherPoints.ints[j-1]; + for (j = 1; j <= m; j++) { + t_j = otherPoints.ints[j - 1]; - for (i=1; i<=n; i++) { - cost = targetPoints.ints[i-1]==t_j ? 0 : 1; + for (i = 1; i <= n; i++) { + cost = targetPoints.ints[i - 1] == t_j ? 0 : 1; // minimum of cell to the left+1, to the top+1, diagonally left and up +cost - d[i][j] = Math.min(Math.min(d[i-1][j]+1, d[i][j-1]+1), d[i-1][j-1]+cost); + d[i][j] = Math.min(Math.min(d[i - 1][j] + 1, d[i][j - 1] + 1), d[i - 1][j - 1] + cost); // transposition - if (i > 1 && j > 1 && targetPoints.ints[i-1] == otherPoints.ints[j-2] && targetPoints.ints[i-2] == otherPoints.ints[j-1]) { - d[i][j] = Math.min(d[i][j], d[i-2][j-2] + cost); + if (i > 1 + && j > 1 + && targetPoints.ints[i - 1] == otherPoints.ints[j - 2] + && targetPoints.ints[i - 2] == otherPoints.ints[j - 1]) { + d[i][j] = Math.min(d[i][j], d[i - 2][j - 2] + cost); } } } @@ -127,5 +125,4 @@ public final class LuceneLevenshteinDistance implements StringDistance { // constant hashCode since all instances of this class are equal() return 6; } - } diff --git a/lucene/suggest/src/java/org/apache/lucene/search/spell/NGramDistance.java b/lucene/suggest/src/java/org/apache/lucene/search/spell/NGramDistance.java index 3625570f137..98559c47984 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/spell/NGramDistance.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/spell/NGramDistance.java @@ -17,123 +17,115 @@ package org.apache.lucene.search.spell; /** - * N-Gram version of edit distance based on paper by Grzegorz Kondrak, - * "N-gram similarity and distance". 
Proceedings of the Twelfth International - * Conference on String Processing and Information Retrieval (SPIRE 2005), pp. 115-126, - * Buenos Aires, Argentina, November 2005. + * N-Gram version of edit distance based on paper by Grzegorz Kondrak, "N-gram similarity and + * distance". Proceedings of the Twelfth International Conference on String Processing and + * Information Retrieval (SPIRE 2005), pp. 115-126, Buenos Aires, Argentina, November 2005. * http://www.cs.ualberta.ca/~kondrak/papers/spire05.pdf - * - * This implementation uses the position-based optimization to compute partial - * matches of n-gram sub-strings and adds a null-character prefix of size n-1 - * so that the first character is contained in the same number of n-grams as - * a middle character. Null-character prefix matches are discounted so that - * strings with no matching characters will return a distance of 0. - * + * + *
<p>
This implementation uses the position-based optimization to compute partial matches of n-gram + * sub-strings and adds a null-character prefix of size n-1 so that the first character is contained + * in the same number of n-grams as a middle character. Null-character prefix matches are discounted + * so that strings with no matching characters will return a distance of 0. */ public class NGramDistance implements StringDistance { private int n; - + /** * Creates an N-Gram distance measure using n-grams of the specified size. + * * @param size The size of the n-gram to be used to compute the string distance. */ public NGramDistance(int size) { this.n = size; } - - /** - * Creates an N-Gram distance measure using n-grams of size 2. - */ + + /** Creates an N-Gram distance measure using n-grams of size 2. */ public NGramDistance() { this(2); } - + @Override public float getDistance(String source, String target) { final int sl = source.length(); final int tl = target.length(); - + if (sl == 0 || tl == 0) { if (sl == tl) { return 1; - } - else { + } else { return 0; } } int cost = 0; if (sl < n || tl < n) { - for (int i=0,ni=Math.min(sl,tl);i<ni;i++) {
Format allowed: 1 word per line:
    * word1
    * word2
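A minimal sketch of consuming a dictionary in the one-word-per-line format described above; the word list is a placeholder:

```java
import java.io.StringReader;
import org.apache.lucene.search.spell.PlainTextDictionary;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefIterator;

public class PlainTextDictionaryDemo {
  public static void main(String[] args) throws Exception {
    // One word per line, exactly as the javadoc above describes.
    PlainTextDictionary dict =
        new PlainTextDictionary(new StringReader("word1\nword2\nword3\n"));
    BytesRefIterator it = dict.getEntryIterator();
    for (BytesRef word = it.next(); word != null; word = it.next()) {
      System.out.println(word.utf8ToString());
    }
  }
}
```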
    @@ -45,8 +43,8 @@ public class PlainTextDictionary implements Dictionary { /** * Creates a dictionary based on a Path. - *
<p>
    - * NOTE: content is treated as UTF-8 + * + *
<p>
    NOTE: content is treated as UTF-8 */ public PlainTextDictionary(Path path) throws IOException { in = Files.newBufferedReader(path, StandardCharsets.UTF_8); @@ -54,16 +52,14 @@ public class PlainTextDictionary implements Dictionary { /** * Creates a dictionary based on an inputstream. - *
<p>
    - * NOTE: content is treated as UTF-8 + * + *
<p>
    NOTE: content is treated as UTF-8 */ public PlainTextDictionary(InputStream dictFile) { in = new BufferedReader(IOUtils.getDecodingReader(dictFile, StandardCharsets.UTF_8)); } - /** - * Creates a dictionary based on a reader. - */ + /** Creates a dictionary based on a reader. */ public PlainTextDictionary(Reader reader) { in = new BufferedReader(reader); } @@ -76,6 +72,7 @@ public class PlainTextDictionary implements Dictionary { final class FileIterator implements BytesRefIterator { private boolean done = false; private final BytesRefBuilder spare = new BytesRefBuilder(); + @Override public BytesRef next() throws IOException { if (done) { diff --git a/lucene/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java b/lucene/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java index a3b103d6082..1aee0ab8174 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Comparator; import java.util.List; - import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; @@ -29,8 +28,8 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.Term; import org.apache.lucene.index.Terms; @@ -48,13 +47,11 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; /** - *
<p>
- * Spell Checker class (Main class).<br>
    - * (initially inspired by the David Spencer code). - *
</p>
+ * Spell Checker class (Main class).<br>
    + * (initially inspired by the David Spencer code). * *
<p>
    Example Usage: - * + * *
<pre class="prettyprint">
      *  SpellChecker spellchecker = new SpellChecker(spellIndexDirectory);
      *  // To index a field of a user index:
    @@ -63,30 +60,20 @@ import org.apache.lucene.util.BytesRefIterator;
      *  spellchecker.indexDictionary(new PlainTextDictionary(new File("myfile.txt")));
      *  String[] suggestions = spellchecker.suggestSimilar("misspelt", 5);
* </pre>
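A slightly fuller, self-contained variant of the example above; the in-memory directory and inline word list are assumptions made for the sketch:

```java
import java.io.StringReader;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.search.spell.PlainTextDictionary;
import org.apache.lucene.search.spell.SpellChecker;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

public class SpellCheckerDemo {
  public static void main(String[] args) throws Exception {
    Directory spellIndexDirectory = new ByteBuffersDirectory(); // in-memory, for the sketch only
    try (SpellChecker spellchecker = new SpellChecker(spellIndexDirectory)) {
      // Index a tiny inline dictionary; real code would use a file or an index field.
      spellchecker.indexDictionary(
          new PlainTextDictionary(new StringReader("lucene\nsearch\nspell\n")),
          new IndexWriterConfig(null),
          true); // fullMerge
      for (String suggestion : spellchecker.suggestSimilar("lucens", 5)) {
        System.out.println(suggestion);
      }
    }
  }
}
```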
    - * - * */ public class SpellChecker implements java.io.Closeable { - /** - * The default minimum score to use, if not specified by calling {@link #setAccuracy(float)} . - */ + /** The default minimum score to use, if not specified by calling {@link #setAccuracy(float)} . */ public static final float DEFAULT_ACCURACY = 0.5f; - /** - * Field name for each word in the ngram index. - */ + /** Field name for each word in the ngram index. */ public static final String F_WORD = "word"; - /** - * the spell index - */ + /** the spell index */ // don't modify the directory directly - see #swapSearcher() // TODO: why is this package private? Directory spellIndex; - /** - * Boost value for start and end grams - */ + /** Boost value for start and end grams */ private float bStart = 2.0f; private float bEnd = 1.0f; @@ -115,61 +102,61 @@ public class SpellChecker implements java.io.Closeable { private Comparator comparator; /** - * Use the given directory as a spell checker index. The directory - * is created if it doesn't exist yet. + * Use the given directory as a spell checker index. The directory is created if it doesn't exist + * yet. + * * @param spellIndex the spell index directory - * @param sd the {@link StringDistance} measurement to use + * @param sd the {@link StringDistance} measurement to use * @throws IOException if Spellchecker can not open the directory */ public SpellChecker(Directory spellIndex, StringDistance sd) throws IOException { this(spellIndex, sd, SuggestWordQueue.DEFAULT_COMPARATOR); } /** - * Use the given directory as a spell checker index with a - * {@link LevenshteinDistance} as the default {@link StringDistance}. The - * directory is created if it doesn't exist yet. - * - * @param spellIndex - * the spell index directory - * @throws IOException - * if spellchecker can not open the directory + * Use the given directory as a spell checker index with a {@link LevenshteinDistance} as the + * default {@link StringDistance}. The directory is created if it doesn't exist yet. + * + * @param spellIndex the spell index directory + * @throws IOException if spellchecker can not open the directory */ public SpellChecker(Directory spellIndex) throws IOException { this(spellIndex, new LevenshteinDistance()); } /** - * Use the given directory as a spell checker index with the given {@link org.apache.lucene.search.spell.StringDistance} measure - * and the given {@link java.util.Comparator} for sorting the results. + * Use the given directory as a spell checker index with the given {@link + * org.apache.lucene.search.spell.StringDistance} measure and the given {@link + * java.util.Comparator} for sorting the results. + * * @param spellIndex The spelling index * @param sd The distance * @param comparator The comparator * @throws IOException if there is a problem opening the index */ - public SpellChecker(Directory spellIndex, StringDistance sd, Comparator comparator) throws IOException { + public SpellChecker(Directory spellIndex, StringDistance sd, Comparator comparator) + throws IOException { setSpellIndex(spellIndex); setStringDistance(sd); this.comparator = comparator; } - + /** - * Use a different index as the spell checker index or re-open - * the existing index if spellIndex is the same value - * as given in the constructor. + * Use a different index as the spell checker index or re-open the existing index if + * spellIndex is the same value as given in the constructor. 
+ * * @param spellIndexDir the spell directory to use * @throws AlreadyClosedException if the Spellchecker is already closed - * @throws IOException if spellchecker can not open the directory + * @throws IOException if spellchecker can not open the directory */ // TODO: we should make this final as it is called in the constructor public void setSpellIndex(Directory spellIndexDir) throws IOException { // this could be the same directory as the current spellIndex - // modifications to the directory should be synchronized + // modifications to the directory should be synchronized synchronized (modifyCurrentIndexLock) { ensureOpen(); if (!DirectoryReader.indexExists(spellIndexDir)) { - IndexWriter writer = new IndexWriter(spellIndexDir, - new IndexWriterConfig(null)); - writer.close(); + IndexWriter writer = new IndexWriter(spellIndexDir, new IndexWriterConfig(null)); + writer.close(); } swapSearcher(spellIndexDir); } @@ -177,6 +164,7 @@ public class SpellChecker implements java.io.Closeable { /** * Sets the {@link java.util.Comparator} for the {@link SuggestWordQueue}. + * * @param comparator the comparator */ public void setComparator(Comparator comparator) { @@ -185,6 +173,7 @@ public class SpellChecker implements java.io.Closeable { /** * Gets the comparator in use for ranking suggestions. + * * @see #setComparator(Comparator) */ public Comparator getComparator() { @@ -192,21 +181,17 @@ public class SpellChecker implements java.io.Closeable { } /** - * Sets the {@link StringDistance} implementation for this - * {@link SpellChecker} instance. - * - * @param sd the {@link StringDistance} implementation for this - * {@link SpellChecker} instance + * Sets the {@link StringDistance} implementation for this {@link SpellChecker} instance. + * + * @param sd the {@link StringDistance} implementation for this {@link SpellChecker} instance */ public void setStringDistance(StringDistance sd) { this.sd = sd; } /** - * Returns the {@link StringDistance} instance used by this - * {@link SpellChecker} instance. - * - * @return the {@link StringDistance} instance used by this - * {@link SpellChecker} instance. + * Returns the {@link StringDistance} instance used by this {@link SpellChecker} instance. + * + * @return the {@link StringDistance} instance used by this {@link SpellChecker} instance. */ public StringDistance getStringDistance() { return sd; @@ -214,6 +199,7 @@ public class SpellChecker implements java.io.Closeable { /** * Sets the accuracy 0 < minScore < 1; default {@link #DEFAULT_ACCURACY} + * * @param acc The new accuracy */ public void setAccuracy(float acc) { @@ -221,8 +207,10 @@ public class SpellChecker implements java.io.Closeable { } /** - * The accuracy (minimum score) to be used, unless overridden in {@link #suggestSimilar(String, int, IndexReader, String, SuggestMode, float)}, to - * decide whether a suggestion is included or not. + * The accuracy (minimum score) to be used, unless overridden in {@link #suggestSimilar(String, + * int, IndexReader, String, SuggestMode, float)}, to decide whether a suggestion is included or + * not. + * * @return The current accuracy setting */ public float getAccuracy() { @@ -231,22 +219,21 @@ public class SpellChecker implements java.io.Closeable { /** * Suggest similar words. - * - *
<p>
    As the Lucene similarity that is used to fetch the most relevant n-grammed terms - * is not the same as the edit distance strategy used to calculate the best - * matching spell-checked word from the hits that Lucene found, one usually has - * to retrieve a couple of numSug's in order to get the true best match. * - *
<p>
    I.e. if numSug == 1, don't count on that suggestion being the best one. - * Thus, you should set this value to at least 5 for a good suggestion. + *
<p>
    As the Lucene similarity that is used to fetch the most relevant n-grammed terms is not the + * same as the edit distance strategy used to calculate the best matching spell-checked word from + * the hits that Lucene found, one usually has to retrieve a couple of numSug's in order to get + * the true best match. + * + *
<p>
    I.e. if numSug == 1, don't count on that suggestion being the best one. Thus, you should set + * this value to at least 5 for a good suggestion. * * @param word the word you want a spell check done on * @param numSug the number of suggested words * @throws IOException if the underlying index throws an {@link IOException} * @throws AlreadyClosedException if the Spellchecker is already closed * @return String[] - * - * @see #suggestSimilar(String, int, IndexReader, String, SuggestMode, float) + * @see #suggestSimilar(String, int, IndexReader, String, SuggestMode, float) */ public String[] suggestSimilar(String word, int numSug) throws IOException { return this.suggestSimilar(word, numSug, null, null, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); @@ -255,65 +242,72 @@ public class SpellChecker implements java.io.Closeable { /** * Suggest similar words. * - *
<p>
    As the Lucene similarity that is used to fetch the most relevant n-grammed terms - * is not the same as the edit distance strategy used to calculate the best - * matching spell-checked word from the hits that Lucene found, one usually has - * to retrieve a couple of numSug's in order to get the true best match. + *
<p>
    As the Lucene similarity that is used to fetch the most relevant n-grammed terms is not the + * same as the edit distance strategy used to calculate the best matching spell-checked word from + * the hits that Lucene found, one usually has to retrieve a couple of numSug's in order to get + * the true best match. * - *
<p>
    I.e. if numSug == 1, don't count on that suggestion being the best one. - * Thus, you should set this value to at least 5 for a good suggestion. + *
<p>
    I.e. if numSug == 1, don't count on that suggestion being the best one. Thus, you should set + * this value to at least 5 for a good suggestion. * * @param word the word you want a spell check done on * @param numSug the number of suggested words - * @param accuracy The minimum score a suggestion must have in order to qualify for inclusion in the results + * @param accuracy The minimum score a suggestion must have in order to qualify for inclusion in + * the results * @throws IOException if the underlying index throws an {@link IOException} * @throws AlreadyClosedException if the Spellchecker is already closed * @return String[] - * * @see #suggestSimilar(String, int, IndexReader, String, SuggestMode, float) */ public String[] suggestSimilar(String word, int numSug, float accuracy) throws IOException { - return this.suggestSimilar(word, numSug, null, null, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX, accuracy); + return this.suggestSimilar( + word, numSug, null, null, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX, accuracy); } /** - * Calls {@link #suggestSimilar(String, int, IndexReader, String, SuggestMode, float) - * suggestSimilar(word, numSug, ir, suggestMode, field, this.accuracy)} - * + * Calls {@link #suggestSimilar(String, int, IndexReader, String, SuggestMode, float) + * suggestSimilar(word, numSug, ir, suggestMode, field, this.accuracy)} */ - public String[] suggestSimilar(String word, int numSug, IndexReader ir, - String field, SuggestMode suggestMode) throws IOException { + public String[] suggestSimilar( + String word, int numSug, IndexReader ir, String field, SuggestMode suggestMode) + throws IOException { return suggestSimilar(word, numSug, ir, field, suggestMode, this.accuracy); } - + /** * Suggest similar words (optionally restricted to a field of an index). * - *
<p>
    As the Lucene similarity that is used to fetch the most relevant n-grammed terms - * is not the same as the edit distance strategy used to calculate the best - * matching spell-checked word from the hits that Lucene found, one usually has - * to retrieve a couple of numSug's in order to get the true best match. + *
<p>
    As the Lucene similarity that is used to fetch the most relevant n-grammed terms is not the + * same as the edit distance strategy used to calculate the best matching spell-checked word from + * the hits that Lucene found, one usually has to retrieve a couple of numSug's in order to get + * the true best match. * - *
<p>
    I.e. if numSug == 1, don't count on that suggestion being the best one. - * Thus, you should set this value to at least 5 for a good suggestion. + *
<p>
    I.e. if numSug == 1, don't count on that suggestion being the best one. Thus, you should set + * this value to at least 5 for a good suggestion. * * @param word the word you want a spell check done on * @param numSug the number of suggested words * @param ir the indexReader of the user index (can be null see field param) - * @param field the field of the user index: if field is not null, the suggested - * words are restricted to the words present in this field. - * @param suggestMode - * (NOTE: if indexReader==null and/or field==null, then this is overridden with SuggestMode.SUGGEST_ALWAYS) - * @param accuracy The minimum score a suggestion must have in order to qualify for inclusion in the results + * @param field the field of the user index: if field is not null, the suggested words are + * restricted to the words present in this field. + * @param suggestMode (NOTE: if indexReader==null and/or field==null, then this is overridden with + * SuggestMode.SUGGEST_ALWAYS) + * @param accuracy The minimum score a suggestion must have in order to qualify for inclusion in + * the results * @throws IOException if the underlying index throws an {@link IOException} * @throws AlreadyClosedException if the Spellchecker is already closed - * @return String[] the sorted list of the suggest words with these 2 criteria: - * first criteria: the edit distance, second criteria (only if restricted mode): the popularity - * of the suggest words in the field of the user index - * + * @return String[] the sorted list of the suggest words with these 2 criteria: first criteria: + * the edit distance, second criteria (only if restricted mode): the popularity of the suggest + * words in the field of the user index */ - public String[] suggestSimilar(String word, int numSug, IndexReader ir, - String field, SuggestMode suggestMode, float accuracy) throws IOException { + public String[] suggestSimilar( + String word, + int numSug, + IndexReader ir, + String field, + SuggestMode suggestMode, + float accuracy) + throws IOException { // obtainSearcher calls ensureOpen final IndexSearcher indexSearcher = obtainSearcher(); try { @@ -328,10 +322,11 @@ public class SpellChecker implements java.io.Closeable { final int lengthWord = word.length(); final int freq = (ir != null && field != null) ? ir.docFreq(new Term(field, word)) : 0; - final int goalFreq = suggestMode==SuggestMode.SUGGEST_MORE_POPULAR ? freq : 0; - // if the word exists in the real index and we don't care for word frequency, return the word itself - if (suggestMode==SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX && freq > 0) { - return new String[] { word }; + final int goalFreq = suggestMode == SuggestMode.SUGGEST_MORE_POPULAR ? freq : 0; + // if the word exists in the real index and we don't care for word frequency, return the word + // itself + if (suggestMode == SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX && freq > 0) { + return new String[] {word}; } BooleanQuery.Builder query = new BooleanQuery.Builder(); @@ -350,11 +345,9 @@ public class SpellChecker implements java.io.Closeable { if (bStart > 0) { // should we boost prefixes? 
add(query, "start" + ng, grams[0], bStart); // matches start of word - } if (bEnd > 0) { // should we boost suffixes add(query, "end" + ng, grams[grams.length - 1], bEnd); // matches end of word - } for (int i = 0; i < grams.length; i++) { add(query, key, grams[i]); @@ -363,9 +356,9 @@ public class SpellChecker implements java.io.Closeable { int maxHits = 10 * numSug; - // System.out.println("Q: " + query); + // System.out.println("Q: " + query); ScoreDoc[] hits = indexSearcher.search(query.build(), maxHits).scoreDocs; - // System.out.println("HITS: " + hits.length()); + // System.out.println("HITS: " + hits.length()); SuggestWordQueue sugQueue = new SuggestWordQueue(numSug, comparator); // go thru more than 'maxr' matches in case the distance filter triggers @@ -381,7 +374,7 @@ public class SpellChecker implements java.io.Closeable { } // edit distance - sugWord.score = sd.getDistance(word,sugWord.string); + sugWord.score = sd.getDistance(word, sugWord.string); if (sugWord.score < accuracy) { continue; } @@ -389,7 +382,8 @@ public class SpellChecker implements java.io.Closeable { if (ir != null && field != null) { // use the user index sugWord.freq = ir.docFreq(new Term(field, sugWord.string)); // freq in the index // don't suggest a word that is not present in the field - if ((suggestMode==SuggestMode.SUGGEST_MORE_POPULAR && goalFreq > sugWord.freq) || sugWord.freq < 1) { + if ((suggestMode == SuggestMode.SUGGEST_MORE_POPULAR && goalFreq > sugWord.freq) + || sugWord.freq < 1) { continue; } } @@ -412,23 +406,20 @@ public class SpellChecker implements java.io.Closeable { releaseSearcher(indexSearcher); } } - /** - * Add a clause to a boolean query. - */ + /** Add a clause to a boolean query. */ private static void add(BooleanQuery.Builder q, String name, String value, float boost) { Query tq = new TermQuery(new Term(name, value)); q.add(new BooleanClause(new BoostQuery(tq, boost), BooleanClause.Occur.SHOULD)); } - /** - * Add a clause to a boolean query. - */ + /** Add a clause to a boolean query. */ private static void add(BooleanQuery.Builder q, String name, String value) { q.add(new BooleanClause(new TermQuery(new Term(name, value)), BooleanClause.Occur.SHOULD)); } /** * Form all ngrams for a given word. + * * @param text the word to parse * @param ng the ngram length e.g. 3 * @return an array of all ngrams in the word and note that duplicates are not removed @@ -444,6 +435,7 @@ public class SpellChecker implements java.io.Closeable { /** * Removes all terms from the spell check index. + * * @throws IOException If there is a low-level I/O error. * @throws AlreadyClosedException if the Spellchecker is already closed */ @@ -451,8 +443,8 @@ public class SpellChecker implements java.io.Closeable { synchronized (modifyCurrentIndexLock) { ensureOpen(); final Directory dir = this.spellIndex; - final IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(null) - .setOpenMode(OpenMode.CREATE)); + final IndexWriter writer = + new IndexWriter(dir, new IndexWriterConfig(null).setOpenMode(OpenMode.CREATE)); writer.close(); swapSearcher(dir); } @@ -460,6 +452,7 @@ public class SpellChecker implements java.io.Closeable { /** * Check whether the word exists in the index. + * * @param word word to check * @throws IOException If there is a low-level I/O error. 
* @throws AlreadyClosedException if the Spellchecker is already closed @@ -468,7 +461,7 @@ public class SpellChecker implements java.io.Closeable { public boolean exist(String word) throws IOException { // obtainSearcher calls ensureOpen final IndexSearcher indexSearcher = obtainSearcher(); - try{ + try { // TODO: we should use ReaderUtil+seekExact, we dont care about the docFreq // this is just an existence check return indexSearcher.getIndexReader().docFreq(new Term(F_WORD, word)) > 0; @@ -479,13 +472,15 @@ public class SpellChecker implements java.io.Closeable { /** * Indexes the data from the given {@link Dictionary}. + * * @param dict Dictionary to index * @param config {@link IndexWriterConfig} to use * @param fullMerge whether or not the spellcheck index should be fully merged * @throws AlreadyClosedException if the Spellchecker is already closed * @throws IOException If there is a low-level I/O error. */ - public final void indexDictionary(Dictionary dict, IndexWriterConfig config, boolean fullMerge) throws IOException { + public final void indexDictionary(Dictionary dict, IndexWriterConfig config, boolean fullMerge) + throws IOException { synchronized (modifyCurrentIndexLock) { ensureOpen(); final Directory dir = this.spellIndex; @@ -497,25 +492,27 @@ public class SpellChecker implements java.io.Closeable { if (reader.maxDoc() > 0) { for (final LeafReaderContext ctx : reader.leaves()) { Terms terms = ctx.reader().terms(F_WORD); - if (terms != null) + if (terms != null) { termsEnums.add(terms.iterator()); + } } } - + boolean isEmpty = termsEnums.isEmpty(); - try { + try { BytesRefIterator iter = dict.getEntryIterator(); BytesRef currentTerm; - - terms: while ((currentTerm = iter.next()) != null) { - + + terms: + while ((currentTerm = iter.next()) != null) { + String word = currentTerm.utf8ToString(); int len = word.length(); if (len < 3) { continue; // too short we bail but "too long" is fine... } - + if (!isEmpty) { for (TermsEnum te : termsEnums) { if (te.seekExact(currentTerm)) { @@ -523,7 +520,7 @@ public class SpellChecker implements java.io.Closeable { } } } - + // ok index the word Document doc = createDocument(word, getMin(len), getMax(len)); writer.addDocument(doc); @@ -538,7 +535,7 @@ public class SpellChecker implements java.io.Closeable { writer.close(); // TODO: this isn't that great, maybe in the future SpellChecker should take // IWC in its ctor / keep its writer open? 
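For orientation, a minimal sketch of how the two methods reformatted above fit together; the directory and word-list paths are illustrative assumptions, not taken from this patch:

import java.nio.file.Paths;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.search.spell.PlainTextDictionary;
import org.apache.lucene.search.spell.SpellChecker;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class SpellCheckerDemo {
  public static void main(String[] args) throws Exception {
    // Hypothetical locations; any Directory and Dictionary implementation will do.
    Directory spellIndexDir = FSDirectory.open(Paths.get("/tmp/spell-index"));
    try (SpellChecker spellChecker = new SpellChecker(spellIndexDir)) {
      // Build the n-gram spell index from a plain word list (one word per line).
      spellChecker.indexDictionary(
          new PlainTextDictionary(Paths.get("/tmp/words.txt")), new IndexWriterConfig(null), true);
      // Per the javadoc above, ask for at least 5 suggestions to get a good one.
      for (String s : spellChecker.suggestSimilar("lucenne", 5)) {
        System.out.println(s);
      }
    }
  }
}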
- + // also re-open the spell index to see our own changes when the next suggestion // is fetched: swapSearcher(dir); @@ -602,7 +599,7 @@ public class SpellChecker implements java.io.Closeable { } } } - + private IndexSearcher obtainSearcher() { synchronized (searcherLock) { ensureOpen(); @@ -610,21 +607,22 @@ public class SpellChecker implements java.io.Closeable { return searcher; } } - - private void releaseSearcher(final IndexSearcher aSearcher) throws IOException{ - // don't check if open - always decRef - // don't decrement the private searcher - could have been swapped - aSearcher.getIndexReader().decRef(); + + private void releaseSearcher(final IndexSearcher aSearcher) throws IOException { + // don't check if open - always decRef + // don't decrement the private searcher - could have been swapped + aSearcher.getIndexReader().decRef(); } - + private void ensureOpen() { if (closed) { throw new AlreadyClosedException("Spellchecker has been closed"); } } - + /** * Close the IndexSearcher used by this SpellChecker + * * @throws IOException if the close operation causes an {@link IOException} * @throws AlreadyClosedException if the {@link SpellChecker} is already closed */ @@ -639,7 +637,7 @@ public class SpellChecker implements java.io.Closeable { searcher = null; } } - + private void swapSearcher(final Directory dir) throws IOException { /* * opening a searcher is possibly very expensive. @@ -648,7 +646,7 @@ public class SpellChecker implements java.io.Closeable { */ final IndexSearcher indexSearcher = createSearcher(dir); synchronized (searcherLock) { - if(closed){ + if (closed) { indexSearcher.getIndexReader().close(); throw new AlreadyClosedException("Spellchecker has been closed"); } @@ -660,27 +658,27 @@ public class SpellChecker implements java.io.Closeable { this.spellIndex = dir; } } - + /** - * Creates a new read-only IndexSearcher + * Creates a new read-only IndexSearcher + * * @param dir the directory used to open the searcher * @return a new read-only IndexSearcher * @throws IOException f there is a low-level IO error */ // for testing purposes - IndexSearcher createSearcher(final Directory dir) throws IOException{ + IndexSearcher createSearcher(final Directory dir) throws IOException { return new IndexSearcher(DirectoryReader.open(dir)); } - + /** - * Returns true if and only if the {@link SpellChecker} is - * closed, otherwise false. - * - * @return true if and only if the {@link SpellChecker} is - * closed, otherwise false. + * Returns true if and only if the {@link SpellChecker} is closed, otherwise + * false. + * + * @return true if and only if the {@link SpellChecker} is closed, otherwise + * false. */ - boolean isClosed(){ + boolean isClosed() { return closed; } - } diff --git a/lucene/suggest/src/java/org/apache/lucene/search/spell/StringDistance.java b/lucene/suggest/src/java/org/apache/lucene/search/spell/StringDistance.java index ca09288e4e5..82fc166a5b7 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/spell/StringDistance.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/spell/StringDistance.java @@ -16,19 +16,17 @@ */ package org.apache.lucene.search.spell; -/** - * Interface for string distances. - */ +/** Interface for string distances. */ public interface StringDistance { /** - * Returns a float between 0 and 1 based on how similar the specified strings are to one another. - * Returning a value of 1 means the specified strings are identical and 0 means the - * string are maximally different. 
+ * Returns a float between 0 and 1 based on how similar the specified strings are to one another. + * Returning a value of 1 means the specified strings are identical and 0 means the string are + * maximally different. + * * @param s1 The first string. * @param s2 The second string. * @return a float between 0 and 1 based on how similar the specified strings are to one another. */ - public float getDistance(String s1,String s2); - + public float getDistance(String s1, String s2); } diff --git a/lucene/suggest/src/java/org/apache/lucene/search/spell/SuggestMode.java b/lucene/suggest/src/java/org/apache/lucene/search/spell/SuggestMode.java index 5e42f1644a5..cb56cebfbff 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/spell/SuggestMode.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/spell/SuggestMode.java @@ -18,24 +18,19 @@ package org.apache.lucene.search.spell; /** * Set of strategies for suggesting related terms + * * @lucene.experimental */ public enum SuggestMode { - /** - * Generate suggestions only for terms not in the index (default) - */ + /** Generate suggestions only for terms not in the index (default) */ SUGGEST_WHEN_NOT_IN_INDEX, - /** - * Return only suggested words that are as frequent or more frequent than the - * searched word - */ + /** Return only suggested words that are as frequent or more frequent than the searched word */ SUGGEST_MORE_POPULAR, /** - * Always attempt to offer suggestions (however, other parameters may limit - * suggestions. For example, see - * {@link DirectSpellChecker#setMaxQueryFrequency(float)} ). + * Always attempt to offer suggestions (however, other parameters may limit suggestions. For + * example, see {@link DirectSpellChecker#setMaxQueryFrequency(float)} ). */ SUGGEST_ALWAYS } diff --git a/lucene/suggest/src/java/org/apache/lucene/search/spell/SuggestWord.java b/lucene/suggest/src/java/org/apache/lucene/search/spell/SuggestWord.java index 6b584d118f6..37ede70e4cf 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/spell/SuggestWord.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/spell/SuggestWord.java @@ -16,37 +16,27 @@ */ package org.apache.lucene.search.spell; - /** * SuggestWord, used in suggestSimilar method in SpellChecker class. - *

- * <p>
- * Default sort is first by score, then by frequency.
+ *
+ * <p>
    Default sort is first by score, then by frequency. */ -public final class SuggestWord{ - - /** - * Creates a new empty suggestion with null text. - */ +public final class SuggestWord { + + /** Creates a new empty suggestion with null text. */ public SuggestWord() {} - - /** - * the score of the word - */ + + /** the score of the word */ public float score; - /** - * The freq of the word - */ + /** The freq of the word */ public int freq; - /** - * the suggested word - */ + /** the suggested word */ public String string; @Override public String toString() { return "SuggestWord(string=" + string + ", score=" + score + ", freq=" + freq + ")"; } - -} \ No newline at end of file +} diff --git a/lucene/suggest/src/java/org/apache/lucene/search/spell/SuggestWordFrequencyComparator.java b/lucene/suggest/src/java/org/apache/lucene/search/spell/SuggestWordFrequencyComparator.java index 7f9c14f2364..2baee33ba09 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/spell/SuggestWordFrequencyComparator.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/spell/SuggestWordFrequencyComparator.java @@ -17,15 +17,13 @@ package org.apache.lucene.search.spell; import java.util.Comparator; -/** - * Frequency first, then score. - * - **/ + +/** Frequency first, then score. */ public class SuggestWordFrequencyComparator implements Comparator { - + /** - * Creates a new comparator that will compare by {@link SuggestWord#freq}, - * then by {@link SuggestWord#score}, then by {@link SuggestWord#string}. + * Creates a new comparator that will compare by {@link SuggestWord#freq}, then by {@link + * SuggestWord#score}, then by {@link SuggestWord#string}. */ public SuggestWordFrequencyComparator() {} diff --git a/lucene/suggest/src/java/org/apache/lucene/search/spell/SuggestWordQueue.java b/lucene/suggest/src/java/org/apache/lucene/search/spell/SuggestWordQueue.java index b7910b78f49..bab8eb166e9 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/spell/SuggestWordQueue.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/spell/SuggestWordQueue.java @@ -16,50 +16,48 @@ */ package org.apache.lucene.search.spell; - -import org.apache.lucene.util.PriorityQueue; - import java.util.Comparator; - +import org.apache.lucene.util.PriorityQueue; /** * Sorts SuggestWord instances * * @see org.apache.lucene.search.spell.SuggestWordScoreComparator * @see org.apache.lucene.search.spell.SuggestWordFrequencyComparator - * */ public final class SuggestWordQueue extends PriorityQueue { /** * Default comparator: score then frequency. + * * @see SuggestWordScoreComparator */ public static final Comparator DEFAULT_COMPARATOR = new SuggestWordScoreComparator(); - private Comparator comparator; /** * Use the {@link #DEFAULT_COMPARATOR} + * * @param size The size of the queue */ - public SuggestWordQueue (int size) { + public SuggestWordQueue(int size) { super(size); comparator = DEFAULT_COMPARATOR; } /** * Specify the size of the queue and the comparator to use for sorting. + * * @param size The size * @param comparator The comparator. 
*/ - public SuggestWordQueue(int size, Comparator comparator){ + public SuggestWordQueue(int size, Comparator comparator) { super(size); this.comparator = comparator; } @Override - protected final boolean lessThan (SuggestWord wa, SuggestWord wb) { + protected final boolean lessThan(SuggestWord wa, SuggestWord wb) { int val = comparator.compare(wa, wb); return val < 0; } diff --git a/lucene/suggest/src/java/org/apache/lucene/search/spell/SuggestWordScoreComparator.java b/lucene/suggest/src/java/org/apache/lucene/search/spell/SuggestWordScoreComparator.java index f6e7703c8d5..59bbc0083ef 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/spell/SuggestWordScoreComparator.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/spell/SuggestWordScoreComparator.java @@ -15,18 +15,15 @@ * limitations under the License. */ package org.apache.lucene.search.spell; + import java.util.Comparator; - -/** - * Score first, then frequency - * - **/ +/** Score first, then frequency */ public class SuggestWordScoreComparator implements Comparator { - + /** - * Creates a new comparator that will compare by {@link SuggestWord#score}, - * then by {@link SuggestWord#freq}, then by {@link SuggestWord#string}. + * Creates a new comparator that will compare by {@link SuggestWord#score}, then by {@link + * SuggestWord#freq}, then by {@link SuggestWord#string}. */ public SuggestWordScoreComparator() {} diff --git a/lucene/suggest/src/java/org/apache/lucene/search/spell/WordBreakSpellChecker.java b/lucene/suggest/src/java/org/apache/lucene/search/spell/WordBreakSpellChecker.java index 4ceb83b0333..56bc2a9388d 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/spell/WordBreakSpellChecker.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/spell/WordBreakSpellChecker.java @@ -20,15 +20,12 @@ import java.io.IOException; import java.util.Comparator; import java.util.PriorityQueue; import java.util.Queue; - import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; /** - *

- * <p>
- * A spell checker whose sole function is to offer suggestions by combining
- * multiple terms into one word and/or breaking terms into multiple words.
- * </p>
    + * A spell checker whose sole function is to offer suggestions by combining multiple terms into one + * word and/or breaking terms into multiple words. */ public class WordBreakSpellChecker { private int minSuggestionFrequency = 1; @@ -36,12 +33,13 @@ public class WordBreakSpellChecker { private int maxCombineWordLength = 20; private int maxChanges = 1; private int maxEvaluations = 1000; - + /** Term that can be used to prohibit adjacent terms from being combined */ public static final Term SEPARATOR_TERM = new Term("", ""); - - /** + + /** * Creates a new spellchecker with default configuration values + * * @see #setMaxChanges(int) * @see #setMaxCombineWordLength(int) * @see #setMaxEvaluations(int) @@ -50,46 +48,33 @@ public class WordBreakSpellChecker { */ public WordBreakSpellChecker() {} - /** - *

- * <p>
- * Determines the order to list word break suggestions
- * </p>
    - */ + /** Determines the order to list word break suggestions */ public enum BreakSuggestionSortMethod { - /** - *

- * <p>
- * Sort by Number of word breaks, then by the Sum of all the component
- * term's frequencies
- * </p>
    - */ + /** Sort by Number of word breaks, then by the Sum of all the component term's frequencies */ NUM_CHANGES_THEN_SUMMED_FREQUENCY, /** - *

- * <p>
- * Sort by Number of word breaks, then by the Maximum of all the component
- * term's frequencies
- * </p>
    + * Sort by Number of word breaks, then by the Maximum of all the component term's frequencies */ NUM_CHANGES_THEN_MAX_FREQUENCY } - + /** - *

- * <p>
- * Generate suggestions by breaking the passed-in term into multiple words.
- * The scores returned are equal to the number of word breaks needed so a
- * lower score is generally preferred over a higher score.
- * </p>
    - * - * @param suggestMode - * - default = {@link SuggestMode#SUGGEST_WHEN_NOT_IN_INDEX} - * @param sortMethod - * - default = - * {@link BreakSuggestionSortMethod#NUM_CHANGES_THEN_MAX_FREQUENCY} + * Generate suggestions by breaking the passed-in term into multiple words. The scores returned + * are equal to the number of word breaks needed so a lower score is generally preferred over a + * higher score. + * + * @param suggestMode - default = {@link SuggestMode#SUGGEST_WHEN_NOT_IN_INDEX} + * @param sortMethod - default = {@link BreakSuggestionSortMethod#NUM_CHANGES_THEN_MAX_FREQUENCY} * @return one or more arrays of words formed by breaking up the original term * @throws IOException If there is a low-level I/O error. */ - public SuggestWord[][] suggestWordBreaks(Term term, int maxSuggestions, - IndexReader ir, SuggestMode suggestMode, - BreakSuggestionSortMethod sortMethod) throws IOException { + public SuggestWord[][] suggestWordBreaks( + Term term, + int maxSuggestions, + IndexReader ir, + SuggestMode suggestMode, + BreakSuggestionSortMethod sortMethod) + throws IOException { if (maxSuggestions < 1) { return new SuggestWord[0][0]; } @@ -99,70 +84,71 @@ public class WordBreakSpellChecker { if (sortMethod == null) { sortMethod = BreakSuggestionSortMethod.NUM_CHANGES_THEN_MAX_FREQUENCY; } - + int queueInitialCapacity = maxSuggestions > 10 ? 10 : maxSuggestions; - Comparator queueComparator = sortMethod == BreakSuggestionSortMethod.NUM_CHANGES_THEN_MAX_FREQUENCY ? new LengthThenMaxFreqComparator() - : new LengthThenSumFreqComparator(); - Queue suggestions = new PriorityQueue<>( - queueInitialCapacity, queueComparator); - + Comparator queueComparator = + sortMethod == BreakSuggestionSortMethod.NUM_CHANGES_THEN_MAX_FREQUENCY + ? new LengthThenMaxFreqComparator() + : new LengthThenSumFreqComparator(); + Queue suggestions = + new PriorityQueue<>(queueInitialCapacity, queueComparator); + int origFreq = ir.docFreq(term); if (origFreq > 0 && suggestMode == SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX) { return new SuggestWord[0][]; } - + int useMinSuggestionFrequency = minSuggestionFrequency; if (suggestMode == SuggestMode.SUGGEST_MORE_POPULAR) { useMinSuggestionFrequency = (origFreq == 0 ? 1 : origFreq); } - - generateBreakUpSuggestions(term, ir, 1, maxSuggestions, - useMinSuggestionFrequency, new SuggestWord[0], suggestions, 0, + + generateBreakUpSuggestions( + term, + ir, + 1, + maxSuggestions, + useMinSuggestionFrequency, + new SuggestWord[0], + suggestions, + 0, sortMethod); - + SuggestWord[][] suggestionArray = new SuggestWord[suggestions.size()][]; for (int i = suggestions.size() - 1; i >= 0; i--) { suggestionArray[i] = suggestions.remove().suggestWords; } - + return suggestionArray; } - + /** - *

- * <p>
- * Generate suggestions by combining one or more of the passed-in terms into
- * single words. The returned {@link CombineSuggestion} contains both a
- * {@link SuggestWord} and also an array detailing which passed-in terms were
- * involved in creating this combination. The scores returned are equal to the
- * number of word combinations needed, also one less than the length of the
- * array {@link CombineSuggestion#originalTermIndexes}. Generally, a
- * suggestion with a lower score is preferred over a higher score.
- * </p>
- * <p>
- * To prevent two adjacent terms from being combined (for instance, if one is
- * mandatory and the other is prohibited), separate the two terms with
- * {@link WordBreakSpellChecker#SEPARATOR_TERM}
- * </p>
- * <p>
- * When suggestMode equals {@link SuggestMode#SUGGEST_WHEN_NOT_IN_INDEX}, each
- * suggestion will include at least one term not in the index.
- * </p>
- * <p>
- * When suggestMode equals {@link SuggestMode#SUGGEST_MORE_POPULAR}, each
- * suggestion will have the same, or better frequency than the most-popular
- * included term.
- * </p>
+ * Generate suggestions by combining one or more of the passed-in terms into single words. The
+ * returned {@link CombineSuggestion} contains both a {@link SuggestWord} and also an array
+ * detailing which passed-in terms were involved in creating this combination. The scores returned
+ * are equal to the number of word combinations needed, also one less than the length of the array
+ * {@link CombineSuggestion#originalTermIndexes}. Generally, a suggestion with a lower score is
+ * preferred over a higher score.
+ *
+ * <p>To prevent two adjacent terms from being combined (for instance, if one is mandatory and the
+ * other is prohibited), separate the two terms with {@link WordBreakSpellChecker#SEPARATOR_TERM}
+ *
+ * <p>When suggestMode equals {@link SuggestMode#SUGGEST_WHEN_NOT_IN_INDEX}, each suggestion will
+ * include at least one term not in the index.
+ *
+ * <p>
    When suggestMode equals {@link SuggestMode#SUGGEST_MORE_POPULAR}, each suggestion will have + * the same, or better frequency than the most-popular included term. + * * @return an array of words generated by combining original terms * @throws IOException If there is a low-level I/O error. */ - public CombineSuggestion[] suggestWordCombinations(Term[] terms, - int maxSuggestions, IndexReader ir, SuggestMode suggestMode) + public CombineSuggestion[] suggestWordCombinations( + Term[] terms, int maxSuggestions, IndexReader ir, SuggestMode suggestMode) throws IOException { if (maxSuggestions < 1) { return new CombineSuggestion[0]; } - + int[] origFreqs = null; if (suggestMode != SuggestMode.SUGGEST_ALWAYS) { origFreqs = new int[terms.length]; @@ -170,28 +156,28 @@ public class WordBreakSpellChecker { origFreqs[i] = ir.docFreq(terms[i]); } } - + int queueInitialCapacity = maxSuggestions > 10 ? 10 : maxSuggestions; Comparator queueComparator = new CombinationsThenFreqComparator(); - Queue suggestions = new PriorityQueue<>( - queueInitialCapacity, queueComparator); - + Queue suggestions = + new PriorityQueue<>(queueInitialCapacity, queueComparator); + int thisTimeEvaluations = 0; for (int i = 0; i < terms.length - 1; i++) { if (terms[i].equals(SEPARATOR_TERM)) { continue; - } + } String leftTermText = terms[i].text(); int leftTermLength = leftTermText.codePointCount(0, leftTermText.length()); if (leftTermLength > maxCombineWordLength) { - continue; - } + continue; + } int maxFreq = 0; int minFreq = Integer.MAX_VALUE; if (origFreqs != null) { maxFreq = origFreqs[i]; minFreq = origFreqs[i]; - } + } String combinedTermText = leftTermText; int combinedLength = leftTermLength; for (int j = i + 1; j < terms.length && j - i <= maxChanges; j++) { @@ -201,23 +187,21 @@ public class WordBreakSpellChecker { String rightTermText = terms[j].text(); int rightTermLength = rightTermText.codePointCount(0, rightTermText.length()); combinedTermText += rightTermText; - combinedLength +=rightTermLength; + combinedLength += rightTermLength; if (combinedLength > maxCombineWordLength) { break; } - + if (origFreqs != null) { maxFreq = Math.max(maxFreq, origFreqs[j]); minFreq = Math.min(minFreq, origFreqs[j]); } - + Term combinedTerm = new Term(terms[0].field(), combinedTermText); int combinedTermFreq = ir.docFreq(combinedTerm); - - if (suggestMode != SuggestMode.SUGGEST_MORE_POPULAR - || combinedTermFreq >= maxFreq) { - if (suggestMode != SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX - || minFreq == 0) { + + if (suggestMode != SuggestMode.SUGGEST_MORE_POPULAR || combinedTermFreq >= maxFreq) { + if (suggestMode != SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX || minFreq == 0) { if (combinedTermFreq >= minSuggestionFrequency) { int[] origIndexes = new int[j - i + 1]; origIndexes[0] = i; @@ -228,9 +212,9 @@ public class WordBreakSpellChecker { word.freq = combinedTermFreq; word.score = origIndexes.length - 1; word.string = combinedTerm.text(); - CombineSuggestionWrapper suggestion = new CombineSuggestionWrapper( - new CombineSuggestion(word, origIndexes), - (origIndexes.length - 1)); + CombineSuggestionWrapper suggestion = + new CombineSuggestionWrapper( + new CombineSuggestion(word, origIndexes), (origIndexes.length - 1)); suggestions.offer(suggestion); if (suggestions.size() > maxSuggestions) { suggestions.poll(); @@ -244,18 +228,23 @@ public class WordBreakSpellChecker { } } } - CombineSuggestion[] combineSuggestions = new CombineSuggestion[suggestions - .size()]; + CombineSuggestion[] combineSuggestions = new 
CombineSuggestion[suggestions.size()]; for (int i = suggestions.size() - 1; i >= 0; i--) { combineSuggestions[i] = suggestions.remove().combineSuggestion; } return combineSuggestions; } - - private int generateBreakUpSuggestions(Term term, IndexReader ir, - int numberBreaks, int maxSuggestions, int useMinSuggestionFrequency, - SuggestWord[] prefix, Queue suggestions, - int totalEvaluations, BreakSuggestionSortMethod sortMethod) + + private int generateBreakUpSuggestions( + Term term, + IndexReader ir, + int numberBreaks, + int maxSuggestions, + int useMinSuggestionFrequency, + SuggestWord[] prefix, + Queue suggestions, + int totalEvaluations, + BreakSuggestionSortMethod sortMethod) throws IOException { String termText = term.text(); int termLength = termText.codePointCount(0, termText.length()); @@ -265,35 +254,42 @@ public class WordBreakSpellChecker { } if (termLength < (useMinBreakWordLength * 2)) { return 0; - } - + } + int thisTimeEvaluations = 0; for (int i = useMinBreakWordLength; i <= (termLength - useMinBreakWordLength); i++) { int end = termText.offsetByCodePoints(0, i); String leftText = termText.substring(0, end); String rightText = termText.substring(end); SuggestWord leftWord = generateSuggestWord(ir, term.field(), leftText); - + if (leftWord.freq >= useMinSuggestionFrequency) { SuggestWord rightWord = generateSuggestWord(ir, term.field(), rightText); if (rightWord.freq >= useMinSuggestionFrequency) { - SuggestWordArrayWrapper suggestion = new SuggestWordArrayWrapper( - newSuggestion(prefix, leftWord, rightWord)); + SuggestWordArrayWrapper suggestion = + new SuggestWordArrayWrapper(newSuggestion(prefix, leftWord, rightWord)); suggestions.offer(suggestion); if (suggestions.size() > maxSuggestions) { suggestions.poll(); } - } + } int newNumberBreaks = numberBreaks + 1; if (newNumberBreaks <= maxChanges) { - int evaluations = generateBreakUpSuggestions(new Term(term.field(), - rightWord.string), ir, newNumberBreaks, maxSuggestions, - useMinSuggestionFrequency, newPrefix(prefix, leftWord), - suggestions, totalEvaluations, sortMethod); + int evaluations = + generateBreakUpSuggestions( + new Term(term.field(), rightWord.string), + ir, + newNumberBreaks, + maxSuggestions, + useMinSuggestionFrequency, + newPrefix(prefix, leftWord), + suggestions, + totalEvaluations, + sortMethod); totalEvaluations += evaluations; } } - + thisTimeEvaluations++; totalEvaluations++; if (totalEvaluations >= maxEvaluations) { @@ -302,16 +298,16 @@ public class WordBreakSpellChecker { } return thisTimeEvaluations; } - + private SuggestWord[] newPrefix(SuggestWord[] oldPrefix, SuggestWord append) { SuggestWord[] newPrefix = new SuggestWord[oldPrefix.length + 1]; System.arraycopy(oldPrefix, 0, newPrefix, 0, oldPrefix.length); newPrefix[newPrefix.length - 1] = append; return newPrefix; } - - private SuggestWord[] newSuggestion(SuggestWord[] prefix, - SuggestWord append1, SuggestWord append2) { + + private SuggestWord[] newSuggestion( + SuggestWord[] prefix, SuggestWord append1, SuggestWord append2) { SuggestWord[] newSuggestion = new SuggestWord[prefix.length + 2]; int score = prefix.length + 1; for (int i = 0; i < prefix.length; i++) { @@ -327,8 +323,9 @@ public class WordBreakSpellChecker { newSuggestion[newSuggestion.length - 1] = append2; return newSuggestion; } - - private SuggestWord generateSuggestWord(IndexReader ir, String fieldname, String text) throws IOException { + + private SuggestWord generateSuggestWord(IndexReader ir, String fieldname, String text) + throws IOException { Term term = new 
Term(fieldname, text); int freq = ir.docFreq(term); SuggestWord word = new SuggestWord(); @@ -337,111 +334,101 @@ public class WordBreakSpellChecker { word.string = text; return word; } - + /** - * Returns the minimum frequency a term must have - * to be part of a suggestion. + * Returns the minimum frequency a term must have to be part of a suggestion. + * * @see #setMinSuggestionFrequency(int) */ public int getMinSuggestionFrequency() { return minSuggestionFrequency; } - + /** * Returns the maximum length of a combined suggestion + * * @see #setMaxCombineWordLength(int) */ public int getMaxCombineWordLength() { return maxCombineWordLength; } - + /** * Returns the minimum size of a broken word + * * @see #setMinBreakWordLength(int) */ public int getMinBreakWordLength() { return minBreakWordLength; } - + /** * Returns the maximum number of changes to perform on the input + * * @see #setMaxChanges(int) */ public int getMaxChanges() { return maxChanges; } - + /** * Returns the maximum number of word combinations to evaluate. + * * @see #setMaxEvaluations(int) */ public int getMaxEvaluations() { return maxEvaluations; } - + /** - *

- * <p>
- * The minimum frequency a term must have to be included as part of a
- * suggestion. Default=1 Not applicable when used with
- * {@link SuggestMode#SUGGEST_MORE_POPULAR}
- * </p>
- *
+ * The minimum frequency a term must have to be included as part of a suggestion. Default=1 Not
+ * applicable when used with {@link SuggestMode#SUGGEST_MORE_POPULAR}
+ *
  * @see #getMinSuggestionFrequency()
  */
 public void setMinSuggestionFrequency(int minSuggestionFrequency) {
   this.minSuggestionFrequency = minSuggestionFrequency;
 }
-
+
 /**
- * <p>
- * The maximum length of a suggestion made by combining 1 or more original
- * terms. Default=20
- * </p>
- *
+ * The maximum length of a suggestion made by combining 1 or more original terms. Default=20
+ *
  * @see #getMaxCombineWordLength()
  */
 public void setMaxCombineWordLength(int maxCombineWordLength) {
   this.maxCombineWordLength = maxCombineWordLength;
 }
-
+
 /**
- * <p>
- * The minimum length to break words down to. Default=1
- * </p>
- *
+ * The minimum length to break words down to. Default=1
+ *
  * @see #getMinBreakWordLength()
  */
 public void setMinBreakWordLength(int minBreakWordLength) {
   this.minBreakWordLength = minBreakWordLength;
 }
-
+
 /**
- * <p>
- * The maximum numbers of changes (word breaks or combinations) to make on the
- * original term(s). Default=1
- * </p>
- *
+ * The maximum number of changes (word breaks or combinations) to make on the original term(s).
+ * Default=1
+ *
  * @see #getMaxChanges()
  */
 public void setMaxChanges(int maxChanges) {
   this.maxChanges = maxChanges;
 }
-
+
 /**
- * <p>
- * The maximum number of word combinations to evaluate. Default=1000. A higher
- * value might improve result quality. A lower value might improve
- * performance.
- * </p>
    - * + * The maximum number of word combinations to evaluate. Default=1000. A higher value might improve + * result quality. A lower value might improve performance. + * * @see #getMaxEvaluations() */ public void setMaxEvaluations(int maxEvaluations) { this.maxEvaluations = maxEvaluations; } - - private static class LengthThenMaxFreqComparator implements - Comparator { + + private static class LengthThenMaxFreqComparator implements Comparator { @Override public int compare(SuggestWordArrayWrapper o1, SuggestWordArrayWrapper o2) { if (o1.suggestWords.length != o2.suggestWords.length) { @@ -453,9 +440,8 @@ public class WordBreakSpellChecker { return 0; } } - - private static class LengthThenSumFreqComparator implements - Comparator { + + private static class LengthThenSumFreqComparator implements Comparator { @Override public int compare(SuggestWordArrayWrapper o1, SuggestWordArrayWrapper o2) { if (o1.suggestWords.length != o2.suggestWords.length) { @@ -467,27 +453,26 @@ public class WordBreakSpellChecker { return 0; } } - - private static class CombinationsThenFreqComparator implements - Comparator { + + private static class CombinationsThenFreqComparator + implements Comparator { @Override public int compare(CombineSuggestionWrapper o1, CombineSuggestionWrapper o2) { if (o1.numCombinations != o2.numCombinations) { return o2.numCombinations - o1.numCombinations; } if (o1.combineSuggestion.suggestion.freq != o2.combineSuggestion.suggestion.freq) { - return o1.combineSuggestion.suggestion.freq - - o2.combineSuggestion.suggestion.freq; + return o1.combineSuggestion.suggestion.freq - o2.combineSuggestion.suggestion.freq; } return 0; } } - + private static class SuggestWordArrayWrapper { final SuggestWord[] suggestWords; final int freqMax; final int freqSum; - + SuggestWordArrayWrapper(SuggestWord[] suggestWords) { this.suggestWords = suggestWords; int aFreqSum = 0; @@ -500,13 +485,12 @@ public class WordBreakSpellChecker { this.freqMax = aFreqMax; } } - + private static class CombineSuggestionWrapper { final CombineSuggestion combineSuggestion; final int numCombinations; - - CombineSuggestionWrapper(CombineSuggestion combineSuggestion, - int numCombinations) { + + CombineSuggestionWrapper(CombineSuggestion combineSuggestion, int numCombinations) { this.combineSuggestion = combineSuggestion; this.numCombinations = numCombinations; } diff --git a/lucene/suggest/src/java/org/apache/lucene/search/spell/package-info.java b/lucene/suggest/src/java/org/apache/lucene/search/spell/package-info.java index bae3d298bbf..64cf6a8b573 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/spell/package-info.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/spell/package-info.java @@ -15,9 +15,8 @@ * limitations under the License. */ - /** - * Suggest alternate spellings for words. - * Also see the spell checker Wiki page. + * Suggest alternate spellings for words. Also see the spell checker Wiki page. 
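A hedged usage sketch of the two WordBreakSpellChecker entry points reformatted above; the reader, the field name "body", and the sample terms are assumptions for illustration:

import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.spell.CombineSuggestion;
import org.apache.lucene.search.spell.SuggestMode;
import org.apache.lucene.search.spell.SuggestWord;
import org.apache.lucene.search.spell.WordBreakSpellChecker;

class WordBreakDemo {
  static void demo(IndexReader reader) throws IOException {
    WordBreakSpellChecker wbsp = new WordBreakSpellChecker();
    // Break one unknown term into indexed words, e.g. "spellcheck" -> ["spell", "check"];
    // lower scores mean fewer breaks and are generally preferred.
    SuggestWord[][] breaks =
        wbsp.suggestWordBreaks(
            new Term("body", "spellcheck"),
            5,
            reader,
            SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX,
            WordBreakSpellChecker.BreakSuggestionSortMethod.NUM_CHANGES_THEN_MAX_FREQUENCY);
    // Combine adjacent terms into one word, e.g. "spell" + "check" -> "spellcheck";
    // CombineSuggestion#originalTermIndexes records which inputs were merged.
    CombineSuggestion[] combined =
        wbsp.suggestWordCombinations(
            new Term[] {new Term("body", "spell"), new Term("body", "check")},
            5,
            reader,
            SuggestMode.SUGGEST_ALWAYS);
    System.out.println(breaks.length + " break suggestions, " + combined.length + " combinations");
  }
}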
*/ package org.apache.lucene.search.spell; diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/BitsProducer.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/BitsProducer.java index 7052a314019..17599a3d0f0 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/BitsProducer.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/BitsProducer.java @@ -17,7 +17,6 @@ package org.apache.lucene.search.suggest; import java.io.IOException; - import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.util.Bits; @@ -28,9 +27,9 @@ public abstract class BitsProducer { /** Sole constructor, typically invoked by sub-classes. */ protected BitsProducer() {} - /** Return {@link Bits} for the given leaf. The returned instance must - * be non-null and have a {@link Bits#length() length} equal to - * {@link LeafReader#maxDoc() maxDoc}. */ + /** + * Return {@link Bits} for the given leaf. The returned instance must be non-null and have a + * {@link Bits#length() length} equal to {@link LeafReader#maxDoc() maxDoc}. + */ public abstract Bits getBits(LeafReaderContext context) throws IOException; - } diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/BufferedInputIterator.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/BufferedInputIterator.java index 97bf845159b..7084eba13f9 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/BufferedInputIterator.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/BufferedInputIterator.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Set; - import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefArray; @@ -29,6 +28,7 @@ import org.apache.lucene.util.Counter; /** * This wrapper buffers incoming elements. 
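A small sketch of the BitsProducer contract documented above; returning match-all bits is an assumption chosen only to keep the example self-contained, a real implementation would typically derive the bits from a filter:

import java.io.IOException;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.suggest.BitsProducer;
import org.apache.lucene.util.Bits;

class AllDocsBitsProducer extends BitsProducer {
  @Override
  public Bits getBits(LeafReaderContext context) throws IOException {
    // The contract requires a non-null result with length() == maxDoc().
    return new Bits.MatchAllBits(context.reader().maxDoc());
  }
}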
+ * * @lucene.experimental */ public class BufferedInputIterator implements InputIterator { @@ -43,6 +43,7 @@ public class BufferedInputIterator implements InputIterator { protected int curPos = -1; /** buffered weights, parallel with {@link #entries} */ protected long[] freqs = new long[1]; + private final BytesRefBuilder spare = new BytesRefBuilder(); private final BytesRefBuilder payloadSpare = new BytesRefBuilder(); private final boolean hasPayloads; @@ -54,7 +55,7 @@ public class BufferedInputIterator implements InputIterator { int freqIndex = 0; hasPayloads = source.hasPayloads(); hasContexts = source.hasContexts(); - while((spare = source.next()) != null) { + while ((spare = source.next()) != null) { entries.append(spare); if (hasPayloads) { payloads.append(source.payload()); @@ -63,11 +64,10 @@ public class BufferedInputIterator implements InputIterator { contextSets.add(source.contexts()); } if (freqIndex >= freqs.length) { - freqs = ArrayUtil.grow(freqs, freqs.length+1); + freqs = ArrayUtil.grow(freqs, freqs.length + 1); } freqs[freqIndex++] = source.weight(); } - } @Override diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentDictionary.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentDictionary.java index ca58f9954ac..f2d5bb99848 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentDictionary.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentDictionary.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.Collections; import java.util.HashSet; import java.util.Set; - import org.apache.lucene.document.Document; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; @@ -31,29 +30,19 @@ import org.apache.lucene.search.spell.Dictionary; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; - - /** - *

- * <p>
- * Dictionary with terms, weights, payload (optional) and contexts (optional)
- * information taken from stored/indexed fields in a Lucene index.
- * </p>
- * <b>NOTE:</b>
- *  <ul>
- *    <li>
- *      The term field has to be stored; if it is missing, the document is skipped.
- *    </li>
- *    <li>
- *      The payload and contexts field are optional and are not required to be stored.
- *    </li>
- *    <li>
- *      The weight field can be stored or can be a {@link NumericDocValues}.
- *      If the weight field is not defined, the value of the weight is 0
- *    </li>
- *  </ul>
+ * Dictionary with terms, weights, payload (optional) and contexts (optional) information taken from
+ * stored/indexed fields in a Lucene index. <b>NOTE:</b>
+ *
+ * <ul>
+ *   <li>The term field has to be stored; if it is missing, the document is skipped.
+ *   <li>The payload and contexts field are optional and are not required to be stored.
+ *   <li>The weight field can be stored or can be a {@link NumericDocValues}. If the weight field is
+ *       not defined, the value of the weight is 0
+ * </ul>
    */ public class DocumentDictionary implements Dictionary { - + /** {@link IndexReader} to load documents from */ protected final IndexReader reader; @@ -61,35 +50,40 @@ public class DocumentDictionary implements Dictionary { protected final String payloadField; /** Field to read contexts from */ protected final String contextsField; + private final String field; private final String weightField; - + /** - * Creates a new dictionary with the contents of the fields named field - * for the terms and weightField for the weights that will be used for - * the corresponding terms. + * Creates a new dictionary with the contents of the fields named field for the terms + * and weightField for the weights that will be used for the corresponding terms. */ public DocumentDictionary(IndexReader reader, String field, String weightField) { this(reader, field, weightField, null); } - + /** - * Creates a new dictionary with the contents of the fields named field - * for the terms, weightField for the weights that will be used for the - * the corresponding terms and payloadField for the corresponding payloads - * for the entry. + * Creates a new dictionary with the contents of the fields named field for the + * terms, weightField for the weights that will be used for the the corresponding + * terms and payloadField for the corresponding payloads for the entry. */ - public DocumentDictionary(IndexReader reader, String field, String weightField, String payloadField) { + public DocumentDictionary( + IndexReader reader, String field, String weightField, String payloadField) { this(reader, field, weightField, payloadField, null); } /** - * Creates a new dictionary with the contents of the fields named field - * for the terms, weightField for the weights that will be used for the - * the corresponding terms, payloadField for the corresponding payloads - * for the entry and contextsField for associated contexts. + * Creates a new dictionary with the contents of the fields named field for the + * terms, weightField for the weights that will be used for the the corresponding + * terms, payloadField for the corresponding payloads for the entry and + * contextsField for associated contexts. */ - public DocumentDictionary(IndexReader reader, String field, String weightField, String payloadField, String contextsField) { + public DocumentDictionary( + IndexReader reader, + String field, + String weightField, + String payloadField, + String contextsField) { this.reader = reader; this.field = field; this.weightField = weightField; @@ -99,7 +93,7 @@ public class DocumentDictionary implements Dictionary { @Override public InputIterator getEntryIterator() throws IOException { - return new DocumentInputIterator(payloadField!=null, contextsField!=null); + return new DocumentInputIterator(payloadField != null, contextsField != null); } /** Implements {@link InputIterator} from stored fields. */ @@ -119,17 +113,18 @@ public class DocumentDictionary implements Dictionary { int nextFieldsPosition = 0; /** - * Creates an iterator over term, weight and payload fields from the lucene - * index. setting withPayload to false, implies an iterator - * over only term and weight. + * Creates an iterator over term, weight and payload fields from the lucene index. setting + * withPayload to false, implies an iterator over only term and weight. 
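For orientation, a minimal sketch that drains the dictionary iterator described above; the field names "title" and "popularity" are illustrative assumptions:

import java.io.IOException;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.suggest.DocumentDictionary;
import org.apache.lucene.search.suggest.InputIterator;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;

class DocumentDictionaryDemo {
  static void dump(Directory dir) throws IOException {
    try (IndexReader reader = DirectoryReader.open(dir)) {
      DocumentDictionary dict = new DocumentDictionary(reader, "title", "popularity");
      InputIterator it = dict.getEntryIterator();
      for (BytesRef term = it.next(); term != null; term = it.next()) {
        // weight() is only meaningful after next() has positioned the iterator.
        System.out.println(term.utf8ToString() + " weight=" + it.weight());
      }
    }
  }
}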
*/ public DocumentInputIterator(boolean hasPayloads, boolean hasContexts) throws IOException { this.hasPayloads = hasPayloads; this.hasContexts = hasContexts; docCount = reader.maxDoc() - 1; - weightValues = (weightField != null) ? MultiDocValues.getNumericValues(reader, weightField) : null; + weightValues = + (weightField != null) ? MultiDocValues.getNumericValues(reader, weightField) : null; liveDocs = (reader.leaves().size() > 0) ? MultiBits.getLiveDocs(reader) : null; - relevantFields = getRelevantFields(new String [] {field, weightField, payloadField, contextsField}); + relevantFields = + getRelevantFields(new String[] {field, weightField, payloadField, contextsField}); } @Override @@ -158,7 +153,7 @@ public class DocumentDictionary implements Dictionary { } currentDocId++; - if (liveDocs != null && !liveDocs.get(currentDocId)) { + if (liveDocs != null && !liveDocs.get(currentDocId)) { continue; } @@ -169,7 +164,7 @@ public class DocumentDictionary implements Dictionary { IndexableField payload = doc.getField(payloadField); if (payload != null) { if (payload.binaryValue() != null) { - tempPayload = payload.binaryValue(); + tempPayload = payload.binaryValue(); } else if (payload.stringValue() != null) { tempPayload = new BytesRef(payload.stringValue()); } @@ -232,18 +227,18 @@ public class DocumentDictionary implements Dictionary { public boolean hasPayloads() { return hasPayloads; } - - /** - * Returns the value of the weightField for the current document. - * Retrieves the value for the weightField if it's stored (using doc) - * or if it's indexed as {@link NumericDocValues} (using docId) for the document. - * If no value is found, then the weight is 0. + + /** + * Returns the value of the weightField for the current document. Retrieves the + * value for the weightField if it's stored (using doc) or if it's + * indexed as {@link NumericDocValues} (using docId) for the document. If no value + * is found, then the weight is 0. */ protected long getWeight(Document doc, int docId) throws IOException { IndexableField weight = doc.getField(weightField); if (weight != null) { // found weight as stored return (weight.numericValue() != null) ? weight.numericValue().longValue() : 0; - } else if (weightValues != null) { // found weight as NumericDocValue + } else if (weightValues != null) { // found weight as NumericDocValue if (weightValues.docID() < docId) { weightValues.advance(docId); } @@ -257,7 +252,7 @@ public class DocumentDictionary implements Dictionary { return 0; } } - + private Set getRelevantFields(String... fields) { Set relevantFields = new HashSet<>(); for (String relevantField : fields) { diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentValueSourceDictionary.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentValueSourceDictionary.java index 93569756127..adfd202aff1 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentValueSourceDictionary.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentValueSourceDictionary.java @@ -18,7 +18,6 @@ package org.apache.lucene.search.suggest; import java.io.IOException; import java.util.List; - import org.apache.lucene.document.Document; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; @@ -26,85 +25,74 @@ import org.apache.lucene.index.ReaderUtil; import org.apache.lucene.search.LongValues; import org.apache.lucene.search.LongValuesSource; - /** - *

- * <p>
- * Dictionary with terms and optionally payload and
- * optionally contexts information
- * taken from stored fields in a Lucene index. Similar to
- * {@link DocumentDictionary}, except it obtains the weight
- * of the terms in a document based on a {@link LongValuesSource}.
- * </p>
- * <b>NOTE:</b>
- *  <ul>
- *    <li>
- *      The term field has to be stored; if it is missing, the document is skipped.
- *    </li>
- *    <li>
- *      The payload and contexts field are optional and are not required to be stored.
- *    </li>
- *  </ul>
- * <p>
- * In practice the {@link LongValuesSource} will likely be obtained
- * using the lucene expression module. The following example shows
- * how to create a {@link LongValuesSource} from a simple addition of two
- * fields:
+ * Dictionary with terms and optionally payload and optionally contexts information taken from
+ * stored fields in a Lucene index. Similar to {@link DocumentDictionary}, except it obtains the
+ * weight of the terms in a document based on a {@link LongValuesSource}. <b>NOTE:</b>
+ *
+ * <ul>
+ *   <li>The term field has to be stored; if it is missing, the document is skipped.
+ *   <li>The payload and contexts field are optional and are not required to be stored.
+ * </ul>
+ *
+ * <p>
    In practice the {@link LongValuesSource} will likely be obtained using the lucene expression + * module. The following example shows how to create a {@link LongValuesSource} from a simple + * addition of two fields: * Expression expression = JavascriptCompiler.compile("f1 + f2"); * SimpleBindings bindings = new SimpleBindings(); * bindings.add(new SortField("f1", SortField.Type.LONG)); * bindings.add(new SortField("f2", SortField.Type.LONG)); * LongValuesSource valueSource = expression.getDoubleValuesSource(bindings).toLongValuesSource(); * - *

    - * */ public class DocumentValueSourceDictionary extends DocumentDictionary { - + private final LongValuesSource weightsValueSource; /** - * Creates a new dictionary with the contents of the fields named field - * for the terms, payload for the corresponding payloads, contexts - * for the associated contexts and uses the weightsValueSource supplied - * to determine the score. + * Creates a new dictionary with the contents of the fields named field for the + * terms, payload for the corresponding payloads, contexts for the + * associated contexts and uses the weightsValueSource supplied to determine the + * score. */ - public DocumentValueSourceDictionary(IndexReader reader, String field, - LongValuesSource weightsValueSource, String payload, String contexts) { + public DocumentValueSourceDictionary( + IndexReader reader, + String field, + LongValuesSource weightsValueSource, + String payload, + String contexts) { super(reader, field, null, payload, contexts); this.weightsValueSource = weightsValueSource; } /** - * Creates a new dictionary with the contents of the fields named field - * for the terms, payloadField for the corresponding payloads - * and uses the weightsValueSource supplied to determine the - * score. + * Creates a new dictionary with the contents of the fields named field for the + * terms, payloadField for the corresponding payloads and uses the + * weightsValueSource supplied to determine the score. */ - public DocumentValueSourceDictionary(IndexReader reader, String field, - LongValuesSource weightsValueSource, String payload) { + public DocumentValueSourceDictionary( + IndexReader reader, String field, LongValuesSource weightsValueSource, String payload) { super(reader, field, null, payload); this.weightsValueSource = weightsValueSource; } /** - * Creates a new dictionary with the contents of the fields named field - * for the terms and uses the weightsValueSource supplied to determine the - * score. + * Creates a new dictionary with the contents of the fields named field for the terms + * and uses the weightsValueSource supplied to determine the score. */ - public DocumentValueSourceDictionary(IndexReader reader, String field, - LongValuesSource weightsValueSource) { + public DocumentValueSourceDictionary( + IndexReader reader, String field, LongValuesSource weightsValueSource) { super(reader, field, null, null); this.weightsValueSource = weightsValueSource; } - + @Override public InputIterator getEntryIterator() throws IOException { - return new DocumentValueSourceInputIterator(payloadField!=null, contextsField!=null); + return new DocumentValueSourceInputIterator(payloadField != null, contextsField != null); } - + final class DocumentValueSourceInputIterator extends DocumentDictionary.DocumentInputIterator { - + private LongValues currentWeightValues; /** leaves of the reader */ private final List leaves; @@ -122,15 +110,16 @@ public class DocumentValueSourceDictionary extends DocumentDictionary { starts[i] = leaves.get(i).docBase; } starts[leaves.size()] = reader.maxDoc(); - currentWeightValues = (leaves.size() > 0) - ? weightsValueSource.getValues(leaves.get(currentLeafIndex), null) - : null; + currentWeightValues = + (leaves.size() > 0) + ? 
weightsValueSource.getValues(leaves.get(currentLeafIndex), null) + : null; } - - /** - * Returns the weight for the current docId as computed - * by the weightsValueSource - * */ + + /** + * Returns the weight for the current docId as computed by the + * weightsValueSource + */ @Override protected long getWeight(Document doc, int docId) throws IOException { if (currentWeightValues == null) { @@ -141,12 +130,11 @@ public class DocumentValueSourceDictionary extends DocumentDictionary { currentLeafIndex = subIndex; currentWeightValues = weightsValueSource.getValues(leaves.get(currentLeafIndex), null); } - if (currentWeightValues.advanceExact(docId - starts[subIndex])) + if (currentWeightValues.advanceExact(docId - starts[subIndex])) { return currentWeightValues.longValue(); - else + } else { return 0; - + } } - } } diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/FileDictionary.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/FileDictionary.java index b0660a9e763..9d72e25cf02 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/FileDictionary.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/FileDictionary.java @@ -19,36 +19,37 @@ package org.apache.lucene.search.suggest; import java.io.*; import java.nio.charset.StandardCharsets; import java.util.Set; - import org.apache.lucene.search.spell.Dictionary; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.IOUtils; - /** * Dictionary represented by a text file. - * + * *

- * Format allowed: 1 entry per line:<br/>
- * An entry can be:<br/>
- * <ul>
- * <li>suggestion</li>
- * <li>suggestion <code>fieldDelimiter</code> weight</li>
- * <li>suggestion <code>fieldDelimiter</code> weight <code>fieldDelimiter</code> payload</li>
- * </ul>
- * where the default <code>fieldDelimiter</code> is {@value #DEFAULT_FIELD_DELIMITER}<br/>
- * <p>
- * <b>NOTE:</b>
- * <ul>
- * <li>In order to have payload enabled, the first entry has to have a payload</li>
- * <li>If the weight for an entry is not specified then a value of 1 is used</li>
- * <li>A payload cannot be specified without having the weight specified for an entry</li>
- * <li>If the payload for an entry is not specified (assuming payload is enabled)
- * then an empty payload is returned</li>
- * <li>An entry cannot have more than two <code>fieldDelimiter</code></li>
- * </ul>
- * <p>
- * <b>Example:</b><br/>
- * word1 word2 TAB 100 TAB payload1<br/>
- * word3 TAB 101<br/>
- * word4 word3 TAB 102<br/>
+ * <p>Format allowed: 1 entry per line:<br>
+ * An entry can be:<br>
+ *
+ * <ul>
+ *   <li>suggestion
+ *   <li>suggestion <code>fieldDelimiter</code> weight
+ *   <li>suggestion <code>fieldDelimiter</code> weight <code>fieldDelimiter</code> payload
+ * </ul>
+ *
+ * where the default <code>fieldDelimiter</code> is {@value #DEFAULT_FIELD_DELIMITER}<br>
+ *
+ * <p><b>NOTE:</b>
+ *
+ * <ul>
+ *   <li>In order to have payload enabled, the first entry has to have a payload
+ *   <li>If the weight for an entry is not specified then a value of 1 is used
+ *   <li>A payload cannot be specified without having the weight specified for an entry
+ *   <li>If the payload for an entry is not specified (assuming payload is enabled) then an empty
+ *       payload is returned
+ *   <li>An entry cannot have more than two <code>fieldDelimiter</code>
+ * </ul>
+ *
+ * <p><b>Example:</b><br>
+ * word1 word2 TAB 100 TAB payload1<br>
+ * word3 TAB 101<br>
+ * word4 word3 TAB 102<br>
    * word1 word2 TAB 100 TAB payload1
    * word3 TAB 101
    * word4 word3 TAB 102
    @@ -56,50 +57,48 @@ import org.apache.lucene.util.IOUtils; public class FileDictionary implements Dictionary { /** - * Tab-delimited fields are most common thus the default, but one can override this via the constructor + * Tab-delimited fields are most common thus the default, but one can override this via the + * constructor */ - public final static String DEFAULT_FIELD_DELIMITER = "\t"; + public static final String DEFAULT_FIELD_DELIMITER = "\t"; + private BufferedReader in; private String line; private boolean done = false; private final String fieldDelimiter; /** - * Creates a dictionary based on an inputstream. - * Using {@link #DEFAULT_FIELD_DELIMITER} as the + * Creates a dictionary based on an inputstream. Using {@link #DEFAULT_FIELD_DELIMITER} as the * field separator in a line. - *

- * <p>
- * NOTE: content is treated as UTF-8
+ *
+ * <p>
    NOTE: content is treated as UTF-8 */ public FileDictionary(InputStream dictFile) { this(dictFile, DEFAULT_FIELD_DELIMITER); } /** - * Creates a dictionary based on a reader. - * Using {@link #DEFAULT_FIELD_DELIMITER} as the - * field separator in a line. + * Creates a dictionary based on a reader. Using {@link #DEFAULT_FIELD_DELIMITER} as the field + * separator in a line. */ public FileDictionary(Reader reader) { this(reader, DEFAULT_FIELD_DELIMITER); } - + /** - * Creates a dictionary based on a reader. - * Using fieldDelimiter to separate out the + * Creates a dictionary based on a reader. Using fieldDelimiter to separate out the * fields in a line. */ public FileDictionary(Reader reader, String fieldDelimiter) { in = new BufferedReader(reader); this.fieldDelimiter = fieldDelimiter; } - + /** - * Creates a dictionary based on an inputstream. - * Using fieldDelimiter to separate out the - * fields in a line. - *

- * <p>
- * NOTE: content is treated as UTF-8
+ * Creates a dictionary based on an inputstream. Using <code>fieldDelimiter</code> to separate out
+ * the fields in a line.
+ *
+ * <p>
    NOTE: content is treated as UTF-8 */ public FileDictionary(InputStream dictFile, String fieldDelimiter) { in = new BufferedReader(IOUtils.getDecodingReader(dictFile, StandardCharsets.UTF_8)); @@ -121,7 +120,7 @@ public class FileDictionary implements Dictionary { private BytesRefBuilder curPayload = new BytesRefBuilder(); private boolean isFirstLine = true; private boolean hasPayloads = false; - + private FileIterator() throws IOException { line = in.readLine(); if (line == null) { @@ -145,7 +144,7 @@ public class FileDictionary implements Dictionary { } } } - + @Override public long weight() { return curWeight; @@ -201,13 +200,13 @@ public class FileDictionary implements Dictionary { public boolean hasPayloads() { return hasPayloads; } - + private void readWeight(String weight) { // keep reading floats for bw compat try { curWeight = Long.parseLong(weight); } catch (NumberFormatException e) { - curWeight = (long)Double.parseDouble(weight); + curWeight = (long) Double.parseDouble(weight); } } diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/InMemorySorter.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/InMemorySorter.java index fec615c7255..1bfe136c93a 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/InMemorySorter.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/InMemorySorter.java @@ -17,7 +17,6 @@ package org.apache.lucene.search.suggest; import java.util.Comparator; - import org.apache.lucene.search.suggest.fst.BytesRefSorter; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefArray; @@ -26,6 +25,7 @@ import org.apache.lucene.util.Counter; /** * An {@link BytesRefSorter} that keeps all the entries in memory. + * * @lucene.experimental * @lucene.internal */ @@ -34,14 +34,11 @@ public final class InMemorySorter implements BytesRefSorter { private boolean closed = false; private final Comparator comparator; - /** - * Creates an InMemorySorter, sorting entries by the - * provided comparator. - */ + /** Creates an InMemorySorter, sorting entries by the provided comparator. */ public InMemorySorter(Comparator comparator) { this.comparator = comparator; } - + @Override public void add(BytesRef utf8) { if (closed) throw new IllegalStateException(); diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/InputIterator.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/InputIterator.java index d2224f9679b..894c5d56419 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/InputIterator.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/InputIterator.java @@ -18,7 +18,6 @@ package org.apache.lucene.search.suggest; import java.io.IOException; import java.util.Set; - import org.apache.lucene.search.suggest.Lookup.LookupResult; // javadocs import org.apache.lucene.search.suggest.analyzing.AnalyzingInfixSuggester; // javadocs import org.apache.lucene.search.suggest.analyzing.AnalyzingSuggester; // javadocs @@ -27,46 +26,46 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; /** - * Interface for enumerating term,weight,payload triples for suggester consumption; - * currently only {@link AnalyzingSuggester}, {@link - * FuzzySuggester} and {@link AnalyzingInfixSuggester} support payloads. + * Interface for enumerating term,weight,payload triples for suggester consumption; currently only + * {@link AnalyzingSuggester}, {@link FuzzySuggester} and {@link AnalyzingInfixSuggester} support + * payloads. 
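A hedged sketch of consuming the InputIterator contract spelled out above; the printing consumer is an assumption, not library code:

import java.io.IOException;
import org.apache.lucene.search.suggest.InputIterator;
import org.apache.lucene.util.BytesRef;

class InputIteratorDemo {
  static void drain(InputIterator it) throws IOException {
    BytesRef term;
    while ((term = it.next()) != null) {
      // weight() and payload() refer to the entry that next() just returned.
      long weight = it.weight();
      BytesRef payload = it.hasPayloads() ? it.payload() : null;
      System.out.println(term.utf8ToString() + " w=" + weight + " payload=" + payload);
    }
  }
}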
*/ public interface InputIterator extends BytesRefIterator { /** A term's weight, higher numbers mean better suggestions. */ public long weight(); - - /** An arbitrary byte[] to record per suggestion. See - * {@link LookupResult#payload} to retrieve the payload - * for each suggestion. */ + + /** + * An arbitrary byte[] to record per suggestion. See {@link LookupResult#payload} to retrieve the + * payload for each suggestion. + */ public BytesRef payload(); /** Returns true if the iterator has payloads */ public boolean hasPayloads(); - - /** - * A term's contexts context can be used to filter suggestions. - * May return null, if suggest entries do not have any context - * */ + + /** + * A term's contexts context can be used to filter suggestions. May return null, if suggest + * entries do not have any context + */ public Set contexts(); - + /** Returns true if the iterator has contexts */ public boolean hasContexts(); - + /** Singleton InputIterator that iterates over 0 BytesRefs. */ public static final InputIterator EMPTY = new InputIteratorWrapper(BytesRefIterator.EMPTY); - + /** - * Wraps a BytesRefIterator as a suggester InputIterator, with all weights - * set to 1 and carries no payload + * Wraps a BytesRefIterator as a suggester InputIterator, with all weights set to 1 + * and carries no payload */ public static class InputIteratorWrapper implements InputIterator { private final BytesRefIterator wrapped; - - /** - * Creates a new wrapper, wrapping the specified iterator and - * specifying a weight value of 1 for all terms - * and nullifies associated payloads. + + /** + * Creates a new wrapper, wrapping the specified iterator and specifying a weight value of + * 1 for all terms and nullifies associated payloads. */ public InputIteratorWrapper(BytesRefIterator wrapped) { this.wrapped = wrapped; diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/Lookup.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/Lookup.java index 138042467f3..2fc7108c2a7 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/Lookup.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/Lookup.java @@ -22,7 +22,6 @@ import java.io.OutputStream; import java.util.Comparator; import java.util.List; import java.util.Set; - import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.spell.Dictionary; import org.apache.lucene.store.DataInput; @@ -36,20 +35,21 @@ import org.apache.lucene.util.PriorityQueue; /** * Simple Lookup interface for {@link CharSequence} suggestions. + * * @lucene.experimental */ public abstract class Lookup implements Accountable { /** * Result of a lookup. + * * @lucene.experimental */ public static final class LookupResult implements Comparable { /** the key's text */ public final CharSequence key; - /** Expert: custom Object to hold the result of a - * highlighted suggestion. */ + /** Expert: custom Object to hold the result of a highlighted suggestion. */ public final Object highlightKey; /** the key's weight */ @@ -57,49 +57,42 @@ public abstract class Lookup implements Accountable { /** the key's payload (null if not present) */ public final BytesRef payload; - + /** the key's contexts (null if not present) */ public final Set contexts; - - /** - * Create a new result from a key+weight pair. - */ + + /** Create a new result from a key+weight pair. */ public LookupResult(CharSequence key, long value) { this(key, null, value, null, null); } - /** - * Create a new result from a key+weight+payload triple. 
- */ + /** Create a new result from a key+weight+payload triple. */ public LookupResult(CharSequence key, long value, BytesRef payload) { this(key, null, value, payload, null); } - - /** - * Create a new result from a key+highlightKey+weight+payload triple. - */ + + /** Create a new result from a key+highlightKey+weight+payload triple. */ public LookupResult(CharSequence key, Object highlightKey, long value, BytesRef payload) { this(key, highlightKey, value, payload, null); } - - /** - * Create a new result from a key+weight+payload+contexts triple. - */ + + /** Create a new result from a key+weight+payload+contexts triple. */ public LookupResult(CharSequence key, long value, BytesRef payload, Set contexts) { this(key, null, value, payload, contexts); } - /** - * Create a new result from a key+weight+contexts triple. - */ + /** Create a new result from a key+weight+contexts triple. */ public LookupResult(CharSequence key, long value, Set contexts) { this(key, null, value, null, contexts); } - - /** - * Create a new result from a key+highlightKey+weight+payload+contexts triple. - */ - public LookupResult(CharSequence key, Object highlightKey, long value, BytesRef payload, Set contexts) { + + /** Create a new result from a key+highlightKey+weight+payload+contexts triple. */ + public LookupResult( + CharSequence key, + Object highlightKey, + long value, + BytesRef payload, + Set contexts) { this.key = key; this.highlightKey = highlightKey; this.value = value; @@ -118,19 +111,18 @@ public abstract class Lookup implements Accountable { return CHARSEQUENCE_COMPARATOR.compare(key, o.key); } } - - /** - * A simple char-by-char comparator for {@link CharSequence} - */ - public static final Comparator CHARSEQUENCE_COMPARATOR = new CharSequenceComparator(); - + + /** A simple char-by-char comparator for {@link CharSequence} */ + public static final Comparator CHARSEQUENCE_COMPARATOR = + new CharSequenceComparator(); + private static class CharSequenceComparator implements Comparator { @Override public int compare(CharSequence o1, CharSequence o2) { final int l1 = o1.length(); final int l2 = o2.length(); - + final int aStop = Math.min(l1, l2); for (int i = 0; i < aStop; i++) { int diff = o1.charAt(i) - o2.charAt(i); @@ -141,17 +133,12 @@ public abstract class Lookup implements Accountable { // One is a prefix of the other, or, they are equal: return l1 - l2; } - } - - /** - * A {@link PriorityQueue} collecting a fixed size of high priority {@link LookupResult} - */ + + /** A {@link PriorityQueue} collecting a fixed size of high priority {@link LookupResult} */ public static final class LookupPriorityQueue extends PriorityQueue { - // TODO: should we move this out of the interface into a utility class? - /** - * Creates a new priority queue of the specified size. - */ + // TODO: should we move this out of the interface into a utility class? + /** Creates a new priority queue of the specified size. */ public LookupPriorityQueue(int size) { super(size); } @@ -160,9 +147,10 @@ public abstract class Lookup implements Accountable { protected boolean lessThan(LookupResult a, LookupResult b) { return a.value < b.value; } - + /** * Returns the top N results in descending order. + * * @return the top N results in descending order. */ public LookupResult[] getResults() { @@ -174,26 +162,20 @@ public abstract class Lookup implements Accountable { return res; } } - - /** - * Sole constructor. (For invocation by subclass - * constructors, typically implicit.) - */ + + /** Sole constructor. 
(For invocation by subclass constructors, typically implicit.) */ public Lookup() {} - - /** Build lookup from a dictionary. Some implementations may require sorted - * or unsorted keys from the dictionary's iterator - use - * {@link SortedInputIterator} or - * {@link UnsortedInputIterator} in such case. + + /** + * Build lookup from a dictionary. Some implementations may require sorted or unsorted keys from + * the dictionary's iterator - use {@link SortedInputIterator} or {@link UnsortedInputIterator} in + * such case. */ public void build(Dictionary dict) throws IOException { build(dict.getEntryIterator()); } - - /** - * Calls {@link #load(DataInput)} after converting - * {@link InputStream} to {@link DataInput} - */ + + /** Calls {@link #load(DataInput)} after converting {@link InputStream} to {@link DataInput} */ public boolean load(InputStream input) throws IOException { DataInput dataIn = new InputStreamDataInput(input); try { @@ -202,10 +184,9 @@ public abstract class Lookup implements Accountable { IOUtils.close(input); } } - + /** - * Calls {@link #store(DataOutput)} after converting - * {@link OutputStream} to {@link DataOutput} + * Calls {@link #store(DataOutput)} after converting {@link OutputStream} to {@link DataOutput} */ public boolean store(OutputStream output) throws IOException { DataOutput dataOut = new OutputStreamDataOutput(output); @@ -215,60 +196,75 @@ public abstract class Lookup implements Accountable { IOUtils.close(output); } } - + /** * Get the number of entries the lookup was built with + * * @return total number of suggester entries */ public abstract long getCount() throws IOException; - + /** - * Builds up a new internal {@link Lookup} representation based on the given {@link InputIterator}. - * The implementation might re-sort the data internally. + * Builds up a new internal {@link Lookup} representation based on the given {@link + * InputIterator}. The implementation might re-sort the data internally. */ public abstract void build(InputIterator inputIterator) throws IOException; - + /** * Look up a key and return possible completion for this key. - * @param key lookup key. Depending on the implementation this may be - * a prefix, misspelling, or even infix. + * + * @param key lookup key. Depending on the implementation this may be a prefix, misspelling, or + * even infix. * @param onlyMorePopular return only more popular results * @param num maximum number of results to return * @return a list of possible completions, with their relative weight (e.g. popularity) */ - public List lookup(CharSequence key, boolean onlyMorePopular, int num) throws IOException { + public List lookup(CharSequence key, boolean onlyMorePopular, int num) + throws IOException { return lookup(key, null, onlyMorePopular, num); } /** * Look up a key and return possible completion for this key. - * @param key lookup key. Depending on the implementation this may be - * a prefix, misspelling, or even infix. - * @param contexts contexts to filter the lookup by, or null if all contexts are allowed; if the suggestion contains any of the contexts, it's a match + * + * @param key lookup key. Depending on the implementation this may be a prefix, misspelling, or + * even infix. 
+ * @param contexts contexts to filter the lookup by, or null if all contexts are allowed; if the + * suggestion contains any of the contexts, it's a match * @param onlyMorePopular return only more popular results * @param num maximum number of results to return * @return a list of possible completions, with their relative weight (e.g. popularity) */ - public abstract List lookup(CharSequence key, Set contexts, boolean onlyMorePopular, int num) throws IOException; + public abstract List lookup( + CharSequence key, Set contexts, boolean onlyMorePopular, int num) + throws IOException; /** - * Look up a key and return possible completion for this key. - * This needs to be overridden by all implementing classes as the default implementation just returns null + * Look up a key and return possible completion for this key. This needs to be overridden by all + * implementing classes as the default implementation just returns null * * @param key the lookup key * @param contextFilerQuery A query for further filtering the result of the key lookup * @param num maximum number of results to return * @param allTermsRequired true is all terms are required * @param doHighlight set to true if key should be highlighted - * @return a list of suggestions/completions. The default implementation returns null, meaning each @Lookup implementation should override this and provide their own implementation + * @return a list of suggestions/completions. The default implementation returns null, meaning + * each @Lookup implementation should override this and provide their own implementation * @throws IOException when IO exception occurs */ - public List lookup(CharSequence key, BooleanQuery contextFilerQuery, int num, boolean allTermsRequired, boolean doHighlight) throws IOException{ + public List lookup( + CharSequence key, + BooleanQuery contextFilerQuery, + int num, + boolean allTermsRequired, + boolean doHighlight) + throws IOException { return null; } /** * Persist the constructed lookup data to a directory. Optional operation. + * * @param output {@link DataOutput} to write the data to. * @return true if successful, false if unsuccessful or not supported. * @throws IOException when fatal IO error occurs. @@ -276,8 +272,8 @@ public abstract class Lookup implements Accountable { public abstract boolean store(DataOutput output) throws IOException; /** - * Discard current lookup data and load it from a previously saved copy. - * Optional operation. + * Discard current lookup data and load it from a previously saved copy. Optional operation. + * * @param input the {@link DataInput} to load the lookup data. * @return true if completed successfully, false if unsuccessful or not supported. * @throws IOException when fatal IO error occurs. 
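As a usage reference for the Lookup API above, a rough end-to-end sketch: build from a FileDictionary (assuming its default tab-delimited term<TAB>weight format), query, then persist. The file paths and the choice of AnalyzingSuggester are illustrative assumptions:

    import java.io.OutputStream;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    import java.util.List;
    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.search.suggest.FileDictionary;
    import org.apache.lucene.search.suggest.Lookup;
    import org.apache.lucene.search.suggest.analyzing.AnalyzingSuggester;
    import org.apache.lucene.store.FSDirectory;

    public class LookupLifecycle {
      public static void main(String[] args) throws Exception {
        // Build from a dictionary file (term<TAB>weight entries, one per line):
        Lookup suggester =
            new AnalyzingSuggester(
                FSDirectory.open(Paths.get("/tmp/suggest-tmp")), "suggest", new StandardAnalyzer());
        suggester.build(new FileDictionary(Files.newInputStream(Paths.get("terms.txt"))));

        // The key may be a prefix or a misspelling, depending on the implementation:
        List<Lookup.LookupResult> hits = suggester.lookup("lucen", false, 5);
        for (Lookup.LookupResult hit : hits) {
          System.out.println(hit.key + " weight=" + hit.value);
        }

        // Persist the built structure; load(InputStream) restores it later:
        try (OutputStream out = Files.newOutputStream(Paths.get("suggest.bin"))) {
          suggester.store(out);
        }
      }
    }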
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/SortedInputIterator.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/SortedInputIterator.java index 977df371323..a5cab2e3cf7 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/SortedInputIterator.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/SortedInputIterator.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.Comparator; import java.util.HashSet; import java.util.Set; - import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.store.ByteArrayDataInput; import org.apache.lucene.store.ByteArrayDataOutput; @@ -30,16 +29,17 @@ import org.apache.lucene.store.IndexOutput; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IOUtils; +import org.apache.lucene.util.OfflineSorter; import org.apache.lucene.util.OfflineSorter.ByteSequencesReader; import org.apache.lucene.util.OfflineSorter.ByteSequencesWriter; -import org.apache.lucene.util.OfflineSorter; /** * This wrapper buffers incoming elements and makes sure they are sorted based on given comparator. + * * @lucene.experimental */ public class SortedInputIterator implements InputIterator { - + private final InputIterator source; private IndexOutput tempInput; private String tempSortedFileName; @@ -50,23 +50,27 @@ public class SortedInputIterator implements InputIterator { private final Directory tempDir; private final String tempFileNamePrefix; private boolean done = false; - + private long weight; private BytesRef payload = new BytesRef(); private Set contexts = null; - + /** - * Creates a new sorted wrapper, using {@linkplain Comparator#naturalOrder() natural order} - * for sorting. */ - public SortedInputIterator(Directory tempDir, String tempFileNamePrefix, InputIterator source) throws IOException { + * Creates a new sorted wrapper, using {@linkplain Comparator#naturalOrder() natural order} for + * sorting. + */ + public SortedInputIterator(Directory tempDir, String tempFileNamePrefix, InputIterator source) + throws IOException { this(tempDir, tempFileNamePrefix, source, Comparator.naturalOrder()); } - /** - * Creates a new sorted wrapper, sorting by BytesRef - * (ascending) then cost (ascending). - */ - public SortedInputIterator(Directory tempDir, String tempFileNamePrefix, InputIterator source, Comparator comparator) throws IOException { + /** Creates a new sorted wrapper, sorting by BytesRef (ascending) then cost (ascending). */ + public SortedInputIterator( + Directory tempDir, + String tempFileNamePrefix, + InputIterator source, + Comparator comparator) + throws IOException { this.hasPayloads = source.hasPayloads(); this.hasContexts = source.hasContexts(); this.source = source; @@ -75,7 +79,7 @@ public class SortedInputIterator implements InputIterator { this.tempFileNamePrefix = tempFileNamePrefix; this.reader = sort(); } - + @Override public BytesRef next() throws IOException { boolean success = false; @@ -106,7 +110,7 @@ public class SortedInputIterator implements InputIterator { } } } - + @Override public long weight() { return weight; @@ -124,7 +128,7 @@ public class SortedInputIterator implements InputIterator { public boolean hasPayloads() { return hasPayloads; } - + @Override public Set contexts() { return contexts; @@ -136,46 +140,48 @@ public class SortedInputIterator implements InputIterator { } /** Sortes by BytesRef (ascending) then cost (ascending). 
*/ - private final Comparator tieBreakByCostComparator = new Comparator() { + private final Comparator tieBreakByCostComparator = + new Comparator() { + + private final BytesRef leftScratch = new BytesRef(); + private final BytesRef rightScratch = new BytesRef(); + private final ByteArrayDataInput input = new ByteArrayDataInput(); + + @Override + public int compare(BytesRef left, BytesRef right) { + // Make shallow copy in case decode changes the BytesRef: + assert left != right; + leftScratch.bytes = left.bytes; + leftScratch.offset = left.offset; + leftScratch.length = left.length; + rightScratch.bytes = right.bytes; + rightScratch.offset = right.offset; + rightScratch.length = right.length; + long leftCost = decode(leftScratch, input); + long rightCost = decode(rightScratch, input); + if (hasPayloads) { + decodePayload(leftScratch, input); + decodePayload(rightScratch, input); + } + if (hasContexts) { + decodeContexts(leftScratch, input); + decodeContexts(rightScratch, input); + } + int cmp = comparator.compare(leftScratch, rightScratch); + if (cmp != 0) { + return cmp; + } + return Long.compare(leftCost, rightCost); + } + }; - private final BytesRef leftScratch = new BytesRef(); - private final BytesRef rightScratch = new BytesRef(); - private final ByteArrayDataInput input = new ByteArrayDataInput(); - - @Override - public int compare(BytesRef left, BytesRef right) { - // Make shallow copy in case decode changes the BytesRef: - assert left != right; - leftScratch.bytes = left.bytes; - leftScratch.offset = left.offset; - leftScratch.length = left.length; - rightScratch.bytes = right.bytes; - rightScratch.offset = right.offset; - rightScratch.length = right.length; - long leftCost = decode(leftScratch, input); - long rightCost = decode(rightScratch, input); - if (hasPayloads) { - decodePayload(leftScratch, input); - decodePayload(rightScratch, input); - } - if (hasContexts) { - decodeContexts(leftScratch, input); - decodeContexts(rightScratch, input); - } - int cmp = comparator.compare(leftScratch, rightScratch); - if (cmp != 0) { - return cmp; - } - return Long.compare(leftCost, rightCost); - } - }; - private ByteSequencesReader sort() throws IOException { OfflineSorter sorter = new OfflineSorter(tempDir, tempFileNamePrefix, tieBreakByCostComparator); tempInput = tempDir.createTempOutput(tempFileNamePrefix, "input", IOContext.DEFAULT); - - try (OfflineSorter.ByteSequencesWriter writer = new OfflineSorter.ByteSequencesWriter(tempInput)) { + + try (OfflineSorter.ByteSequencesWriter writer = + new OfflineSorter.ByteSequencesWriter(tempInput)) { BytesRef spare; byte[] buffer = new byte[0]; ByteArrayDataOutput output = new ByteArrayDataOutput(buffer); @@ -187,24 +193,32 @@ public class SortedInputIterator implements InputIterator { } tempSortedFileName = sorter.sort(tempInput.getName()); - return new OfflineSorter.ByteSequencesReader(tempDir.openChecksumInput(tempSortedFileName, IOContext.READONCE), tempSortedFileName); + return new OfflineSorter.ByteSequencesReader( + tempDir.openChecksumInput(tempSortedFileName, IOContext.READONCE), tempSortedFileName); } - + private void close() throws IOException { try { IOUtils.close(reader); } finally { - IOUtils.deleteFilesIgnoringExceptions(tempDir, - tempInput == null ? null : tempInput.getName(), - tempSortedFileName); + IOUtils.deleteFilesIgnoringExceptions( + tempDir, tempInput == null ? 
null : tempInput.getName(), tempSortedFileName); } } - + /** encodes an entry (bytes+(contexts)+(payload)+weight) to the provided writer */ - protected void encode(ByteSequencesWriter writer, ByteArrayDataOutput output, byte[] buffer, BytesRef spare, BytesRef payload, Set contexts, long weight) throws IOException { + protected void encode( + ByteSequencesWriter writer, + ByteArrayDataOutput output, + byte[] buffer, + BytesRef spare, + BytesRef payload, + Set contexts, + long weight) + throws IOException { int requiredLength = spare.length + 8 + ((hasPayloads) ? 2 + payload.length : 0); if (hasContexts) { - for(BytesRef ctx : contexts) { + for (BytesRef ctx : contexts) { requiredLength += 2 + ctx.length; } requiredLength += 2; // for length of contexts @@ -228,7 +242,7 @@ public class SortedInputIterator implements InputIterator { output.writeLong(weight); writer.write(buffer, 0, output.getPosition()); } - + /** decodes the weight at the current position */ protected long decode(BytesRef scratch, ByteArrayDataInput tmpInput) { tmpInput.reset(scratch.bytes, scratch.offset, scratch.length); @@ -236,11 +250,11 @@ public class SortedInputIterator implements InputIterator { scratch.length -= Long.BYTES; // long return tmpInput.readLong(); } - + /** decodes the contexts at the current position */ protected Set decodeContexts(BytesRef scratch, ByteArrayDataInput tmpInput) { tmpInput.reset(scratch.bytes, scratch.offset, scratch.length); - tmpInput.skipBytes(scratch.length - 2); //skip to context set size + tmpInput.skipBytes(scratch.length - 2); // skip to context set size short ctxSetSize = tmpInput.readShort(); scratch.length -= 2; final Set contextSet = new HashSet<>(); @@ -257,15 +271,16 @@ public class SortedInputIterator implements InputIterator { } return contextSet; } - + /** decodes the payload at the current position */ protected BytesRef decodePayload(BytesRef scratch, ByteArrayDataInput tmpInput) { tmpInput.reset(scratch.bytes, scratch.offset, scratch.length); tmpInput.skipBytes(scratch.length - 2); // skip to payload size short payloadLength = tmpInput.readShort(); // read payload size - assert payloadLength >= 0: payloadLength; - tmpInput.setPosition(scratch.offset + scratch.length - 2 - payloadLength); // setPosition to start of payload - BytesRef payloadScratch = new BytesRef(payloadLength); + assert payloadLength >= 0 : payloadLength; + tmpInput.setPosition( + scratch.offset + scratch.length - 2 - payloadLength); // setPosition to start of payload + BytesRef payloadScratch = new BytesRef(payloadLength); tmpInput.readBytes(payloadScratch.bytes, 0, payloadLength); // read payload payloadScratch.length = payloadLength; scratch.length -= 2; // payload length info (short) diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/UnsortedInputIterator.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/UnsortedInputIterator.java index f7d1efe6af7..3ce5eb65441 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/UnsortedInputIterator.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/UnsortedInputIterator.java @@ -19,13 +19,12 @@ package org.apache.lucene.search.suggest; import java.io.IOException; import java.util.Random; import java.util.Set; - import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; /** - * This wrapper buffers the incoming elements and makes sure they are in - * random order. + * This wrapper buffers the incoming elements and makes sure they are in random order. 
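The two wrappers here cover opposite needs: SortedInputIterator (above) feeds suggesters that require sorted keys, while UnsortedInputIterator (below) randomizes the order. A brief sketch of either path; the temp directory and file-name prefix are illustrative values:

    import java.io.IOException;
    import org.apache.lucene.search.spell.Dictionary;
    import org.apache.lucene.search.suggest.InputIterator;
    import org.apache.lucene.search.suggest.SortedInputIterator;
    import org.apache.lucene.search.suggest.UnsortedInputIterator;
    import org.apache.lucene.store.Directory;

    class IteratorWrappers {
      /** Sort entries by key (natural order), ties broken by weight, spilling to tempDir. */
      static InputIterator sortedEntries(Dictionary dict, Directory tempDir) throws IOException {
        return new SortedInputIterator(tempDir, "suggest_sort", dict.getEntryIterator());
      }

      /** ...or buffer the entries and hand them back in random order instead. */
      static InputIterator shuffledEntries(Dictionary dict) throws IOException {
        return new UnsortedInputIterator(dict.getEntryIterator());
      }
    }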
+ * * @lucene.experimental */ public class UnsortedInputIterator extends BufferedInputIterator { @@ -34,9 +33,9 @@ public class UnsortedInputIterator extends BufferedInputIterator { private int currentOrd = -1; private final BytesRefBuilder spare = new BytesRefBuilder(); private final BytesRefBuilder payloadSpare = new BytesRefBuilder(); - /** - * Creates a new iterator, wrapping the specified iterator and - * returning elements in a random order. + /** + * Creates a new iterator, wrapping the specified iterator and returning elements in a random + * order. */ public UnsortedInputIterator(InputIterator source) throws IOException { super(source); @@ -52,7 +51,7 @@ public class UnsortedInputIterator extends BufferedInputIterator { ords[randomPosition] = temp; } } - + @Override public long weight() { assert currentOrd == ords[curPos]; @@ -63,11 +62,11 @@ public class UnsortedInputIterator extends BufferedInputIterator { public BytesRef next() throws IOException { if (++curPos < entries.size()) { currentOrd = ords[curPos]; - return entries.get(spare, currentOrd); + return entries.get(spare, currentOrd); } return null; } - + @Override public BytesRef payload() { if (hasPayloads() && curPos < payloads.size()) { @@ -76,7 +75,7 @@ public class UnsortedInputIterator extends BufferedInputIterator { } return null; } - + @Override public Set contexts() { if (hasContexts() && curPos < contextSets.size()) { diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggester.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggester.java index 530d52eace7..6f84bf7fa8c 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggester.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggester.java @@ -27,7 +27,6 @@ import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.AnalyzerWrapper; import org.apache.lucene.analysis.TokenFilter; @@ -56,8 +55,8 @@ import org.apache.lucene.index.ReaderUtil; import org.apache.lucene.index.SegmentReader; import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.index.Term; -import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.IndexSearcher; @@ -88,48 +87,49 @@ import org.apache.lucene.util.RamUsageEstimator; // DocumentDictionary and NRT? so that your suggester // "automatically" keeps in sync w/ your index -/** Analyzes the input text and then suggests matches based - * on prefix matches to any tokens in the indexed text. - * This also highlights the tokens that match. +/** + * Analyzes the input text and then suggests matches based on prefix matches to any tokens in the + * indexed text. This also highlights the tokens that match. * - *
<p>
    This suggester supports payloads. Matches are sorted only - * by the suggest weight; it would be nice to support - * blended score + weight sort in the future. This means - * this suggester best applies when there is a strong - * a-priori ranking of all the suggestions. + *
<p>
    This suggester supports payloads. Matches are sorted only by the suggest weight; it would be + * nice to support blended score + weight sort in the future. This means this suggester best applies + * when there is a strong a-priori ranking of all the suggestions. * - *
<p>
    This suggester supports contexts, including arbitrary binary - * terms. + *
<p>
    This suggester supports contexts, including arbitrary binary terms. * - * @lucene.experimental */ - + * @lucene.experimental + */ public class AnalyzingInfixSuggester extends Lookup implements Closeable { - /** edgegrams for searching short prefixes without Prefix Query - * that's controlled by {@linkplain #minPrefixChars} */ - protected final static String TEXTGRAMS_FIELD_NAME = "textgrams"; + /** + * edgegrams for searching short prefixes without Prefix Query that's controlled by {@linkplain + * #minPrefixChars} + */ + protected static final String TEXTGRAMS_FIELD_NAME = "textgrams"; /** Field name used for the indexed text. */ - protected final static String TEXT_FIELD_NAME = "text"; + protected static final String TEXT_FIELD_NAME = "text"; - /** Field name used for the indexed text, as a - * StringField, for exact lookup. */ - protected final static String EXACT_TEXT_FIELD_NAME = "exacttext"; + /** Field name used for the indexed text, as a StringField, for exact lookup. */ + protected static final String EXACT_TEXT_FIELD_NAME = "exacttext"; - /** Field name used for the indexed context, as a - * StringField and a SortedSetDVField, for filtering. */ - protected final static String CONTEXTS_FIELD_NAME = "contexts"; + /** + * Field name used for the indexed context, as a StringField and a SortedSetDVField, for + * filtering. + */ + protected static final String CONTEXTS_FIELD_NAME = "contexts"; /** Analyzer used at search time */ protected final Analyzer queryAnalyzer; /** Analyzer used at index time */ protected final Analyzer indexAnalyzer; + private final Directory dir; final int minPrefixChars; - + private final boolean allTermsRequired; private final boolean highlight; - + private final boolean commitOnBuild; private final boolean closeIndexWriterOnBuild; @@ -138,104 +138,131 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable { /** {@link IndexSearcher} used for lookups. */ protected SearcherManager searcherMgr; - + /** Used to manage concurrent access to searcherMgr */ protected final Object searcherMgrLock = new Object(); - /** Default minimum number of leading characters before - * PrefixQuery is used (4). */ + /** Default minimum number of leading characters before PrefixQuery is used (4). */ public static final int DEFAULT_MIN_PREFIX_CHARS = 4; - + /** Default boolean clause option for multiple terms matching (all terms required). */ public static final boolean DEFAULT_ALL_TERMS_REQUIRED = true; - + /** Default higlighting option. */ public static final boolean DEFAULT_HIGHLIGHT = true; /** Default option to close the IndexWriter once the index has been built. */ - protected final static boolean DEFAULT_CLOSE_INDEXWRITER_ON_BUILD = true; + protected static final boolean DEFAULT_CLOSE_INDEXWRITER_ON_BUILD = true; /** How we sort the postings and search results. */ private static final Sort SORT = new Sort(new SortField("weight", SortField.Type.LONG, true)); - /** Create a new instance, loading from a previously built - * AnalyzingInfixSuggester directory, if it exists. This directory must be - * private to the infix suggester (i.e., not an external - * Lucene index). Note that {@link #close} - * will also close the provided directory. */ + /** + * Create a new instance, loading from a previously built AnalyzingInfixSuggester directory, if it + * exists. This directory must be private to the infix suggester (i.e., not an external Lucene + * index). Note that {@link #close} will also close the provided directory. 
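A rough sketch of this simplest constructor in use; the directory path, analyzer choice, and dictionary file are illustrative assumptions:

    import java.nio.file.Files;
    import java.nio.file.Paths;
    import java.util.List;
    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.search.suggest.FileDictionary;
    import org.apache.lucene.search.suggest.Lookup;
    import org.apache.lucene.search.suggest.analyzing.AnalyzingInfixSuggester;
    import org.apache.lucene.store.FSDirectory;

    public class InfixDemo {
      public static void main(String[] args) throws Exception {
        // The directory must be private to the suggester; close() closes it too.
        try (AnalyzingInfixSuggester suggester =
            new AnalyzingInfixSuggester(
                FSDirectory.open(Paths.get("/tmp/infix")), new StandardAnalyzer())) {
          suggester.build(new FileDictionary(Files.newInputStream(Paths.get("terms.txt"))));

          // Infix match: "net" can hit a token in the middle of a suggestion.
          List<Lookup.LookupResult> results = suggester.lookup("net", 3, true, true);
          results.forEach(r -> System.out.println(r.highlightKey));
        }
      }
    }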
+ */ public AnalyzingInfixSuggester(Directory dir, Analyzer analyzer) throws IOException { - this(dir, analyzer, analyzer, DEFAULT_MIN_PREFIX_CHARS, false, DEFAULT_ALL_TERMS_REQUIRED, DEFAULT_HIGHLIGHT); - } - - /** Create a new instance, loading from a previously built - * AnalyzingInfixSuggester directory, if it exists. This directory must be - * private to the infix suggester (i.e., not an external - * Lucene index). Note that {@link #close} - * will also close the provided directory. - * - * @param minPrefixChars Minimum number of leading characters - * before PrefixQuery is used (default 4). - * Prefixes shorter than this are indexed as character - * ngrams (increasing index size but making lookups - * faster). - * - * @param commitOnBuild Call commit after the index has finished building. This would persist the - * suggester index to disk and future instances of this suggester can use this pre-built dictionary. - */ - public AnalyzingInfixSuggester(Directory dir, Analyzer indexAnalyzer, Analyzer queryAnalyzer, int minPrefixChars, - boolean commitOnBuild) throws IOException { - this(dir, indexAnalyzer, queryAnalyzer, minPrefixChars, commitOnBuild, DEFAULT_ALL_TERMS_REQUIRED, DEFAULT_HIGHLIGHT); - } - - /** Create a new instance, loading from a previously built - * AnalyzingInfixSuggester directory, if it exists. This directory must be - * private to the infix suggester (i.e., not an external - * Lucene index). Note that {@link #close} - * will also close the provided directory. - * - * @param minPrefixChars Minimum number of leading characters - * before PrefixQuery is used (default 4). - * Prefixes shorter than this are indexed as character - * ngrams (increasing index size but making lookups - * faster). - * - * @param commitOnBuild Call commit after the index has finished building. This would persist the - * suggester index to disk and future instances of this suggester can use this pre-built dictionary. - * - * @param allTermsRequired All terms in the suggest query must be matched. - * @param highlight Highlight suggest query in suggestions. - * - */ - public AnalyzingInfixSuggester(Directory dir, Analyzer indexAnalyzer, Analyzer queryAnalyzer, int minPrefixChars, - boolean commitOnBuild, - boolean allTermsRequired, boolean highlight) throws IOException { - this(dir, indexAnalyzer, queryAnalyzer, minPrefixChars, commitOnBuild, allTermsRequired, highlight, - DEFAULT_CLOSE_INDEXWRITER_ON_BUILD); + this( + dir, + analyzer, + analyzer, + DEFAULT_MIN_PREFIX_CHARS, + false, + DEFAULT_ALL_TERMS_REQUIRED, + DEFAULT_HIGHLIGHT); } - /** Create a new instance, loading from a previously built - * AnalyzingInfixSuggester directory, if it exists. This directory must be - * private to the infix suggester (i.e., not an external - * Lucene index). Note that {@link #close} - * will also close the provided directory. - * - * @param minPrefixChars Minimum number of leading characters - * before PrefixQuery is used (default 4). - * Prefixes shorter than this are indexed as character - * ngrams (increasing index size but making lookups - * faster). - * - * @param commitOnBuild Call commit after the index has finished building. This would persist the - * suggester index to disk and future instances of this suggester can use this pre-built dictionary. - * - * @param allTermsRequired All terms in the suggest query must be matched. - * @param highlight Highlight suggest query in suggestions. - * @param closeIndexWriterOnBuild If true, the IndexWriter will be closed after the index has finished building. 
- */ - public AnalyzingInfixSuggester(Directory dir, Analyzer indexAnalyzer, Analyzer queryAnalyzer, int minPrefixChars, - boolean commitOnBuild, boolean allTermsRequired, - boolean highlight, boolean closeIndexWriterOnBuild) throws IOException { - + /** + * Create a new instance, loading from a previously built AnalyzingInfixSuggester directory, if it + * exists. This directory must be private to the infix suggester (i.e., not an external Lucene + * index). Note that {@link #close} will also close the provided directory. + * + * @param minPrefixChars Minimum number of leading characters before PrefixQuery is used (default + * 4). Prefixes shorter than this are indexed as character ngrams (increasing index size but + * making lookups faster). + * @param commitOnBuild Call commit after the index has finished building. This would persist the + * suggester index to disk and future instances of this suggester can use this pre-built + * dictionary. + */ + public AnalyzingInfixSuggester( + Directory dir, + Analyzer indexAnalyzer, + Analyzer queryAnalyzer, + int minPrefixChars, + boolean commitOnBuild) + throws IOException { + this( + dir, + indexAnalyzer, + queryAnalyzer, + minPrefixChars, + commitOnBuild, + DEFAULT_ALL_TERMS_REQUIRED, + DEFAULT_HIGHLIGHT); + } + + /** + * Create a new instance, loading from a previously built AnalyzingInfixSuggester directory, if it + * exists. This directory must be private to the infix suggester (i.e., not an external Lucene + * index). Note that {@link #close} will also close the provided directory. + * + * @param minPrefixChars Minimum number of leading characters before PrefixQuery is used (default + * 4). Prefixes shorter than this are indexed as character ngrams (increasing index size but + * making lookups faster). + * @param commitOnBuild Call commit after the index has finished building. This would persist the + * suggester index to disk and future instances of this suggester can use this pre-built + * dictionary. + * @param allTermsRequired All terms in the suggest query must be matched. + * @param highlight Highlight suggest query in suggestions. + */ + public AnalyzingInfixSuggester( + Directory dir, + Analyzer indexAnalyzer, + Analyzer queryAnalyzer, + int minPrefixChars, + boolean commitOnBuild, + boolean allTermsRequired, + boolean highlight) + throws IOException { + this( + dir, + indexAnalyzer, + queryAnalyzer, + minPrefixChars, + commitOnBuild, + allTermsRequired, + highlight, + DEFAULT_CLOSE_INDEXWRITER_ON_BUILD); + } + + /** + * Create a new instance, loading from a previously built AnalyzingInfixSuggester directory, if it + * exists. This directory must be private to the infix suggester (i.e., not an external Lucene + * index). Note that {@link #close} will also close the provided directory. + * + * @param minPrefixChars Minimum number of leading characters before PrefixQuery is used (default + * 4). Prefixes shorter than this are indexed as character ngrams (increasing index size but + * making lookups faster). + * @param commitOnBuild Call commit after the index has finished building. This would persist the + * suggester index to disk and future instances of this suggester can use this pre-built + * dictionary. + * @param allTermsRequired All terms in the suggest query must be matched. + * @param highlight Highlight suggest query in suggestions. + * @param closeIndexWriterOnBuild If true, the IndexWriter will be closed after the index has + * finished building. 
+ */ + public AnalyzingInfixSuggester( + Directory dir, + Analyzer indexAnalyzer, + Analyzer queryAnalyzer, + int minPrefixChars, + boolean commitOnBuild, + boolean allTermsRequired, + boolean highlight, + boolean closeIndexWriterOnBuild) + throws IOException { + if (minPrefixChars < 0) { throw new IllegalArgumentException("minPrefixChars must be >= 0; got: " + minPrefixChars); } @@ -255,9 +282,9 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable { } } - /** Override this to customize index settings, e.g. which - * codec to use. */ - protected IndexWriterConfig getIndexWriterConfig(Analyzer indexAnalyzer, IndexWriterConfig.OpenMode openMode) { + /** Override this to customize index settings, e.g. which codec to use. */ + protected IndexWriterConfig getIndexWriterConfig( + Analyzer indexAnalyzer, IndexWriterConfig.OpenMode openMode) { IndexWriterConfig iwc = new IndexWriterConfig(indexAnalyzer); iwc.setOpenMode(openMode); @@ -269,15 +296,14 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable { return iwc; } - /** Subclass can override to choose a specific {@link - * Directory} implementation. */ + /** Subclass can override to choose a specific {@link Directory} implementation. */ protected Directory getDirectory(Path path) throws IOException { return FSDirectory.open(path); } @Override public void build(InputIterator iter) throws IOException { - + synchronized (searcherMgrLock) { if (searcherMgr != null) { searcherMgr.close(); @@ -293,9 +319,10 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable { try { // First pass: build a temporary normal Lucene index, // just indexing the suggestions as they iterate: - writer = new IndexWriter(dir, - getIndexWriterConfig(getGramAnalyzer(), IndexWriterConfig.OpenMode.CREATE)); - //long t0 = System.nanoTime(); + writer = + new IndexWriter( + dir, getIndexWriterConfig(getGramAnalyzer(), IndexWriterConfig.OpenMode.CREATE)); + // long t0 = System.nanoTime(); // TODO: use threads? BytesRef text; @@ -310,7 +337,8 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable { add(text, iter.contexts(), iter.weight(), payload); } - //System.out.println("initial indexing time: " + ((System.nanoTime()-t0)/1000000) + " msec"); + // System.out.println("initial indexing time: " + ((System.nanoTime()-t0)/1000000) + " + // msec"); if (commitOnBuild || closeIndexWriterOnBuild) { commit(); } @@ -322,7 +350,7 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable { writer.close(); writer = null; } - } else { // failure + } else { // failure if (writer != null) { writer.rollback(); writer = null; @@ -332,15 +360,18 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable { } } - /** Commits all pending changes made to this suggester to disk. + /** + * Commits all pending changes made to this suggester to disk. * - * @see IndexWriter#commit */ + * @see IndexWriter#commit + */ public void commit() throws IOException { if (writer == null) { if (searcherMgr == null || closeIndexWriterOnBuild == false) { throw new IllegalStateException("Cannot commit on an closed writer. 
Add documents first"); } - // else no-op: writer was committed and closed after the index was built, so commit is unnecessary + // else no-op: writer was committed and closed after the index was built, so commit is + // unnecessary } else { writer.commit(); } @@ -354,12 +385,14 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable { } @Override - protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) { - assert !(fieldName.equals(TEXTGRAMS_FIELD_NAME) && minPrefixChars == 0) - : "no need \"textgrams\" when minPrefixChars="+minPrefixChars; + protected TokenStreamComponents wrapComponents( + String fieldName, TokenStreamComponents components) { + assert !(fieldName.equals(TEXTGRAMS_FIELD_NAME) && minPrefixChars == 0) + : "no need \"textgrams\" when minPrefixChars=" + minPrefixChars; if (fieldName.equals(TEXTGRAMS_FIELD_NAME) && minPrefixChars > 0) { // TODO: should use an EdgeNGramTokenFilterFactory here - TokenFilter filter = new EdgeNGramTokenFilter(components.getTokenStream(), 1, minPrefixChars, false); + TokenFilter filter = + new EdgeNGramTokenFilter(components.getTokenStream(), 1, minPrefixChars, false); return new TokenStreamComponents(components.getSource(), filter); } else { return components; @@ -373,9 +406,13 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable { if (writer == null) { if (DirectoryReader.indexExists(dir)) { // Already built; open it: - writer = new IndexWriter(dir, getIndexWriterConfig(getGramAnalyzer(), IndexWriterConfig.OpenMode.APPEND)); + writer = + new IndexWriter( + dir, getIndexWriterConfig(getGramAnalyzer(), IndexWriterConfig.OpenMode.APPEND)); } else { - writer = new IndexWriter(dir, getIndexWriterConfig(getGramAnalyzer(), IndexWriterConfig.OpenMode.CREATE)); + writer = + new IndexWriter( + dir, getIndexWriterConfig(getGramAnalyzer(), IndexWriterConfig.OpenMode.CREATE)); } SearcherManager oldSearcherMgr = searcherMgr; @@ -387,35 +424,38 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable { } } - /** Adds a new suggestion. Be sure to use {@link #update} - * instead if you want to replace a previous suggestion. - * After adding or updating a batch of new suggestions, - * you must call {@link #refresh} in the end in order to - * see the suggestions in {@link #lookup} */ - public void add(BytesRef text, Set contexts, long weight, BytesRef payload) throws IOException { + /** + * Adds a new suggestion. Be sure to use {@link #update} instead if you want to replace a previous + * suggestion. After adding or updating a batch of new suggestions, you must call {@link #refresh} + * in the end in order to see the suggestions in {@link #lookup} + */ + public void add(BytesRef text, Set contexts, long weight, BytesRef payload) + throws IOException { ensureOpen(); writer.addDocument(buildDocument(text, contexts, weight, payload)); } - /** Updates a previous suggestion, matching the exact same - * text as before. Use this to change the weight or - * payload of an already added suggestion. If you know - * this text is not already present you can use {@link - * #add} instead. After adding or updating a batch of - * new suggestions, you must call {@link #refresh} in the - * end in order to see the suggestions in {@link #lookup} */ - public void update(BytesRef text, Set contexts, long weight, BytesRef payload) throws IOException { + /** + * Updates a previous suggestion, matching the exact same text as before. 
Use this to change the + * weight or payload of an already added suggestion. If you know this text is not already present + * you can use {@link #add} instead. After adding or updating a batch of new suggestions, you must + * call {@link #refresh} in the end in order to see the suggestions in {@link #lookup} + */ + public void update(BytesRef text, Set contexts, long weight, BytesRef payload) + throws IOException { ensureOpen(); - writer.updateDocument(new Term(EXACT_TEXT_FIELD_NAME, text.utf8ToString()), - buildDocument(text, contexts, weight, payload)); + writer.updateDocument( + new Term(EXACT_TEXT_FIELD_NAME, text.utf8ToString()), + buildDocument(text, contexts, weight, payload)); } - private Document buildDocument(BytesRef text, Set contexts, long weight, BytesRef payload) throws IOException { + private Document buildDocument( + BytesRef text, Set contexts, long weight, BytesRef payload) throws IOException { String textString = text.utf8ToString(); Document doc = new Document(); FieldType ft = getTextFieldType(); doc.add(new Field(TEXT_FIELD_NAME, textString, ft)); - if (minPrefixChars>0) { + if (minPrefixChars > 0) { doc.add(new Field(TEXTGRAMS_FIELD_NAME, textString, ft)); } doc.add(new StringField(EXACT_TEXT_FIELD_NAME, textString, Field.Store.NO)); @@ -425,7 +465,7 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable { doc.add(new BinaryDocValuesField("payloads", payload)); } if (contexts != null) { - for(BytesRef context : contexts) { + for (BytesRef context : contexts) { doc.add(new StringField(CONTEXTS_FIELD_NAME, context, Field.Store.NO)); doc.add(new SortedSetDocValuesField(CONTEXTS_FIELD_NAME, context)); } @@ -433,9 +473,10 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable { return doc; } - /** Reopens the underlying searcher; it's best to "batch - * up" many additions/updates, and then call refresh - * once in the end. */ + /** + * Reopens the underlying searcher; it's best to "batch up" many additions/updates, and then call + * refresh once in the end. + */ public void refresh() throws IOException { if (searcherMgr == null) { throw new IllegalStateException("suggester was not built"); @@ -448,10 +489,10 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable { } /** - * Subclass can override this method to change the field type of the text field - * e.g. to change the index options + * Subclass can override this method to change the field type of the text field e.g. to change the + * index options */ - protected FieldType getTextFieldType(){ + protected FieldType getTextFieldType() { FieldType ft = new FieldType(TextField.TYPE_NOT_STORED); ft.setIndexOptions(IndexOptions.DOCS); ft.setOmitNorms(true); @@ -460,24 +501,36 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable { } @Override - public List lookup(CharSequence key, Set contexts, boolean onlyMorePopular, int num) throws IOException { + public List lookup( + CharSequence key, Set contexts, boolean onlyMorePopular, int num) + throws IOException { return lookup(key, contexts, num, allTermsRequired, highlight); } /** Lookup, without any context. 
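A sketch of the add/update/refresh cycle just described, with illustrative keys, weights, and payloads; the point is that refresh() publishes the whole batch at once:

    import java.io.IOException;
    import org.apache.lucene.search.suggest.analyzing.AnalyzingInfixSuggester;
    import org.apache.lucene.util.BytesRef;

    class BatchUpdate {
      /** Add or reweight suggestions, then publish the whole batch with one refresh(). */
      static void reweight(AnalyzingInfixSuggester suggester) throws IOException {
        suggester.add(new BytesRef("lucene in action"), null, 10L, new BytesRef("doc-1"));
        suggester.update(new BytesRef("lucene in action"), null, 42L, new BytesRef("doc-1"));
        suggester.refresh(); // without this, lookup() does not see the changes
      }
    }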
*/ - public List lookup(CharSequence key, int num, boolean allTermsRequired, boolean doHighlight) throws IOException { - return lookup(key, (BooleanQuery)null, num, allTermsRequired, doHighlight); + public List lookup( + CharSequence key, int num, boolean allTermsRequired, boolean doHighlight) throws IOException { + return lookup(key, (BooleanQuery) null, num, allTermsRequired, doHighlight); } - /** Lookup, with context but without booleans. Context booleans default to SHOULD, - * so each suggestion must have at least one of the contexts. */ - public List lookup(CharSequence key, Set contexts, int num, boolean allTermsRequired, boolean doHighlight) throws IOException { + /** + * Lookup, with context but without booleans. Context booleans default to SHOULD, so each + * suggestion must have at least one of the contexts. + */ + public List lookup( + CharSequence key, + Set contexts, + int num, + boolean allTermsRequired, + boolean doHighlight) + throws IOException { return lookup(key, toQuery(contexts), num, allTermsRequired, doHighlight); } - /** This is called if the last token isn't ended - * (e.g. user did not type a space after it). Return an - * appropriate Query clause to add to the BooleanQuery. */ + /** + * This is called if the last token isn't ended (e.g. user did not type a space after it). Return + * an appropriate Query clause to add to the BooleanQuery. + */ protected Query getLastTokenQuery(String token) throws IOException { if (token.length() < minPrefixChars) { // The leading ngram was directly indexed: @@ -487,23 +540,30 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable { return new PrefixQuery(new Term(TEXT_FIELD_NAME, token)); } - /** Retrieve suggestions, specifying whether all terms - * must match ({@code allTermsRequired}) and whether the hits - * should be highlighted ({@code doHighlight}). */ - public List lookup(CharSequence key, Map contextInfo, int num, boolean allTermsRequired, boolean doHighlight) throws IOException { - return lookup(key, toQuery(contextInfo), num, allTermsRequired, doHighlight); + /** + * Retrieve suggestions, specifying whether all terms must match ({@code allTermsRequired}) and + * whether the hits should be highlighted ({@code doHighlight}). 
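For the context-filtering overload, a sketch under the SHOULD semantics described above — a suggestion matches if it carries at least one of the supplied contexts; the "books" context value is an illustrative assumption:

    import java.io.IOException;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;
    import org.apache.lucene.search.suggest.Lookup;
    import org.apache.lucene.search.suggest.analyzing.AnalyzingInfixSuggester;
    import org.apache.lucene.util.BytesRef;

    class ContextLookup {
      /** Only suggestions carrying at least one of the given contexts can match. */
      static List<Lookup.LookupResult> booksOnly(AnalyzingInfixSuggester suggester, String key)
          throws IOException {
        Set<BytesRef> contexts = new HashSet<>();
        contexts.add(new BytesRef("books")); // illustrative context value
        return suggester.lookup(key, contexts, 10, true, true); // allTermsRequired, doHighlight
      }
    }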
+ */ + public List lookup( + CharSequence key, + Map contextInfo, + int num, + boolean allTermsRequired, + boolean doHighlight) + throws IOException { + return lookup(key, toQuery(contextInfo), num, allTermsRequired, doHighlight); } - private BooleanQuery toQuery(Map contextInfo) { + private BooleanQuery toQuery(Map contextInfo) { if (contextInfo == null || contextInfo.isEmpty()) { return null; } - + BooleanQuery.Builder contextFilter = new BooleanQuery.Builder(); - for (Map.Entry entry : contextInfo.entrySet()) { + for (Map.Entry entry : contextInfo.entrySet()) { addContextToQuery(contextFilter, entry.getKey(), entry.getValue()); } - + return contextFilter.build(); } @@ -511,7 +571,7 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable { if (contextInfo == null || contextInfo.isEmpty()) { return null; } - + BooleanQuery.Builder contextFilter = new BooleanQuery.Builder(); for (BytesRef context : contextInfo) { addContextToQuery(contextFilter, context, BooleanClause.Occur.SHOULD); @@ -519,38 +579,45 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable { return contextFilter.build(); } - /** - * This method is handy as we do not need access to internal fields such as CONTEXTS_FIELD_NAME in order to build queries - * However, here may not be its best location. - * + * This method is handy as we do not need access to internal fields such as CONTEXTS_FIELD_NAME in + * order to build queries However, here may not be its best location. + * * @param query an instance of @See {@link BooleanQuery} * @param context the context * @param clause one of {@link Occur} */ - public void addContextToQuery(BooleanQuery.Builder query, BytesRef context, BooleanClause.Occur clause) { + public void addContextToQuery( + BooleanQuery.Builder query, BytesRef context, BooleanClause.Occur clause) { // NOTE: we "should" wrap this in // ConstantScoreQuery, or maybe send this as a // Filter instead to search. - + // TODO: if we had a BinaryTermField we could fix // this "must be valid ut8f" limitation: query.add(new TermQuery(new Term(CONTEXTS_FIELD_NAME, context)), clause); } /** - * This is an advanced method providing the capability to send down to the suggester any - * arbitrary lucene query to be used to filter the result of the suggester - * + * This is an advanced method providing the capability to send down to the suggester any arbitrary + * lucene query to be used to filter the result of the suggester + * * @param key the keyword being looked for - * @param contextQuery an arbitrary Lucene query to be used to filter the result of the suggester. {@link #addContextToQuery} could be used to build this contextQuery. + * @param contextQuery an arbitrary Lucene query to be used to filter the result of the suggester. + * {@link #addContextToQuery} could be used to build this contextQuery. 
* @param num number of items to return * @param allTermsRequired all searched terms must match or not * @param doHighlight if true, the matching term will be highlighted in the search result * @return the result of the suggester * @throws IOException f the is IO exception while reading data from the index */ - public List lookup(CharSequence key, BooleanQuery contextQuery, int num, boolean allTermsRequired, boolean doHighlight) throws IOException { + public List lookup( + CharSequence key, + BooleanQuery contextQuery, + int num, + boolean allTermsRequired, + boolean doHighlight) + throws IOException { if (searcherMgr == null) { throw new IllegalStateException("suggester was not built"); @@ -568,7 +635,7 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable { String prefixToken = null; try (TokenStream ts = queryAnalyzer.tokenStream("", new StringReader(key.toString()))) { - //long t0 = System.currentTimeMillis(); + // long t0 = System.currentTimeMillis(); ts.reset(); final CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class); final OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class); @@ -577,7 +644,7 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable { int maxEndOffset = -1; matchedTokens = new HashSet<>(); while (ts.incrementToken()) { - if (lastToken != null) { + if (lastToken != null) { matchedTokens.add(lastToken); query.add(new TermQuery(new Term(TEXT_FIELD_NAME, lastToken)), occur); } @@ -606,7 +673,7 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable { matchedTokens.add(lastToken); lastQuery = new TermQuery(new Term(TEXT_FIELD_NAME, lastToken)); } - + if (lastQuery != null) { query.add(lastQuery, occur); } @@ -620,7 +687,7 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable { break; } } - + if (allMustNot) { // All are MUST_NOT: add the contextQuery to the main query instead (not as sub-query) for (BooleanClause clause : contextQuery.clauses()) { @@ -638,14 +705,14 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable { } } } - + // TODO: we could allow blended sort here, combining // weight w/ score. Now we ignore score and sort only // by weight: Query finalQuery = finishQuery(query, allTermsRequired); - //System.out.println("finalQuery=" + finalQuery); + // System.out.println("finalQuery=" + finalQuery); // Sort by weight, descending: TopFieldCollector c = TopFieldCollector.create(SORT, num, 1); @@ -657,7 +724,7 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable { searcher = mgr.acquire(); } try { - //System.out.println("got searcher=" + searcher); + // System.out.println("got searcher=" + searcher); searcher.search(finalQuery, c); TopFieldDocs hits = c.topDocs(); @@ -669,31 +736,36 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable { mgr.release(searcher); } - //System.out.println((System.currentTimeMillis() - t0) + " msec for infix suggest"); - //System.out.println(results); + // System.out.println((System.currentTimeMillis() - t0) + " msec for infix suggest"); + // System.out.println(results); return results; } - + /** - * Create the results based on the search hits. - * Can be overridden by subclass to add particular behavior (e.g. weight transformation). - * Note that there is no prefix token (the {@code prefixToken} argument will - * be null) whenever the final token in the incoming request was in fact finished - * (had trailing characters, such as white-space). 
+ * Create the results based on the search hits. Can be overridden by subclass to add particular + * behavior (e.g. weight transformation). Note that there is no prefix token (the {@code + * prefixToken} argument will be null) whenever the final token in the incoming request was in + * fact finished (had trailing characters, such as white-space). * * @throws IOException If there are problems reading fields from the underlying Lucene index. */ - protected List createResults(IndexSearcher searcher, TopFieldDocs hits, int num, - CharSequence charSequence, - boolean doHighlight, Set matchedTokens, String prefixToken) + protected List createResults( + IndexSearcher searcher, + TopFieldDocs hits, + int num, + CharSequence charSequence, + boolean doHighlight, + Set matchedTokens, + String prefixToken) throws IOException { List leaves = searcher.getIndexReader().leaves(); List results = new ArrayList<>(); - for (int i=0;i contexts; if (contextsDV != null) { contexts = new HashSet(); @@ -735,7 +809,9 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable { LookupResult result; if (doHighlight) { - result = new LookupResult(text, highlight(text, matchedTokens, prefixToken), score, payload, contexts); + result = + new LookupResult( + text, highlight(text, matchedTokens, prefixToken), score, payload, contexts); } else { result = new LookupResult(text, score, payload, contexts); } @@ -746,17 +822,18 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable { return results; } - /** Subclass can override this to tweak the Query before - * searching. */ + /** Subclass can override this to tweak the Query before searching. */ protected Query finishQuery(BooleanQuery.Builder in, boolean allTermsRequired) { return in.build(); } - /** Override this method to customize the Object - * representing a single highlighted suggestions; the - * result is set on each {@link - * org.apache.lucene.search.suggest.Lookup.LookupResult#highlightKey} member. */ - protected Object highlight(String text, Set matchedTokens, String prefixToken) throws IOException { + /** + * Override this method to customize the Object representing a single highlighted suggestions; the + * result is set on each {@link org.apache.lucene.search.suggest.Lookup.LookupResult#highlightKey} + * member. + */ + protected Object highlight(String text, Set matchedTokens, String prefixToken) + throws IOException { try (TokenStream ts = queryAnalyzer.tokenStream("text", new StringReader(text))) { CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class); OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class); @@ -773,7 +850,7 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable { } else if (upto > startOffset) { continue; } - + if (matchedTokens.contains(token)) { // Token matches. addWholeMatch(sb, text.substring(startOffset, endOffset), token); @@ -792,21 +869,24 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable { } } - /** Called while highlighting a single result, to append a - * non-matching chunk of text from the suggestion to the - * provided fragments list. - * @param sb The {@code StringBuilder} to append to - * @param text The text chunk to add + /** + * Called while highlighting a single result, to append a non-matching chunk of text from the + * suggestion to the provided fragments list. 
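These highlighting hooks (addNonMatch, addWholeMatch, addPrefixMatch, below) are protected precisely so that subclasses can change the markup; a sketch that brackets matches instead of emitting the default bold tags:

    import java.io.IOException;
    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.search.suggest.analyzing.AnalyzingInfixSuggester;
    import org.apache.lucene.store.Directory;

    class BracketHighlighting {
      /** Sketch: wrap matches in [brackets] instead of the default bold markup. */
      static AnalyzingInfixSuggester create(Directory dir, Analyzer analyzer) throws IOException {
        return new AnalyzingInfixSuggester(dir, analyzer) {
          @Override
          protected void addWholeMatch(StringBuilder sb, String surface, String analyzed) {
            sb.append('[').append(surface).append(']');
          }

          @Override
          protected void addPrefixMatch(
              StringBuilder sb, String surface, String analyzed, String prefixToken) {
            sb.append('[').append(surface).append(']'); // treat prefix matches the same way
          }
        };
      }
    }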
+ * + * @param sb The {@code StringBuilder} to append to + * @param text The text chunk to add */ protected void addNonMatch(StringBuilder sb, String text) { sb.append(text); } - /** Called while highlighting a single result, to append - * the whole matched token to the provided fragments list. - * @param sb The {@code StringBuilder} to append to - * @param surface The surface form (original) text - * @param analyzed The analyzed token corresponding to the surface form text + /** + * Called while highlighting a single result, to append the whole matched token to the provided + * fragments list. + * + * @param sb The {@code StringBuilder} to append to + * @param surface The surface form (original) text + * @param analyzed The analyzed token corresponding to the surface form text */ protected void addWholeMatch(StringBuilder sb, String surface, String analyzed) { sb.append(""); @@ -814,16 +894,18 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable { sb.append(""); } - /** Called while highlighting a single result, to append a - * matched prefix token, to the provided fragments list. - * @param sb The {@code StringBuilder} to append to - * @param surface The fragment of the surface form - * (indexed during {@link #build}, corresponding to - * this match - * @param analyzed The analyzed token that matched - * @param prefixToken The prefix of the token that matched + /** + * Called while highlighting a single result, to append a matched prefix token, to the provided + * fragments list. + * + * @param sb The {@code StringBuilder} to append to + * @param surface The fragment of the surface form (indexed during {@link #build}, corresponding + * to this match + * @param analyzed The analyzed token that matched + * @param prefixToken The prefix of the token that matched */ - protected void addPrefixMatch(StringBuilder sb, String surface, String analyzed, String prefixToken) { + protected void addPrefixMatch( + StringBuilder sb, String surface, String analyzed, String prefixToken) { // TODO: apps can try to invert their analysis logic // here, e.g. 
downcase the two before checking prefix: if (prefixToken.length() >= surface.length()) { @@ -904,7 +986,7 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable { for (LeafReaderContext context : searcher.getIndexReader().leaves()) { LeafReader reader = FilterLeafReader.unwrap(context.reader()); if (reader instanceof SegmentReader) { - resources.add(Accountables.namedAccountable("segment", (SegmentReader)reader)); + resources.add(Accountables.namedAccountable("segment", (SegmentReader) reader)); } } } finally { diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggester.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggester.java index f2345aeea3b..4eaf4f51f68 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggester.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggester.java @@ -16,6 +16,8 @@ */ package org.apache.lucene.search.suggest.analyzing; +import static org.apache.lucene.util.automaton.Operations.DEFAULT_MAX_DETERMINIZED_STATES; + import java.io.IOException; import java.util.ArrayList; import java.util.Collection; @@ -24,7 +26,6 @@ import java.util.Comparator; import java.util.HashSet; import java.util.List; import java.util.Set; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.TokenStreamToAutomaton; @@ -52,140 +53,108 @@ import org.apache.lucene.util.automaton.Automaton; import org.apache.lucene.util.automaton.LimitedFiniteStringsIterator; import org.apache.lucene.util.automaton.Operations; import org.apache.lucene.util.automaton.Transition; -import org.apache.lucene.util.fst.FSTCompiler; import org.apache.lucene.util.fst.ByteSequenceOutputs; -import org.apache.lucene.util.fst.FST.BytesReader; import org.apache.lucene.util.fst.FST; -import org.apache.lucene.util.fst.PairOutputs.Pair; +import org.apache.lucene.util.fst.FST.BytesReader; +import org.apache.lucene.util.fst.FSTCompiler; import org.apache.lucene.util.fst.PairOutputs; +import org.apache.lucene.util.fst.PairOutputs.Pair; import org.apache.lucene.util.fst.PositiveIntOutputs; +import org.apache.lucene.util.fst.Util; import org.apache.lucene.util.fst.Util.Result; import org.apache.lucene.util.fst.Util.TopResults; -import org.apache.lucene.util.fst.Util; - -import static org.apache.lucene.util.automaton.Operations.DEFAULT_MAX_DETERMINIZED_STATES; /** - * Suggester that first analyzes the surface form, adds the - * analyzed form to a weighted FST, and then does the same - * thing at lookup time. This means lookup is based on the - * analyzed form while suggestions are still the surface - * form(s). + * Suggester that first analyzes the surface form, adds the analyzed form to a weighted FST, and + * then does the same thing at lookup time. This means lookup is based on the analyzed form while + * suggestions are still the surface form(s). * - *

- * This can result in powerful suggester functionality. For
- * example, if you use an analyzer removing stop words,
- * then the partial text "ghost chr..." could see the
- * suggestion "The Ghost of Christmas Past". Note that
- * position increments MUST NOT be preserved for this example
- * to work, so you should call the constructor with
- * preservePositionIncrements parameter set to
+ * <p>This can result in powerful suggester functionality. For example, if you use an analyzer
+ * removing stop words, then the partial text "ghost chr..." could see the suggestion "The Ghost of
+ * Christmas Past". Note that position increments MUST NOT be preserved for this example to work, so
+ * you should call the constructor with preservePositionIncrements parameter set to
 * false
 *
- * <p>
- * If SynonymFilter is used to map wifi and wireless network to
- * hotspot then the partial text "wirele..." could suggest
- * "wifi router". Token normalization like stemmers, accent
- * removal, etc., would allow suggestions to ignore such
- * variations.
+ * <p>If SynonymFilter is used to map wifi and wireless network to hotspot then the partial text
+ * "wirele..." could suggest "wifi router". Token normalization like stemmers, accent removal, etc.,
+ * would allow suggestions to ignore such variations.
 *
- * <p>
- * When two matching suggestions have the same weight, they
- * are tie-broken by the analyzed form. If their analyzed
- * form is the same then the order is undefined.
+ * <p>When two matching suggestions have the same weight, they are tie-broken by the analyzed form.
+ * If their analyzed form is the same then the order is undefined.
+ *
+ * <p>There are some limitations:
 *
- * <p>
- * There are some limitations:
 * <ul>
- *
- *   <li> A lookup from a query like "net" in English won't
- *        be any different than "net " (ie, user added a
- *        trailing space) because analyzers don't reflect
- *        when they've seen a token separator and when they
- *        haven't.
- *
- *   <li> If you're using {@code StopFilter}, and the user will
- *        type "fast apple", but so far all they've typed is
- *        "fast a", again because the analyzer doesn't convey whether
- *        it's seen a token separator after the "a",
- *        {@code StopFilter} will remove that "a" causing
- *        far more matches than you'd expect.
- *
- *   <li> Lookups with the empty string return no results
- *        instead of all results.
+ *   <li>A lookup from a query like "net" in English won't be any different than "net " (ie, user
+ *       added a trailing space) because analyzers don't reflect when they've seen a token separator
+ *       and when they haven't.
+ *   <li>If you're using {@code StopFilter}, and the user will type "fast apple", but so far all
+ *       they've typed is "fast a", again because the analyzer doesn't convey whether it's seen a
+ *       token separator after the "a", {@code StopFilter} will remove that "a" causing far more
+ *       matches than you'd expect.
+ *   <li>Lookups with the empty string return no results instead of all results.
 * </ul>
 *
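// === Editor's aside (illustrative sketch, not part of the patch) ===========
// End-to-end usage of the suggester documented above, fed from a
// tab-separated dictionary through the existing FileDictionary helper. The
// temp path and entries are illustrative; note that with the no-stopword
// StandardAnalyzer the query must include "the", whereas a stop-word-removing
// analyzer would enable the "ghost chr..." example from the javadoc:
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Paths;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.search.suggest.FileDictionary;
import org.apache.lucene.search.suggest.Lookup.LookupResult;
import org.apache.lucene.search.suggest.analyzing.AnalyzingSuggester;
import org.apache.lucene.store.FSDirectory;

class AnalyzingSuggesterExample {
  public static void main(String[] args) throws IOException {
    AnalyzingSuggester suggester =
        new AnalyzingSuggester(
            FSDirectory.open(Paths.get("/tmp/suggest-tmp")), "suggest", new StandardAnalyzer());
    String dict = "The Ghost of Christmas Past\t50\nThe Ghost Writer\t10\n";
    suggester.build(
        new FileDictionary(new ByteArrayInputStream(dict.getBytes(StandardCharsets.UTF_8)))
            .getEntryIterator());
    // Lookup runs on the analyzed form; surface forms come back as results:
    for (LookupResult hit : suggester.lookup("the ghost chr", false, 5)) {
      System.out.println(hit.key + " weight=" + hit.value);
    }
  }
}
// ============================================================================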
    - * + * * @lucene.experimental */ // redundant 'implements Accountable' to workaround javadocs bugs public class AnalyzingSuggester extends Lookup implements Accountable { - + /** - * FST<Weight,Surface>: - * input is the analyzed form, with a null byte between terms - * weights are encoded as costs: (Integer.MAX_VALUE-weight) - * surface is the original, unanalyzed form. - */ - private FST> fst = null; - - /** - * Analyzer that will be used for analyzing suggestions at - * index time. + * FST<Weight,Surface>: input is the analyzed form, with a null byte between terms weights + * are encoded as costs: (Integer.MAX_VALUE-weight) surface is the original, unanalyzed form. */ + private FST> fst = null; + + /** Analyzer that will be used for analyzing suggestions at index time. */ private final Analyzer indexAnalyzer; - /** - * Analyzer that will be used for analyzing suggestions at - * query time. - */ + /** Analyzer that will be used for analyzing suggestions at query time. */ private final Analyzer queryAnalyzer; - - /** - * True if exact match suggestions should always be returned first. - */ + + /** True if exact match suggestions should always be returned first. */ private final boolean exactFirst; - - /** - * True if separator between tokens should be preserved. - */ + + /** True if separator between tokens should be preserved. */ private final boolean preserveSep; - /** Include this flag in the options parameter to {@link - * #AnalyzingSuggester(Directory,String,Analyzer,Analyzer,int,int,int,boolean)} to always - * return the exact match first, regardless of score. This - * has no performance impact but could result in - * low-quality suggestions. */ + /** + * Include this flag in the options parameter to {@link + * #AnalyzingSuggester(Directory,String,Analyzer,Analyzer,int,int,int,boolean)} to always return + * the exact match first, regardless of score. This has no performance impact but could result in + * low-quality suggestions. + */ public static final int EXACT_FIRST = 1; - /** Include this flag in the options parameter to {@link - * #AnalyzingSuggester(Directory,String,Analyzer,Analyzer,int,int,int,boolean)} to preserve - * token separators when matching. */ + /** + * Include this flag in the options parameter to {@link + * #AnalyzingSuggester(Directory,String,Analyzer,Analyzer,int,int,int,boolean)} to preserve token + * separators when matching. + */ public static final int PRESERVE_SEP = 2; - /** Represents the separation between tokens, if - * PRESERVE_SEP was specified */ + /** Represents the separation between tokens, if PRESERVE_SEP was specified */ private static final int SEP_LABEL = '\u001F'; - /** Marks end of the analyzed input and start of dedup - * byte. */ + /** Marks end of the analyzed input and start of dedup byte. */ private static final int END_BYTE = 0x0; - /** Maximum number of dup surface forms (different surface - * forms for the same analyzed form). */ + /** Maximum number of dup surface forms (different surface forms for the same analyzed form). */ private final int maxSurfaceFormsPerAnalyzedForm; - /** Maximum graph paths to index for a single analyzed - * surface form. This only matters if your analyzer - * makes lots of alternate paths (e.g. contains - * SynonymFilter). */ + /** + * Maximum graph paths to index for a single analyzed surface form. This only matters if your + * analyzer makes lots of alternate paths (e.g. contains SynonymFilter). 
+ */ private final int maxGraphExpansions; private final Directory tempDir; private final String tempFileNamePrefix; - /** Highest number of analyzed paths we saw for any single - * input surface form. For analyzers that never create - * graphs this will always be 1. */ + /** + * Highest number of analyzed paths we saw for any single input surface form. For analyzers that + * never create graphs this will always be 1. + */ private int maxAnalyzedPathsForOneInput; private boolean hasPayloads; @@ -200,46 +169,60 @@ public class AnalyzingSuggester extends Lookup implements Accountable { /** * Calls {@link #AnalyzingSuggester(Directory,String,Analyzer,Analyzer,int,int,int,boolean) - * AnalyzingSuggester(analyzer, analyzer, EXACT_FIRST | - * PRESERVE_SEP, 256, -1, true)} + * AnalyzingSuggester(analyzer, analyzer, EXACT_FIRST | PRESERVE_SEP, 256, -1, true)} */ public AnalyzingSuggester(Directory tempDir, String tempFileNamePrefix, Analyzer analyzer) { - this(tempDir, tempFileNamePrefix, analyzer, analyzer, EXACT_FIRST | PRESERVE_SEP, 256, -1, true); + this( + tempDir, tempFileNamePrefix, analyzer, analyzer, EXACT_FIRST | PRESERVE_SEP, 256, -1, true); } /** * Calls {@link #AnalyzingSuggester(Directory,String,Analyzer,Analyzer,int,int,int,boolean) - * AnalyzingSuggester(indexAnalyzer, queryAnalyzer, EXACT_FIRST | - * PRESERVE_SEP, 256, -1, true)} + * AnalyzingSuggester(indexAnalyzer, queryAnalyzer, EXACT_FIRST | PRESERVE_SEP, 256, -1, true)} */ - public AnalyzingSuggester(Directory tempDir, String tempFileNamePrefix, Analyzer indexAnalyzer, Analyzer queryAnalyzer) { - this(tempDir, tempFileNamePrefix, indexAnalyzer, queryAnalyzer, EXACT_FIRST | PRESERVE_SEP, 256, -1, true); + public AnalyzingSuggester( + Directory tempDir, + String tempFileNamePrefix, + Analyzer indexAnalyzer, + Analyzer queryAnalyzer) { + this( + tempDir, + tempFileNamePrefix, + indexAnalyzer, + queryAnalyzer, + EXACT_FIRST | PRESERVE_SEP, + 256, + -1, + true); } /** * Creates a new suggester. - * - * @param indexAnalyzer Analyzer that will be used for - * analyzing suggestions while building the index. - * @param queryAnalyzer Analyzer that will be used for - * analyzing query text during lookup + * + * @param indexAnalyzer Analyzer that will be used for analyzing suggestions while building the + * index. + * @param queryAnalyzer Analyzer that will be used for analyzing query text during lookup * @param options see {@link #EXACT_FIRST}, {@link #PRESERVE_SEP} - * @param maxSurfaceFormsPerAnalyzedForm Maximum number of - * surface forms to keep for a single analyzed form. - * When there are too many surface forms we discard the - * lowest weighted ones. - * @param maxGraphExpansions Maximum number of graph paths - * to expand from the analyzed form. Set this to -1 for - * no limit. - * @param preservePositionIncrements Whether position holes - * should appear in the automata + * @param maxSurfaceFormsPerAnalyzedForm Maximum number of surface forms to keep for a single + * analyzed form. When there are too many surface forms we discard the lowest weighted ones. + * @param maxGraphExpansions Maximum number of graph paths to expand from the analyzed form. Set + * this to -1 for no limit. 
+ * @param preservePositionIncrements Whether position holes should appear in the automata */ - public AnalyzingSuggester(Directory tempDir, String tempFileNamePrefix, Analyzer indexAnalyzer, Analyzer queryAnalyzer, int options, int maxSurfaceFormsPerAnalyzedForm, int maxGraphExpansions, + public AnalyzingSuggester( + Directory tempDir, + String tempFileNamePrefix, + Analyzer indexAnalyzer, + Analyzer queryAnalyzer, + int options, + int maxSurfaceFormsPerAnalyzedForm, + int maxGraphExpansions, boolean preservePositionIncrements) { this.indexAnalyzer = indexAnalyzer; this.queryAnalyzer = queryAnalyzer; if ((options & ~(EXACT_FIRST | PRESERVE_SEP)) != 0) { - throw new IllegalArgumentException("options should only contain EXACT_FIRST and PRESERVE_SEP; got " + options); + throw new IllegalArgumentException( + "options should only contain EXACT_FIRST and PRESERVE_SEP; got " + options); } this.exactFirst = (options & EXACT_FIRST) != 0; this.preserveSep = (options & PRESERVE_SEP) != 0; @@ -249,12 +232,16 @@ public class AnalyzingSuggester extends Lookup implements Accountable { // more than one byte to disambiguate ... but 256 seems // like it should be way more then enough. if (maxSurfaceFormsPerAnalyzedForm <= 0 || maxSurfaceFormsPerAnalyzedForm > 256) { - throw new IllegalArgumentException("maxSurfaceFormsPerAnalyzedForm must be > 0 and < 256 (got: " + maxSurfaceFormsPerAnalyzedForm + ")"); + throw new IllegalArgumentException( + "maxSurfaceFormsPerAnalyzedForm must be > 0 and < 256 (got: " + + maxSurfaceFormsPerAnalyzedForm + + ")"); } this.maxSurfaceFormsPerAnalyzedForm = maxSurfaceFormsPerAnalyzedForm; if (maxGraphExpansions < 1 && maxGraphExpansions != -1) { - throw new IllegalArgumentException("maxGraphExpansions must -1 (no limit) or > 0 (got: " + maxGraphExpansions + ")"); + throw new IllegalArgumentException( + "maxGraphExpansions must -1 (no limit) or > 0 (got: " + maxGraphExpansions + ")"); } this.maxGraphExpansions = maxGraphExpansions; this.preservePositionIncrements = preservePositionIncrements; @@ -290,10 +277,10 @@ public class AnalyzingSuggester extends Lookup implements Accountable { // make one pass: Transition t = new Transition(); int[] topoSortStates = Operations.topoSortStates(a); - for(int i=0;i { private final boolean hasPayloads; @@ -395,7 +381,7 @@ public class AnalyzingSuggester extends Lookup implements Accountable { } assert scratchA.isValid(); assert scratchB.isValid(); - + return scratchA.compareTo(scratchB); } } @@ -408,9 +394,11 @@ public class AnalyzingSuggester extends Lookup implements Accountable { hasPayloads = iterator.hasPayloads(); - OfflineSorter sorter = new OfflineSorter(tempDir, tempFileNamePrefix, new AnalyzingComparator(hasPayloads)); + OfflineSorter sorter = + new OfflineSorter(tempDir, tempFileNamePrefix, new AnalyzingComparator(hasPayloads)); - IndexOutput tempInput = tempDir.createTempOutput(tempFileNamePrefix, "input", IOContext.DEFAULT); + IndexOutput tempInput = + tempDir.createTempOutput(tempFileNamePrefix, "input", IOContext.DEFAULT); OfflineSorter.ByteSequencesWriter writer = new OfflineSorter.ByteSequencesWriter(tempInput); OfflineSorter.ByteSequencesReader reader = null; @@ -425,16 +413,21 @@ public class AnalyzingSuggester extends Lookup implements Accountable { try { ByteArrayDataOutput output = new ByteArrayDataOutput(buffer); - for (BytesRef surfaceForm; (surfaceForm = iterator.next()) != null;) { + for (BytesRef surfaceForm; (surfaceForm = iterator.next()) != null; ) { LimitedFiniteStringsIterator finiteStrings = new 
LimitedFiniteStringsIterator(toAutomaton(surfaceForm, ts2a), maxGraphExpansions); for (IntsRef string; (string = finiteStrings.next()) != null; count++) { Util.toBytesRef(string, scratch); - + // length of the analyzed text (FST input) - if (scratch.length() > Short.MAX_VALUE-2) { - throw new IllegalArgumentException("cannot handle analyzed forms > " + (Short.MAX_VALUE-2) + " in length (got " + scratch.length() + ")"); + if (scratch.length() > Short.MAX_VALUE - 2) { + throw new IllegalArgumentException( + "cannot handle analyzed forms > " + + (Short.MAX_VALUE - 2) + + " in length (got " + + scratch.length() + + ")"); } short analyzedLength = (short) scratch.length(); @@ -445,8 +438,13 @@ public class AnalyzingSuggester extends Lookup implements Accountable { BytesRef payload; if (hasPayloads) { - if (surfaceForm.length > (Short.MAX_VALUE-2)) { - throw new IllegalArgumentException("cannot handle surface form > " + (Short.MAX_VALUE-2) + " in length (got " + surfaceForm.length + ")"); + if (surfaceForm.length > (Short.MAX_VALUE - 2)) { + throw new IllegalArgumentException( + "cannot handle surface form > " + + (Short.MAX_VALUE - 2) + + " in length (got " + + surfaceForm.length + + ")"); } payload = iterator.payload(); // payload + surfaceLength (short) @@ -454,9 +452,9 @@ public class AnalyzingSuggester extends Lookup implements Accountable { } else { payload = null; } - + buffer = ArrayUtil.grow(buffer, requiredLength); - + output.reset(buffer); output.writeShort(analyzedLength); @@ -466,9 +464,10 @@ public class AnalyzingSuggester extends Lookup implements Accountable { output.writeInt(encodeWeight(iterator.weight())); if (hasPayloads) { - for(int i=0;i outputs = new PairOutputs<>(PositiveIntOutputs.getSingleton(), ByteSequenceOutputs.getSingleton()); - FSTCompiler> fstCompiler = new FSTCompiler<>(FST.INPUT_TYPE.BYTE1, outputs); + reader = + new OfflineSorter.ByteSequencesReader( + tempDir.openChecksumInput(tempSortedFileName, IOContext.READONCE), + tempSortedFileName); + + PairOutputs outputs = + new PairOutputs<>(PositiveIntOutputs.getSingleton(), ByteSequenceOutputs.getSingleton()); + FSTCompiler> fstCompiler = + new FSTCompiler<>(FST.INPUT_TYPE.BYTE1, outputs); // Build FST: BytesRefBuilder previousAnalyzed = null; @@ -519,7 +524,7 @@ public class AnalyzingSuggester extends Lookup implements Accountable { } input.reset(bytes.bytes, bytes.offset, bytes.length); short analyzedLength = input.readShort(); - analyzed.grow(analyzedLength+2); + analyzed.grow(analyzedLength + 2); input.readBytes(analyzed.bytes(), 0, analyzedLength); analyzed.setLength(analyzedLength); @@ -533,7 +538,7 @@ public class AnalyzingSuggester extends Lookup implements Accountable { surface.offset = input.getPosition(); surface.length = bytes.length - surface.offset; } - + if (previousAnalyzed == null) { previousAnalyzed = new BytesRefBuilder(); previousAnalyzed.copyBytes(analyzed.get()); @@ -568,7 +573,8 @@ public class AnalyzingSuggester extends Lookup implements Accountable { analyzed.append((byte) dedup); Util.toIntsRef(analyzed.get(), scratchInts); - //System.out.println("ADD: " + scratchInts + " -> " + cost + ": " + surface.utf8ToString()); + // System.out.println("ADD: " + scratchInts + " -> " + cost + ": " + + // surface.utf8ToString()); if (!hasPayloads) { fstCompiler.add(scratchInts.get(), outputs.newPair(cost, BytesRef.deepCopyOf(surface))); } else { @@ -577,14 +583,14 @@ public class AnalyzingSuggester extends Lookup implements Accountable { BytesRef br = new BytesRef(surface.length + 1 + payloadLength); 
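// [Editor's note, not part of the patch] The two copies below pack the FST
// output as [surface bytes][PAYLOAD_SEP][payload bytes]; the lookup side
// later splits surface form and payload apart again at the first PAYLOAD_SEP
// byte it finds.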
System.arraycopy(surface.bytes, surface.offset, br.bytes, 0, surface.length); br.bytes[surface.length] = PAYLOAD_SEP; - System.arraycopy(bytes.bytes, payloadOffset, br.bytes, surface.length+1, payloadLength); + System.arraycopy(bytes.bytes, payloadOffset, br.bytes, surface.length + 1, payloadLength); br.length = br.bytes.length; fstCompiler.add(scratchInts.get(), outputs.newPair(cost, br)); } } fst = fstCompiler.compile(); - //Util.dotToFile(fst, "/tmp/suggest.dot"); + // Util.dotToFile(fst, "/tmp/suggest.dot"); } finally { IOUtils.closeWhileHandlingException(reader, writer); IOUtils.deleteFilesIgnoringExceptions(tempDir, tempInput.getName(), tempSortedFileName); @@ -607,7 +613,12 @@ public class AnalyzingSuggester extends Lookup implements Accountable { @Override public boolean load(DataInput input) throws IOException { count = input.readVLong(); - this.fst = new FST<>(input, input, new PairOutputs<>(PositiveIntOutputs.getSingleton(), ByteSequenceOutputs.getSingleton())); + this.fst = + new FST<>( + input, + input, + new PairOutputs<>( + PositiveIntOutputs.getSingleton(), ByteSequenceOutputs.getSingleton())); maxAnalyzedPathsForOneInput = input.readVInt(); hasPayloads = input.readByte() == 1; return true; @@ -617,8 +628,8 @@ public class AnalyzingSuggester extends Lookup implements Accountable { LookupResult result; if (hasPayloads) { int sepIndex = -1; - for(int i=0;i= output2.length) { return false; } - for(int i=0;i lookup(final CharSequence key, Set contexts, boolean onlyMorePopular, int num) { + public List lookup( + final CharSequence key, Set contexts, boolean onlyMorePopular, int num) { assert num > 0; if (onlyMorePopular) { @@ -671,13 +683,15 @@ public class AnalyzingSuggester extends Lookup implements Accountable { return Collections.emptyList(); } - //System.out.println("lookup key=" + key + " num=" + num); + // System.out.println("lookup key=" + key + " num=" + num); for (int i = 0; i < key.length(); i++) { if (key.charAt(i) == 0x1E) { - throw new IllegalArgumentException("lookup key cannot contain HOLE character U+001E; this character is reserved"); + throw new IllegalArgumentException( + "lookup key cannot contain HOLE character U+001E; this character is reserved"); } if (key.charAt(i) == 0x1F) { - throw new IllegalArgumentException("lookup key cannot contain unit separator character U+001F; this character is reserved"); + throw new IllegalArgumentException( + "lookup key cannot contain unit separator character U+001F; this character is reserved"); } } final BytesRef utf8Key = new BytesRef(key); @@ -686,26 +700,27 @@ public class AnalyzingSuggester extends Lookup implements Accountable { final CharsRefBuilder spare = new CharsRefBuilder(); - //System.out.println(" now intersect exactFirst=" + exactFirst); - + // System.out.println(" now intersect exactFirst=" + exactFirst); + // Intersect automaton w/ suggest wFST and get all // prefix starting nodes & their outputs: - //final PathIntersector intersector = getPathIntersector(lookupAutomaton, fst); + // final PathIntersector intersector = getPathIntersector(lookupAutomaton, fst); - //System.out.println(" prefixPaths: " + prefixPaths.size()); + // System.out.println(" prefixPaths: " + prefixPaths.size()); BytesReader bytesReader = fst.getBytesReader(); - FST.Arc> scratchArc = new FST.Arc<>(); + FST.Arc> scratchArc = new FST.Arc<>(); final List results = new ArrayList<>(); - List>> prefixPaths = FSTUtil.intersectPrefixPaths(convertAutomaton(lookupAutomaton), fst); + List>> prefixPaths = + 
FSTUtil.intersectPrefixPaths(convertAutomaton(lookupAutomaton), fst); if (exactFirst) { int count = 0; - for (FSTUtil.Path> path : prefixPaths) { + for (FSTUtil.Path> path : prefixPaths) { if (fst.findTargetArc(END_BYTE, path.fstNode, scratchArc, bytesReader) != null) { // This node has END_BYTE arc leaving, meaning it's an // "exact" match: @@ -715,23 +730,29 @@ public class AnalyzingSuggester extends Lookup implements Accountable { // Searcher just to find the single exact only // match, if present: - Util.TopNSearcher> searcher; - searcher = new Util.TopNSearcher<>(fst, count * maxSurfaceFormsPerAnalyzedForm, count * maxSurfaceFormsPerAnalyzedForm, weightComparator); + Util.TopNSearcher> searcher; + searcher = + new Util.TopNSearcher<>( + fst, + count * maxSurfaceFormsPerAnalyzedForm, + count * maxSurfaceFormsPerAnalyzedForm, + weightComparator); // NOTE: we could almost get away with only using // the first start node. The only catch is if // maxSurfaceFormsPerAnalyzedForm had kicked in and // pruned our exact match from one of these nodes // ...: - for (FSTUtil.Path> path : prefixPaths) { + for (FSTUtil.Path> path : prefixPaths) { if (fst.findTargetArc(END_BYTE, path.fstNode, scratchArc, bytesReader) != null) { // This node has END_BYTE arc leaving, meaning it's an // "exact" match: - searcher.addStartPaths(scratchArc, fst.outputs.add(path.output, scratchArc.output()), false, path.input); + searcher.addStartPaths( + scratchArc, fst.outputs.add(path.output, scratchArc.output()), false, path.input); } } - TopResults> completions = searcher.search(); + TopResults> completions = searcher.search(); assert completions.isComplete; // NOTE: this is rather inefficient: we enumerate @@ -746,7 +767,7 @@ public class AnalyzingSuggester extends Lookup implements Accountable { // seach: it's bounded by how many prefix start // nodes we have and the // maxSurfaceFormsPerAnalyzedForm: - for(Result> completion : completions) { + for (Result> completion : completions) { BytesRef output2 = completion.output.output2; if (sameSurfaceForm(utf8Key, output2)) { results.add(getLookupResult(completion.output.output1, output2, spare)); @@ -760,58 +781,58 @@ public class AnalyzingSuggester extends Lookup implements Accountable { } } - Util.TopNSearcher> searcher; - searcher = new Util.TopNSearcher>(fst, - num - results.size(), - num * maxAnalyzedPathsForOneInput, - weightComparator) { - private final Set seen = new HashSet<>(); + Util.TopNSearcher> searcher; + searcher = + new Util.TopNSearcher>( + fst, num - results.size(), num * maxAnalyzedPathsForOneInput, weightComparator) { + private final Set seen = new HashSet<>(); - @Override - protected boolean acceptResult(IntsRef input, Pair output) { + @Override + protected boolean acceptResult(IntsRef input, Pair output) { - // Dedup: when the input analyzes to a graph we - // can get duplicate surface forms: - if (seen.contains(output.output2)) { - return false; - } - seen.add(output.output2); - - if (!exactFirst) { - return true; - } else { - // In exactFirst mode, don't accept any paths - // matching the surface form since that will - // create duplicate results: - if (sameSurfaceForm(utf8Key, output.output2)) { - // We found exact match, which means we should - // have already found it in the first search: - assert results.size() == 1; - return false; - } else { - return true; + // Dedup: when the input analyzes to a graph we + // can get duplicate surface forms: + if (seen.contains(output.output2)) { + return false; + } + seen.add(output.output2); + + if 
(!exactFirst) { + return true; + } else { + // In exactFirst mode, don't accept any paths + // matching the surface form since that will + // create duplicate results: + if (sameSurfaceForm(utf8Key, output.output2)) { + // We found exact match, which means we should + // have already found it in the first search: + assert results.size() == 1; + return false; + } else { + return true; + } + } } - } - } - }; + }; prefixPaths = getFullPrefixPaths(prefixPaths, lookupAutomaton, fst); - - for (FSTUtil.Path> path : prefixPaths) { + + for (FSTUtil.Path> path : prefixPaths) { searcher.addStartPaths(path.fstNode, path.output, true, path.input); } - TopResults> completions = searcher.search(); + TopResults> completions = searcher.search(); assert completions.isComplete; - for(Result> completion : completions) { + for (Result> completion : completions) { - LookupResult result = getLookupResult(completion.output.output1, completion.output.output2, spare); + LookupResult result = + getLookupResult(completion.output.output1, completion.output.output2, spare); // TODO: for fuzzy case would be nice to return // how many edits were required - //System.out.println(" result=" + result); + // System.out.println(" result=" + result); results.add(result); if (results.size() == num) { @@ -826,21 +847,23 @@ public class AnalyzingSuggester extends Lookup implements Accountable { throw new RuntimeException(bogus); } } - + @Override public long getCount() { return count; } /** Returns all prefix paths to initialize the search. */ - protected List>> getFullPrefixPaths(List>> prefixPaths, - Automaton lookupAutomaton, - FST> fst) - throws IOException { + protected List>> getFullPrefixPaths( + List>> prefixPaths, + Automaton lookupAutomaton, + FST> fst) + throws IOException { return prefixPaths; } - - final Automaton toAutomaton(final BytesRef surfaceForm, final TokenStreamToAutomaton ts2a) throws IOException { + + final Automaton toAutomaton(final BytesRef surfaceForm, final TokenStreamToAutomaton ts2a) + throws IOException { // Analyze surface form: Automaton automaton; try (TokenStream ts = indexAnalyzer.tokenStream("", surfaceForm.utf8ToString())) { @@ -879,31 +902,29 @@ public class AnalyzingSuggester extends Lookup implements Accountable { return automaton; } - /** - * Returns the weight associated with an input string, - * or null if it does not exist. - */ + /** Returns the weight associated with an input string, or null if it does not exist. 
*/ public Object get(CharSequence key) { throw new UnsupportedOperationException(); } - + /** cost -> weight */ private static int decodeWeight(long encoded) { - return (int)(Integer.MAX_VALUE - encoded); + return (int) (Integer.MAX_VALUE - encoded); } - + /** weight -> cost */ private static int encodeWeight(long value) { if (value < 0 || value > Integer.MAX_VALUE) { throw new UnsupportedOperationException("cannot encode value: " + value); } - return Integer.MAX_VALUE - (int)value; + return Integer.MAX_VALUE - (int) value; } - - static final Comparator> weightComparator = new Comparator> () { - @Override - public int compare(Pair left, Pair right) { - return left.output1.compareTo(right.output1); - } - }; + + static final Comparator> weightComparator = + new Comparator>() { + @Override + public int compare(Pair left, Pair right) { + return left.output1.compareTo(right.output1); + } + }; } diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java index 63f432fb76a..9fee03a1c66 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java @@ -23,7 +23,6 @@ import java.util.List; import java.util.Map; import java.util.Set; import java.util.TreeSet; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.document.FieldType; import org.apache.lucene.document.TextField; @@ -46,44 +45,32 @@ import org.apache.lucene.util.BytesRef; // - allow to use the search score /** - * Extension of the AnalyzingInfixSuggester which transforms the weight - * after search to take into account the position of the searched term into - * the indexed text. - * Please note that it increases the number of elements searched and applies the - * ponderation after. It might be costly for long suggestions. + * Extension of the AnalyzingInfixSuggester which transforms the weight after search to take into + * account the position of the searched term into the indexed text. Please note that it increases + * the number of elements searched and applies the ponderation after. It might be costly for long + * suggestions. * * @lucene.experimental */ public class BlendedInfixSuggester extends AnalyzingInfixSuggester { - /** - * Coefficient used for linear blending - */ + /** Coefficient used for linear blending */ protected static double LINEAR_COEF = 0.10; private Double exponent = 2.0; - /** - * Default factor - */ + /** Default factor */ public static int DEFAULT_NUM_FACTOR = 10; - /** - * Factor to multiply the number of searched elements - */ + /** Factor to multiply the number of searched elements */ private final int numFactor; - /** - * Type of blender used by the suggester - */ + /** Type of blender used by the suggester */ private final BlenderType blenderType; - /** - * The different types of blender. - */ + /** The different types of blender. */ public static enum BlenderType { - /** Application dependent; override {@link - * #calculateCoefficient} to compute it. */ + /** Application dependent; override {@link #calculateCoefficient} to compute it. 
*/ CUSTOM, /** weight*(1 - 0.10*position) */ POSITION_LINEAR, @@ -92,13 +79,10 @@ public class BlendedInfixSuggester extends AnalyzingInfixSuggester { /** weight/pow(1+position, exponent) */ POSITION_EXPONENTIAL_RECIPROCAL // TODO: - //SCORE + // SCORE } - /** - * Create a new instance, loading from a previously built - * directory, if it exists. - */ + /** Create a new instance, loading from a previously built directory, if it exists. */ public BlendedInfixSuggester(Directory dir, Analyzer analyzer) throws IOException { super(dir, analyzer); this.blenderType = BlenderType.POSITION_LINEAR; @@ -106,70 +90,114 @@ public class BlendedInfixSuggester extends AnalyzingInfixSuggester { } /** - * Create a new instance, loading from a previously built - * directory, if it exists. + * Create a new instance, loading from a previously built directory, if it exists. * * @param blenderType Type of blending strategy, see BlenderType for more precisions - * @param numFactor Factor to multiply the number of searched elements before ponderate + * @param numFactor Factor to multiply the number of searched elements before ponderate * @param commitOnBuild Call commit after the index has finished building. This would persist the - * suggester index to disk and future instances of this suggester can use this pre-built dictionary. + * suggester index to disk and future instances of this suggester can use this pre-built + * dictionary. * @throws IOException If there are problems opening the underlying Lucene index. */ - public BlendedInfixSuggester(Directory dir, Analyzer indexAnalyzer, Analyzer queryAnalyzer, - int minPrefixChars, BlenderType blenderType, int numFactor, boolean commitOnBuild) throws IOException { + public BlendedInfixSuggester( + Directory dir, + Analyzer indexAnalyzer, + Analyzer queryAnalyzer, + int minPrefixChars, + BlenderType blenderType, + int numFactor, + boolean commitOnBuild) + throws IOException { super(dir, indexAnalyzer, queryAnalyzer, minPrefixChars, commitOnBuild); this.blenderType = blenderType; this.numFactor = numFactor; } /** - * Create a new instance, loading from a previously built - * directory, if it exists. + * Create a new instance, loading from a previously built directory, if it exists. * * @param blenderType Type of blending strategy, see BlenderType for more precisions - * @param numFactor Factor to multiply the number of searched elements before ponderate - * @param exponent exponent used only when blenderType is BlenderType.POSITION_EXPONENTIAL_RECIPROCAL + * @param numFactor Factor to multiply the number of searched elements before ponderate + * @param exponent exponent used only when blenderType is + * BlenderType.POSITION_EXPONENTIAL_RECIPROCAL * @param commitOnBuild Call commit after the index has finished building. This would persist the - * suggester index to disk and future instances of this suggester can use this pre-built dictionary. + * suggester index to disk and future instances of this suggester can use this pre-built + * dictionary. * @param allTermsRequired All terms in the suggest query must be matched. * @param highlight Highlight suggest query in suggestions. * @throws IOException If there are problems opening the underlying Lucene index. 
*/ - public BlendedInfixSuggester(Directory dir, Analyzer indexAnalyzer, Analyzer queryAnalyzer, - int minPrefixChars, BlenderType blenderType, int numFactor, Double exponent, - boolean commitOnBuild, boolean allTermsRequired, boolean highlight) throws IOException { - super(dir, indexAnalyzer, queryAnalyzer, minPrefixChars, commitOnBuild, allTermsRequired, highlight); + public BlendedInfixSuggester( + Directory dir, + Analyzer indexAnalyzer, + Analyzer queryAnalyzer, + int minPrefixChars, + BlenderType blenderType, + int numFactor, + Double exponent, + boolean commitOnBuild, + boolean allTermsRequired, + boolean highlight) + throws IOException { + super( + dir, + indexAnalyzer, + queryAnalyzer, + minPrefixChars, + commitOnBuild, + allTermsRequired, + highlight); this.blenderType = blenderType; this.numFactor = numFactor; - if(exponent != null) { + if (exponent != null) { this.exponent = exponent; } } @Override - public List lookup(CharSequence key, Set contexts, boolean onlyMorePopular, int num) throws IOException { + public List lookup( + CharSequence key, Set contexts, boolean onlyMorePopular, int num) + throws IOException { // Don't * numFactor here since we do it down below, once, in the call chain: return super.lookup(key, contexts, onlyMorePopular, num); } @Override - public List lookup(CharSequence key, Set contexts, int num, boolean allTermsRequired, boolean doHighlight) throws IOException { + public List lookup( + CharSequence key, + Set contexts, + int num, + boolean allTermsRequired, + boolean doHighlight) + throws IOException { // Don't * numFactor here since we do it down below, once, in the call chain: return super.lookup(key, contexts, num, allTermsRequired, doHighlight); } @Override - public List lookup(CharSequence key, Map contextInfo, int num, boolean allTermsRequired, boolean doHighlight) throws IOException { + public List lookup( + CharSequence key, + Map contextInfo, + int num, + boolean allTermsRequired, + boolean doHighlight) + throws IOException { // Don't * numFactor here since we do it down below, once, in the call chain: return super.lookup(key, contextInfo, num, allTermsRequired, doHighlight); } @Override - public List lookup(CharSequence key, BooleanQuery contextQuery, int num, boolean allTermsRequired, boolean doHighlight) throws IOException { - /** We need to do num * numFactor here only because it is the last call in the lookup chain*/ + public List lookup( + CharSequence key, + BooleanQuery contextQuery, + int num, + boolean allTermsRequired, + boolean doHighlight) + throws IOException { + /** We need to do num * numFactor here only because it is the last call in the lookup chain */ return super.lookup(key, contextQuery, num * numFactor, allTermsRequired, doHighlight); } - + @Override protected FieldType getTextFieldType() { FieldType ft = new FieldType(TextField.TYPE_NOT_STORED); @@ -182,8 +210,14 @@ public class BlendedInfixSuggester extends AnalyzingInfixSuggester { } @Override - protected List createResults(IndexSearcher searcher, TopFieldDocs hits, int num, CharSequence key, - boolean doHighlight, Set matchedTokens, String prefixToken) + protected List createResults( + IndexSearcher searcher, + TopFieldDocs hits, + int num, + CharSequence key, + boolean doHighlight, + Set matchedTokens, + String prefixToken) throws IOException { TreeSet results = new TreeSet<>(LOOKUP_COMP); @@ -194,7 +228,8 @@ public class BlendedInfixSuggester extends AnalyzingInfixSuggester { for (int i = 0; i < hits.scoreDocs.length; i++) { FieldDoc fd = (FieldDoc) 
hits.scoreDocs[i]; - BinaryDocValues textDV = MultiDocValues.getBinaryValues(searcher.getIndexReader(), TEXT_FIELD_NAME); + BinaryDocValues textDV = + MultiDocValues.getBinaryValues(searcher.getIndexReader(), TEXT_FIELD_NAME); assert textDV != null; textDV.advance(fd.doc); @@ -204,7 +239,8 @@ public class BlendedInfixSuggester extends AnalyzingInfixSuggester { // This will just be null if app didn't pass payloads to build(): // TODO: maybe just stored fields? they compress... - BinaryDocValues payloadsDV = MultiDocValues.getBinaryValues(searcher.getIndexReader(), "payloads"); + BinaryDocValues payloadsDV = + MultiDocValues.getBinaryValues(searcher.getIndexReader(), "payloads"); BytesRef payload; if (payloadsDV != null) { @@ -234,7 +270,8 @@ public class BlendedInfixSuggester extends AnalyzingInfixSuggester { LookupResult result; if (doHighlight) { - result = new LookupResult(text, highlight(text, matchedTokens, prefixToken), score, payload); + result = + new LookupResult(text, highlight(text, matchedTokens, prefixToken), score, payload); } else { result = new LookupResult(text, score, payload); } @@ -252,7 +289,8 @@ public class BlendedInfixSuggester extends AnalyzingInfixSuggester { * @param result the result we try to add * @param num size limit */ - private static void boundedTreeAdd(TreeSet results, Lookup.LookupResult result, int num) { + private static void boundedTreeAdd( + TreeSet results, Lookup.LookupResult result, int num) { if (results.size() >= num) { if (results.first().value < result.value) { @@ -272,9 +310,12 @@ public class BlendedInfixSuggester extends AnalyzingInfixSuggester { * @param matchedTokens tokens found in the query * @param prefixToken unfinished token in the query * @return the coefficient - * @throws IOException If there are problems reading term vectors from the underlying Lucene index. + * @throws IOException If there are problems reading term vectors from the underlying Lucene + * index. */ - private double createCoefficient(IndexSearcher searcher, int doc, Set matchedTokens, String prefixToken) throws IOException { + private double createCoefficient( + IndexSearcher searcher, int doc, Set matchedTokens, String prefixToken) + throws IOException { Terms tv = searcher.getIndexReader().getTermVector(doc, TEXT_FIELD_NAME); TermsEnum it = tv.iterator(); @@ -286,8 +327,9 @@ public class BlendedInfixSuggester extends AnalyzingInfixSuggester { String docTerm = term.utf8ToString(); - if (matchedTokens.contains(docTerm) || (prefixToken != null && docTerm.startsWith(prefixToken))) { - + if (matchedTokens.contains(docTerm) + || (prefixToken != null && docTerm.startsWith(prefixToken))) { + PostingsEnum docPosEnum = it.postings(null, PostingsEnum.OFFSETS); docPosEnum.nextDoc(); @@ -304,8 +346,9 @@ public class BlendedInfixSuggester extends AnalyzingInfixSuggester { } /** - * Calculate the weight coefficient based on the position of the first matching word. - * Subclass should override it to adapt it to particular needs + * Calculate the weight coefficient based on the position of the first matching word. 
Subclass + * should override it to adapt it to particular needs + * * @param position of the first matching word in text * @return the coefficient */ @@ -361,4 +404,3 @@ public class BlendedInfixSuggester extends AnalyzingInfixSuggester { } } } - diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FSTUtil.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FSTUtil.java index 97ef9a62ea1..1c5a3bf23db 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FSTUtil.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FSTUtil.java @@ -19,7 +19,6 @@ package org.apache.lucene.search.suggest.analyzing; import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.lucene.util.IntsRefBuilder; import org.apache.lucene.util.automaton.Automaton; import org.apache.lucene.util.automaton.Transition; @@ -29,13 +28,12 @@ import org.apache.lucene.util.fst.Util; // TODO: move to core? nobody else uses it yet though... /** - * Exposes a utility method to enumerate all paths - * intersecting an {@link Automaton} with an {@link FST}. + * Exposes a utility method to enumerate all paths intersecting an {@link Automaton} with an {@link + * FST}. */ public class FSTUtil { - private FSTUtil() { - } + private FSTUtil() {} /** Holds a pair (automaton, fst) of states and accumulated output in the intersected machine. */ public static final class Path { @@ -62,11 +60,10 @@ public class FSTUtil { } /** - * Enumerates all minimal prefix paths in the automaton that also intersect the FST, - * accumulating the FST end node and output for each path. + * Enumerates all minimal prefix paths in the automaton that also intersect the FST, accumulating + * the FST end node and output for each path. */ - public static List> intersectPrefixPaths(Automaton a, FST fst) - throws IOException { + public static List> intersectPrefixPaths(Automaton a, FST fst) throws IOException { assert a.isDeterministic(); final List> queue = new ArrayList<>(); final List> endNodes = new ArrayList<>(); @@ -74,10 +71,10 @@ public class FSTUtil { return endNodes; } - queue.add(new Path<>(0, fst - .getFirstArc(new FST.Arc()), fst.outputs.getNoOutput(), - new IntsRefBuilder())); - + queue.add( + new Path<>( + 0, fst.getFirstArc(new FST.Arc()), fst.outputs.getNoOutput(), new IntsRefBuilder())); + final FST.Arc scratchArc = new FST.Arc<>(); final FST.BytesReader fstReader = fst.getBytesReader(); @@ -91,23 +88,25 @@ public class FSTUtil { // we accept all further paths too continue; } - + IntsRefBuilder currentInput = path.input; int count = a.initTransition(path.state, t); - for (int i=0;i nextArc = fst.findTargetArc(t.min, - path.fstNode, scratchArc, fstReader); + final FST.Arc nextArc = fst.findTargetArc(t.min, path.fstNode, scratchArc, fstReader); if (nextArc != null) { final IntsRefBuilder newInput = new IntsRefBuilder(); newInput.copyInts(currentInput.get()); newInput.append(t.min); - queue.add(new Path<>(t.dest, new FST.Arc() - .copyFrom(nextArc), fst.outputs - .add(path.output, nextArc.output()), newInput)); + queue.add( + new Path<>( + t.dest, + new FST.Arc().copyFrom(nextArc), + fst.outputs.add(path.output, nextArc.output()), + newInput)); } } else { // TODO: if this transition's TO state is accepting, and @@ -117,28 +116,27 @@ public class FSTUtil { // here. This just shifts the work from one queue // (this one) to another (the completion search // done in AnalyzingSuggester). 
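// [Editor's note, not part of the patch] Orientation for this method: the
// queue drives a breadth-first walk over (automaton state, FST arc) pairs.
// Single-label transitions take the findTargetArc fast path above, while the
// ranged case below uses readCeilArc to enumerate every FST arc whose label
// falls inside [min, max], extending the accumulated input path and output
// for each match.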
- FST.Arc nextArc = Util.readCeilArc(min, fst, path.fstNode, - scratchArc, fstReader); + FST.Arc nextArc = Util.readCeilArc(min, fst, path.fstNode, scratchArc, fstReader); while (nextArc != null && nextArc.label() <= max) { - assert nextArc.label() <= max; - assert nextArc.label() >= min : nextArc.label() + " " - + min; + assert nextArc.label() <= max; + assert nextArc.label() >= min : nextArc.label() + " " + min; final IntsRefBuilder newInput = new IntsRefBuilder(); newInput.copyInts(currentInput.get()); newInput.append(nextArc.label()); - queue.add(new Path<>(t.dest, new FST.Arc() - .copyFrom(nextArc), fst.outputs - .add(path.output, nextArc.output()), newInput)); + queue.add( + new Path<>( + t.dest, + new FST.Arc().copyFrom(nextArc), + fst.outputs.add(path.output, nextArc.output()), + newInput)); final int label = nextArc.label(); // used in assert - nextArc = nextArc.isLast() ? null : fst.readNextRealArc(nextArc, - fstReader); - assert nextArc == null || label < nextArc.label() : "last: " + label - + " next: " + nextArc.label(); + nextArc = nextArc.isLast() ? null : fst.readNextRealArc(nextArc, fstReader); + assert nextArc == null || label < nextArc.label() + : "last: " + label + " next: " + nextArc.label(); } } } } return endNodes; } - } diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FreeTextSuggester.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FreeTextSuggester.java index 2ac640b28f8..11c57a72709 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FreeTextSuggester.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FreeTextSuggester.java @@ -30,7 +30,6 @@ import java.util.Comparator; import java.util.HashSet; import java.util.List; import java.util.Set; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.AnalyzerWrapper; import org.apache.lucene.analysis.TokenStream; @@ -66,45 +65,38 @@ import org.apache.lucene.util.CharsRefBuilder; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.IntsRef; import org.apache.lucene.util.IntsRefBuilder; -import org.apache.lucene.util.fst.FSTCompiler; import org.apache.lucene.util.fst.FST; import org.apache.lucene.util.fst.FST.Arc; import org.apache.lucene.util.fst.FST.BytesReader; +import org.apache.lucene.util.fst.FSTCompiler; import org.apache.lucene.util.fst.Outputs; import org.apache.lucene.util.fst.PositiveIntOutputs; import org.apache.lucene.util.fst.Util; import org.apache.lucene.util.fst.Util.Result; import org.apache.lucene.util.fst.Util.TopResults; -//import java.io.PrintWriter; +// import java.io.PrintWriter; /** - * Builds an ngram model from the text sent to {@link - * #build} and predicts based on the last grams-1 tokens in - * the request sent to {@link #lookup}. This tries to - * handle the "long tail" of suggestions for when the - * incoming query is a never before seen query string. + * Builds an ngram model from the text sent to {@link #build} and predicts based on the last grams-1 + * tokens in the request sent to {@link #lookup}. This tries to handle the "long tail" of + * suggestions for when the incoming query is a never before seen query string. * - *

- * <p>Likely this suggester would only be used as a
- * fallback, when the primary suggester fails to find
- * any suggestions.
+ * <p>Likely this suggester would only be used as a fallback, when the primary suggester fails to
+ * find any suggestions.
 *
- * <p>Note that the weight for each suggestion is unused,
- * and the suggestions are the analyzed forms (so your
- * analysis process should normally be very "light").
+ * <p>Note that the weight for each suggestion is unused, and the suggestions are the analyzed forms
+ * (so your analysis process should normally be very "light").
 *
- * <p>This uses the stupid backoff language model to smooth
- * scores across ngram models; see
- * "Large language models in machine translation",
- * http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.76.1126
- * for details.
+ * <p>This uses the stupid backoff language model to smooth scores across ngram models; see "Large
+ * language models in machine translation",
+ * http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.76.1126 for details.
 *
- * <p>From {@link #lookup}, the key of each result is the
- * ngram token; the value is Long.MAX_VALUE * score (fixed
- * point, cast to long). Divide by Long.MAX_VALUE to get
- * the score back, which ranges from 0.0 to 1.0.
- *
- * onlyMorePopular is unused.
+ * <p>From {@link #lookup}, the key of each result is the ngram token; the value is Long.MAX_VALUE *
+ * score (fixed point, cast to long). Divide by Long.MAX_VALUE to get the score back, which ranges
+ * from 0.0 to 1.0.
+ *
+ * <p>

    onlyMorePopular is unused. * * @lucene.experimental */ @@ -112,43 +104,37 @@ import org.apache.lucene.util.fst.Util.TopResults; public class FreeTextSuggester extends Lookup implements Accountable { /** Codec name used in the header for the saved model. */ - public final static String CODEC_NAME = "freetextsuggest"; + public static final String CODEC_NAME = "freetextsuggest"; /** Initial version of the saved model file format. */ - public final static int VERSION_START = 0; + public static final int VERSION_START = 0; /** Current version of the saved model file format. */ - public final static int VERSION_CURRENT = VERSION_START; + public static final int VERSION_CURRENT = VERSION_START; /** By default we use a bigram model. */ public static final int DEFAULT_GRAMS = 2; // In general this could vary with gram, but the // original paper seems to use this constant: - /** The constant used for backoff smoothing; during - * lookup, this means that if a given trigram did not - * occur, and we backoff to the bigram, the overall score - * will be 0.4 times what the bigram model would have - * assigned. */ - public final static double ALPHA = 0.4; + /** + * The constant used for backoff smoothing; during lookup, this means that if a given trigram did + * not occur, and we backoff to the bigram, the overall score will be 0.4 times what the bigram + * model would have assigned. + */ + public static final double ALPHA = 0.4; /** Holds 1gram, 2gram, 3gram models as a single FST. */ private FST fst; - - /** - * Analyzer that will be used for analyzing suggestions at - * index time. - */ + + /** Analyzer that will be used for analyzing suggestions at index time. */ private final Analyzer indexAnalyzer; private long totTokens; - /** - * Analyzer that will be used for analyzing suggestions at - * query time. - */ + /** Analyzer that will be used for analyzing suggestions at query time. */ private final Analyzer queryAnalyzer; - + // 2 = bigram, 3 = trigram private final int grams; @@ -157,39 +143,43 @@ public class FreeTextSuggester extends Lookup implements Accountable { /** Number of entries the lookup was built with */ private long count = 0; - /** The default character used to join multiple tokens - * into a single ngram token. The input tokens produced - * by the analyzer must not contain this character. */ + /** + * The default character used to join multiple tokens into a single ngram token. The input tokens + * produced by the analyzer must not contain this character. + */ public static final byte DEFAULT_SEPARATOR = 0x1e; - /** Instantiate, using the provided analyzer for both - * indexing and lookup, using bigram model by default. */ + /** + * Instantiate, using the provided analyzer for both indexing and lookup, using bigram model by + * default. + */ public FreeTextSuggester(Analyzer analyzer) { this(analyzer, analyzer, DEFAULT_GRAMS); } - /** Instantiate, using the provided indexing and lookup - * analyzers, using bigram model by default. */ + /** + * Instantiate, using the provided indexing and lookup analyzers, using bigram model by default. + */ public FreeTextSuggester(Analyzer indexAnalyzer, Analyzer queryAnalyzer) { this(indexAnalyzer, queryAnalyzer, DEFAULT_GRAMS); } - /** Instantiate, using the provided indexing and lookup - * analyzers, with the specified model (2 - * = bigram, 3 = trigram, etc.). */ + /** + * Instantiate, using the provided indexing and lookup analyzers, with the specified model (2 = + * bigram, 3 = trigram, etc.). 
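// === Editor's aside (illustrative sketch, not part of the patch) ===========
// Hedged usage sketch for the constructor documented above: build a trigram
// model from a tiny corpus and complete the next word. The entries and the
// division by Long.MAX_VALUE follow the class javadoc; weights in the input
// are ignored by this suggester:
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.search.suggest.FileDictionary;
import org.apache.lucene.search.suggest.Lookup.LookupResult;
import org.apache.lucene.search.suggest.analyzing.FreeTextSuggester;

class FreeTextExample {
  public static void main(String[] args) throws IOException {
    FreeTextSuggester suggester =
        new FreeTextSuggester(new StandardAnalyzer(), new StandardAnalyzer(), 3);
    String text = "wizard of oz\t1\nwizard of menlo park\t1\n";
    suggester.build(
        new FileDictionary(new ByteArrayInputStream(text.getBytes(StandardCharsets.UTF_8)))
            .getEntryIterator());
    // Trailing space marks the last token as finished, so the model predicts
    // the next word; divide value by Long.MAX_VALUE to recover the 0-1 score:
    for (LookupResult hit : suggester.lookup("wizard of ", false, 3)) {
      System.out.println(hit.key + " score=" + (double) hit.value / Long.MAX_VALUE);
    }
  }
}
// ============================================================================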
+ */ public FreeTextSuggester(Analyzer indexAnalyzer, Analyzer queryAnalyzer, int grams) { this(indexAnalyzer, queryAnalyzer, grams, DEFAULT_SEPARATOR); } - /** Instantiate, using the provided indexing and lookup - * analyzers, and specified model (2 = bigram, 3 = - * trigram ,etc.). The separator is passed to {@link - * ShingleFilter#setTokenSeparator} to join multiple - * tokens into a single ngram token; it must be an ascii - * (7-bit-clean) byte. No input tokens should have this - * byte, otherwise {@code IllegalArgumentException} is - * thrown. */ - public FreeTextSuggester(Analyzer indexAnalyzer, Analyzer queryAnalyzer, int grams, byte separator) { + /** + * Instantiate, using the provided indexing and lookup analyzers, and specified model (2 = bigram, + * 3 = trigram ,etc.). The separator is passed to {@link ShingleFilter#setTokenSeparator} to join + * multiple tokens into a single ngram token; it must be an ascii (7-bit-clean) byte. No input + * tokens should have this byte, otherwise {@code IllegalArgumentException} is thrown. + */ + public FreeTextSuggester( + Analyzer indexAnalyzer, Analyzer queryAnalyzer, int grams, byte separator) { this.grams = grams; this.indexAnalyzer = addShingles(indexAnalyzer); this.queryAnalyzer = addShingles(queryAnalyzer); @@ -202,7 +192,7 @@ public class FreeTextSuggester extends Lookup implements Accountable { this.separator = separator; } - /** Returns byte size of the underlying FST. */ + /** Returns byte size of the underlying FST. */ @Override public long ramBytesUsed() { if (fst == null) { @@ -233,7 +223,8 @@ public class FreeTextSuggester extends Lookup implements Accountable { } @Override - protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) { + protected TokenStreamComponents wrapComponents( + String fieldName, TokenStreamComponents components) { ShingleFilter shingles = new ShingleFilter(components.getTokenStream(), 2, grams); shingles.setTokenSeparator(Character.toString((char) separator)); return new TokenStreamComponents(components.getSource(), shingles); @@ -247,9 +238,10 @@ public class FreeTextSuggester extends Lookup implements Accountable { build(iterator, IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB); } - /** Build the suggest index, using up to the specified - * amount of temporary RAM while building. Note that - * the weights for the suggestions are ignored. */ + /** + * Build the suggest index, using up to the specified amount of temporary RAM while building. Note + * that the weights for the suggestions are ignored. 
+ */ public void build(InputIterator iterator, double ramBufferSizeMB) throws IOException { if (iterator.hasPayloads()) { throw new IllegalArgumentException("this suggester doesn't support payloads"); @@ -314,7 +306,13 @@ public class FreeTextSuggester extends Lookup implements Accountable { } int ngramCount = countGrams(term); if (ngramCount > grams) { - throw new IllegalArgumentException("tokens must not contain separator byte; got token=" + term + " but gramCount=" + ngramCount + ", which is greater than expected max ngram size=" + grams); + throw new IllegalArgumentException( + "tokens must not contain separator byte; got token=" + + term + + " but gramCount=" + + ngramCount + + ", which is greater than expected max ngram size=" + + grams); } if (ngramCount == 1) { totTokens += termsEnum.totalTermFreq(); @@ -327,7 +325,7 @@ public class FreeTextSuggester extends Lookup implements Accountable { if (fst == null) { throw new IllegalArgumentException("need at least one suggestion"); } - //System.out.println("FST: " + fst.getNodeCount() + " nodes"); + // System.out.println("FST: " + fst.getNodeCount() + " nodes"); /* PrintWriter pw = new PrintWriter("/x/tmp/out.dot"); @@ -370,11 +368,16 @@ public class FreeTextSuggester extends Lookup implements Accountable { count = input.readVLong(); byte separatorOrig = input.readByte(); if (separatorOrig != separator) { - throw new IllegalStateException("separator=" + separator + " is incorrect: original model was built with separator=" + separatorOrig); + throw new IllegalStateException( + "separator=" + + separator + + " is incorrect: original model was built with separator=" + + separatorOrig); } int gramsOrig = input.readVInt(); if (gramsOrig != grams) { - throw new IllegalStateException("grams=" + grams + " is incorrect: original model was built with grams=" + gramsOrig); + throw new IllegalStateException( + "grams=" + grams + " is incorrect: original model was built with grams=" + gramsOrig); } totTokens = input.readVLong(); @@ -384,7 +387,8 @@ public class FreeTextSuggester extends Lookup implements Accountable { } @Override - public List lookup(final CharSequence key, /* ignored */ boolean onlyMorePopular, int num) { + public List lookup( + final CharSequence key, /* ignored */ boolean onlyMorePopular, int num) { return lookup(key, null, onlyMorePopular, num); } @@ -394,7 +398,11 @@ public class FreeTextSuggester extends Lookup implements Accountable { } @Override - public List lookup(final CharSequence key, Set contexts, /* ignored */ boolean onlyMorePopular, int num) { + public List lookup( + final CharSequence key, + Set contexts, /* ignored */ + boolean onlyMorePopular, + int num) { try { return lookup(key, contexts, num); } catch (IOException ioe) { @@ -407,10 +415,10 @@ public class FreeTextSuggester extends Lookup implements Accountable { public long getCount() { return count; } - + private int countGrams(BytesRef token) { int count = 1; - for(int i=0;i lookup(final CharSequence key, Set contexts, int num) throws IOException { + public List lookup(final CharSequence key, Set contexts, int num) + throws IOException { if (contexts != null) { throw new IllegalArgumentException("this suggester doesn't support contexts"); } @@ -434,60 +443,67 @@ public class FreeTextSuggester extends Lookup implements Accountable { PositionLengthAttribute posLenAtt = ts.addAttribute(PositionLengthAttribute.class); PositionIncrementAttribute posIncAtt = ts.addAttribute(PositionIncrementAttribute.class); ts.reset(); - + BytesRefBuilder[] lastTokens = new 
BytesRefBuilder[grams]; - //System.out.println("lookup: key='" + key + "'"); - + // System.out.println("lookup: key='" + key + "'"); + // Run full analysis, but save only the // last 1gram, last 2gram, etc.: int maxEndOffset = -1; boolean sawRealToken = false; - while(ts.incrementToken()) { + while (ts.incrementToken()) { BytesRef tokenBytes = termBytesAtt.getBytesRef(); sawRealToken |= tokenBytes.length > 0; // TODO: this is somewhat iffy; today, ShingleFilter // sets posLen to the gram count; maybe we should make // a separate dedicated att for this? int gramCount = posLenAtt.getPositionLength(); - + assert gramCount <= grams; - + // Safety: make sure the recalculated count "agrees": if (countGrams(tokenBytes) != gramCount) { - throw new IllegalArgumentException("tokens must not contain separator byte; got token=" + tokenBytes + " but gramCount=" + gramCount + " does not match recalculated count=" + countGrams(tokenBytes)); + throw new IllegalArgumentException( + "tokens must not contain separator byte; got token=" + + tokenBytes + + " but gramCount=" + + gramCount + + " does not match recalculated count=" + + countGrams(tokenBytes)); } maxEndOffset = Math.max(maxEndOffset, offsetAtt.endOffset()); BytesRefBuilder b = new BytesRefBuilder(); b.append(tokenBytes); - lastTokens[gramCount-1] = b; + lastTokens[gramCount - 1] = b; } ts.end(); - + if (!sawRealToken) { - throw new IllegalArgumentException("no tokens produced by analyzer, or the only tokens were empty strings"); + throw new IllegalArgumentException( + "no tokens produced by analyzer, or the only tokens were empty strings"); } - + // Carefully fill last tokens with _ tokens; // ShingleFilter appraently won't emit "only hole" // tokens: int endPosInc = posIncAtt.getPositionIncrement(); - + // Note this will also be true if input is the empty // string (in which case we saw no tokens and // maxEndOffset is still -1), which in fact works out OK // because we fill the unigram with an empty BytesRef // below: boolean lastTokenEnded = offsetAtt.endOffset() > maxEndOffset || endPosInc > 0; - //System.out.println("maxEndOffset=" + maxEndOffset + " vs " + offsetAtt.endOffset()); - + // System.out.println("maxEndOffset=" + maxEndOffset + " vs " + offsetAtt.endOffset()); + if (lastTokenEnded) { - //System.out.println(" lastTokenEnded"); + // System.out.println(" lastTokenEnded"); // If user hit space after the last token, then // "upgrade" all tokens. 
This way "foo " will suggest // all bigrams starting w/ foo, and not any unigrams // starting with "foo": - for(int i=grams-1;i>0;i--) { - BytesRefBuilder token = lastTokens[i-1]; + for (int i = grams - 1; i > 0; i--) { + BytesRefBuilder token = lastTokens[i - 1]; if (token == null) { continue; } @@ -496,69 +512,69 @@ public class FreeTextSuggester extends Lookup implements Accountable { } lastTokens[0] = new BytesRefBuilder(); } - + Arc arc = new Arc<>(); - + BytesReader bytesReader = fst.getBytesReader(); - + // Try highest order models first, and if they return // results, return that; else, fallback: double backoff = 1.0; - + List results = new ArrayList<>(num); - + // We only add a given suffix once, from the highest // order model that saw it; for subsequent lower order // models we skip it: final Set seen = new HashSet<>(); - - for(int gram=grams-1;gram>=0;gram--) { + + for (int gram = grams - 1; gram >= 0; gram--) { BytesRefBuilder token = lastTokens[gram]; // Don't make unigram predictions from empty string: if (token == null || (token.length() == 0 && key.length() > 0)) { // Input didn't have enough tokens: - //System.out.println(" gram=" + gram + ": skip: not enough input"); + // System.out.println(" gram=" + gram + ": skip: not enough input"); continue; } - + if (endPosInc > 0 && gram <= endPosInc) { // Skip hole-only predictions; in theory we // shouldn't have to do this, but we'd need to fix // ShingleFilter to produce only-hole tokens: - //System.out.println(" break: only holes now"); + // System.out.println(" break: only holes now"); break; } - - //System.out.println("try " + (gram+1) + " gram token=" + token.utf8ToString()); - + + // System.out.println("try " + (gram+1) + " gram token=" + token.utf8ToString()); + // TODO: we could add fuzziness here // match the prefix portion exactly - //Pair prefixOutput = null; + // Pair prefixOutput = null; Long prefixOutput = null; try { prefixOutput = lookupPrefix(fst, bytesReader, token.get(), arc); } catch (IOException bogus) { throw new RuntimeException(bogus); } - //System.out.println(" prefixOutput=" + prefixOutput); - + // System.out.println(" prefixOutput=" + prefixOutput); + if (prefixOutput == null) { // This model never saw this prefix, e.g. the // trigram model never saw context "purple mushroom" backoff *= ALPHA; continue; } - + // TODO: we could do this division at build time, and // bake it into the FST? 
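    // (Editorial sketch, hypothetical query.) With grams=3 and input "please divide this",
    // the analysis loop above leaves:
    //   lastTokens[0] = "this"
    //   lastTokens[1] = "divide<SEP>this"
    //   lastTokens[2] = "please<SEP>divide<SEP>this"   (<SEP> = DEFAULT_SEPARATOR, 0x1e)
    // With a trailing space ("please divide this "), each saved ngram is shifted up one
    // order with the separator appended and the unigram slot is emptied, so only
    // bigram/trigram predictions for the *next* token are attempted.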
- + // Denominator for computing scores from current // model's predictions: long contextCount = totTokens; - + BytesRef lastTokenFragment = null; - - for(int i=token.length()-1;i>=0;i--) { + + for (int i = token.length() - 1; i >= 0; i--) { if (token.byteAt(i) == separator) { BytesRef context = new BytesRef(token.bytes(), 0, i); Long output = Util.get(fst, Util.toIntsRef(context, new IntsRefBuilder())); @@ -568,20 +584,20 @@ public class FreeTextSuggester extends Lookup implements Accountable { break; } } - + final BytesRefBuilder finalLastToken = new BytesRefBuilder(); if (lastTokenFragment == null) { finalLastToken.copyBytes(token.get()); } else { finalLastToken.copyBytes(lastTokenFragment); } - + CharsRefBuilder spare = new CharsRefBuilder(); - + // complete top-N TopResults completions = null; try { - + // Because we store multiple models in one FST // (1gram, 2gram, 3gram), we must restrict the // search so that it only considers the current @@ -590,103 +606,117 @@ public class FreeTextSuggester extends Lookup implements Accountable { // must be from this model, but for lower order // models we have to filter out the higher order // ones: - + // Must do num+seen.size() for queue depth because we may // reject up to seen.size() paths in acceptResult(): - Util.TopNSearcher searcher = new Util.TopNSearcher(fst, num, num+seen.size(), weightComparator) { - - BytesRefBuilder scratchBytes = new BytesRefBuilder(); - - @Override - protected void addIfCompetitive(Util.FSTPath path) { - if (path.arc.label() != separator) { - //System.out.println(" keep path: " + Util.toBytesRef(path.input, new BytesRef()).utf8ToString() + "; " + path + "; arc=" + path.arc); - super.addIfCompetitive(path); - } else { - //System.out.println(" prevent path: " + Util.toBytesRef(path.input, new BytesRef()).utf8ToString() + "; " + path + "; arc=" + path.arc); - } - } - - @Override - protected boolean acceptResult(IntsRef input, Long output) { - Util.toBytesRef(input, scratchBytes); - finalLastToken.grow(finalLastToken.length() + scratchBytes.length()); - int lenSav = finalLastToken.length(); - finalLastToken.append(scratchBytes); - //System.out.println(" accept? input='" + scratchBytes.utf8ToString() + "'; lastToken='" + finalLastToken.utf8ToString() + "'; return " + (seen.contains(finalLastToken) == false)); - boolean ret = seen.contains(finalLastToken.get()) == false; - - finalLastToken.setLength(lenSav); - return ret; - } - }; - - // since this search is initialized with a single start node + Util.TopNSearcher searcher = + new Util.TopNSearcher(fst, num, num + seen.size(), weightComparator) { + + BytesRefBuilder scratchBytes = new BytesRefBuilder(); + + @Override + protected void addIfCompetitive(Util.FSTPath path) { + if (path.arc.label() != separator) { + // System.out.println(" keep path: " + Util.toBytesRef(path.input, new + // BytesRef()).utf8ToString() + "; " + path + "; arc=" + path.arc); + super.addIfCompetitive(path); + } else { + // System.out.println(" prevent path: " + Util.toBytesRef(path.input, new + // BytesRef()).utf8ToString() + "; " + path + "; arc=" + path.arc); + } + } + + @Override + protected boolean acceptResult(IntsRef input, Long output) { + Util.toBytesRef(input, scratchBytes); + finalLastToken.grow(finalLastToken.length() + scratchBytes.length()); + int lenSav = finalLastToken.length(); + finalLastToken.append(scratchBytes); + // System.out.println(" accept? 
input='" + scratchBytes.utf8ToString() + "'; + // lastToken='" + finalLastToken.utf8ToString() + "'; return " + + // (seen.contains(finalLastToken) == false)); + boolean ret = seen.contains(finalLastToken.get()) == false; + + finalLastToken.setLength(lenSav); + return ret; + } + }; + + // since this search is initialized with a single start node // it is okay to start with an empty input path here searcher.addStartPaths(arc, prefixOutput, true, new IntsRefBuilder()); - + completions = searcher.search(); assert completions.isComplete; } catch (IOException bogus) { throw new RuntimeException(bogus); } - + int prefixLength = token.length(); - + BytesRefBuilder suffix = new BytesRefBuilder(); - //System.out.println(" " + completions.length + " completions"); - + // System.out.println(" " + completions.length + " completions"); + nextCompletion: - for (Result completion : completions) { - token.setLength(prefixLength); - // append suffix - Util.toBytesRef(completion.input, suffix); - token.append(suffix); - - //System.out.println(" completion " + token.utf8ToString()); - - // Skip this path if a higher-order model already - // saw/predicted its last token: - BytesRef lastToken = token.get(); - for(int i=token.length()-1;i>=0;i--) { - if (token.byteAt(i) == separator) { - assert token.length()-i-1 > 0; - lastToken = new BytesRef(token.bytes(), i+1, token.length()-i-1); - break; - } + for (Result completion : completions) { + token.setLength(prefixLength); + // append suffix + Util.toBytesRef(completion.input, suffix); + token.append(suffix); + + // System.out.println(" completion " + token.utf8ToString()); + + // Skip this path if a higher-order model already + // saw/predicted its last token: + BytesRef lastToken = token.get(); + for (int i = token.length() - 1; i >= 0; i--) { + if (token.byteAt(i) == separator) { + assert token.length() - i - 1 > 0; + lastToken = new BytesRef(token.bytes(), i + 1, token.length() - i - 1); + break; } - if (seen.contains(lastToken)) { - //System.out.println(" skip dup " + lastToken.utf8ToString()); - continue nextCompletion; - } - seen.add(BytesRef.deepCopyOf(lastToken)); - spare.copyUTF8Bytes(token.get()); - LookupResult result = new LookupResult(spare.toString(), (long) (Long.MAX_VALUE * backoff * ((double) decodeWeight(completion.output)) / contextCount)); - results.add(result); - assert results.size() == seen.size(); - //System.out.println(" add result=" + result); } + if (seen.contains(lastToken)) { + // System.out.println(" skip dup " + lastToken.utf8ToString()); + continue nextCompletion; + } + seen.add(BytesRef.deepCopyOf(lastToken)); + spare.copyUTF8Bytes(token.get()); + LookupResult result = + new LookupResult( + spare.toString(), + (long) + (Long.MAX_VALUE + * backoff + * ((double) decodeWeight(completion.output)) + / contextCount)); + results.add(result); + assert results.size() == seen.size(); + // System.out.println(" add result=" + result); + } backoff *= ALPHA; } - - Collections.sort(results, new Comparator() { - @Override - public int compare(LookupResult a, LookupResult b) { - if (a.value > b.value) { - return -1; - } else if (a.value < b.value) { - return 1; - } else { - // Tie break by UTF16 sort order: - return ((String) a.key).compareTo((String) b.key); - } - } - }); - + + Collections.sort( + results, + new Comparator() { + @Override + public int compare(LookupResult a, LookupResult b) { + if (a.value > b.value) { + return -1; + } else if (a.value < b.value) { + return 1; + } else { + // Tie break by UTF16 sort order: + return ((String) 
a.key).compareTo((String) b.key); + } + } + }); + if (results.size() > num) { results.subList(num, results.size()).clear(); } - + return results; } } @@ -697,20 +727,21 @@ public class FreeTextSuggester extends Lookup implements Accountable { } /** cost -> weight */ - //private long decodeWeight(Pair output) { + // private long decodeWeight(Pair output) { private long decodeWeight(Long output) { assert output != null; - return (int)(Long.MAX_VALUE - output); + return (int) (Long.MAX_VALUE - output); } - + // NOTE: copied from WFSTCompletionLookup & tweaked - private Long lookupPrefix(FST fst, FST.BytesReader bytesReader, - BytesRef scratch, Arc arc) throws /*Bogus*/IOException { + private Long lookupPrefix( + FST fst, FST.BytesReader bytesReader, BytesRef scratch, Arc arc) + throws /*Bogus*/ IOException { Long output = fst.outputs.getNoOutput(); - + fst.getFirstArc(arc); - + byte[] bytes = scratch.bytes; int pos = scratch.offset; int end = pos + scratch.length; @@ -721,21 +752,19 @@ public class FreeTextSuggester extends Lookup implements Accountable { output = fst.outputs.add(output, arc.output()); } } - + return output; } - static final Comparator weightComparator = new Comparator () { - @Override - public int compare(Long left, Long right) { - return left.compareTo(right); - } - }; + static final Comparator weightComparator = + new Comparator() { + @Override + public int compare(Long left, Long right) { + return left.compareTo(right); + } + }; - /** - * Returns the weight associated with an input string, - * or null if it does not exist. - */ + /** Returns the weight associated with an input string, or null if it does not exist. */ public Object get(CharSequence key) { throw new UnsupportedOperationException(); } diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FuzzySuggester.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FuzzySuggester.java index 6a7cfc493b0..6c17ed0de73 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FuzzySuggester.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FuzzySuggester.java @@ -15,10 +15,12 @@ * limitations under the License. */ package org.apache.lucene.search.suggest.analyzing; + +import static org.apache.lucene.util.automaton.Operations.DEFAULT_MAX_DETERMINIZED_STATES; + import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.TokenStreamToAutomaton; @@ -36,41 +38,28 @@ import org.apache.lucene.util.automaton.UTF32ToUTF8; import org.apache.lucene.util.fst.FST; import org.apache.lucene.util.fst.PairOutputs.Pair; -import static org.apache.lucene.util.automaton.Operations.DEFAULT_MAX_DETERMINIZED_STATES; - /** - * Implements a fuzzy {@link AnalyzingSuggester}. The similarity measurement is - * based on the Damerau-Levenshtein (optimal string alignment) algorithm, though - * you can explicitly choose classic Levenshtein by passing false - * for the transpositions parameter. - *

- * At most, this query will match terms up to
- * {@value org.apache.lucene.util.automaton.LevenshteinAutomata#MAXIMUM_SUPPORTED_DISTANCE}
- * edits. Higher distances are not supported. Note that the
- * fuzzy distance is measured in "byte space" on the bytes
- * returned by the {@link TokenStream}'s {@link
- * TermToBytesRefAttribute}, usually UTF8. By default
- * the analyzed bytes must be at least 3 {@link
- * #DEFAULT_MIN_FUZZY_LENGTH} bytes before any edits are
- * considered. Furthermore, the first 1 {@link
- * #DEFAULT_NON_FUZZY_PREFIX} byte is not allowed to be
- * edited. We allow up to 1 (@link
- * #DEFAULT_MAX_EDITS} edit.
- * If {@link #unicodeAware} parameter in the constructor is set to true, maxEdits,
- * minFuzzyLength, transpositions and nonFuzzyPrefix are measured in Unicode code
- * points (actual letters) instead of bytes.
+ * Implements a fuzzy {@link AnalyzingSuggester}. The similarity measurement is based on the
+ * Damerau-Levenshtein (optimal string alignment) algorithm, though you can explicitly choose
+ * classic Levenshtein by passing <code>false</code> for the <code>transpositions</code> parameter.
 *
- * <p>
- * NOTE: This suggester does not boost suggestions that
- * required no edits over suggestions that did require
- * edits. This is a known limitation.
+ *
+ * <p>At most, this query will match terms up to {@value
+ * org.apache.lucene.util.automaton.LevenshteinAutomata#MAXIMUM_SUPPORTED_DISTANCE} edits. Higher
+ * distances are not supported. Note that the fuzzy distance is measured in "byte space" on the
+ * bytes returned by the {@link TokenStream}'s {@link TermToBytesRefAttribute}, usually UTF8. By
+ * default the analyzed bytes must be at least 3 {@link #DEFAULT_MIN_FUZZY_LENGTH} bytes before any
+ * edits are considered. Furthermore, the first 1 {@link #DEFAULT_NON_FUZZY_PREFIX} byte is not
+ * allowed to be edited. We allow up to 1 {@link #DEFAULT_MAX_EDITS} edit. If {@link #unicodeAware}
+ * parameter in the constructor is set to true, maxEdits, minFuzzyLength, transpositions and
+ * nonFuzzyPrefix are measured in Unicode code points (actual letters) instead of bytes.
 *
- * <p>
- * Note: complex query analyzers can have a significant impact on the lookup
- * performance. It's recommended to not use analyzers that drop or inject terms
- * like synonyms to keep the complexity of the prefix intersection low for good
- * lookup performance. At index time, complex analyzers can safely be used.
- * <p>
+ * <p>NOTE: This suggester does not boost suggestions that required no edits over suggestions that
+ * did require edits. This is a known limitation.
+ *
+ * <p>
    Note: complex query analyzers can have a significant impact on the lookup performance. It's + * recommended to not use analyzers that drop or inject terms like synonyms to keep the complexity + * of the prefix intersection low for good lookup performance. At index time, complex analyzers can + * safely be used. * * @lucene.experimental */ @@ -81,106 +70,135 @@ public final class FuzzySuggester extends AnalyzingSuggester { private final int minFuzzyLength; private final boolean unicodeAware; - /** Measure maxEdits, minFuzzyLength, transpositions and nonFuzzyPrefix - * parameters in Unicode code points (actual letters) - * instead of bytes. */ + /** + * Measure maxEdits, minFuzzyLength, transpositions and nonFuzzyPrefix parameters in Unicode code + * points (actual letters) instead of bytes. + */ public static final boolean DEFAULT_UNICODE_AWARE = false; /** - * The default minimum length of the key passed to {@link - * #lookup} before any edits are allowed. + * The default minimum length of the key passed to {@link #lookup} before any edits are allowed. */ public static final int DEFAULT_MIN_FUZZY_LENGTH = 3; - /** - * The default prefix length where edits are not allowed. - */ + /** The default prefix length where edits are not allowed. */ public static final int DEFAULT_NON_FUZZY_PREFIX = 1; - - /** - * The default maximum number of edits for fuzzy - * suggestions. - */ + + /** The default maximum number of edits for fuzzy suggestions. */ public static final int DEFAULT_MAX_EDITS = 1; - - /** - * The default transposition value passed to {@link LevenshteinAutomata} - */ + + /** The default transposition value passed to {@link LevenshteinAutomata} */ public static final boolean DEFAULT_TRANSPOSITIONS = true; /** * Creates a {@link FuzzySuggester} instance initialized with default values. - * + * * @param analyzer the analyzer used for this suggester */ public FuzzySuggester(Directory tempDir, String tempFileNamePrefix, Analyzer analyzer) { this(tempDir, tempFileNamePrefix, analyzer, analyzer); } - + /** - * Creates a {@link FuzzySuggester} instance with an index and query analyzer initialized with default values. - * - * @param indexAnalyzer - * Analyzer that will be used for analyzing suggestions while building the index. - * @param queryAnalyzer - * Analyzer that will be used for analyzing query text during lookup + * Creates a {@link FuzzySuggester} instance with an index and query analyzer initialized with + * default values. + * + * @param indexAnalyzer Analyzer that will be used for analyzing suggestions while building the + * index. + * @param queryAnalyzer Analyzer that will be used for analyzing query text during lookup */ - public FuzzySuggester(Directory tempDir, String tempFileNamePrefix, Analyzer indexAnalyzer, Analyzer queryAnalyzer) { - this(tempDir, tempFileNamePrefix, indexAnalyzer, queryAnalyzer, EXACT_FIRST | PRESERVE_SEP, 256, -1, true, DEFAULT_MAX_EDITS, DEFAULT_TRANSPOSITIONS, - DEFAULT_NON_FUZZY_PREFIX, DEFAULT_MIN_FUZZY_LENGTH, DEFAULT_UNICODE_AWARE); + public FuzzySuggester( + Directory tempDir, + String tempFileNamePrefix, + Analyzer indexAnalyzer, + Analyzer queryAnalyzer) { + this( + tempDir, + tempFileNamePrefix, + indexAnalyzer, + queryAnalyzer, + EXACT_FIRST | PRESERVE_SEP, + 256, + -1, + true, + DEFAULT_MAX_EDITS, + DEFAULT_TRANSPOSITIONS, + DEFAULT_NON_FUZZY_PREFIX, + DEFAULT_MIN_FUZZY_LENGTH, + DEFAULT_UNICODE_AWARE); } /** * Creates a {@link FuzzySuggester} instance. 
- * - * @param indexAnalyzer Analyzer that will be used for - * analyzing suggestions while building the index. - * @param queryAnalyzer Analyzer that will be used for - * analyzing query text during lookup + * + * @param indexAnalyzer Analyzer that will be used for analyzing suggestions while building the + * index. + * @param queryAnalyzer Analyzer that will be used for analyzing query text during lookup * @param options see {@link #EXACT_FIRST}, {@link #PRESERVE_SEP} - * @param maxSurfaceFormsPerAnalyzedForm Maximum number of - * surface forms to keep for a single analyzed form. - * When there are too many surface forms we discard the - * lowest weighted ones. - * @param maxGraphExpansions Maximum number of graph paths - * to expand from the analyzed form. Set this to -1 for - * no limit. + * @param maxSurfaceFormsPerAnalyzedForm Maximum number of surface forms to keep for a single + * analyzed form. When there are too many surface forms we discard the lowest weighted ones. + * @param maxGraphExpansions Maximum number of graph paths to expand from the analyzed form. Set + * this to -1 for no limit. * @param preservePositionIncrements Whether position holes should appear in the automaton - * @param maxEdits must be >= 0 and <= {@link LevenshteinAutomata#MAXIMUM_SUPPORTED_DISTANCE} . - * @param transpositions true if transpositions should be treated as a primitive - * edit operation. If this is false, comparisons will implement the classic - * Levenshtein algorithm. - * @param nonFuzzyPrefix length of common (non-fuzzy) prefix (see default {@link #DEFAULT_NON_FUZZY_PREFIX} - * @param minFuzzyLength minimum length of lookup key before any edits are allowed (see default {@link #DEFAULT_MIN_FUZZY_LENGTH}) + * @param maxEdits must be >= 0 and <= {@link + * LevenshteinAutomata#MAXIMUM_SUPPORTED_DISTANCE} . + * @param transpositions true if transpositions should be treated as a primitive edit + * operation. If this is false, comparisons will implement the classic Levenshtein algorithm. + * @param nonFuzzyPrefix length of common (non-fuzzy) prefix (see default {@link + * #DEFAULT_NON_FUZZY_PREFIX} + * @param minFuzzyLength minimum length of lookup key before any edits are allowed (see default + * {@link #DEFAULT_MIN_FUZZY_LENGTH}) * @param unicodeAware operate Unicode code points instead of bytes. 
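A minimal wiring sketch for the simple constructor above; the scratch directory path is an assumption and the build(...) input is omitted:

    import java.nio.file.Paths;
    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.search.suggest.analyzing.FuzzySuggester;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;

    public class FuzzySketch {
      public static void main(String[] args) throws Exception {
        Directory tempDir = FSDirectory.open(Paths.get("/tmp/suggest")); // hypothetical scratch dir
        FuzzySuggester suggester = new FuzzySuggester(tempDir, "fuzzy", new StandardAnalyzer());
        // After build(...), a key such as "mushrom" can still reach "mushroom": one edit
        // (DEFAULT_MAX_EDITS) is allowed past the first byte (DEFAULT_NON_FUZZY_PREFIX)
        // once the analyzed key is at least DEFAULT_MIN_FUZZY_LENGTH (3) bytes long.
      }
    }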
*/ - public FuzzySuggester(Directory tempDir, String tempFileNamePrefix, Analyzer indexAnalyzer, Analyzer queryAnalyzer, - int options, int maxSurfaceFormsPerAnalyzedForm, int maxGraphExpansions, - boolean preservePositionIncrements, int maxEdits, boolean transpositions, - int nonFuzzyPrefix, int minFuzzyLength, boolean unicodeAware) { - super(tempDir, tempFileNamePrefix, indexAnalyzer, queryAnalyzer, options, maxSurfaceFormsPerAnalyzedForm, maxGraphExpansions, preservePositionIncrements); + public FuzzySuggester( + Directory tempDir, + String tempFileNamePrefix, + Analyzer indexAnalyzer, + Analyzer queryAnalyzer, + int options, + int maxSurfaceFormsPerAnalyzedForm, + int maxGraphExpansions, + boolean preservePositionIncrements, + int maxEdits, + boolean transpositions, + int nonFuzzyPrefix, + int minFuzzyLength, + boolean unicodeAware) { + super( + tempDir, + tempFileNamePrefix, + indexAnalyzer, + queryAnalyzer, + options, + maxSurfaceFormsPerAnalyzedForm, + maxGraphExpansions, + preservePositionIncrements); if (maxEdits < 0 || maxEdits > LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE) { - throw new IllegalArgumentException("maxEdits must be between 0 and " + LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE); + throw new IllegalArgumentException( + "maxEdits must be between 0 and " + LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE); } if (nonFuzzyPrefix < 0) { - throw new IllegalArgumentException("nonFuzzyPrefix must not be >= 0 (got " + nonFuzzyPrefix + ")"); + throw new IllegalArgumentException( + "nonFuzzyPrefix must not be >= 0 (got " + nonFuzzyPrefix + ")"); } if (minFuzzyLength < 0) { - throw new IllegalArgumentException("minFuzzyLength must not be >= 0 (got " + minFuzzyLength + ")"); + throw new IllegalArgumentException( + "minFuzzyLength must not be >= 0 (got " + minFuzzyLength + ")"); } - + this.maxEdits = maxEdits; this.transpositions = transpositions; this.nonFuzzyPrefix = nonFuzzyPrefix; this.minFuzzyLength = minFuzzyLength; this.unicodeAware = unicodeAware; } - + @Override - protected List>> getFullPrefixPaths(List>> prefixPaths, - Automaton lookupAutomaton, - FST> fst) - throws IOException { + protected List>> getFullPrefixPaths( + List>> prefixPaths, + Automaton lookupAutomaton, + FST> fst) + throws IOException { // TODO: right now there's no penalty for fuzzy/edits, // ie a completion whose prefix matched exactly what the @@ -223,19 +241,23 @@ public final class FuzzySuggester extends AnalyzingSuggester { Automaton toLevenshteinAutomata(Automaton automaton) { List subs = new ArrayList<>(); FiniteStringsIterator finiteStrings = new FiniteStringsIterator(automaton); - for (IntsRef string; (string = finiteStrings.next()) != null;) { + for (IntsRef string; (string = finiteStrings.next()) != null; ) { if (string.length <= nonFuzzyPrefix || string.length < minFuzzyLength) { subs.add(Automata.makeString(string.ints, string.offset, string.length)); } else { - int ints[] = new int[string.length-nonFuzzyPrefix]; - System.arraycopy(string.ints, string.offset+nonFuzzyPrefix, ints, 0, ints.length); + int ints[] = new int[string.length - nonFuzzyPrefix]; + System.arraycopy(string.ints, string.offset + nonFuzzyPrefix, ints, 0, ints.length); // TODO: maybe add alphaMin to LevenshteinAutomata, // and pass 1 instead of 0? We probably don't want // to allow the trailing dedup bytes to be // edited... but then 0 byte is "in general" allowed // on input (but not in UTF8). - LevenshteinAutomata lev = new LevenshteinAutomata(ints, unicodeAware ? 
Character.MAX_CODE_POINT : 255, transpositions); - subs.add(lev.toAutomaton(maxEdits, UnicodeUtil.newString(string.ints, string.offset, nonFuzzyPrefix))); + LevenshteinAutomata lev = + new LevenshteinAutomata( + ints, unicodeAware ? Character.MAX_CODE_POINT : 255, transpositions); + subs.add( + lev.toAutomaton( + maxEdits, UnicodeUtil.newString(string.ints, string.offset, nonFuzzyPrefix))); } } @@ -249,7 +271,7 @@ public final class FuzzySuggester extends AnalyzingSuggester { // multiple paths: this is really scary! is it slow? // maybe we should not do this and throw UOE? Automaton a = Operations.union(subs); - // TODO: we could call toLevenshteinAutomata() before det? + // TODO: we could call toLevenshteinAutomata() before det? // this only happens if you have multiple paths anyway (e.g. synonyms) return Operations.determinize(a, DEFAULT_MAX_DETERMINIZED_STATES); } diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/SuggestStopFilter.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/SuggestStopFilter.java index 736d15da5f1..f7cbd50a75f 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/SuggestStopFilter.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/SuggestStopFilter.java @@ -17,7 +17,6 @@ package org.apache.lucene.search.suggest.analyzing; import java.io.IOException; - import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.StopFilter; import org.apache.lucene.analysis.TokenFilter; @@ -27,22 +26,20 @@ import org.apache.lucene.analysis.tokenattributes.KeywordAttribute; import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; -/** Like {@link StopFilter} except it will not remove the - * last token if that token was not followed by some token - * separator. For example, a query 'find the' would - * preserve the 'the' since it was not followed by a space or - * punctuation or something, and mark it KEYWORD so future - * stemmers won't touch it either while a query like "find - * the popsicle' would remove 'the' as a stopword. +/** + * Like {@link StopFilter} except it will not remove the last token if that token was not followed + * by some token separator. For example, a query 'find the' would preserve the 'the' since it was + * not followed by a space or punctuation or something, and mark it KEYWORD so future stemmers won't + * touch it either while a query like "find the popsicle' would remove 'the' as a stopword. * - *

    Normally you'd use the ordinary {@link StopFilter} - * in your indexAnalyzer and then this class in your - * queryAnalyzer, when using one of the analyzing suggesters. */ - + *
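A short sketch of the behavior just described, using a whitespace tokenizer and a one-word stop set (all names here are illustrative):

    import java.io.StringReader;
    import java.util.Arrays;
    import org.apache.lucene.analysis.CharArraySet;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.core.WhitespaceTokenizer;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.lucene.search.suggest.analyzing.SuggestStopFilter;

    public class SuggestStopSketch {
      public static void main(String[] args) throws Exception {
        CharArraySet stopWords = new CharArraySet(Arrays.asList("the"), true);
        WhitespaceTokenizer tokenizer = new WhitespaceTokenizer();
        tokenizer.setReader(new StringReader("find the")); // nothing follows "the"
        try (TokenStream ts = new SuggestStopFilter(tokenizer, stopWords)) {
          CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
          ts.reset();
          while (ts.incrementToken()) {
            System.out.println(term); // "find", then "the" (kept, and marked KEYWORD)
          }
          ts.end();
        }
        // With "find the popsicle" instead, "the" is followed by a separator and is removed.
      }
    }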

    Normally you'd use the ordinary {@link StopFilter} in your indexAnalyzer and then this class + * in your queryAnalyzer, when using one of the analyzing suggesters. + */ public final class SuggestStopFilter extends TokenFilter { private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); - private final PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class); + private final PositionIncrementAttribute posIncAtt = + addAttribute(PositionIncrementAttribute.class); private final KeywordAttribute keywordAtt = addAttribute(KeywordAttribute.class); private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class); private final CharArraySet stopWords; diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/SuggestStopFilterFactory.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/SuggestStopFilterFactory.java index 90f3c8aa8fd..42bdffe8d74 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/SuggestStopFilterFactory.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/SuggestStopFilterFactory.java @@ -18,20 +18,19 @@ package org.apache.lucene.search.suggest.analyzing; import java.io.IOException; import java.util.Map; - import org.apache.lucene.analysis.CharArraySet; +import org.apache.lucene.analysis.TokenFilterFactory; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.WordlistLoader; import org.apache.lucene.analysis.en.EnglishAnalyzer; import org.apache.lucene.util.ResourceLoader; import org.apache.lucene.util.ResourceLoaderAware; -import org.apache.lucene.analysis.TokenFilterFactory; /** * Factory for {@link SuggestStopFilter}. * *

    - * <fieldType name="autosuggest" class="solr.TextField" 
    + * <fieldType name="autosuggest" class="solr.TextField"
      *            positionIncrementGap="100" autoGeneratePhraseQueries="true">
      *   <analyzer>
      *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
    @@ -41,35 +40,30 @@ import org.apache.lucene.analysis.TokenFilterFactory;
      *   </analyzer>
      * </fieldType>
    * - *

- * All attributes are optional:
- * </p>
+ * <p>All attributes are optional:
+ *
 * <ul>
- *   <li><code>ignoreCase</code> defaults to <code>false</code></li>
- *   <li><code>words</code> should be the name of a stopwords file to parse, if not
- *       specified the factory will use {@link EnglishAnalyzer#ENGLISH_STOP_WORDS_SET}
- *   </li>
- *   <li><code>format</code> defines how the <code>words</code> file will be parsed,
- *       and defaults to <code>wordset</code>. If <code>words</code> is not specified,
- *       then <code>format</code> must not be specified.
- *   </li>
+ *   <li><code>ignoreCase</code> defaults to <code>false</code>
+ *   <li><code>words</code> should be the name of a stopwords file to parse, if not specified the
+ *       factory will use {@link EnglishAnalyzer#ENGLISH_STOP_WORDS_SET}
+ *   <li><code>format</code> defines how the <code>words</code> file will be parsed, and defaults to
+ *       <code>wordset</code>. If <code>words</code> is not specified, then <code>format</code> must
+ *       not be specified (a usage sketch follows this list).
 * </ul>
- * <p>
- * The valid values for the <code>format</code> option are:
- * </p>
+ *
+ * <p>The valid values for the <code>format</code> option are:
+ *
 * <ul>
- *   <li><code>wordset</code> - This is the default format, which supports one word per
- *       line (including any intra-word whitespace) and allows whole line comments
- *       beginning with the "#" character. Blank lines are ignored. See
- *       {@link WordlistLoader#getLines WordlistLoader.getLines} for details.
- *   </li>
- *   <li><code>snowball</code> - This format allows for multiple words specified on each
- *       line, and trailing comments may be specified using the vertical line ("|").
- *       Blank lines are ignored. See
- *       {@link WordlistLoader#getSnowballWordSet WordlistLoader.getSnowballWordSet}
- *       for details.
- *   </li>
+ *   <li><code>wordset</code> - This is the default format, which supports one word per line
+ *       (including any intra-word whitespace) and allows whole line comments beginning with the "#"
+ *       character. Blank lines are ignored. See {@link WordlistLoader#getLines
+ *       WordlistLoader.getLines} for details.
+ *   <li><code>snowball</code> - This format allows for multiple words specified on each line, and
+ *       trailing comments may be specified using the vertical line ("|"). Blank lines are
+ *       ignored. See {@link WordlistLoader#getSnowballWordSet WordlistLoader.getSnowballWordSet}
+ *       for details.
 * </ul>
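The usage sketch referenced in the options list above: programmatic configuration of the factory. The stopwords file name and the choice of ClasspathResourceLoader (assumed to live in org.apache.lucene.util on this branch, alongside the ResourceLoader import shown earlier) are assumptions, not part of this patch:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.core.WhitespaceTokenizer;
    import org.apache.lucene.search.suggest.analyzing.SuggestStopFilterFactory;
    import org.apache.lucene.util.ClasspathResourceLoader;

    public class SuggestStopFactorySketch {
      public static void main(String[] args) throws Exception {
        Map<String, String> factoryArgs = new HashMap<>();
        factoryArgs.put("words", "stopwords.txt"); // hypothetical wordset file on the classpath
        factoryArgs.put("format", "wordset");
        factoryArgs.put("ignoreCase", "true");
        SuggestStopFilterFactory factory = new SuggestStopFilterFactory(factoryArgs);
        // any ResourceLoader that can locate stopwords.txt will do here
        factory.inform(new ClasspathResourceLoader(SuggestStopFactorySketch.class));
        TokenStream filtered = factory.create(new WhitespaceTokenizer());
      }
    }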
    + * * @since 5.0.0 * @lucene.spi {@value #NAME} */ @@ -89,7 +83,7 @@ public class SuggestStopFilterFactory extends TokenFilterFactory implements Reso private final boolean ignoreCase; /** Creates a new StopFilterFactory */ - public SuggestStopFilterFactory(Map args) { + public SuggestStopFilterFactory(Map args) { super(args); stopWordFiles = get(args, "words"); format = get(args, "format", (null == stopWordFiles ? null : FORMAT_WORDSET)); @@ -112,11 +106,13 @@ public class SuggestStopFilterFactory extends TokenFilterFactory implements Reso } else if (FORMAT_SNOWBALL.equalsIgnoreCase(format)) { stopWords = getSnowballWordSet(loader, stopWordFiles, ignoreCase); } else { - throw new IllegalArgumentException("Unknown 'format' specified for 'words' file: " + format); + throw new IllegalArgumentException( + "Unknown 'format' specified for 'words' file: " + format); } } else { if (null != format) { - throw new IllegalArgumentException("'format' can not be specified w/o an explicit 'words' file: " + format); + throw new IllegalArgumentException( + "'format' can not be specified w/o an explicit 'words' file: " + format); } stopWords = new CharArraySet(EnglishAnalyzer.ENGLISH_STOP_WORDS_SET, ignoreCase); } diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/package-info.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/package-info.java index 3ac2442babc..df04c343120 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/package-info.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/package-info.java @@ -14,8 +14,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -/** - * Analyzer based autosuggest. - */ + +/** Analyzer based autosuggest. */ package org.apache.lucene.search.suggest.analyzing; diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/Completion50PostingsFormat.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/Completion50PostingsFormat.java index b0c6d37930d..03171174a7f 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/Completion50PostingsFormat.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/Completion50PostingsFormat.java @@ -19,27 +19,22 @@ package org.apache.lucene.search.suggest.document; import org.apache.lucene.codecs.PostingsFormat; /** - * {@link org.apache.lucene.search.suggest.document.CompletionPostingsFormat} - * for {@code org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat}. - * This format is only used for backward-compatibility of the index format - * and cannot be used to write data, use {@link Completion84PostingsFormat} - * on new indices. + * {@link org.apache.lucene.search.suggest.document.CompletionPostingsFormat} for {@code + * org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat}. This format is only used for + * backward-compatibility of the index format and cannot be used to write data, use {@link + * Completion84PostingsFormat} on new indices. * * @lucene.experimental */ public class Completion50PostingsFormat extends CompletionPostingsFormat { - /** - * Creates a {@link Completion50PostingsFormat} that will load - * the completion FST on-heap. - */ + /** Creates a {@link Completion50PostingsFormat} that will load the completion FST on-heap. 
*/ public Completion50PostingsFormat() { this(FSTLoadMode.ON_HEAP); } /** - * Creates a {@link Completion50PostingsFormat} that will - * use the provided fstLoadMode to determine - * if the completion FST should be loaded on or off heap. + * Creates a {@link Completion50PostingsFormat} that will use the provided fstLoadMode + * to determine if the completion FST should be loaded on or off heap. */ public Completion50PostingsFormat(FSTLoadMode fstLoadMode) { super("completion", fstLoadMode); diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/Completion84PostingsFormat.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/Completion84PostingsFormat.java index 4c77437843e..2057a2bf4e9 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/Completion84PostingsFormat.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/Completion84PostingsFormat.java @@ -19,24 +19,20 @@ package org.apache.lucene.search.suggest.document; import org.apache.lucene.codecs.PostingsFormat; /** - * {@link org.apache.lucene.search.suggest.document.CompletionPostingsFormat} - * for {@link org.apache.lucene.codecs.lucene84.Lucene84PostingsFormat} + * {@link org.apache.lucene.search.suggest.document.CompletionPostingsFormat} for {@link + * org.apache.lucene.codecs.lucene84.Lucene84PostingsFormat} * * @lucene.experimental */ public class Completion84PostingsFormat extends CompletionPostingsFormat { - /** - * Creates a {@link Completion84PostingsFormat} that will load - * the completion FST on-heap. - */ + /** Creates a {@link Completion84PostingsFormat} that will load the completion FST on-heap. */ public Completion84PostingsFormat() { this(FSTLoadMode.ON_HEAP); } /** - * Creates a {@link Completion84PostingsFormat} that will - * use the provided fstLoadMode to determine - * if the completion FST should be loaded on or off heap. + * Creates a {@link Completion84PostingsFormat} that will use the provided fstLoadMode + * to determine if the completion FST should be loaded on or off heap. */ public Completion84PostingsFormat(FSTLoadMode fstLoadMode) { super("Completion84", fstLoadMode); diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionAnalyzer.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionAnalyzer.java index 35cf5e8bc2e..d3a80dec35e 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionAnalyzer.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionAnalyzer.java @@ -22,50 +22,47 @@ import org.apache.lucene.analysis.TokenStreamToAutomaton; import org.apache.lucene.analysis.miscellaneous.ConcatenateGraphFilter; /** - * Wraps an {@link org.apache.lucene.analysis.Analyzer} - * to provide additional completion-only tuning - * (e.g. preserving token separators, preserving position increments while converting - * a token stream to an automaton) - *
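Both Completion*PostingsFormat constructors above reduce to an FSTLoadMode choice; a minimal sketch (the class name is made up):

    import org.apache.lucene.codecs.PostingsFormat;
    import org.apache.lucene.search.suggest.document.Completion84PostingsFormat;
    import org.apache.lucene.search.suggest.document.CompletionPostingsFormat.FSTLoadMode;

    public class CompletionFormatSketch {
      // default constructor keeps the FST on-heap; the one-arg form makes the choice explicit
      static final PostingsFormat ON_HEAP = new Completion84PostingsFormat();
      static final PostingsFormat OFF_HEAP = new Completion84PostingsFormat(FSTLoadMode.OFF_HEAP);
    }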

    - * Can be used to index {@link SuggestField} and {@link ContextSuggestField} - * and as a query analyzer to {@link PrefixCompletionQuery} amd {@link FuzzyCompletionQuery} - *

    - * NOTE: In most cases, index and query analyzer should have same values for {@link #preservePositionIncrements()} - * and {@link #preserveSep()} + * Wraps an {@link org.apache.lucene.analysis.Analyzer} to provide additional completion-only tuning + * (e.g. preserving token separators, preserving position increments while converting a token stream + * to an automaton) + * + *

+ * <p>Can be used to index {@link SuggestField} and {@link ContextSuggestField} and as a query
+ * analyzer to {@link PrefixCompletionQuery} and {@link FuzzyCompletionQuery}
+ *

    NOTE: In most cases, index and query analyzer should have same values for {@link + * #preservePositionIncrements()} and {@link #preserveSep()} * * @lucene.experimental - * * @since 5.1.0 */ public final class CompletionAnalyzer extends AnalyzerWrapper { /** - * Represent a hole character, inserted by {@link org.apache.lucene.analysis.TokenStreamToAutomaton} + * Represent a hole character, inserted by {@link + * org.apache.lucene.analysis.TokenStreamToAutomaton} */ - final static int HOLE_CHARACTER = TokenStreamToAutomaton.HOLE; + static final int HOLE_CHARACTER = TokenStreamToAutomaton.HOLE; private final Analyzer analyzer; /** - * Preserve separation between tokens - * when converting to an automaton - *

    - * Defaults to true + * Preserve separation between tokens when converting to an automaton + * + *

    Defaults to true */ private final boolean preserveSep; /** - * Preserve position increments for tokens - * when converting to an automaton - *

    - * Defaults to true + * Preserve position increments for tokens when converting to an automaton + * + *

    Defaults to true */ private final boolean preservePositionIncrements; /** * Sets the maximum number of graph expansions of a completion automaton - *

    - * Defaults to -1 (no limit) + * + *

    Defaults to -1 (no limit) */ private final int maxGraphExpansions; @@ -74,10 +71,15 @@ public final class CompletionAnalyzer extends AnalyzerWrapper { * * @param analyzer token stream to be converted to an automaton * @param preserveSep Preserve separation between tokens when converting to an automaton - * @param preservePositionIncrements Preserve position increments for tokens when converting to an automaton + * @param preservePositionIncrements Preserve position increments for tokens when converting to an + * automaton * @param maxGraphExpansions Sets the maximum number of graph expansions of a completion automaton */ - public CompletionAnalyzer(Analyzer analyzer, boolean preserveSep, boolean preservePositionIncrements, int maxGraphExpansions) { + public CompletionAnalyzer( + Analyzer analyzer, + boolean preserveSep, + boolean preservePositionIncrements, + int maxGraphExpansions) { super(PER_FIELD_REUSE_STRATEGY); this.analyzer = analyzer; this.preserveSep = preserveSep; @@ -90,15 +92,24 @@ public final class CompletionAnalyzer extends AnalyzerWrapper { * preserving token separation, position increments and no limit on graph expansions */ public CompletionAnalyzer(Analyzer analyzer) { - this(analyzer, ConcatenateGraphFilter.DEFAULT_PRESERVE_SEP, ConcatenateGraphFilter.DEFAULT_PRESERVE_POSITION_INCREMENTS, ConcatenateGraphFilter.DEFAULT_MAX_GRAPH_EXPANSIONS); + this( + analyzer, + ConcatenateGraphFilter.DEFAULT_PRESERVE_SEP, + ConcatenateGraphFilter.DEFAULT_PRESERVE_POSITION_INCREMENTS, + ConcatenateGraphFilter.DEFAULT_MAX_GRAPH_EXPANSIONS); } /** * Calls {@link #CompletionAnalyzer(org.apache.lucene.analysis.Analyzer, boolean, boolean, int)} * with no limit on graph expansions */ - public CompletionAnalyzer(Analyzer analyzer, boolean preserveSep, boolean preservePositionIncrements) { - this(analyzer, preserveSep, preservePositionIncrements, ConcatenateGraphFilter.DEFAULT_MAX_GRAPH_EXPANSIONS); + public CompletionAnalyzer( + Analyzer analyzer, boolean preserveSep, boolean preservePositionIncrements) { + this( + analyzer, + preserveSep, + preservePositionIncrements, + ConcatenateGraphFilter.DEFAULT_MAX_GRAPH_EXPANSIONS); } /** @@ -106,20 +117,24 @@ public final class CompletionAnalyzer extends AnalyzerWrapper { * preserving token separation and position increments */ public CompletionAnalyzer(Analyzer analyzer, int maxGraphExpansions) { - this(analyzer, ConcatenateGraphFilter.DEFAULT_PRESERVE_SEP, ConcatenateGraphFilter.DEFAULT_PRESERVE_POSITION_INCREMENTS, maxGraphExpansions); + this( + analyzer, + ConcatenateGraphFilter.DEFAULT_PRESERVE_SEP, + ConcatenateGraphFilter.DEFAULT_PRESERVE_POSITION_INCREMENTS, + maxGraphExpansions); } /** - * Returns true if separation between tokens are preserved when converting - * the token stream to an automaton + * Returns true if separation between tokens are preserved when converting the token stream to an + * automaton */ public boolean preserveSep() { return preserveSep; } /** - * Returns true if position increments are preserved when converting - * the token stream to an automaton + * Returns true if position increments are preserved when converting the token stream to an + * automaton */ public boolean preservePositionIncrements() { return preservePositionIncrements; @@ -131,9 +146,14 @@ public final class CompletionAnalyzer extends AnalyzerWrapper { } @Override - protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) { - CompletionTokenStream tokenStream = new 
CompletionTokenStream(components.getTokenStream(), - preserveSep, preservePositionIncrements, maxGraphExpansions); + protected TokenStreamComponents wrapComponents( + String fieldName, TokenStreamComponents components) { + CompletionTokenStream tokenStream = + new CompletionTokenStream( + components.getTokenStream(), + preserveSep, + preservePositionIncrements, + maxGraphExpansions); return new TokenStreamComponents(components.getSource(), tokenStream); } } diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionFieldsConsumer.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionFieldsConsumer.java index b3e1bf961a8..eff9ed193f4 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionFieldsConsumer.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionFieldsConsumer.java @@ -16,10 +16,13 @@ */ package org.apache.lucene.search.suggest.document; +import static org.apache.lucene.search.suggest.document.CompletionPostingsFormat.COMPLETION_VERSION_CURRENT; +import static org.apache.lucene.search.suggest.document.CompletionPostingsFormat.DICT_EXTENSION; +import static org.apache.lucene.search.suggest.document.CompletionPostingsFormat.INDEX_EXTENSION; + import java.io.IOException; import java.util.HashMap; import java.util.Map; - import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.codecs.FieldsConsumer; import org.apache.lucene.codecs.NormsProducer; @@ -38,21 +41,13 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.IOUtils; -import static org.apache.lucene.search.suggest.document.CompletionPostingsFormat.COMPLETION_VERSION_CURRENT; -import static org.apache.lucene.search.suggest.document.CompletionPostingsFormat.DICT_EXTENSION; -import static org.apache.lucene.search.suggest.document.CompletionPostingsFormat.INDEX_EXTENSION; - /** - *

- * Weighted FSTs for any indexed {@link SuggestField} is built on {@link #write(Fields,NormsProducer)}.
- * A weighted FST maps the analyzed forms of a field to its
- * surface form and document id. FSTs are stored in the CompletionDictionary (.lkp).
- * </p>
- * <p>
- * The file offsets of a field's FST are stored in the CompletionIndex (.cmp)
- * along with the field's internal number {@link FieldInfo#number} on {@link #close()}.
- * </p>
+ * Weighted FSTs for any indexed {@link SuggestField} are built on {@link
+ * #write(Fields,NormsProducer)}. A weighted FST maps the analyzed forms of a field to its surface
+ * form and document id. FSTs are stored in the CompletionDictionary (.lkp).
+ *
+ * <p>

    The file offsets of a field's FST are stored in the CompletionIndex (.cmp) along with the + * field's internal number {@link FieldInfo#number} on {@link #close()}. */ final class CompletionFieldsConsumer extends FieldsConsumer { @@ -63,16 +58,24 @@ final class CompletionFieldsConsumer extends FieldsConsumer { private FieldsConsumer delegateFieldsConsumer; private final String codecName; - CompletionFieldsConsumer(String codecName, PostingsFormat delegatePostingsFormat, SegmentWriteState state) throws IOException { + CompletionFieldsConsumer( + String codecName, PostingsFormat delegatePostingsFormat, SegmentWriteState state) + throws IOException { this.codecName = codecName; this.delegatePostingsFormatName = delegatePostingsFormat.getName(); this.state = state; - String dictFile = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, DICT_EXTENSION); + String dictFile = + IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, DICT_EXTENSION); boolean success = false; try { this.delegateFieldsConsumer = delegatePostingsFormat.fieldsConsumer(state); dictOut = state.directory.createOutput(dictFile, state.context); - CodecUtil.writeIndexHeader(dictOut, codecName, COMPLETION_VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix); + CodecUtil.writeIndexHeader( + dictOut, + codecName, + COMPLETION_VERSION_CURRENT, + state.segmentInfo.getId(), + state.segmentSuffix); success = true; } finally { if (success == false) { @@ -89,7 +92,8 @@ final class CompletionFieldsConsumer extends FieldsConsumer { CompletionTermWriter termWriter = new CompletionTermWriter(); Terms terms = fields.terms(field); if (terms == null) { - // this can happen from ghost fields, where the incoming Fields iterator claims a field exists but it does not + // this can happen from ghost fields, where the incoming Fields iterator claims a field + // exists but it does not continue; } TermsEnum termsEnum = terms.iterator(); @@ -103,10 +107,10 @@ final class CompletionFieldsConsumer extends FieldsConsumer { // store lookup, if needed long filePointer = dictOut.getFilePointer(); if (termWriter.finish(dictOut)) { - seenFields.put(field, new CompletionMetaData(filePointer, - termWriter.minWeight, - termWriter.maxWeight, - termWriter.type)); + seenFields.put( + field, + new CompletionMetaData( + filePointer, termWriter.minWeight, termWriter.maxWeight, termWriter.type)); } } } @@ -119,11 +123,18 @@ final class CompletionFieldsConsumer extends FieldsConsumer { return; } closed = true; - String indexFile = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, INDEX_EXTENSION); + String indexFile = + IndexFileNames.segmentFileName( + state.segmentInfo.name, state.segmentSuffix, INDEX_EXTENSION); boolean success = false; try (IndexOutput indexOut = state.directory.createOutput(indexFile, state.context)) { delegateFieldsConsumer.close(); - CodecUtil.writeIndexHeader(indexOut, codecName, COMPLETION_VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix); + CodecUtil.writeIndexHeader( + indexOut, + codecName, + COMPLETION_VERSION_CURRENT, + state.segmentInfo.getId(), + state.segmentSuffix); /* * we write the delegate postings format name so we can load it * without getting an instance in the ctor @@ -185,21 +196,20 @@ final class CompletionFieldsConsumer extends FieldsConsumer { } /** - * Stores the built FST in output - * Returns true if there was anything stored, false otherwise + * Stores the built FST in output Returns true if there was anything stored, 
false + * otherwise */ public boolean finish(IndexOutput output) throws IOException { boolean stored = builder.store(output); - assert stored || docCount == 0 : "the FST is null but docCount is != 0 actual value: [" + docCount + "]"; + assert stored || docCount == 0 + : "the FST is null but docCount is != 0 actual value: [" + docCount + "]"; if (docCount == 0) { minWeight = 0; } return stored; } - /** - * Writes all postings (surface form, weight, document id) for term - */ + /** Writes all postings (surface form, weight, document id) for term */ public void write(BytesRef term, TermsEnum termsEnum) throws IOException { postingsEnum = termsEnum.postings(postingsEnum, PostingsEnum.PAYLOADS); builder.startTerm(term); @@ -210,7 +220,8 @@ final class CompletionFieldsConsumer extends FieldsConsumer { postingsEnum.nextPosition(); assert postingsEnum.getPayload() != null; BytesRef payload = postingsEnum.getPayload(); - ByteArrayDataInput input = new ByteArrayDataInput(payload.bytes, payload.offset, payload.length); + ByteArrayDataInput input = + new ByteArrayDataInput(payload.bytes, payload.offset, payload.length); int len = input.readVInt(); scratch.grow(len); scratch.setLength(len); diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionFieldsProducer.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionFieldsProducer.java index 945041e28cf..f04b362f492 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionFieldsProducer.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionFieldsProducer.java @@ -16,6 +16,11 @@ */ package org.apache.lucene.search.suggest.document; +import static org.apache.lucene.search.suggest.document.CompletionPostingsFormat.COMPLETION_CODEC_VERSION; +import static org.apache.lucene.search.suggest.document.CompletionPostingsFormat.COMPLETION_VERSION_CURRENT; +import static org.apache.lucene.search.suggest.document.CompletionPostingsFormat.DICT_EXTENSION; +import static org.apache.lucene.search.suggest.document.CompletionPostingsFormat.INDEX_EXTENSION; + import java.io.IOException; import java.util.ArrayList; import java.util.Collection; @@ -24,7 +29,6 @@ import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; - import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.codecs.FieldsProducer; import org.apache.lucene.codecs.PostingsFormat; @@ -39,25 +43,15 @@ import org.apache.lucene.util.Accountable; import org.apache.lucene.util.Accountables; import org.apache.lucene.util.IOUtils; -import static org.apache.lucene.search.suggest.document.CompletionPostingsFormat.COMPLETION_CODEC_VERSION; -import static org.apache.lucene.search.suggest.document.CompletionPostingsFormat.COMPLETION_VERSION_CURRENT; -import static org.apache.lucene.search.suggest.document.CompletionPostingsFormat.DICT_EXTENSION; -import static org.apache.lucene.search.suggest.document.CompletionPostingsFormat.INDEX_EXTENSION; - /** - *

 * Completion index (.cmp) is opened and read at instantiation to read in {@link SuggestField}
 * numbers and their FST offsets in the Completion dictionary (.lkp).
- * <p>
- * Completion dictionary (.lkp) is opened at instantiation and a field's FST is loaded
- * into memory the first time it is requested via {@link #terms(String)}.
- * <p>
- * NOTE: Only the footer is validated for Completion dictionary (.lkp) and not the checksum due
- * to random access pattern and checksum validation being too costly at instantiation
+ *
+ * <p>Completion dictionary (.lkp) is opened at instantiation and a field's FST is loaded into
+ * memory the first time it is requested via {@link #terms(String)}.
+ *
+ * <p>
    NOTE: Only the footer is validated for Completion dictionary (.lkp) and not the checksum due + * to random access pattern and checksum validation being too costly at instantiation */ final class CompletionFieldsProducer extends FieldsProducer { @@ -66,26 +60,44 @@ final class CompletionFieldsProducer extends FieldsProducer { private IndexInput dictIn; // copy ctr for merge instance - private CompletionFieldsProducer(FieldsProducer delegateFieldsProducer, Map readers) { + private CompletionFieldsProducer( + FieldsProducer delegateFieldsProducer, Map readers) { this.delegateFieldsProducer = delegateFieldsProducer; this.readers = readers; } - CompletionFieldsProducer(String codecName, SegmentReadState state, FSTLoadMode fstLoadMode) throws IOException { - String indexFile = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, INDEX_EXTENSION); + CompletionFieldsProducer(String codecName, SegmentReadState state, FSTLoadMode fstLoadMode) + throws IOException { + String indexFile = + IndexFileNames.segmentFileName( + state.segmentInfo.name, state.segmentSuffix, INDEX_EXTENSION); delegateFieldsProducer = null; boolean success = false; try (ChecksumIndexInput index = state.directory.openChecksumInput(indexFile, state.context)) { // open up dict file containing all fsts - String dictFile = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, DICT_EXTENSION); + String dictFile = + IndexFileNames.segmentFileName( + state.segmentInfo.name, state.segmentSuffix, DICT_EXTENSION); dictIn = state.directory.openInput(dictFile, state.context); - CodecUtil.checkIndexHeader(dictIn, codecName, COMPLETION_CODEC_VERSION, COMPLETION_VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix); + CodecUtil.checkIndexHeader( + dictIn, + codecName, + COMPLETION_CODEC_VERSION, + COMPLETION_VERSION_CURRENT, + state.segmentInfo.getId(), + state.segmentSuffix); // just validate the footer for the dictIn CodecUtil.retrieveChecksum(dictIn); // open up index file (fieldNumber, offset) - CodecUtil.checkIndexHeader(index, codecName, COMPLETION_CODEC_VERSION, COMPLETION_VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix); + CodecUtil.checkIndexHeader( + index, + codecName, + COMPLETION_CODEC_VERSION, + COMPLETION_VERSION_CURRENT, + state.segmentInfo.getId(), + state.segmentSuffix); // load delegate PF PostingsFormat delegatePostingsFormat = PostingsFormat.forName(index.readString()); delegateFieldsProducer = delegatePostingsFormat.fieldsProducer(state); @@ -101,7 +113,9 @@ final class CompletionFieldsProducer extends FieldsProducer { byte type = index.readByte(); FieldInfo fieldInfo = state.fieldInfos.fieldInfo(fieldNumber); // we don't load the FST yet - readers.put(fieldInfo.name, new CompletionsTermsReader(dictIn, offset, minWeight, maxWeight, type, fstLoadMode)); + readers.put( + fieldInfo.name, + new CompletionsTermsReader(dictIn, offset, minWeight, maxWeight, type, fstLoadMode)); } CodecUtil.checkFooter(index); success = true; @@ -150,7 +164,8 @@ final class CompletionFieldsProducer extends FieldsProducer { public Collection getChildResources() { List accountableList = new ArrayList<>(); for (Map.Entry readerEntry : readers.entrySet()) { - accountableList.add(Accountables.namedAccountable(readerEntry.getKey(), readerEntry.getValue())); + accountableList.add( + Accountables.namedAccountable(readerEntry.getKey(), readerEntry.getValue())); } return Collections.unmodifiableCollection(accountableList); } @@ -162,7 +177,7 @@ final class CompletionFieldsProducer 
extends FieldsProducer { @Override public Terms terms(String field) throws IOException { - Terms terms = delegateFieldsProducer.terms(field) ; + Terms terms = delegateFieldsProducer.terms(field); if (terms == null) { return null; } @@ -173,5 +188,4 @@ final class CompletionFieldsProducer extends FieldsProducer { public int size() { return readers.size(); } - } diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionPostingsFormat.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionPostingsFormat.java index c56a1fbc817..146487d7774 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionPostingsFormat.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionPostingsFormat.java @@ -17,7 +17,6 @@ package org.apache.lucene.search.suggest.document; import java.io.IOException; - import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.codecs.FieldsConsumer; import org.apache.lucene.codecs.FieldsProducer; @@ -29,61 +28,71 @@ import org.apache.lucene.store.DataOutput; import org.apache.lucene.util.fst.FST; /** - *

- * A {@link PostingsFormat} which supports document suggestion based on
- * indexed {@link SuggestField}s.
- * Document suggestion is based on an weighted FST which map analyzed
- * terms of a {@link SuggestField} to its surface form and document id.
- * <p>
- * Files:
+ * A {@link PostingsFormat} which supports document suggestion based on indexed {@link
+ * SuggestField}s. Document suggestion is based on an weighted FST which map analyzed terms of a
+ * {@link SuggestField} to its surface form and document id.
+ *
+ * <p>Files:
+ *
 * <h3>Completion Dictionary</h3>
+ *
 * <p>The .lkp file contains an FST for each suggest field
- * </p>
+ *
 * <ul>
- *   <li>CompletionDict (.lkp) --> Header, FST<sup>NumSuggestFields</sup>, Footer</li>
- *   <li>Header --> {@link CodecUtil#writeHeader CodecHeader}</li>
- *
- *   <li>FST --> {@link FST FST<Long, BytesRef>}</li>
- *   <li>Footer --> {@link CodecUtil#writeFooter CodecFooter}</li>
+ *   <li>CompletionDict (.lkp) --> Header, FST<sup>NumSuggestFields</sup>, Footer
+ *   <li>Header --> {@link CodecUtil#writeHeader CodecHeader}
+ *
+ *   <li>FST --> {@link FST FST<Long, BytesRef>}
+ *   <li>Footer --> {@link CodecUtil#writeFooter CodecFooter}
 * </ul>
- * <p>
- * Notes:
- * </p>
+ *
+ * <p>Notes:
+ *
 * <ul>
- *   <li>Header is a {@link CodecUtil#writeHeader CodecHeader} storing the version information
- *       for the Completion implementation.</li>
- *   <li>FST maps all analyzed forms to surface forms of a SuggestField</li>
+ *   <li>Header is a {@link CodecUtil#writeHeader CodecHeader} storing the version information for
+ *       the Completion implementation.
+ *   <li>FST maps all analyzed forms to surface forms of a SuggestField
 * </ul>
 *
 * <h3>Completion Index</h3>
+ *
- * <p>The .cmp file contains an index into the completion dictionary, so that it can be
- * accessed randomly.
+ * <p>The .cmp file contains an index into the completion dictionary, so that it can be accessed
+ * randomly.
+ *
 * <ul>
- *   <li>CompletionIndex (.cmp) --> Header, NumSuggestFields, Entry<sup>NumSuggestFields</sup>, Footer</li>
- *   <li>Header --> {@link CodecUtil#writeHeader CodecHeader}</li>
- *   <li>NumSuggestFields --> {@link DataOutput#writeVInt Uint32}</li>
- *   <li>Entry --> FieldNumber, CompletionDictionaryOffset, MinWeight, MaxWeight, Type</li>
- *   <li>FieldNumber --> {@link DataOutput#writeVInt Uint32}</li>
- *   <li>CompletionDictionaryOffset --> {@link DataOutput#writeVLong Uint64}</li>
- *   <li>MinWeight --> {@link DataOutput#writeVLong Uint64}</li>
- *   <li>MaxWeight --> {@link DataOutput#writeVLong Uint64}</li>
- *   <li>Type --> {@link DataOutput#writeByte Byte}</li>
- *   <li>Footer --> {@link CodecUtil#writeFooter CodecFooter}</li>
+ *   <li>CompletionIndex (.cmp) --> Header, NumSuggestFields, Entry<sup>NumSuggestFields</sup>,
+ *       Footer
+ *   <li>Header --> {@link CodecUtil#writeHeader CodecHeader}
+ *   <li>NumSuggestFields --> {@link DataOutput#writeVInt Uint32}
+ *   <li>Entry --> FieldNumber, CompletionDictionaryOffset, MinWeight, MaxWeight, Type
+ *   <li>FieldNumber --> {@link DataOutput#writeVInt Uint32}
+ *   <li>CompletionDictionaryOffset --> {@link DataOutput#writeVLong Uint64}
+ *   <li>MinWeight --> {@link DataOutput#writeVLong Uint64}
+ *   <li>MaxWeight --> {@link DataOutput#writeVLong Uint64}
+ *   <li>Type --> {@link DataOutput#writeByte Byte}
+ *   <li>Footer --> {@link CodecUtil#writeFooter CodecFooter}
 * </ul>
- * <p>
- * Notes:
- * </p>
+ *
+ * <p>Notes:
+ *
 * <ul>
- *   <li>Header is a {@link CodecUtil#writeHeader CodecHeader} storing the version information
- *       for the Completion implementation.</li>
- *   <li>NumSuggestFields is the number of suggest fields indexed</li>
- *   <li>FieldNumber is the fields number from {@link FieldInfos}. (.fnm)</li>
- *   <li>CompletionDictionaryOffset is the file offset of a field's FST in CompletionDictionary (.lkp)</li>
- *   <li>MinWeight and MaxWeight are the global minimum and maximum weight for the field</li>
- *   <li>Type indicates if the suggester has context or not</li>
+ *   <li>Header is a {@link CodecUtil#writeHeader CodecHeader} storing the version information for
+ *       the Completion implementation.
+ *   <li>NumSuggestFields is the number of suggest fields indexed
+ *   <li>FieldNumber is the fields number from {@link FieldInfos}. (.fnm)
+ *   <li>CompletionDictionaryOffset is the file offset of a field's FST in CompletionDictionary
+ *       (.lkp)
+ *   <li>MinWeight and MaxWeight are the global minimum and maximum weight for the field
+ *   <li>Type indicates if the suggester has context or not
 * </ul>
    * * @lucene.experimental @@ -95,56 +104,49 @@ public abstract class CompletionPostingsFormat extends PostingsFormat { static final String INDEX_EXTENSION = "cmp"; static final String DICT_EXTENSION = "lkp"; - /** - * An enum that allows to control if suggester FSTs are loaded into memory or read off-heap - */ + /** An enum that allows to control if suggester FSTs are loaded into memory or read off-heap */ public enum FSTLoadMode { /** - * Always read FSTs from disk. - * NOTE: If this option is used the FST will be read off-heap even if buffered directory implementations - * are used. + * Always read FSTs from disk. NOTE: If this option is used the FST will be read off-heap even + * if buffered directory implementations are used. */ OFF_HEAP, - /** - * Never read FSTs from disk ie. all suggest fields FSTs are loaded into memory - */ + /** Never read FSTs from disk ie. all suggest fields FSTs are loaded into memory */ ON_HEAP, /** - * Automatically make the decision if FSTs are read from disk depending if the segment read from an MMAPDirectory + * Automatically make the decision if FSTs are read from disk depending if the segment read from + * an MMAPDirectory */ AUTO } private final FSTLoadMode fstLoadMode; - /** - * Used only by core Lucene at read-time via Service Provider instantiation - */ + /** Used only by core Lucene at read-time via Service Provider instantiation */ public CompletionPostingsFormat(String name) { this(name, FSTLoadMode.ON_HEAP); } /** - * Creates a {@link CompletionPostingsFormat} that will - * use the provided fstLoadMode to determine - * if the completion FST should be loaded on or off heap. + * Creates a {@link CompletionPostingsFormat} that will use the provided fstLoadMode + * to determine if the completion FST should be loaded on or off heap. 
*/ public CompletionPostingsFormat(String name, FSTLoadMode fstLoadMode) { super(name); this.fstLoadMode = fstLoadMode; } - /** - * Concrete implementation should specify the delegating postings format - */ + /** Concrete implementation should specify the delegating postings format */ protected abstract PostingsFormat delegatePostingsFormat(); @Override public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException { PostingsFormat delegatePostingsFormat = delegatePostingsFormat(); if (delegatePostingsFormat == null) { - throw new UnsupportedOperationException("Error - " + getClass().getName() - + " has been constructed without a choice of PostingsFormat"); + throw new UnsupportedOperationException( + "Error - " + + getClass().getName() + + " has been constructed without a choice of PostingsFormat"); } return new CompletionFieldsConsumer(getName(), delegatePostingsFormat, state); } diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionQuery.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionQuery.java index 6be0c91117f..2f665ce466d 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionQuery.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionQuery.java @@ -16,8 +16,10 @@ */ package org.apache.lucene.search.suggest.document; -import java.io.IOException; +import static org.apache.lucene.analysis.miscellaneous.ConcatenateGraphFilter.SEP_LABEL; +import static org.apache.lucene.search.suggest.document.CompletionAnalyzer.HOLE_CHARACTER; +import java.io.IOException; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; @@ -26,42 +28,36 @@ import org.apache.lucene.index.Terms; import org.apache.lucene.search.Query; import org.apache.lucene.search.suggest.BitsProducer; -import static org.apache.lucene.search.suggest.document.CompletionAnalyzer.HOLE_CHARACTER; -import static org.apache.lucene.analysis.miscellaneous.ConcatenateGraphFilter.SEP_LABEL; - /** - * Abstract {@link Query} that match documents containing terms with a specified prefix - * filtered by {@link BitsProducer}. This should be used to query against any {@link SuggestField}s - * or {@link ContextSuggestField}s of documents. - *

    - * Use {@link SuggestIndexSearcher#suggest(CompletionQuery, int, boolean)} to execute any query - * that provides a concrete implementation of this query. Example below shows using this query - * to retrieve the top 5 documents. + * Abstract {@link Query} that match documents containing terms with a specified prefix filtered by + * {@link BitsProducer}. This should be used to query against any {@link SuggestField}s or {@link + * ContextSuggestField}s of documents. + * + *

    Use {@link SuggestIndexSearcher#suggest(CompletionQuery, int, boolean)} to execute any query + * that provides a concrete implementation of this query. Example below shows using this query to + * retrieve the top 5 documents. * *

 * <pre class="prettyprint">
 *  SuggestIndexSearcher searcher = new SuggestIndexSearcher(reader);
 *  TopSuggestDocs suggestDocs = searcher.suggest(query, 5);
 * </pre>
    - * This query rewrites to an appropriate {@link CompletionQuery} depending on the - * type ({@link SuggestField} or {@link ContextSuggestField}) of the field the query is run against. + * + * This query rewrites to an appropriate {@link CompletionQuery} depending on the type ({@link + * SuggestField} or {@link ContextSuggestField}) of the field the query is run against. * * @lucene.experimental */ public abstract class CompletionQuery extends Query { - /** - * Term to query against - */ + /** Term to query against */ private final Term term; - /** - * {@link BitsProducer} which is used to filter the document scope. - */ + /** {@link BitsProducer} which is used to filter the document scope. */ private final BitsProducer filter; /** - * Creates a base Completion query against a term - * with a filter to scope the documents + * Creates a base Completion query against a term with a filter to scope + * the documents */ protected CompletionQuery(Term term, BitsProducer filter) { validate(term.text()); @@ -70,24 +66,18 @@ public abstract class CompletionQuery extends Query { } /** - * Returns a {@link BitsProducer}. Only suggestions matching the returned - * bits will be returned. + * Returns a {@link BitsProducer}. Only suggestions matching the returned bits will be returned. */ public BitsProducer getFilter() { return filter; } - /** - * Returns the field name this query should - * be run against - */ + /** Returns the field name this query should be run against */ public String getField() { return term.field(); } - /** - * Returns the term to be queried against - */ + /** Returns the term to be queried against */ public Term getTerm() { return term; } @@ -121,9 +111,10 @@ public abstract class CompletionQuery extends Query { if (first == false) { if (this instanceof ContextQuery) { if (type == SuggestField.TYPE) { - throw new IllegalStateException(this.getClass().getSimpleName() - + " can not be executed against a non context-enabled SuggestField: " - + getField()); + throw new IllegalStateException( + this.getClass().getSimpleName() + + " can not be executed against a non context-enabled SuggestField: " + + getField()); } } else { if (type == ContextSuggestField.TYPE) { diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionScorer.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionScorer.java index 5c0601be9e9..d409b40e5ba 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionScorer.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionScorer.java @@ -17,7 +17,6 @@ package org.apache.lucene.search.suggest.document; import java.io.IOException; - import org.apache.lucene.index.LeafReader; import org.apache.lucene.search.BulkScorer; import org.apache.lucene.search.LeafCollector; @@ -25,15 +24,13 @@ import org.apache.lucene.util.Bits; import org.apache.lucene.util.automaton.Automaton; /** - * Expert: Responsible for executing the query against an - * appropriate suggester and collecting the results - * via a collector. + * Expert: Responsible for executing the query against an appropriate suggester and collecting the + * results via a collector. * - * {@link #score(LeafCollector, Bits, int, int)} is called - * for each leaf reader. + *

    {@link #score(LeafCollector, Bits, int, int)} is called for each leaf reader. * - * {@link #accept(int,Bits)} and {@link #score(float, float)} - * is called for every matched completion (i.e. document) + *

    {@link #accept(int,Bits)} and {@link #score(float, float)} is called for every matched + * completion (i.e. document) * * @lucene.experimental */ @@ -44,6 +41,7 @@ public class CompletionScorer extends BulkScorer { // values accessed by suggester /** weight that created this scorer */ protected final CompletionWeight weight; + final LeafReader reader; final boolean filtered; final Automaton automaton; @@ -51,9 +49,14 @@ public class CompletionScorer extends BulkScorer { /** * Creates a scorer for a field-specific suggester scoped by acceptDocs */ - protected CompletionScorer(final CompletionWeight weight, final NRTSuggester suggester, - final LeafReader reader, final Bits filterDocs, - final boolean filtered, final Automaton automaton) throws IOException { + protected CompletionScorer( + final CompletionWeight weight, + final NRTSuggester suggester, + final LeafReader reader, + final Bits filterDocs, + final boolean filtered, + final Automaton automaton) + throws IOException { this.weight = weight; this.suggester = suggester; this.reader = reader; @@ -77,11 +80,11 @@ public class CompletionScorer extends BulkScorer { } /** - * Returns true if a document with docID is accepted, - * false if the docID maps to a deleted - * document or has been filtered out - * @param liveDocs the {@link Bits} representing live docs, or possibly - * {@code null} if all docs are live + * Returns true if a document with docID is accepted, false if the docID maps to a + * deleted document or has been filtered out + * + * @param liveDocs the {@link Bits} representing live docs, or possibly {@code null} if all docs + * are live */ public final boolean accept(int docID, Bits liveDocs) { return (filterDocs == null || filterDocs.get(docID)) @@ -89,9 +92,8 @@ public class CompletionScorer extends BulkScorer { } /** - * Returns the score for a matched completion - * based on the query time boost and the - * index time weight. + * Returns the score for a matched completion based on the query time boost and the index time + * weight. 
*/ public float score(float weight, float boost) { if (boost == 0f) { diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionTerms.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionTerms.java index 0f86739515f..0544f95ea29 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionTerms.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionTerms.java @@ -17,14 +17,12 @@ package org.apache.lucene.search.suggest.document; import java.io.IOException; - import org.apache.lucene.index.FilterLeafReader; import org.apache.lucene.index.Terms; /** - * Wrapped {@link org.apache.lucene.index.Terms} - * used by {@link SuggestField} and {@link ContextSuggestField} - * to access corresponding suggester and their attributes + * Wrapped {@link org.apache.lucene.index.Terms} used by {@link SuggestField} and {@link + * ContextSuggestField} to access corresponding suggester and their attributes * * @lucene.experimental */ @@ -32,40 +30,32 @@ public final class CompletionTerms extends FilterLeafReader.FilterTerms { private final CompletionsTermsReader reader; - /** - * Creates a completionTerms based on {@link CompletionsTermsReader} - */ + /** Creates a completionTerms based on {@link CompletionsTermsReader} */ CompletionTerms(Terms in, CompletionsTermsReader reader) { super(in); this.reader = reader; } /** - * Returns the type of FST, either {@link SuggestField#TYPE} or - * {@link ContextSuggestField#TYPE} + * Returns the type of FST, either {@link SuggestField#TYPE} or {@link ContextSuggestField#TYPE} */ public byte getType() { return (reader != null) ? reader.type : SuggestField.TYPE; } - /** - * Returns the minimum weight of all entries in the weighted FST - */ + /** Returns the minimum weight of all entries in the weighted FST */ public long getMinWeight() { return (reader != null) ? reader.minWeight : 0; } - /** - * Returns the maximum weight of all entries in the weighted FST - */ + /** Returns the maximum weight of all entries in the weighted FST */ public long getMaxWeight() { return (reader != null) ? reader.maxWeight : 0; } /** - * Returns a {@link NRTSuggester} for the field - * or null if no FST - * was indexed for this field + * Returns a {@link NRTSuggester} for the field or null if no FST was indexed for + * this field */ public NRTSuggester suggester() throws IOException { return (reader != null) ? reader.suggester() : null; diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionTokenStream.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionTokenStream.java index d3bec8e50c9..c67737ce43d 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionTokenStream.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionTokenStream.java @@ -18,7 +18,6 @@ package org.apache.lucene.search.suggest.document; import java.io.IOException; - import org.apache.lucene.analysis.TokenFilter; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.miscellaneous.ConcatenateGraphFilter; @@ -28,6 +27,7 @@ import org.apache.lucene.util.automaton.Automaton; /** * A {@link ConcatenateGraphFilter} but we can set the payload and provide access to config options. 
+ * * @lucene.experimental */ public final class CompletionTokenStream extends TokenFilter { @@ -40,26 +40,32 @@ public final class CompletionTokenStream extends TokenFilter { final boolean preservePositionIncrements; final int maxGraphExpansions; - private BytesRef payload; // note doesn't participate in TokenStream lifecycle; it's effectively constant + private BytesRef + payload; // note doesn't participate in TokenStream lifecycle; it's effectively constant CompletionTokenStream(TokenStream inputTokenStream) { - this(inputTokenStream, + this( + inputTokenStream, ConcatenateGraphFilter.DEFAULT_PRESERVE_SEP, ConcatenateGraphFilter.DEFAULT_PRESERVE_POSITION_INCREMENTS, ConcatenateGraphFilter.DEFAULT_MAX_GRAPH_EXPANSIONS); } - CompletionTokenStream(TokenStream inputTokenStream, boolean preserveSep, boolean preservePositionIncrements, int maxGraphExpansions) { - super(new ConcatenateGraphFilter(inputTokenStream, preserveSep, preservePositionIncrements, maxGraphExpansions)); + CompletionTokenStream( + TokenStream inputTokenStream, + boolean preserveSep, + boolean preservePositionIncrements, + int maxGraphExpansions) { + super( + new ConcatenateGraphFilter( + inputTokenStream, preserveSep, preservePositionIncrements, maxGraphExpansions)); this.inputTokenStream = inputTokenStream; this.preserveSep = preserveSep; this.preservePositionIncrements = preservePositionIncrements; this.maxGraphExpansions = maxGraphExpansions; } - /** - * Sets a payload available throughout successive token stream enumeration - */ + /** Sets a payload available throughout successive token stream enumeration */ public void setPayload(BytesRef payload) { this.payload = payload; } @@ -74,15 +80,21 @@ public final class CompletionTokenStream extends TokenFilter { } } - /** Delegates to...At - * @see ConcatenateGraphFilter#toAutomaton() */ + /** + * Delegates to...At + * + * @see ConcatenateGraphFilter#toAutomaton() + */ public Automaton toAutomaton() throws IOException { - return ((ConcatenateGraphFilter)input).toAutomaton(); + return ((ConcatenateGraphFilter) input).toAutomaton(); } - /** Delegates to... - * @see ConcatenateGraphFilter#toAutomaton(boolean) */ + /** + * Delegates to... + * + * @see ConcatenateGraphFilter#toAutomaton(boolean) + */ public Automaton toAutomaton(boolean unicodeAware) throws IOException { - return ((ConcatenateGraphFilter)input).toAutomaton(unicodeAware); + return ((ConcatenateGraphFilter) input).toAutomaton(unicodeAware); } } diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionWeight.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionWeight.java index 8632c2265ce..4442ced1684 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionWeight.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionWeight.java @@ -17,7 +17,6 @@ package org.apache.lucene.search.suggest.document; import java.io.IOException; - import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.Terms; @@ -31,13 +30,10 @@ import org.apache.lucene.util.IntsRef; import org.apache.lucene.util.automaton.Automaton; /** - * Expert: the Weight for CompletionQuery, used to - * score and explain these queries. + * Expert: the Weight for CompletionQuery, used to score and explain these queries. 
* - * Subclasses can override {@link #setNextMatch(IntsRef)}, - * {@link #boost()} and {@link #context()} - * to calculate the boost and extract the context of - * a matched path prefix. + *

    Subclasses can override {@link #setNextMatch(IntsRef)}, {@link #boost()} and {@link + * #context()} to calculate the boost and extract the context of a matched path prefix. * * @lucene.experimental */ @@ -46,18 +42,18 @@ public class CompletionWeight extends Weight { private final Automaton automaton; /** - * Creates a weight for query with an automaton, - * using the reader for index stats + * Creates a weight for query with an automaton, using the reader + * for index stats */ - public CompletionWeight(final CompletionQuery query, final Automaton automaton) throws IOException { + public CompletionWeight(final CompletionQuery query, final Automaton automaton) + throws IOException { super(query); this.completionQuery = query; this.automaton = automaton; } /** - * Returns the automaton specified - * by the {@link CompletionQuery} + * Returns the automaton specified by the {@link CompletionQuery} * * @return query automaton */ @@ -96,16 +92,14 @@ public class CompletionWeight extends Weight { } /** - * Set for every partial path in the index that matched the query - * automaton. + * Set for every partial path in the index that matched the query automaton. * - * Subclasses should override {@link #boost()} and {@link #context()} - * to return an appropriate value with respect to the current pathPrefix. + *

    Subclasses should override {@link #boost()} and {@link #context()} to return an appropriate + * value with respect to the current pathPrefix. * * @param pathPrefix the prefix of a matched path */ - protected void setNextMatch(IntsRef pathPrefix) { - } + protected void setNextMatch(IntsRef pathPrefix) {} /** * Returns the boost of the partial path set by {@link #setNextMatch(IntsRef)} @@ -132,7 +126,7 @@ public class CompletionWeight extends Weight { /** * This object can be cached - * + * * @see org.apache.lucene.search.SegmentCacheable#isCacheable(LeafReaderContext) */ @Override @@ -142,8 +136,7 @@ public class CompletionWeight extends Weight { @Override public Explanation explain(LeafReaderContext context, int doc) throws IOException { - //TODO + // TODO return null; } - } diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionsTermsReader.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionsTermsReader.java index 1776ce61248..6313c020d6c 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionsTermsReader.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionsTermsReader.java @@ -19,14 +19,12 @@ package org.apache.lucene.search.suggest.document; import java.io.IOException; import java.util.Collection; import java.util.Collections; - import org.apache.lucene.search.suggest.document.CompletionPostingsFormat.FSTLoadMode; import org.apache.lucene.store.IndexInput; import org.apache.lucene.util.Accountable; /** - * Holder for suggester and field-level info - * for a suggest field + * Holder for suggester and field-level info for a suggest field * * @lucene.experimental */ @@ -37,6 +35,7 @@ public final class CompletionsTermsReader implements Accountable { public final long maxWeight; /** type of suggester (context-enabled or not) */ public final byte type; + private final IndexInput dictIn; private final long offset; @@ -45,10 +44,16 @@ public final class CompletionsTermsReader implements Accountable { private NRTSuggester suggester; /** - * Creates a CompletionTermsReader to load a field-specific suggester - * from the index dictIn with offset + * Creates a CompletionTermsReader to load a field-specific suggester from the index dictIn + * with offset */ - CompletionsTermsReader(IndexInput dictIn, long offset, long minWeight, long maxWeight, byte type, FSTLoadMode fstLoadMode) { + CompletionsTermsReader( + IndexInput dictIn, + long offset, + long minWeight, + long maxWeight, + byte type, + FSTLoadMode fstLoadMode) { assert minWeight <= maxWeight; assert offset >= 0l && offset < dictIn.length(); this.dictIn = dictIn; @@ -60,8 +65,8 @@ public final class CompletionsTermsReader implements Accountable { } /** - * Returns the suggester for a field, if not loaded already, loads - * the appropriate suggester from CompletionDictionary + * Returns the suggester for a field, if not loaded already, loads the appropriate suggester from + * CompletionDictionary */ public synchronized NRTSuggester suggester() throws IOException { if (suggester == null) { diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/ContextQuery.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/ContextQuery.java index 08dbab4f467..c2b41a31ce1 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/ContextQuery.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/ContextQuery.java @@ -21,7 +21,6 @@ import 
java.util.HashMap; import java.util.Iterator; import java.util.Map; import java.util.TreeSet; - import org.apache.lucene.analysis.miscellaneous.ConcatenateGraphFilter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.QueryVisitor; @@ -39,47 +38,39 @@ import org.apache.lucene.util.automaton.Operations; import org.apache.lucene.util.fst.Util; /** - * A {@link CompletionQuery} that matches documents specified by - * a wrapped {@link CompletionQuery} supporting boosting and/or filtering - * by specified contexts. - *

- * Use this query against {@link ContextSuggestField}
- * <p>
- * Example of using a {@link CompletionQuery} with boosted
- * contexts:
+ * A {@link CompletionQuery} that matches documents specified by a wrapped {@link CompletionQuery}
+ * supporting boosting and/or filtering by specified contexts.
+ *
+ * <p>Use this query against {@link ContextSuggestField}
+ *
+ * <p>Example of using a {@link CompletionQuery} with boosted contexts:
+ *
 * <pre class="prettyprint">
 *  CompletionQuery completionQuery = ...;
 *  ContextQuery query = new ContextQuery(completionQuery);
 *  query.addContext("context1", 2);
 *  query.addContext("context2", 1);
 * </pre>
- * <p>
- * NOTE:
+ *
+ * <p>NOTE:
+ *
 * <ul>
- *   <li>
- *    This query can be constructed with
- *    {@link PrefixCompletionQuery}, {@link RegexCompletionQuery}
- *    or {@link FuzzyCompletionQuery} query.
- *   </li>
- *   <li>
- *    To suggest across all contexts, use {@link #addAllContexts()}.
- *    When no context is added, the default behaviour is to suggest across
- *    all contexts.
- *   </li>
- *   <li>
- *    To apply the same boost to multiple contexts sharing the same prefix,
- *    Use {@link #addContext(CharSequence, float, boolean)} with the common
- *    context prefix, boost and set <code>exact</code> to false.
- *   </li>
- *   <li>
- *    Using this query against a {@link SuggestField} (not context enabled),
- *    would yield results ignoring any context filtering/boosting
- *   </li>
+ *   <li>This query can be constructed with {@link PrefixCompletionQuery}, {@link
+ *       RegexCompletionQuery} or {@link FuzzyCompletionQuery} query.
+ *   <li>To suggest across all contexts, use {@link #addAllContexts()}. When no context is added,
+ *       the default behaviour is to suggest across all contexts.
+ *   <li>To apply the same boost to multiple contexts sharing the same prefix, Use {@link
+ *       #addContext(CharSequence, float, boolean)} with the common context prefix, boost and set
+ *       <code>exact</code> to false.
+ *   <li>Using this query against a {@link SuggestField} (not context enabled), would yield results
+ *       ignoring any context filtering/boosting
+ * </ul>
    * * @lucene.experimental */ public class ContextQuery extends CompletionQuery implements Accountable { - private static final long BASE_RAM_BYTES = RamUsageEstimator.shallowSizeOfInstance(ContextQuery.class); + private static final long BASE_RAM_BYTES = + RamUsageEstimator.shallowSizeOfInstance(ContextQuery.class); private IntsRefBuilder scratch = new IntsRefBuilder(); private Map contexts; @@ -90,17 +81,15 @@ public class ContextQuery extends CompletionQuery implements Accountable { private long ramBytesUsed; /** - * Constructs a context completion query that matches - * documents specified by query. - *

    - * Use {@link #addContext(CharSequence, float, boolean)} - * to add context(s) with boost + * Constructs a context completion query that matches documents specified by query. + * + *

    Use {@link #addContext(CharSequence, float, boolean)} to add context(s) with boost */ public ContextQuery(CompletionQuery query) { super(query.getTerm(), query.getFilter()); if (query instanceof ContextQuery) { - throw new IllegalArgumentException("'query' parameter must not be of type " - + this.getClass().getSimpleName()); + throw new IllegalArgumentException( + "'query' parameter must not be of type " + this.getClass().getSimpleName()); } this.innerQuery = query; contexts = new HashMap<>(); @@ -108,28 +97,26 @@ public class ContextQuery extends CompletionQuery implements Accountable { } private void updateRamBytesUsed() { - ramBytesUsed = BASE_RAM_BYTES + - RamUsageEstimator.sizeOfObject(contexts) + - RamUsageEstimator.sizeOfObject(innerQuery, RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED); + ramBytesUsed = + BASE_RAM_BYTES + + RamUsageEstimator.sizeOfObject(contexts) + + RamUsageEstimator.sizeOfObject( + innerQuery, RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED); } - /** - * Adds an exact context with default boost of 1 - */ + /** Adds an exact context with default boost of 1 */ public void addContext(CharSequence context) { addContext(context, 1f, true); } - /** - * Adds an exact context with boost - */ + /** Adds an exact context with boost */ public void addContext(CharSequence context, float boost) { addContext(context, boost, true); } /** - * Adds a context with boost, set exact to false - * if the context is a prefix of any indexed contexts + * Adds a context with boost, set exact to false if the context is a prefix of any + * indexed contexts */ public void addContext(CharSequence context, float boost, boolean exact) { if (boost < 0f) { @@ -137,17 +124,23 @@ public class ContextQuery extends CompletionQuery implements Accountable { } for (int i = 0; i < context.length(); i++) { if (ContextSuggestField.CONTEXT_SEPARATOR == context.charAt(i)) { - throw new IllegalArgumentException("Illegal value [" + context + "] UTF-16 codepoint [0x" - + Integer.toHexString((int) context.charAt(i))+ "] at position " + i + " is a reserved character"); + throw new IllegalArgumentException( + "Illegal value [" + + context + + "] UTF-16 codepoint [0x" + + Integer.toHexString((int) context.charAt(i)) + + "] at position " + + i + + " is a reserved character"); } } - contexts.put(IntsRef.deepCopyOf(Util.toIntsRef(new BytesRef(context), scratch)), new ContextMetaData(boost, exact)); + contexts.put( + IntsRef.deepCopyOf(Util.toIntsRef(new BytesRef(context), scratch)), + new ContextMetaData(boost, exact)); updateRamBytesUsed(); } - /** - * Add all contexts with a boost of 1f - */ + /** Add all contexts with a boost of 1f */ public void addAllContexts() { matchAllContexts = true; } @@ -181,8 +174,10 @@ public class ContextQuery extends CompletionQuery implements Accountable { } @Override - public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { - final CompletionWeight innerWeight = ((CompletionWeight) innerQuery.createWeight(searcher, scoreMode, boost)); + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) + throws IOException { + final CompletionWeight innerWeight = + ((CompletionWeight) innerQuery.createWeight(searcher, scoreMode, boost)); final Automaton innerAutomaton = innerWeight.getAutomaton(); // If the inner automaton matches nothing, then we return an empty weight to avoid @@ -194,10 +189,13 @@ public class ContextQuery extends CompletionQuery implements Accountable { // if separators are preserved the fst 
contains a SEP_LABEL // behind each gap. To have a matching automaton, we need to // include the SEP_LABEL in the query as well - Automaton optionalSepLabel = Operations.optional(Automata.makeChar(ConcatenateGraphFilter.SEP_LABEL)); + Automaton optionalSepLabel = + Operations.optional(Automata.makeChar(ConcatenateGraphFilter.SEP_LABEL)); Automaton prefixAutomaton = Operations.concatenate(optionalSepLabel, innerAutomaton); - Automaton contextsAutomaton = Operations.concatenate(toContextAutomaton(contexts, matchAllContexts), prefixAutomaton); - contextsAutomaton = Operations.determinize(contextsAutomaton, Operations.DEFAULT_MAX_DETERMINIZED_STATES); + Automaton contextsAutomaton = + Operations.concatenate(toContextAutomaton(contexts, matchAllContexts), prefixAutomaton); + contextsAutomaton = + Operations.determinize(contextsAutomaton, Operations.DEFAULT_MAX_DETERMINIZED_STATES); final Map contextMap = new HashMap<>(contexts.size()); final TreeSet contextLengths = new TreeSet<>(); @@ -211,10 +209,12 @@ public class ContextQuery extends CompletionQuery implements Accountable { for (int i = 0; iterator.hasNext(); i++) { contextLengthArray[i] = iterator.next(); } - return new ContextCompletionWeight(this, contextsAutomaton, innerWeight, contextMap, contextLengthArray); + return new ContextCompletionWeight( + this, contextsAutomaton, innerWeight, contextMap, contextLengthArray); } - private static Automaton toContextAutomaton(final Map contexts, final boolean matchAllContexts) { + private static Automaton toContextAutomaton( + final Map contexts, final boolean matchAllContexts) { final Automaton matchAllAutomaton = Operations.repeat(Automata.makeAnyString()); final Automaton sep = Automata.makeChar(ContextSuggestField.CONTEXT_SEPARATOR); if (matchAllContexts || contexts.size() == 0) { @@ -239,21 +239,15 @@ public class ContextQuery extends CompletionQuery implements Accountable { } } - /** - * Holder for context value meta data - */ + /** Holder for context value meta data */ private static class ContextMetaData { - /** - * Boost associated with a - * context value - */ + /** Boost associated with a context value */ private final float boost; /** - * flag to indicate whether the context - * value should be treated as an exact - * value or a context prefix + * flag to indicate whether the context value should be treated as an exact value or a context + * prefix */ private final boolean exact; @@ -273,9 +267,13 @@ public class ContextQuery extends CompletionQuery implements Accountable { private float currentBoost; private CharSequence currentContext; - public ContextCompletionWeight(CompletionQuery query, Automaton automaton, CompletionWeight innerWeight, - Map contextMap, - int[] contextLengths) throws IOException { + public ContextCompletionWeight( + CompletionQuery query, + Automaton automaton, + CompletionWeight innerWeight, + Map contextMap, + int[] contextLengths) + throws IOException { super(query, automaton); this.contextMap = contextMap; this.contextLengths = contextLengths; @@ -320,7 +318,8 @@ public class ContextQuery extends CompletionQuery implements Accountable { assert ref.offset < ref.length : "input should not end with the context separator"; if (ref.ints[i] == ConcatenateGraphFilter.SEP_LABEL) { ref.offset++; - assert ref.offset < ref.length : "input should not end with a context separator followed by SEP_LABEL"; + assert ref.offset < ref.length + : "input should not end with a context separator followed by SEP_LABEL"; } ref.length = ref.length - ref.offset; 
refBuilder.copyInts(ref.ints, ref.offset, ref.length); diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/ContextSuggestField.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/ContextSuggestField.java index cf462e1dbc8..1b43e192069 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/ContextSuggestField.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/ContextSuggestField.java @@ -21,34 +21,31 @@ import java.util.Collections; import java.util.HashSet; import java.util.Iterator; import java.util.Set; - import org.apache.lucene.analysis.TokenFilter; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; /** - * {@link SuggestField} which additionally takes in a set of - * contexts. Example usage of adding a suggestion with contexts is as follows: + * {@link SuggestField} which additionally takes in a set of contexts. Example usage of adding a + * suggestion with contexts is as follows: * *

 * <pre class="prettyprint">
 *  document.add(
 *   new ContextSuggestField(name, "suggestion", Arrays.asList("context1", "context2"),  4));
 * </pre>
    * - * Use {@link ContextQuery} to boost and/or filter suggestions - * at query-time. Use {@link PrefixCompletionQuery}, {@link RegexCompletionQuery} - * or {@link FuzzyCompletionQuery} if context boost/filtering - * are not needed. + * Use {@link ContextQuery} to boost and/or filter suggestions at query-time. Use {@link + * PrefixCompletionQuery}, {@link RegexCompletionQuery} or {@link FuzzyCompletionQuery} if context + * boost/filtering are not needed. * * @lucene.experimental */ public class ContextSuggestField extends SuggestField { - /** - * Separator used between context value and the suggest field value - */ + /** Separator used between context value and the suggest field value */ public static final int CONTEXT_SEPARATOR = '\u001D'; + static final byte TYPE = 1; private final Set contexts; @@ -60,10 +57,8 @@ public class ContextSuggestField extends SuggestField { * @param value field value to get suggestion on * @param weight field weight * @param contexts associated contexts - * - * @throws IllegalArgumentException if either the name or value is null, - * if value is an empty string, if the weight is negative, if value or - * contexts contains any reserved characters + * @throws IllegalArgumentException if either the name or value is null, if value is an empty + * string, if the weight is negative, if value or contexts contains any reserved characters */ public ContextSuggestField(String name, String value, int weight, CharSequence... contexts) { super(name, value, weight); @@ -74,10 +69,7 @@ public class ContextSuggestField extends SuggestField { } } - /** - * Expert: Sub-classes can inject contexts at - * index-time - */ + /** Expert: Sub-classes can inject contexts at index-time */ protected Iterable contexts() { return contexts; } @@ -90,15 +82,21 @@ public class ContextSuggestField extends SuggestField { } CompletionTokenStream completionTokenStream; if (stream instanceof CompletionTokenStream) { - //TODO this is awkward; is there a better way avoiding re-creating the chain? + // TODO this is awkward; is there a better way avoiding re-creating the chain? completionTokenStream = (CompletionTokenStream) stream; - PrefixTokenFilter prefixTokenFilter = new PrefixTokenFilter(completionTokenStream.inputTokenStream, (char) CONTEXT_SEPARATOR, contexts); - completionTokenStream = new CompletionTokenStream(prefixTokenFilter, - completionTokenStream.preserveSep, - completionTokenStream.preservePositionIncrements, - completionTokenStream.maxGraphExpansions); + PrefixTokenFilter prefixTokenFilter = + new PrefixTokenFilter( + completionTokenStream.inputTokenStream, (char) CONTEXT_SEPARATOR, contexts); + completionTokenStream = + new CompletionTokenStream( + prefixTokenFilter, + completionTokenStream.preserveSep, + completionTokenStream.preservePositionIncrements, + completionTokenStream.maxGraphExpansions); } else { - completionTokenStream = new CompletionTokenStream(new PrefixTokenFilter(stream, (char) CONTEXT_SEPARATOR, contexts)); + completionTokenStream = + new CompletionTokenStream( + new PrefixTokenFilter(stream, (char) CONTEXT_SEPARATOR, contexts)); } return completionTokenStream; } @@ -109,14 +107,15 @@ public class ContextSuggestField extends SuggestField { } /** - * The {@link PrefixTokenFilter} wraps a {@link TokenStream} and adds a set - * prefixes ahead. The position attribute will not be incremented for the prefixes. + * The {@link PrefixTokenFilter} wraps a {@link TokenStream} and adds a set prefixes ahead. 
The + * position attribute will not be incremented for the prefixes. */ private static final class PrefixTokenFilter extends TokenFilter { private final char separator; private final CharTermAttribute termAttr = addAttribute(CharTermAttribute.class); - private final PositionIncrementAttribute posAttr = addAttribute(PositionIncrementAttribute.class); + private final PositionIncrementAttribute posAttr = + addAttribute(PositionIncrementAttribute.class); private final Iterable prefixes; private Iterator currentPrefix; @@ -166,8 +165,14 @@ public class ContextSuggestField extends SuggestField { private void validate(final CharSequence value) { for (int i = 0; i < value.length(); i++) { if (CONTEXT_SEPARATOR == value.charAt(i)) { - throw new IllegalArgumentException("Illegal value [" + value + "] UTF-16 codepoint [0x" - + Integer.toHexString((int) value.charAt(i))+ "] at position " + i + " is a reserved character"); + throw new IllegalArgumentException( + "Illegal value [" + + value + + "] UTF-16 codepoint [0x" + + Integer.toHexString((int) value.charAt(i)) + + "] at position " + + i + + " is a reserved character"); } } } diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/FuzzyCompletionQuery.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/FuzzyCompletionQuery.java index 14479fecd12..16bbd616ed8 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/FuzzyCompletionQuery.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/FuzzyCompletionQuery.java @@ -21,7 +21,6 @@ import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Set; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; @@ -38,14 +37,14 @@ import org.apache.lucene.util.automaton.Operations; import org.apache.lucene.util.automaton.UTF32ToUTF8; /** - * A {@link CompletionQuery} that match documents containing terms - * within an edit distance of the specified prefix. - *

- * This query boost documents relative to how similar the indexed terms are to the
- * provided prefix.
- * <p>
- * Example usage of querying an analyzed prefix within an edit distance of 1 of 'subg'
- * against a field 'suggest_field' is as follows:
+ * A {@link CompletionQuery} that match documents containing terms within an edit distance of the
+ * specified prefix.
+ *
+ * <p>This query boost documents relative to how similar the indexed terms are to the provided
+ * prefix.
+ *
+ * <p>Example usage of querying an analyzed prefix within an edit distance of 1 of 'subg' against a
+ * field 'suggest_field' is as follows:
 *
 * <pre class="prettyprint">
 *  CompletionQuery query = new FuzzyCompletionQuery(analyzer, new Term("suggest_field", "subg"));
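
To see the reformatted suggest API end to end, a minimal query-side sketch follows. It assumes an already-open DirectoryReader named reader over an index whose "suggest_field" was written through a CompletionPostingsFormat-based codec; the reader, field name and prefix are illustrative assumptions, not part of this patch.

Analyzer analyzer = new CompletionAnalyzer(new StandardAnalyzer());
SuggestIndexSearcher searcher = new SuggestIndexSearcher(reader);
CompletionQuery query = new FuzzyCompletionQuery(analyzer, new Term("suggest_field", "subg"));
// ask for the top 5 completions; false = do not skip duplicate suggestions
TopSuggestDocs suggestions = searcher.suggest(query, 5, false);
for (TopSuggestDocs.SuggestScoreDoc hit : suggestions.scoreLookupDocs()) {
  System.out.println(hit.key + " (doc=" + hit.doc + ", score=" + hit.score + ")");
}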
    @@ -56,31 +55,21 @@ import org.apache.lucene.util.automaton.UTF32ToUTF8;
     public class FuzzyCompletionQuery extends PrefixCompletionQuery {
     
       /**
    -   * Measure maxEdits, minFuzzyLength, transpositions and nonFuzzyPrefix
    -   * parameters in Unicode code points (actual letters)
    -   * instead of bytes.
    -   * */
    +   * Measure maxEdits, minFuzzyLength, transpositions and nonFuzzyPrefix parameters in Unicode code
    +   * points (actual letters) instead of bytes.
    +   */
       public static final boolean DEFAULT_UNICODE_AWARE = false;
     
    -  /**
    -   * The default minimum length of the key before any edits are allowed.
    -   */
    +  /** The default minimum length of the key before any edits are allowed. */
       public static final int DEFAULT_MIN_FUZZY_LENGTH = 3;
     
    -  /**
    -   * The default prefix length where edits are not allowed.
    -   */
    +  /** The default prefix length where edits are not allowed. */
       public static final int DEFAULT_NON_FUZZY_PREFIX = 1;
     
    -  /**
    -   * The default maximum number of edits for fuzzy
    -   * suggestions.
    -   */
    +  /** The default maximum number of edits for fuzzy suggestions. */
       public static final int DEFAULT_MAX_EDITS = 1;
     
    -  /**
    -   * The default transposition value passed to {@link LevenshteinAutomata}
    -   */
    +  /** The default transposition value passed to {@link LevenshteinAutomata} */
       public static final boolean DEFAULT_TRANSPOSITIONS = true;
     
       private final int maxEdits;
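
Taken together, the defaults above mean: no edits inside the first code unit, no edits at all for keys shorter than three characters, at most one edit otherwise, and a transposition counting as a single edit. A hedged illustration (the field name and terms are examples only, not from this patch):

FuzzyCompletionQuery q = new FuzzyCompletionQuery(analyzer, new Term("suggest_field", "subg"));
// "subg" is 4 code units long, so it clears DEFAULT_MIN_FUZZY_LENGTH (3) and may be edited.
// One edit is allowed (DEFAULT_MAX_EDITS = 1): analyzed prefixes such as "sugg" (substitution)
// or "sbug" (transposition, DEFAULT_TRANSPOSITIONS = true) are accepted, but "xubg" is not,
// because DEFAULT_NON_FUZZY_PREFIX = 1 pins the leading 's'.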
    @@ -91,37 +80,41 @@ public class FuzzyCompletionQuery extends PrefixCompletionQuery {
       private final int maxDeterminizedStates;
     
       /**
    -   * Calls {@link FuzzyCompletionQuery#FuzzyCompletionQuery(Analyzer, Term, BitsProducer)}
    -   * with no filter
    +   * Calls {@link FuzzyCompletionQuery#FuzzyCompletionQuery(Analyzer, Term, BitsProducer)} with no
    +   * filter
        */
       public FuzzyCompletionQuery(Analyzer analyzer, Term term) {
         this(analyzer, term, null);
       }
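
Both constructors presuppose an index that actually contains suggest fields. For completeness, a hedged sketch of that indexing side; the codec wiring that maps these fields to a concrete CompletionPostingsFormat (set via IndexWriterConfig) is elided because it is version-specific, and writer is an assumed IndexWriter:

Document doc = new Document();
doc.add(new SuggestField("suggest_field", "suggestion", 4)); // value plus index-time weight
doc.add(new ContextSuggestField("suggest_field_ctx", "suggestion", 4, "context1", "context2"));
// writer must use a codec whose postings format for these fields is a CompletionPostingsFormat
writer.addDocument(doc);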
     
       /**
    -   * Calls {@link FuzzyCompletionQuery#FuzzyCompletionQuery(Analyzer, Term, BitsProducer,
    -   * int, boolean, int, int, boolean, int)}
    -   * with defaults for maxEdits, transpositions,
    -   * nonFuzzyPrefix, minFuzzyLength,
    -   * unicodeAware and maxDeterminizedStates
    -   *
    -   * See {@link #DEFAULT_MAX_EDITS}, {@link #DEFAULT_TRANSPOSITIONS},
    -   * {@link #DEFAULT_NON_FUZZY_PREFIX}, {@link #DEFAULT_MIN_FUZZY_LENGTH},
    -   * {@link #DEFAULT_UNICODE_AWARE} and {@link Operations#DEFAULT_MAX_DETERMINIZED_STATES}
    -   * for defaults
    +   * Calls {@link FuzzyCompletionQuery#FuzzyCompletionQuery(Analyzer, Term, BitsProducer, int,
+   * boolean, int, int, boolean, int)} with defaults for <code>maxEdits</code>, <code>
+   * transpositions</code>, <code>nonFuzzyPrefix</code>, <code>minFuzzyLength</code>, <code>
+   * unicodeAware</code> and <code>maxDeterminizedStates</code>. See {@link #DEFAULT_MAX_EDITS},
+   * {@link #DEFAULT_TRANSPOSITIONS}, {@link #DEFAULT_NON_FUZZY_PREFIX}, {@link
+   * #DEFAULT_MIN_FUZZY_LENGTH}, {@link #DEFAULT_UNICODE_AWARE} and {@link
+   * Operations#DEFAULT_MAX_DETERMINIZED_STATES} for defaults
        */
       public FuzzyCompletionQuery(Analyzer analyzer, Term term, BitsProducer filter) {
    -    this(analyzer, term, filter, DEFAULT_MAX_EDITS, DEFAULT_TRANSPOSITIONS, DEFAULT_NON_FUZZY_PREFIX,
    -        DEFAULT_MIN_FUZZY_LENGTH, DEFAULT_UNICODE_AWARE, Operations.DEFAULT_MAX_DETERMINIZED_STATES
    -    );
    +    this(
    +        analyzer,
    +        term,
    +        filter,
    +        DEFAULT_MAX_EDITS,
    +        DEFAULT_TRANSPOSITIONS,
    +        DEFAULT_NON_FUZZY_PREFIX,
    +        DEFAULT_MIN_FUZZY_LENGTH,
    +        DEFAULT_UNICODE_AWARE,
    +        Operations.DEFAULT_MAX_DETERMINIZED_STATES);
       }
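
The BitsProducer argument threads a per-segment document filter through the suggester. A hedged sketch of the smallest possible implementation, one that accepts every document (a real filter would derive its Bits from a query or doc values; analyzer is assumed):

BitsProducer acceptAll = new BitsProducer() {
  @Override
  public Bits getBits(LeafReaderContext context) throws IOException {
    return new Bits.MatchAllBits(context.reader().maxDoc());
  }
};
CompletionQuery filtered =
    new FuzzyCompletionQuery(analyzer, new Term("suggest_field", "subg"), acceptAll);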
     
       /**
        * Constructs an analyzed fuzzy prefix completion query
        *
        * @param analyzer used to analyze the provided {@link Term#text()}
    -   * @param term query is run against {@link Term#field()} and {@link Term#text()}
    -   *             is analyzed with analyzer
    +   * @param term query is run against {@link Term#field()} and {@link Term#text()} is analyzed with
    +   *     analyzer
        * @param filter used to query on a sub set of documents
        * @param maxEdits maximum number of acceptable edits
        * @param transpositions value passed to {@link LevenshteinAutomata}
    @@ -130,9 +123,16 @@ public class FuzzyCompletionQuery extends PrefixCompletionQuery {
        * @param unicodeAware treat prefix as unicode rather than bytes
        * @param maxDeterminizedStates maximum automaton states allowed for {@link LevenshteinAutomata}
        */
    -  public FuzzyCompletionQuery(Analyzer analyzer, Term term, BitsProducer filter, int maxEdits,
    -                              boolean transpositions, int nonFuzzyPrefix, int minFuzzyLength,
    -                              boolean unicodeAware, int maxDeterminizedStates) {
    +  public FuzzyCompletionQuery(
    +      Analyzer analyzer,
    +      Term term,
    +      BitsProducer filter,
    +      int maxEdits,
    +      boolean transpositions,
    +      int nonFuzzyPrefix,
    +      int minFuzzyLength,
    +      boolean unicodeAware,
    +      int maxDeterminizedStates) {
         super(analyzer, term, filter);
         this.maxEdits = maxEdits;
         this.transpositions = transpositions;
    @@ -143,9 +143,11 @@ public class FuzzyCompletionQuery extends PrefixCompletionQuery {
       }
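
A hedged sketch of calling this expanded constructor directly, with each argument labeled; the values are illustrative, only the parameter order comes from the signature above:

FuzzyCompletionQuery query =
    new FuzzyCompletionQuery(
        new CompletionAnalyzer(new StandardAnalyzer()),
        new Term("suggest_field", "subg"),
        null, // filter: no BitsProducer, consider all documents
        2, // maxEdits
        true, // transpositions count as a single edit
        1, // nonFuzzyPrefix: the leading code point must match exactly
        3, // minFuzzyLength: shorter keys are matched without edits
        true, // unicodeAware: measure the above in code points, not bytes
        Operations.DEFAULT_MAX_DETERMINIZED_STATES);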
     
       @Override
    -  public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
    +  public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost)
    +      throws IOException {
         final Automaton originalAutomata;
    -    try (CompletionTokenStream stream = (CompletionTokenStream) analyzer.tokenStream(getField(), getTerm().text()) ) {
    +    try (CompletionTokenStream stream =
    +        (CompletionTokenStream) analyzer.tokenStream(getField(), getTerm().text())) {
           originalAutomata = stream.toAutomaton(unicodeAware);
         }
         Set<IntsRef> refs = new HashSet<>();
    @@ -163,7 +165,7 @@ public class FuzzyCompletionQuery extends PrefixCompletionQuery {
       private Automaton toLevenshteinAutomata(Automaton automaton, Set<IntsRef> refs) {
         List<Automaton> subs = new ArrayList<>();
         FiniteStringsIterator finiteStrings = new FiniteStringsIterator(automaton);
         FiniteStringsIterator finiteStrings = new FiniteStringsIterator(automaton);
    -    for (IntsRef string; (string = finiteStrings.next()) != null;) {
    +    for (IntsRef string; (string = finiteStrings.next()) != null; ) {
           refs.add(IntsRef.deepCopyOf(string));
     
           if (string.length <= nonFuzzyPrefix || string.length < minFuzzyLength) {
    @@ -176,11 +178,12 @@ public class FuzzyCompletionQuery extends PrefixCompletionQuery {
             // to allow the trailing dedup bytes to be
             // edited... but then 0 byte is "in general" allowed
             // on input (but not in UTF8).
    -        LevenshteinAutomata lev = new LevenshteinAutomata(ints,
    -            unicodeAware ? Character.MAX_CODE_POINT : 255,
    -            transpositions);
    -        subs.add(lev.toAutomaton(maxEdits,
    -            UnicodeUtil.newString(string.ints, string.offset, nonFuzzyPrefix)));
    +        LevenshteinAutomata lev =
    +            new LevenshteinAutomata(
    +                ints, unicodeAware ? Character.MAX_CODE_POINT : 255, transpositions);
    +        subs.add(
    +            lev.toAutomaton(
    +                maxEdits, UnicodeUtil.newString(string.ints, string.offset, nonFuzzyPrefix)));
           }
         }
     
    @@ -200,44 +203,32 @@ public class FuzzyCompletionQuery extends PrefixCompletionQuery {
         }
       }
     
    -  /**
    -   * Get the maximum edit distance for fuzzy matches
    -   */
    +  /** Get the maximum edit distance for fuzzy matches */
       public int getMaxEdits() {
         return maxEdits;
       }
     
    -  /**
    -   * Return whether transpositions count as a single edit
    -   */
    +  /** Return whether transpositions count as a single edit */
       public boolean isTranspositions() {
         return transpositions;
       }
     
    -  /**
    -   * Get the length of a prefix where no edits are permitted
    -   */
    +  /** Get the length of a prefix where no edits are permitted */
       public int getNonFuzzyPrefix() {
         return nonFuzzyPrefix;
       }
     
    -  /**
    -   * Get the minimum length of a term considered for matching
    -   */
    +  /** Get the minimum length of a term considered for matching */
       public int getMinFuzzyLength() {
         return minFuzzyLength;
       }
     
    -  /**
    -   * Return true if lengths are measured in unicode code-points rather than bytes
    -   */
    +  /** Return true if lengths are measured in unicode code-points rather than bytes */
       public boolean isUnicodeAware() {
         return unicodeAware;
       }
     
    -  /**
    -   * Get the maximum number of determinized states permitted
    -   */
    +  /** Get the maximum number of determinized states permitted */
       public int getMaxDeterminizedStates() {
         return maxDeterminizedStates;
       }
    @@ -265,7 +256,8 @@ public class FuzzyCompletionQuery extends PrefixCompletionQuery {
        private final Set<IntsRef> refs;
         int currentBoost = 0;
     
    -    public FuzzyCompletionWeight(CompletionQuery query, Automaton automaton, Set<IntsRef> refs) throws IOException {
    +    public FuzzyCompletionWeight(CompletionQuery query, Automaton automaton, Set<IntsRef> refs)
    +        throws IOException {
           super(query, automaton);
           this.refs = refs;
         }
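
A minimal usage sketch of the query class reformatted above; the analyzer choice, the "suggest_field" field name, the prefix text, and the surrounding reader setup are illustrative assumptions, not part of this change:

      // Sketch: top-5 fuzzy completions using the class defaults documented above.
      // Assumes 'reader' is an IndexReader over an index containing SuggestField
      // values in "suggest_field".
      SuggestIndexSearcher searcher = new SuggestIndexSearcher(reader);
      FuzzyCompletionQuery query =
          new FuzzyCompletionQuery(new StandardAnalyzer(), new Term("suggest_field", "sugge"), null);
      TopSuggestDocs hits = searcher.suggest(query, 5, true); // skip duplicate surface forms
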
    diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/NRTSuggester.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/NRTSuggester.java
    index b62d9b900cd..47a80350aa2 100644
    --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/NRTSuggester.java
    +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/NRTSuggester.java
    @@ -16,12 +16,13 @@
      */
     package org.apache.lucene.search.suggest.document;
     
    +import static org.apache.lucene.search.suggest.document.NRTSuggester.PayLoadProcessor.parseSurfaceForm;
    +
     import java.io.IOException;
     import java.util.Collection;
     import java.util.Collections;
     import java.util.Comparator;
     import java.util.List;
    -
     import org.apache.lucene.search.suggest.analyzing.FSTUtil;
     import org.apache.lucene.search.suggest.document.CompletionPostingsFormat.FSTLoadMode;
     import org.apache.lucene.store.ByteArrayDataInput;
    @@ -35,34 +36,34 @@ import org.apache.lucene.util.CharsRefBuilder;
     import org.apache.lucene.util.fst.ByteSequenceOutputs;
     import org.apache.lucene.util.fst.FST;
     import org.apache.lucene.util.fst.OffHeapFSTStore;
    -import org.apache.lucene.util.fst.PairOutputs.Pair;
     import org.apache.lucene.util.fst.PairOutputs;
    +import org.apache.lucene.util.fst.PairOutputs.Pair;
     import org.apache.lucene.util.fst.PositiveIntOutputs;
     import org.apache.lucene.util.fst.Util;
     
    -import static org.apache.lucene.search.suggest.document.NRTSuggester.PayLoadProcessor.parseSurfaceForm;
    -
     /**
    - * <p>
    - * NRTSuggester executes Top N search on a weighted FST specified by a {@link CompletionScorer}
    - * <p>
    - * See {@link #lookup(CompletionScorer, Bits, TopSuggestDocsCollector)} for more implementation
    - * details.
    - * <p>
    - * FST Format:
    - * <ul>
    - *  <li>Input: analyzed forms of input terms</li>
    - *  <li>Output: Pair&lt;Long, BytesRef&gt; containing weight, surface form and docID</li>
    - * </ul>
    - * <p>
    - * NOTE:
    - * <ul>
    - *  <li>having too many deletions or using a very restrictive filter can make the search inadmissible due to
    - *  over-pruning of potential paths. See {@link CompletionScorer#accept(int, Bits)}</li>
    - *  <li>when matched documents are arbitrarily filtered ({@link CompletionScorer#filtered} set to <code>true</code>,
    - *  it is assumed that the filter will roughly filter out half the number of documents that match
    - *  the provided automaton</li>
    - *  <li>lookup performance will degrade as more accepted completions lead to filtered out documents</li>
    - * </ul>
    + * NRTSuggester executes Top N search on a weighted FST specified by a {@link CompletionScorer}
    + *
    + * <p>See {@link #lookup(CompletionScorer, Bits, TopSuggestDocsCollector)} for more implementation
    + * details.
    + *
    + * <p>FST Format:
    + *
    + * <ul>
    + *   <li>Input: analyzed forms of input terms
    + *   <li>Output: Pair&lt;Long, BytesRef&gt; containing weight, surface form and docID
    + * </ul>
    + *
    + * <p>NOTE:
    + *
    + * <ul>
    + *   <li>having too many deletions or using a very restrictive filter can make the search
    + *       inadmissible due to over-pruning of potential paths. See {@link
    + *       CompletionScorer#accept(int, Bits)}
    + *   <li>when matched documents are arbitrarily filtered ({@link CompletionScorer#filtered} set to
    + *       <code>true</code>, it is assumed that the filter will roughly filter out half the number of
    + *       documents that match the provided automaton
    + *   <li>lookup performance will degrade as more accepted completions lead to filtered out documents
    + * </ul>
      *
      * @lucene.experimental
    @@ -70,36 +71,30 @@ import static org.apache.lucene.search.suggest.document.NRTSuggester.PayLoadProc
     public final class NRTSuggester implements Accountable {
     
       /**
    -   * FST:
    -   *  input is the analyzed form, with a null byte between terms
    -   *  and a {@link NRTSuggesterBuilder#END_BYTE} to denote the
    -   *  end of the input
    -   *  weight is a long
    -   *  surface is the original, unanalyzed form followed by the docID
    +   * FST: input is the analyzed form, with a null byte between terms and a {@link
    +   * NRTSuggesterBuilder#END_BYTE} to denote the end of the input weight is a long surface is the
    +   * original, unanalyzed form followed by the docID
        */
       private final FST<Pair<Long, BytesRef>> fst;
     
       /**
    -   * Highest number of analyzed paths we saw for any single
    -   * input surface form. This can be > 1, when index analyzer
    -   * creates graphs or if multiple surface form(s) yields the
    -   * same analyzed form
    +   * Highest number of analyzed paths we saw for any single input surface form. This can be > 1,
    +   * when index analyzer creates graphs or if multiple surface form(s) yields the same analyzed form
        */
       private final int maxAnalyzedPathsPerOutput;
     
    -  /**
    -   * Separator used between surface form and its docID in the FST output
    -   */
    +  /** Separator used between surface form and its docID in the FST output */
       private final int payloadSep;
     
       /**
        * Maximum queue depth for TopNSearcher
        *
    -   * NOTE: value should be <= Integer.MAX_VALUE
    +   * <p>NOTE: value should be <= Integer.MAX_VALUE
        */
       private static final long MAX_TOP_N_QUEUE_SIZE = 5000;
     
    -  private NRTSuggester(FST<Pair<Long, BytesRef>> fst, int maxAnalyzedPathsPerOutput, int payloadSep) {
    +  private NRTSuggester(
    +      FST<Pair<Long, BytesRef>> fst, int maxAnalyzedPathsPerOutput, int payloadSep) {
         this.fst = fst;
         this.maxAnalyzedPathsPerOutput = maxAnalyzedPathsPerOutput;
         this.payloadSep = payloadSep;
    @@ -116,23 +111,27 @@ public final class NRTSuggester implements Accountable {
       }
     
       /**
    -   * Collects at most {@link TopSuggestDocsCollector#getCountToCollect()} completions that
    -   * match the provided {@link CompletionScorer}.
    -   * <p>
    -   * The {@link CompletionScorer#automaton} is intersected with the {@link #fst}.
    -   * {@link CompletionScorer#weight} is used to compute boosts and/or extract context
    -   * for each matched partial paths. A top N search is executed on {@link #fst} seeded with
    -   * the matched partial paths. Upon reaching a completed path, {@link CompletionScorer#accept(int, Bits)}
    -   * and {@link CompletionScorer#score(float, float)} is used on the document id, index weight
    -   * and query boost to filter and score the entry, before being collected via
    -   * {@link TopSuggestDocsCollector#collect(int, CharSequence, CharSequence, float)}
    +   * Collects at most {@link TopSuggestDocsCollector#getCountToCollect()} completions that match the
    +   * provided {@link CompletionScorer}.
    +   *
    +   * <p>The {@link CompletionScorer#automaton} is intersected with the {@link #fst}. {@link
    +   * CompletionScorer#weight} is used to compute boosts and/or extract context for each matched
    +   * partial paths. A top N search is executed on {@link #fst} seeded with the matched partial
    +   * paths. Upon reaching a completed path, {@link CompletionScorer#accept(int, Bits)} and {@link
    +   * CompletionScorer#score(float, float)} is used on the document id, index weight and query boost
    +   * to filter and score the entry, before being collected via {@link
    +   * TopSuggestDocsCollector#collect(int, CharSequence, CharSequence, float)}
        */
    -  public void lookup(final CompletionScorer scorer, Bits acceptDocs, final TopSuggestDocsCollector collector) throws IOException {
    -    final double liveDocsRatio = calculateLiveDocRatio(scorer.reader.numDocs(), scorer.reader.maxDoc());
    +  public void lookup(
    +      final CompletionScorer scorer, Bits acceptDocs, final TopSuggestDocsCollector collector)
    +      throws IOException {
    +    final double liveDocsRatio =
    +        calculateLiveDocRatio(scorer.reader.numDocs(), scorer.reader.maxDoc());
         if (liveDocsRatio == -1) {
           return;
         }
    -    final List<FSTUtil.Path<Pair<Long, BytesRef>>> prefixPaths = FSTUtil.intersectPrefixPaths(scorer.automaton, fst);
    +    final List<FSTUtil.Path<Pair<Long, BytesRef>>> prefixPaths =
    +        FSTUtil.intersectPrefixPaths(scorer.automaton, fst);
         // The topN is increased by a factor of # of intersected path
         // to ensure search admissibility. For example, one suggestion can
         // have multiple contexts, resulting in num_context paths for the
    @@ -143,99 +142,113 @@ public final class NRTSuggester implements Accountable {
         // have been collected, regardless of the set topN value. This value is the
         // maximum number of suggestions that can be collected.
         final int topN = collector.getCountToCollect() * prefixPaths.size();
    -    final int queueSize = getMaxTopNSearcherQueueSize(topN, scorer.reader.numDocs(), liveDocsRatio, scorer.filtered);
    +    final int queueSize =
    +        getMaxTopNSearcherQueueSize(topN, scorer.reader.numDocs(), liveDocsRatio, scorer.filtered);
     
         final CharsRefBuilder spare = new CharsRefBuilder();
     
         Comparator<Pair<Long, BytesRef>> comparator = getComparator();
    -    Util.TopNSearcher<Pair<Long, BytesRef>> searcher = new Util.TopNSearcher<Pair<Long, BytesRef>>(fst, topN, queueSize, comparator,
    -        new ScoringPathComparator(scorer)) {
    +    Util.TopNSearcher<Pair<Long, BytesRef>> searcher =
    +        new Util.TopNSearcher<Pair<Long, BytesRef>>(
    +            fst, topN, queueSize, comparator, new ScoringPathComparator(scorer)) {
     
    -      private final ByteArrayDataInput scratchInput = new ByteArrayDataInput();
    +          private final ByteArrayDataInput scratchInput = new ByteArrayDataInput();
     
    -      @Override
    -      protected boolean acceptPartialPath(Util.FSTPath<Pair<Long, BytesRef>> path) {
    -        if (collector.doSkipDuplicates()) {
    -          // We are removing dups
    -          if (path.payload == -1) {
    -            // This path didn't yet see the complete surface form; let's see if it just did with the arc output we just added:
    -            BytesRef arcOutput = path.arc.output().output2;
    -            BytesRef output = path.output.output2;
    -            for(int i=0;i<arcOutput.length;i++) {
    -              if (arcOutput.bytes[arcOutput.offset+i] == payloadSep) {
    -                // OK this arc that the path was just extended by contains the payloadSep, so we now have a full surface form in this path
    -                path.payload = output.length - arcOutput.length + i;
    -                assert output.bytes[output.offset+path.payload] == payloadSep;
    -                break;
    -              }
    -            }
    -          }
    +          @Override
    +          protected boolean acceptPartialPath(Util.FSTPath<Pair<Long, BytesRef>> path) {
    +            if (collector.doSkipDuplicates()) {
    +              // We are removing dups
    +              if (path.payload == -1) {
    +                // This path didn't yet see the complete surface form; let's see if it just did with
    +                // the arc output we just added:
    +                BytesRef arcOutput = path.arc.output().output2;
    +                BytesRef output = path.output.output2;
    +                for (int i = 0; i < arcOutput.length; i++) {
    +                  if (arcOutput.bytes[arcOutput.offset + i] == payloadSep) {
    +                    // OK this arc that the path was just extended by contains the payloadSep, so we
    +                    // now have a full surface form in this path
    +                    path.payload = output.length - arcOutput.length + i;
    +                    assert output.bytes[output.offset + path.payload] == payloadSep;
    +                    break;
    +                  }
    +                }
    +              }
     
    -          if (path.payload != -1) {
    -            BytesRef output = path.output.output2;
    -            spare.copyUTF8Bytes(output.bytes, output.offset, path.payload);
    -            if (collector.seenSurfaceForms.contains(spare.chars(), 0, spare.length())) {
    -              return false;
    -            }
    -          }
    -        }
    -        return true;
    -      }
    +              if (path.payload != -1) {
    +                BytesRef output = path.output.output2;
    +                spare.copyUTF8Bytes(output.bytes, output.offset, path.payload);
    +                if (collector.seenSurfaceForms.contains(spare.chars(), 0, spare.length())) {
    +                  return false;
    +                }
    +              }
    +            }
    +            return true;
    +          }
     
    -      @Override
    -      protected boolean acceptResult(Util.FSTPath<Pair<Long, BytesRef>> path) {
    -        BytesRef output = path.output.output2;
    -        int payloadSepIndex;
    -        if (path.payload != -1) {
    -          payloadSepIndex = path.payload;
    -          spare.copyUTF8Bytes(output.bytes, output.offset, payloadSepIndex);
    -        } else {
    -          assert collector.doSkipDuplicates() == false;
    -          payloadSepIndex = parseSurfaceForm(output, payloadSep, spare);
    -        }
    -
    -        scratchInput.reset(output.bytes, output.offset + payloadSepIndex + 1, output.length - payloadSepIndex - 1);
    -        int docID = scratchInput.readVInt();
    -
    -        if (!scorer.accept(docID, acceptDocs)) {
    -          return false;
    -        }
    -        if (collector.doSkipDuplicates()) {
    -          // now record that we've seen this surface form:
    -          char[] key = new char[spare.length()];
    -          System.arraycopy(spare.chars(), 0, key, 0, spare.length());
    -          if (collector.seenSurfaceForms.contains(key)) {
    -            // we already collected a higher scoring document with this key, in this segment:
    -            return false;
    -          }
    -          collector.seenSurfaceForms.add(key);
    -        }
    -        try {
    -          float score = scorer.score(decode(path.output.output1), path.boost);
    -          collector.collect(docID, spare.toCharsRef(), path.context, score);
    -          return true;
    -        } catch (IOException e) {
    -          throw new RuntimeException(e);
    -        }
    -      }
    -    };
    +          @Override
    +          protected boolean acceptResult(Util.FSTPath<Pair<Long, BytesRef>> path) {
    +            BytesRef output = path.output.output2;
    +            int payloadSepIndex;
    +            if (path.payload != -1) {
    +              payloadSepIndex = path.payload;
    +              spare.copyUTF8Bytes(output.bytes, output.offset, payloadSepIndex);
    +            } else {
    +              assert collector.doSkipDuplicates() == false;
    +              payloadSepIndex = parseSurfaceForm(output, payloadSep, spare);
    +            }
    +
    +            scratchInput.reset(
    +                output.bytes,
    +                output.offset + payloadSepIndex + 1,
    +                output.length - payloadSepIndex - 1);
    +            int docID = scratchInput.readVInt();
    +
    +            if (!scorer.accept(docID, acceptDocs)) {
    +              return false;
    +            }
    +            if (collector.doSkipDuplicates()) {
    +              // now record that we've seen this surface form:
    +              char[] key = new char[spare.length()];
    +              System.arraycopy(spare.chars(), 0, key, 0, spare.length());
    +              if (collector.seenSurfaceForms.contains(key)) {
    +                // we already collected a higher scoring document with this key, in this segment:
    +                return false;
    +              }
    +              collector.seenSurfaceForms.add(key);
    +            }
    +            try {
    +              float score = scorer.score(decode(path.output.output1), path.boost);
    +              collector.collect(docID, spare.toCharsRef(), path.context, score);
    +              return true;
    +            } catch (IOException e) {
    +              throw new RuntimeException(e);
    +            }
    +          }
    +        };
     
         for (FSTUtil.Path<Pair<Long, BytesRef>> path : prefixPaths) {
           scorer.weight.setNextMatch(path.input.get());
           BytesRef output = path.output.output2;
           int payload = -1;
           if (collector.doSkipDuplicates()) {
    -        for(int j=0;j<output.length;j++) {
    -          if (output.bytes[output.offset+j] == payloadSep) {
    +        for (int j = 0; j < output.length; j++) {
    +          if (output.bytes[output.offset + j] == payloadSep) {
                 payload = j;
                 break;
               }
             }
           }
    @@ -257,9 +271,12 @@ public final class NRTSuggester implements Accountable {
     
    -  private static class ScoringPathComparator implements Comparator<Util.FSTPath<Pair<Long, BytesRef>>> {
    +  private static class ScoringPathComparator
    +      implements Comparator<Util.FSTPath<Pair<Long, BytesRef>>> {
         private final CompletionScorer scorer;
     
         public ScoringPathComparator(CompletionScorer scorer) {
         }
     
         @Override
    -    public int compare(Util.FSTPath<Pair<Long, BytesRef>> first, Util.FSTPath<Pair<Long, BytesRef>> second) {
    -      int cmp = Float.compare(scorer.score(decode(second.output.output1), second.boost),
    -          scorer.score(decode(first.output.output1), first.boost));
    +    public int compare(
    +        Util.FSTPath<Pair<Long, BytesRef>> first, Util.FSTPath<Pair<Long, BytesRef>> second) {
    +      int cmp =
    +          Float.compare(
    +              scorer.score(decode(second.output.output1), second.boost),
    +              scorer.score(decode(first.output.output1), first.boost));
           return (cmp != 0) ? cmp : first.input.get().compareTo(second.input.get());
         }
       }
    @@ -274,25 +291,25 @@ public final class NRTSuggester implements Accountable {
       }
     
       /**
    -   * Simple heuristics to try to avoid over-pruning potential suggestions by the
    -   * TopNSearcher. Since suggestion entries can be rejected if they belong
    -   * to a deleted document, the length of the TopNSearcher queue has to
    -   * be increased by some factor, to account for the filtered out suggestions.
    -   * This heuristic will try to make the searcher admissible, but the search
    -   * can still lead to over-pruning
    -   * <p>
    -   * If a filter is applied, the queue size is increased by
    -   * half the number of live documents.
    -   * <p>
    -   * The maximum queue size is {@link #MAX_TOP_N_QUEUE_SIZE}
    +   * Simple heuristics to try to avoid over-pruning potential suggestions by the TopNSearcher. Since
    +   * suggestion entries can be rejected if they belong to a deleted document, the length of the
    +   * TopNSearcher queue has to be increased by some factor, to account for the filtered out
    +   * suggestions. This heuristic will try to make the searcher admissible, but the search can still
    +   * lead to over-pruning
    +   *
    +   * <p>If a filter is applied, the queue size is increased by half the number of live
    +   * documents.
    +   *
    +   * <p>The maximum queue size is {@link #MAX_TOP_N_QUEUE_SIZE}
        */
    -  private int getMaxTopNSearcherQueueSize(int topN, int numDocs, double liveDocsRatio, boolean filterEnabled) {
    +  private int getMaxTopNSearcherQueueSize(
    +      int topN, int numDocs, double liveDocsRatio, boolean filterEnabled) {
         long maxQueueSize = topN * maxAnalyzedPathsPerOutput;
         // liveDocRatio can be at most 1.0 (if no docs were deleted)
         assert liveDocsRatio <= 1.0d;
         maxQueueSize = (long) (maxQueueSize / liveDocsRatio);
         if (filterEnabled) {
    -      maxQueueSize = maxQueueSize + (numDocs/2);
    +      maxQueueSize = maxQueueSize + (numDocs / 2);
         }
         return (int) Math.min(MAX_TOP_N_QUEUE_SIZE, maxQueueSize);
       }
    @@ -324,12 +341,21 @@ public final class NRTSuggester implements Accountable {
           OffHeapFSTStore store = new OffHeapFSTStore();
           IndexInput clone = input.clone();
           clone.seek(input.getFilePointer());
    -      fst = new FST<>(clone, clone, new PairOutputs<>(
    -          PositiveIntOutputs.getSingleton(), ByteSequenceOutputs.getSingleton()), store);
    +      fst =
    +          new FST<>(
    +              clone,
    +              clone,
    +              new PairOutputs<>(
    +                  PositiveIntOutputs.getSingleton(), ByteSequenceOutputs.getSingleton()),
    +              store);
           input.seek(clone.getFilePointer() + store.size());
         } else {
    -      fst = new FST<>(input, input, new PairOutputs<>(
    -          PositiveIntOutputs.getSingleton(), ByteSequenceOutputs.getSingleton()));
    +      fst =
    +          new FST<>(
    +              input,
    +              input,
    +              new PairOutputs<>(
    +                  PositiveIntOutputs.getSingleton(), ByteSequenceOutputs.getSingleton()));
         }
     
         /* read some meta info */
    @@ -351,16 +377,14 @@ public final class NRTSuggester implements Accountable {
       }
     
       static long decode(long output) {
    -    assert output >= 0 && output <= Integer.MAX_VALUE :
    -        "decoded output: " + output + " is not within 0 and Integer.MAX_VALUE";
    +    assert output >= 0 && output <= Integer.MAX_VALUE
    +        : "decoded output: " + output + " is not within 0 and Integer.MAX_VALUE";
         return Integer.MAX_VALUE - output;
       }
     
    -  /**
    -   * Helper to encode/decode payload (surface + PAYLOAD_SEP + docID) output
    -   */
    +  /** Helper to encode/decode payload (surface + PAYLOAD_SEP + docID) output */
       static final class PayLoadProcessor {
    -    final static private int MAX_DOC_ID_LEN_WITH_SEP = 6; // vint takes at most 5 bytes
    +    private static final int MAX_DOC_ID_LEN_WITH_SEP = 6; // vint takes at most 5 bytes
     
         static int parseSurfaceForm(final BytesRef output, int payloadSep, CharsRefBuilder spare) {
           int surfaceFormLen = -1;
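
To make the queue-sizing heuristic above concrete with invented numbers: for topN = 10 and maxAnalyzedPathsPerOutput = 2, the base queue size is 20; a liveDocsRatio of 0.5 doubles it to 40; and with a filter enabled over numDocs = 1000 it grows by 500 to 540 entries, well under the MAX_TOP_N_QUEUE_SIZE cap of 5000.
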
    diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/NRTSuggesterBuilder.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/NRTSuggesterBuilder.java
    index ca2934439c3..9123ab75528 100644
    --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/NRTSuggesterBuilder.java
    +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/NRTSuggesterBuilder.java
    @@ -16,40 +16,30 @@
      */
     package org.apache.lucene.search.suggest.document;
     
    +import static org.apache.lucene.search.suggest.document.NRTSuggester.encode;
    +
     import java.io.IOException;
     import java.util.PriorityQueue;
    -
     import org.apache.lucene.analysis.miscellaneous.ConcatenateGraphFilter;
     import org.apache.lucene.store.DataOutput;
     import org.apache.lucene.store.IndexInput;
     import org.apache.lucene.util.BytesRef;
     import org.apache.lucene.util.BytesRefBuilder;
     import org.apache.lucene.util.IntsRefBuilder;
    -import org.apache.lucene.util.fst.FSTCompiler;
     import org.apache.lucene.util.fst.ByteSequenceOutputs;
     import org.apache.lucene.util.fst.FST;
    +import org.apache.lucene.util.fst.FSTCompiler;
     import org.apache.lucene.util.fst.PairOutputs;
     import org.apache.lucene.util.fst.PositiveIntOutputs;
     import org.apache.lucene.util.fst.Util;
     
    -import static org.apache.lucene.search.suggest.document.NRTSuggester.encode;
    -
    -/**
    - * Builder for {@link NRTSuggester}
    - *
    - */
    +/** Builder for {@link NRTSuggester} */
     final class NRTSuggesterBuilder {
     
    -  /**
    -   * Label used to separate surface form and docID
    -   * in the output
    -   */
    +  /** Label used to separate surface form and docID in the output */
       public static final int PAYLOAD_SEP = ConcatenateGraphFilter.SEP_LABEL;
     
    -  /**
    -   * Marks end of the analyzed input and start of dedup
    -   * byte.
    -   */
    +  /** Marks end of the analyzed input and start of dedup byte. */
       public static final int END_BYTE = 0x0;
     
       private final PairOutputs<Long, BytesRef> outputs;
    @@ -62,37 +52,32 @@ final class NRTSuggesterBuilder {
     
       private int maxAnalyzedPathsPerOutput = 0;
     
    -  /**
    -   * Create a builder for {@link NRTSuggester}
    -   */
    +  /** Create a builder for {@link NRTSuggester} */
       public NRTSuggesterBuilder() {
         this.payloadSep = PAYLOAD_SEP;
         this.endByte = END_BYTE;
    -    this.outputs = new PairOutputs<>(PositiveIntOutputs.getSingleton(), ByteSequenceOutputs.getSingleton());
    +    this.outputs =
    +        new PairOutputs<>(PositiveIntOutputs.getSingleton(), ByteSequenceOutputs.getSingleton());
         this.entries = new PriorityQueue<>();
         this.fstCompiler = new FSTCompiler<>(FST.INPUT_TYPE.BYTE1, outputs);
       }
     
    -  /**
    -   * Initializes an FST input term to add entries against
    -   */
    +  /** Initializes an FST input term to add entries against */
       public void startTerm(BytesRef analyzed) {
         this.analyzed.copyBytes(analyzed);
         this.analyzed.append((byte) endByte);
       }
     
       /**
    -   * Adds an entry for the latest input term, should be called after
    -   * {@link #startTerm(org.apache.lucene.util.BytesRef)} on the desired input
    +   * Adds an entry for the latest input term, should be called after {@link
    +   * #startTerm(org.apache.lucene.util.BytesRef)} on the desired input
        */
       public void addEntry(int docID, BytesRef surfaceForm, long weight) throws IOException {
         BytesRef payloadRef = NRTSuggester.PayLoadProcessor.make(surfaceForm, docID, payloadSep);
         entries.add(new Entry(payloadRef, encode(weight)));
       }
     
    -  /**
    -   * Writes all the entries for the FST input term
    -   */
    +  /** Writes all the entries for the FST input term */
       public void finishTerm() throws IOException {
         int numArcs = 0;
         int numDedupBytes = 1;
    @@ -115,8 +100,8 @@ final class NRTSuggesterBuilder {
       }
     
       /**
    -   * Builds and stores a FST that can be loaded with
    -   * {@link NRTSuggester#load(IndexInput, CompletionPostingsFormat.FSTLoadMode)})}
    +   * Builds and stores a FST that can be loaded with {@link NRTSuggester#load(IndexInput,
    +   * CompletionPostingsFormat.FSTLoadMode)})}
        */
       public boolean store(DataOutput output) throws IOException {
         final FST<Pair<Long, BytesRef>> fst = fstCompiler.compile();
    @@ -134,12 +119,9 @@ final class NRTSuggesterBuilder {
       }
     
       /**
    -   * Num arcs for nth dedup byte:
    -   * if n <= 5: 1 + (2 * n)
    -   * else: (1 + (2 * n)) * n
    -   * <p>
    -   * TODO: is there a better way to make the fst built to be
    -   * more TopNSearcher friendly?
    +   * Num arcs for nth dedup byte: if n <= 5: 1 + (2 * n) else: (1 + (2 * n)) * n
    +   *
    +   * <p>TODO: is there a better way to make the fst built to be more TopNSearcher friendly?
        */
       private static int maxNumArcsForDedupByte(int currentNumDedupBytes) {
         int maxArcs = 1 + (2 * currentNumDedupBytes);
    @@ -149,7 +131,7 @@ final class NRTSuggesterBuilder {
         return Math.min(maxArcs, 255);
       }
     
    -  private final static class Entry implements Comparable<Entry> {
    +  private static final class Entry implements Comparable<Entry> {
         final BytesRef payload;
         final long weight;
    diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/PrefixCompletionQuery.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/PrefixCompletionQuery.java
    index 896d9c8f15e..1d10d8bcf1c 100644
    --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/PrefixCompletionQuery.java
    +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/PrefixCompletionQuery.java
    @@ -17,7 +17,6 @@
     package org.apache.lucene.search.suggest.document;
     
     import java.io.IOException;
    -
     import org.apache.lucene.analysis.Analyzer;
     import org.apache.lucene.index.Term;
     import org.apache.lucene.search.IndexSearcher;
    @@ -27,15 +26,16 @@ import org.apache.lucene.search.Weight;
     import org.apache.lucene.search.suggest.BitsProducer;
     
     /**
    - * A {@link CompletionQuery} which takes an {@link Analyzer}
    - * to analyze the prefix of the query term.
    - * <p>
    - * Example usage of querying an analyzed prefix 'sugg'
    - * against a field 'suggest_field' is as follows:
    + * A {@link CompletionQuery} which takes an {@link Analyzer} to analyze the prefix of the query
    + * term.
    + *
    + * <p>Example usage of querying an analyzed prefix 'sugg' against a field 'suggest_field' is as
    + * follows:
      *
      * <pre class="prettyprint">
      *  CompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg"));
      * </pre>
    + *
      * @lucene.experimental
      */
     public class PrefixCompletionQuery extends CompletionQuery {
    @@ -43,8 +43,8 @@ public class PrefixCompletionQuery extends CompletionQuery {
       protected final CompletionAnalyzer analyzer;
     
       /**
    -   * Calls {@link PrefixCompletionQuery#PrefixCompletionQuery(Analyzer, Term, BitsProducer)}
    -   * with no filter
    +   * Calls {@link PrefixCompletionQuery#PrefixCompletionQuery(Analyzer, Term, BitsProducer)} with no
    +   * filter
        */
       public PrefixCompletionQuery(Analyzer analyzer, Term term) {
         this(analyzer, term, null);
    @@ -54,8 +54,8 @@ public class PrefixCompletionQuery extends CompletionQuery {
        * Constructs an analyzed prefix completion query
        *
        * @param analyzer used to analyze the provided {@link Term#text()}
    -   * @param term query is run against {@link Term#field()} and {@link Term#text()}
    -   *             is analyzed with analyzer
    +   * @param term query is run against {@link Term#field()} and {@link Term#text()} is analyzed with
    +   *     analyzer
        * @param filter used to query on a sub set of documents
        */
       public PrefixCompletionQuery(Analyzer analyzer, Term term, BitsProducer filter) {
    @@ -68,8 +68,10 @@ public class PrefixCompletionQuery extends CompletionQuery {
       }
     
       @Override
    -  public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
    -    try (CompletionTokenStream stream = (CompletionTokenStream) analyzer.tokenStream(getField(), getTerm().text())) {
    +  public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost)
    +      throws IOException {
    +    try (CompletionTokenStream stream =
    +        (CompletionTokenStream) analyzer.tokenStream(getField(), getTerm().text())) {
           return new CompletionWeight(this, stream.toAutomaton());
         }
       }
    @@ -78,9 +80,7 @@ public class PrefixCompletionQuery extends CompletionQuery {
       public void visit(QueryVisitor visitor) {
         visitor.visitLeaf(this);
       }
    -  /**
    -   * Gets the analyzer used to analyze the prefix.
    -   */
    +  /** Gets the analyzer used to analyze the prefix. */
       public Analyzer getAnalyzer() {
         return analyzer;
       }
    diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/RegexCompletionQuery.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/RegexCompletionQuery.java
    index 4487f226a9b..c18e89b25c4 100644
    --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/RegexCompletionQuery.java
    +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/RegexCompletionQuery.java
    @@ -17,7 +17,6 @@
     package org.apache.lucene.search.suggest.document;
     
     import java.io.IOException;
    -
     import org.apache.lucene.index.Term;
     import org.apache.lucene.search.IndexSearcher;
     import org.apache.lucene.search.QueryVisitor;
    @@ -30,20 +29,16 @@ import org.apache.lucene.util.automaton.Operations;
     import org.apache.lucene.util.automaton.RegExp;
     
     /**
    - * A {@link CompletionQuery} which takes a regular expression
    - * as the prefix of the query term.
    + * A {@link CompletionQuery} which takes a regular expression as the prefix of the query term.
      *
    - * <p>
    - * Example usage of querying a prefix of 'sug' and 'sub'
    - * as a regular expression against a suggest field 'suggest_field':
    + * <p>Example usage of querying a prefix of 'sug' and 'sub' as a regular expression against a
    + * suggest field 'suggest_field':
      *
      * <pre class="prettyprint">
      *  CompletionQuery query = new RegexCompletionQuery(new Term("suggest_field", "su[g|b]"));
      * </pre>
      *
    - * <p>
    - * See {@link RegExp} for the supported regular expression
    - * syntax
    + * <p>See {@link RegExp} for the supported regular expression syntax
      *
      * @lucene.experimental
      */
    @@ -52,25 +47,22 @@ public class RegexCompletionQuery extends CompletionQuery {
     
       private final int flags;
       private final int maxDeterminizedStates;
     
    -  /**
    -   * Calls {@link RegexCompletionQuery#RegexCompletionQuery(Term, BitsProducer)}
    -   * with no filter
    -   */
    +  /** Calls {@link RegexCompletionQuery#RegexCompletionQuery(Term, BitsProducer)} with no filter */
       public RegexCompletionQuery(Term term) {
         this(term, null);
       }
     
       /**
    -   * Calls {@link RegexCompletionQuery#RegexCompletionQuery(Term, int, int, BitsProducer)}
    -   * enabling all optional regex syntax and maxDeterminizedStates of
    -   * {@value Operations#DEFAULT_MAX_DETERMINIZED_STATES}
    +   * Calls {@link RegexCompletionQuery#RegexCompletionQuery(Term, int, int, BitsProducer)} enabling
    +   * all optional regex syntax and maxDeterminizedStates of {@value
    +   * Operations#DEFAULT_MAX_DETERMINIZED_STATES}
        */
       public RegexCompletionQuery(Term term, BitsProducer filter) {
         this(term, RegExp.ALL, Operations.DEFAULT_MAX_DETERMINIZED_STATES, filter);
       }
     
       /**
    -   * Calls {@link RegexCompletionQuery#RegexCompletionQuery(Term, int, int, BitsProducer)}
    -   * with no filter
    +   * Calls {@link RegexCompletionQuery#RegexCompletionQuery(Term, int, int, BitsProducer)} with no
    +   * filter
        */
       public RegexCompletionQuery(Term term, int flags, int maxDeterminizedStates) {
         this(term, flags, maxDeterminizedStates, null);
    @@ -79,38 +71,37 @@ public class RegexCompletionQuery extends CompletionQuery {
       /**
        * Constructs a regular expression completion query
        *
    -   * @param term query is run against {@link Term#field()} and {@link Term#text()}
    -   *             is interpreted as a regular expression
    +   * @param term query is run against {@link Term#field()} and {@link Term#text()} is interpreted as
    +   *     a regular expression
        * @param flags used as syntax_flag in {@link RegExp#RegExp(String, int)}
        * @param maxDeterminizedStates used in {@link RegExp#toAutomaton(int)}
        * @param filter used to query on a sub set of documents
        */
    -  public RegexCompletionQuery(Term term, int flags, int maxDeterminizedStates, BitsProducer filter) {
    +  public RegexCompletionQuery(
    +      Term term, int flags, int maxDeterminizedStates, BitsProducer filter) {
         super(term, filter);
         this.flags = flags;
         this.maxDeterminizedStates = maxDeterminizedStates;
       }
     
       @Override
    -  public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
    +  public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost)
    +      throws IOException {
         // If an empty regex is provided, we return an automaton that matches nothing. This ensures
         // consistency with PrefixCompletionQuery, which returns no results for an empty term.
    -    Automaton automaton = getTerm().text().isEmpty()
    -        ? Automata.makeEmpty()
    -        : new RegExp(getTerm().text(), flags).toAutomaton(maxDeterminizedStates);
    +    Automaton automaton =
    +        getTerm().text().isEmpty()
    +            ? Automata.makeEmpty()
    +            : new RegExp(getTerm().text(), flags).toAutomaton(maxDeterminizedStates);
         return new CompletionWeight(this, automaton);
       }
     
    -  /**
    -   * Get the regex flags
    -   */
    +  /** Get the regex flags */
       public int getFlags() {
         return flags;
       }
     
    -  /**
    -   * Get the maximum number of states permitted in the determinized automaton
    -   */
    +  /** Get the maximum number of states permitted in the determinized automaton */
       public int getMaxDeterminizedStates() {
         return maxDeterminizedStates;
       }
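
A short sketch of the four-argument constructor reformatted above; the field name, syntax flag, and state budget are illustrative choices, not values from this patch:

      // Restrict the regex to core syntax (RegExp.NONE) and cap determinization at
      // 10000 states instead of Operations.DEFAULT_MAX_DETERMINIZED_STATES.
      RegexCompletionQuery query =
          new RegexCompletionQuery(new Term("suggest_field", "su[g|b]"), RegExp.NONE, 10000, null);
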
    diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/SuggestField.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/SuggestField.java
    index b2d24c2c84e..82484e8a595 100644
    --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/SuggestField.java
    +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/SuggestField.java
    @@ -18,7 +18,6 @@ package org.apache.lucene.search.suggest.document;
     
     import java.io.ByteArrayOutputStream;
     import java.io.IOException;
    -
     import org.apache.lucene.analysis.Analyzer;
     import org.apache.lucene.analysis.TokenStream;
     import org.apache.lucene.analysis.miscellaneous.ConcatenateGraphFilter;
    @@ -29,26 +28,22 @@ import org.apache.lucene.store.OutputStreamDataOutput;
     import org.apache.lucene.util.BytesRef;
     
     /**
    - * <p>
    - * Field that indexes a string value and a weight as a weighted completion
    - * against a named suggester.
    - * Field is tokenized, not stored and stores documents, frequencies and positions.
    - * Field can be used to provide near real time document suggestions.
    - * </p>
    - * <p>
    - * Besides the usual {@link org.apache.lucene.analysis.Analyzer}s,
    - * {@link CompletionAnalyzer}
    - * can be used to tune suggest field only parameters
    - * (e.g. preserving token separators, preserving position increments
    - * when converting the token stream to an automaton)
    - * </p>
    - * <p>
    - * Example indexing usage:
    + * Field that indexes a string value and a weight as a weighted completion against a named
    + * suggester. Field is tokenized, not stored and stores documents, frequencies and positions. Field
    + * can be used to provide near real time document suggestions.
    + *
    + * <p>Besides the usual {@link org.apache.lucene.analysis.Analyzer}s, {@link CompletionAnalyzer} can
    + * be used to tune suggest field only parameters (e.g. preserving token separators, preserving
    + * position increments when converting the token stream to an automaton)
    + *
    + * <p>Example indexing usage:
    + *
      * <pre class="prettyprint">
      * document.add(new SuggestField(name, "suggestion", 4));
      * </pre>
    - * To perform document suggestions based on the this field, use
    - * {@link SuggestIndexSearcher#suggest(CompletionQuery, int, boolean)}
    + *
    + * To perform document suggestions based on the this field, use {@link
    + * SuggestIndexSearcher#suggest(CompletionQuery, int, boolean)}
      *
      * @lucene.experimental
      */
    @@ -56,6 +51,7 @@ public class SuggestField extends Field {
     
       /** Default field type for suggest field */
       public static final FieldType FIELD_TYPE = new FieldType();
    +
       static {
         FIELD_TYPE.setTokenized(true);
         FIELD_TYPE.setStored(false);
    @@ -73,13 +69,11 @@ public class SuggestField extends Field {
       /**
        * Creates a {@link SuggestField}
        *
    -   * @param name   field name
    -   * @param value  field value to get suggestions on
    +   * @param name field name
    +   * @param value field value to get suggestions on
        * @param weight field weight
    -   *
    -   * @throws IllegalArgumentException if either the name or value is null,
    -   * if value is an empty string, if the weight is negative, if value contains
    -   * any reserved characters
    +   * @throws IllegalArgumentException if either the name or value is null, if value is an empty
    +   *     string, if the weight is negative, if value contains any reserved characters
        */
       public SuggestField(String name, String value, int weight) {
         super(name, value, FIELD_TYPE);
    @@ -91,8 +85,14 @@ public class SuggestField extends Field {
         }
         for (int i = 0; i < value.length(); i++) {
           if (isReserved(value.charAt(i))) {
    -        throw new IllegalArgumentException("Illegal input [" + value + "] UTF-16 codepoint [0x"
    -            + Integer.toHexString((int) value.charAt(i))+ "] at position " + i + " is a reserved character");
    +        throw new IllegalArgumentException(
    +            "Illegal input ["
    +                + value
    +                + "] UTF-16 codepoint [0x"
    +                + Integer.toHexString((int) value.charAt(i))
    +                + "] at position "
    +                + i
    +                + " is a reserved character");
           }
         }
         this.surfaceForm = new BytesRef(value);
    @@ -109,7 +109,7 @@ public class SuggestField extends Field {
       /**
        * Wraps a stream with a CompletionTokenStream.
        *
    -   * Subclasses can override this method to change the indexing pipeline.
    +   * <p>Subclasses can override this method to change the indexing pipeline.
        */
       protected CompletionTokenStream wrapTokenStream(TokenStream stream) {
         if (stream instanceof CompletionTokenStream) {
    @@ -119,9 +119,7 @@ public class SuggestField extends Field {
         }
       }
     
    -  /**
    -   * Returns a byte to denote the type of the field
    -   */
    +  /** Returns a byte to denote the type of the field */
       protected byte type() {
         return TYPE;
       }
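
Complementing the javadoc example above, a hedged indexing sketch (the IndexWriter setup and field name are assumed):

      // Index one weighted suggestion; the weight (4) ranks this entry at lookup time.
      Document document = new Document();
      document.add(new SuggestField("suggest_field", "suggestion", 4));
      writer.addDocument(document);
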
    diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/SuggestIndexSearcher.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/SuggestIndexSearcher.java
    index 8d8d550a0c6..f58959c4735 100644
    --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/SuggestIndexSearcher.java
    +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/SuggestIndexSearcher.java
    @@ -17,7 +17,6 @@
     package org.apache.lucene.search.suggest.document;
     
     import java.io.IOException;
    -
     import org.apache.lucene.index.IndexReader;
     import org.apache.lucene.index.LeafReaderContext;
     import org.apache.lucene.search.BulkScorer;
    @@ -26,13 +25,12 @@ import org.apache.lucene.search.IndexSearcher;
     import org.apache.lucene.search.Weight;
     
     /**
    - * Adds document suggest capabilities to IndexSearcher.
    - * Any {@link CompletionQuery} can be used to suggest documents.
    + * Adds document suggest capabilities to IndexSearcher. Any {@link CompletionQuery} can be used to
    + * suggest documents.
      *
    - * Use {@link PrefixCompletionQuery} for analyzed prefix queries,
    - * {@link RegexCompletionQuery} for regular expression prefix queries,
    - * {@link FuzzyCompletionQuery} for analyzed prefix with typo tolerance
    - * and {@link ContextQuery} to boost and/or filter suggestions by contexts
    + * <p>Use {@link PrefixCompletionQuery} for analyzed prefix queries, {@link RegexCompletionQuery}
    + * for regular expression prefix queries, {@link FuzzyCompletionQuery} for analyzed prefix with typo
    + * tolerance and {@link ContextQuery} to boost and/or filter suggestions by contexts
      *
      * @lucene.experimental
      */
    @@ -41,30 +39,25 @@ public class SuggestIndexSearcher extends IndexSearcher {
     
       // NOTE: we do not accept an ExecutorService here, because at least the dedup
       // logic in TopSuggestDocsCollector/NRTSuggester would not be thread safe (and maybe other things)
     
    -  /**
    -   * Creates a searcher with document suggest capabilities
    -   * for <code>reader</code>.
    -   */
    +  /** Creates a searcher with document suggest capabilities for <code>reader</code>. */
       public SuggestIndexSearcher(IndexReader reader) {
         super(reader);
       }
     
    -  /**
    -   * Returns top <code>n</code> completion hits for
    -   * <code>query</code>
    -   */
    -  public TopSuggestDocs suggest(CompletionQuery query, int n, boolean skipDuplicates) throws IOException {
    +  /** Returns top <code>n</code> completion hits for <code>query</code> */
    +  public TopSuggestDocs suggest(CompletionQuery query, int n, boolean skipDuplicates)
    +      throws IOException {
         TopSuggestDocsCollector collector = new TopSuggestDocsCollector(n, skipDuplicates);
         suggest(query, collector);
         return collector.get();
       }
     
       /**
    -   * Lower-level suggest API.
    -   * Collects completion hits through <code>collector</code> for <code>query</code>.
    +   * Lower-level suggest API. Collects completion hits through <code>collector</code> for <code>
    +   * query</code>.
        *
    -   * <p>{@link TopSuggestDocsCollector#collect(int, CharSequence, CharSequence, float)}
    -   * is called for every matching completion hit.
    +   * <p>{@link TopSuggestDocsCollector#collect(int, CharSequence, CharSequence, float)} is called
    +   * for every matching completion hit.
        */
       public void suggest(CompletionQuery query, TopSuggestDocsCollector collector) throws IOException {
         // TODO use IndexSearcher.rewrite instead
    diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/SuggestScoreDocPriorityQueue.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/SuggestScoreDocPriorityQueue.java
    index 9927cdd3062..fee4d35c4d2 100644
    --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/SuggestScoreDocPriorityQueue.java
    +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/SuggestScoreDocPriorityQueue.java
    @@ -21,14 +21,11 @@ import org.apache.lucene.search.suggest.document.TopSuggestDocs.SuggestScoreDoc;
     import org.apache.lucene.util.PriorityQueue;
     
     /**
    - * Bounded priority queue for {@link SuggestScoreDoc}s.
    - * Priority is based on {@link SuggestScoreDoc#score} and tie
    - * is broken by {@link SuggestScoreDoc#doc}
    + * Bounded priority queue for {@link SuggestScoreDoc}s. Priority is based on {@link
    + * SuggestScoreDoc#score} and tie is broken by {@link SuggestScoreDoc#doc}
      */
     final class SuggestScoreDocPriorityQueue extends PriorityQueue<SuggestScoreDoc> {
     
    -  /**
    -   * Creates a new priority queue of the specified size.
    -   */
    +  /** Creates a new priority queue of the specified size. */
       public SuggestScoreDocPriorityQueue(int size) {
         super(size);
       }
    @@ -44,9 +41,7 @@ final class SuggestScoreDocPriorityQueue extends PriorityQueue<SuggestScoreDoc>
         return a.score < b.score;
       }
     
    -  /**
    -   * Returns the top N results in descending order.
    -   */
    +  /** Returns the top N results in descending order. */
       public SuggestScoreDoc[] getResults() {
         int size = size();
         SuggestScoreDoc[] res = new SuggestScoreDoc[size];
    diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/TopSuggestDocs.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/TopSuggestDocs.java
    index f05e32a8b69..b19c9f3950b 100644
    --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/TopSuggestDocs.java
    +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/TopSuggestDocs.java
    @@ -22,39 +22,31 @@ import org.apache.lucene.search.TotalHits;
     import org.apache.lucene.search.suggest.Lookup;
     
     /**
    - * {@link org.apache.lucene.search.TopDocs} wrapper with
    - * an additional CharSequence key per {@link org.apache.lucene.search.ScoreDoc}
    + * {@link org.apache.lucene.search.TopDocs} wrapper with an additional CharSequence key per {@link
    + * org.apache.lucene.search.ScoreDoc}
      *
      * @lucene.experimental
      */
     public class TopSuggestDocs extends TopDocs {
     
    -  /**
    -   * Singleton for empty {@link TopSuggestDocs}
    -   */
    -  public final static TopSuggestDocs EMPTY = new TopSuggestDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), new SuggestScoreDoc[0]);
    +  /** Singleton for empty {@link TopSuggestDocs} */
    +  public static final TopSuggestDocs EMPTY =
    +      new TopSuggestDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), new SuggestScoreDoc[0]);
     
    -  /**
    -   * {@link org.apache.lucene.search.ScoreDoc} with an
    -   * additional CharSequence key
    -   */
    +  /** {@link org.apache.lucene.search.ScoreDoc} with an additional CharSequence key */
       public static class SuggestScoreDoc extends ScoreDoc implements Comparable<SuggestScoreDoc> {
     
    -    /**
    -     * Matched completion key
    -     */
    +    /** Matched completion key */
         public final CharSequence key;
     
    -    /**
    -     * Context for the completion
    -     */
    +    /** Context for the completion */
         public final CharSequence context;
     
         /**
          * Creates a SuggestScoreDoc instance
          *
    -     * @param doc   document id (hit)
    -     * @param key   matched completion
    +     * @param doc document id (hit)
    +     * @param key matched completion
          * @param score weight of the matched completion
          */
         public SuggestScoreDoc(int doc, CharSequence key, CharSequence context, float score) {
    @@ -84,35 +76,30 @@ public class TopSuggestDocs extends TopDocs {
     
         @Override
         public String toString() {
    -      return "key=" + key + " doc=" + doc + " score=" + score + " shardIndex=" + shardIndex;
    +      return "key=" + key + " doc=" + doc + " score=" + score + " shardIndex=" + shardIndex;
         }
       }
     
       /**
    -   * {@link org.apache.lucene.search.TopDocs} wrapper with
    -   * {@link TopSuggestDocs.SuggestScoreDoc}
    +   * {@link org.apache.lucene.search.TopDocs} wrapper with {@link TopSuggestDocs.SuggestScoreDoc}
        * instead of {@link org.apache.lucene.search.ScoreDoc}
        */
       public TopSuggestDocs(TotalHits totalHits, SuggestScoreDoc[] scoreDocs) {
         super(totalHits, scoreDocs);
       }
     
    -  /**
    -   * Returns {@link TopSuggestDocs.SuggestScoreDoc}s
    -   * for this instance
    -   */
    +  /** Returns {@link TopSuggestDocs.SuggestScoreDoc}s for this instance */
       public SuggestScoreDoc[] scoreLookupDocs() {
         return (SuggestScoreDoc[]) scoreDocs;
       }
     
       /**
    -   * Returns a new TopSuggestDocs, containing topN results across
    -   * the provided TopSuggestDocs, sorting by score. Each {@link TopSuggestDocs}
    -   * instance must be sorted.
    -   * Analogous to {@link org.apache.lucene.search.TopDocs#merge(int, org.apache.lucene.search.TopDocs[])}
    -   * for {@link TopSuggestDocs}
    +   * Returns a new TopSuggestDocs, containing topN results across the provided TopSuggestDocs,
    +   * sorting by score. Each {@link TopSuggestDocs} instance must be sorted. Analogous to {@link
    +   * org.apache.lucene.search.TopDocs#merge(int, org.apache.lucene.search.TopDocs[])} for {@link
    +   * TopSuggestDocs}
        *
    -   * NOTE: assumes every <code>shardHit</code> is already sorted by score
    +   * <p>NOTE: assumes every <code>shardHit</code> is already sorted by score
        */
       public static TopSuggestDocs merge(int topN, TopSuggestDocs[] shardHits) {
         SuggestScoreDocPriorityQueue priorityQueue = new SuggestScoreDocPriorityQueue(topN);
    @@ -125,10 +112,10 @@ public class TopSuggestDocs extends TopDocs {
         }
         SuggestScoreDoc[] topNResults = priorityQueue.getResults();
         if (topNResults.length > 0) {
    -      return new TopSuggestDocs(new TotalHits(topNResults.length, TotalHits.Relation.EQUAL_TO), topNResults);
    +      return new TopSuggestDocs(
    +          new TotalHits(topNResults.length, TotalHits.Relation.EQUAL_TO), topNResults);
         } else {
           return TopSuggestDocs.EMPTY;
         }
       }
    -
     }
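
A small sketch of the merge contract documented above; the shard arrays are placeholders, and each per-shard result must already be sorted by score:

      // Merge per-shard suggest hits into a single top-10 result.
      TopSuggestDocs merged = TopSuggestDocs.merge(10, new TopSuggestDocs[] {shard0Hits, shard1Hits});
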
    - * Non scoring collector that collect completions in order of their - * pre-computed scores. - *

    - * NOTE: One document can be collected multiple times if a document - * is matched for multiple unique completions for a given query - *

    - * Subclasses should only override - * {@link TopSuggestDocsCollector#collect(int, CharSequence, CharSequence, float)}. - *

    - * NOTE: {@link #setScorer(org.apache.lucene.search.Scorable)} and - * {@link #collect(int)} is not used + * {@link org.apache.lucene.search.Collector} that collects completion and score, along with + * document id + * + *

    Non scoring collector that collect completions in order of their pre-computed scores. + * + *

    NOTE: One document can be collected multiple times if a document is matched for multiple + * unique completions for a given query + * + *

    Subclasses should only override {@link TopSuggestDocsCollector#collect(int, CharSequence, + * CharSequence, float)}. + * + *

    NOTE: {@link #setScorer(org.apache.lucene.search.Scorable)} and {@link #collect(int)} is not + * used * * @lucene.experimental */ @@ -54,10 +52,16 @@ public class TopSuggestDocsCollector extends SimpleCollector { private final SuggestScoreDocPriorityQueue priorityQueue; private final int num; - /** Only set if we are deduplicating hits: holds all per-segment hits until the end, when we dedup them */ + /** + * Only set if we are deduplicating hits: holds all per-segment hits until the end, when we dedup + * them + */ private final List pendingResults; - /** Only set if we are deduplicating hits: holds all surface forms seen so far in the current segment */ + /** + * Only set if we are deduplicating hits: holds all surface forms seen so far in the current + * segment + */ final CharArraySet seenSurfaceForms; /** Document base offset for the current Leaf */ @@ -66,8 +70,7 @@ public class TopSuggestDocsCollector extends SimpleCollector { /** * Sole constructor * - * Collects at most num completions - * with corresponding document and weight + *

    Collects at most num completions with corresponding document and weight */ public TopSuggestDocsCollector(int num, boolean skipDuplicates) { if (num <= 0) { @@ -89,9 +92,7 @@ public class TopSuggestDocsCollector extends SimpleCollector { return seenSurfaceForms != null; } - /** - * Returns the number of results to be collected - */ + /** Returns the number of results to be collected */ public int getCountToCollect() { return num; } @@ -109,14 +110,13 @@ public class TopSuggestDocsCollector extends SimpleCollector { } /** - * Called for every matched completion, - * similar to {@link org.apache.lucene.search.LeafCollector#collect(int)} - * but for completions. + * Called for every matched completion, similar to {@link + * org.apache.lucene.search.LeafCollector#collect(int)} but for completions. * - * NOTE: collection at the leaf level is guaranteed to be in - * descending order of score + *

    NOTE: collection at the leaf level is guaranteed to be in descending order of score */ - public void collect(int docID, CharSequence key, CharSequence context, float score) throws IOException { + public void collect(int docID, CharSequence key, CharSequence context, float score) + throws IOException { SuggestScoreDoc current = new SuggestScoreDoc(docBase + docID, key, context, score); if (current == priorityQueue.insertWithOverflow(current)) { // if the current SuggestScoreDoc has overflown from pq, @@ -128,12 +128,13 @@ public class TopSuggestDocsCollector extends SimpleCollector { } /** - * Returns at most num Top scoring {@link org.apache.lucene.search.suggest.document.TopSuggestDocs}s + * Returns at most num Top scoring {@link + * org.apache.lucene.search.suggest.document.TopSuggestDocs}s */ public TopSuggestDocs get() throws IOException { SuggestScoreDoc[] suggestScoreDocs; - + if (seenSurfaceForms != null) { // NOTE: this also clears the priorityQueue: for (SuggestScoreDoc hit : priorityQueue.getResults()) { @@ -144,10 +145,13 @@ public class TopSuggestDocsCollector extends SimpleCollector { // truncating the FST top paths search, but across segments there may still be dups: seenSurfaceForms.clear(); - // TODO: we could use a priority queue here to make cost O(N * log(num)) instead of O(N * log(N)), where N = O(num * - // numSegments), but typically numSegments is smallish and num is smallish so this won't matter much in practice: + // TODO: we could use a priority queue here to make cost O(N * log(num)) instead of O(N * + // log(N)), where N = O(num * + // numSegments), but typically numSegments is smallish and num is smallish so this won't + // matter much in practice: - Collections.sort(pendingResults, + Collections.sort( + pendingResults, (a, b) -> { // sort by higher score int cmp = Float.compare(b.score, a.score); @@ -163,7 +167,7 @@ public class TopSuggestDocsCollector extends SimpleCollector { }); List hits = new ArrayList<>(); - + for (SuggestScoreDoc hit : pendingResults) { if (seenSurfaceForms.contains(hit.key) == false) { seenSurfaceForms.add(hit.key); @@ -179,24 +183,21 @@ public class TopSuggestDocsCollector extends SimpleCollector { } if (suggestScoreDocs.length > 0) { - return new TopSuggestDocs(new TotalHits(suggestScoreDocs.length, TotalHits.Relation.EQUAL_TO), suggestScoreDocs); + return new TopSuggestDocs( + new TotalHits(suggestScoreDocs.length, TotalHits.Relation.EQUAL_TO), suggestScoreDocs); } else { return TopSuggestDocs.EMPTY; } } - /** - * Ignored - */ + /** Ignored */ @Override public void collect(int doc) throws IOException { // {@link #collect(int, CharSequence, CharSequence, long)} is used // instead } - /** - * Ignored - */ + /** Ignored */ @Override public ScoreMode scoreMode() { return ScoreMode.COMPLETE; diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/package-info.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/package-info.java index 33025644102..ff71bc146c8 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/package-info.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/package-info.java @@ -15,7 +15,5 @@ * limitations under the License. 
*/ -/** - * Support for document suggestion - */ +/** Support for document suggestion */ package org.apache.lucene.search.suggest.document; diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/BytesRefSorter.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/BytesRefSorter.java index 92b669890d8..f29f0c7f3df 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/BytesRefSorter.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/BytesRefSorter.java @@ -18,36 +18,34 @@ package org.apache.lucene.search.suggest.fst; import java.io.IOException; import java.util.Comparator; - import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; /** * Collects {@link BytesRef} and then allows one to iterate over their sorted order. Implementations * of this interface will be called in a single-threaded scenario. + * * @lucene.experimental - * @lucene.internal + * @lucene.internal */ public interface BytesRefSorter { /** * Adds a single suggestion entry (possibly compound with its bucket). - * + * * @throws IOException If an I/O exception occurs. - * @throws IllegalStateException If an addition attempt is performed after - * a call to {@link #iterator()} has been made. + * @throws IllegalStateException If an addition attempt is performed after a call to {@link + * #iterator()} has been made. */ void add(BytesRef utf8) throws IOException, IllegalStateException; /** - * Sorts the entries added in {@link #add(BytesRef)} and returns - * an iterator over all sorted entries. - * + * Sorts the entries added in {@link #add(BytesRef)} and returns an iterator over all sorted + * entries. + * * @throws IOException If an I/O exception occurs. */ - BytesRefIterator iterator() throws IOException; - - /** - * Comparator used to determine the sort order of entries. - */ - Comparator getComparator(); + BytesRefIterator iterator() throws IOException; + + /** Comparator used to determine the sort order of entries. */ + Comparator getComparator(); } diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/ExternalRefSorter.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/ExternalRefSorter.java index 5b7e7142349..08940d87fcc 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/ExternalRefSorter.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/ExternalRefSorter.java @@ -19,7 +19,6 @@ package org.apache.lucene.search.suggest.fst; import java.io.Closeable; import java.io.IOException; import java.util.Comparator; - import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexOutput; @@ -30,6 +29,7 @@ import org.apache.lucene.util.OfflineSorter; /** * Builds and iterates over sequences stored on disk. + * * @lucene.experimental * @lucene.internal */ @@ -38,16 +38,17 @@ public class ExternalRefSorter implements BytesRefSorter, Closeable { private OfflineSorter.ByteSequencesWriter writer; private IndexOutput input; private String sortedFileName; - - /** - * Will buffer all sequences to a temporary file and then sort (all on-disk). - */ + + /** Will buffer all sequences to a temporary file and then sort (all on-disk). 
*/ public ExternalRefSorter(OfflineSorter sorter) throws IOException { this.sorter = sorter; - this.input = sorter.getDirectory().createTempOutput(sorter.getTempFileNamePrefix(), "RefSorterRaw", IOContext.DEFAULT); + this.input = + sorter + .getDirectory() + .createTempOutput(sorter.getTempFileNamePrefix(), "RefSorterRaw", IOContext.DEFAULT); this.writer = new OfflineSorter.ByteSequencesWriter(this.input); } - + @Override public void add(BytesRef utf8) throws IOException { if (writer == null) { @@ -55,12 +56,12 @@ public class ExternalRefSorter implements BytesRefSorter, Closeable { } writer.write(utf8); } - + @Override public BytesRefIterator iterator() throws IOException { if (sortedFileName == null) { closeWriter(); - + boolean success = false; try { sortedFileName = sorter.sort(input.getName()); @@ -72,13 +73,16 @@ public class ExternalRefSorter implements BytesRefSorter, Closeable { IOUtils.deleteFilesIgnoringExceptions(sorter.getDirectory(), input.getName()); } } - + input = null; } - - return new ByteSequenceIterator(new OfflineSorter.ByteSequencesReader(sorter.getDirectory().openChecksumInput(sortedFileName, IOContext.READONCE), sortedFileName)); + + return new ByteSequenceIterator( + new OfflineSorter.ByteSequencesReader( + sorter.getDirectory().openChecksumInput(sortedFileName, IOContext.READONCE), + sortedFileName)); } - + private void closeWriter() throws IOException { if (writer != null) { CodecUtil.writeFooter(input); @@ -86,33 +90,29 @@ public class ExternalRefSorter implements BytesRefSorter, Closeable { writer = null; } } - - /** - * Removes any written temporary files. - */ + + /** Removes any written temporary files. */ @Override public void close() throws IOException { try { closeWriter(); } finally { - IOUtils.deleteFilesIgnoringExceptions(sorter.getDirectory(), - input == null ? null : input.getName(), - sortedFileName); + IOUtils.deleteFilesIgnoringExceptions( + sorter.getDirectory(), input == null ? null : input.getName(), sortedFileName); } } - - /** - * Iterate over byte refs in a file. - */ - // TODO: this class is a bit silly ... sole purpose is to "remove" Closeable from what #iterator returns: + + /** Iterate over byte refs in a file. */ + // TODO: this class is a bit silly ... sole purpose is to "remove" Closeable from what #iterator + // returns: static class ByteSequenceIterator implements BytesRefIterator { private final OfflineSorter.ByteSequencesReader reader; private BytesRef scratch; - + public ByteSequenceIterator(OfflineSorter.ByteSequencesReader reader) { this.reader = reader; } - + @Override public BytesRef next() throws IOException { boolean success = false; diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletion.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletion.java index 3f0fb97d651..87009dedc53 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletion.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletion.java @@ -18,14 +18,13 @@ package org.apache.lucene.search.suggest.fst; import java.io.IOException; import java.util.*; - import org.apache.lucene.util.*; import org.apache.lucene.util.fst.FST; import org.apache.lucene.util.fst.FST.Arc; /** * Finite state automata based implementation of "autocomplete" functionality. - * + * * @see FSTCompletionBuilder * @lucene.experimental */ @@ -37,9 +36,7 @@ import org.apache.lucene.util.fst.FST.Arc; // TODO: support for Analyzers (infix suggestions, synonyms?) 
public class FSTCompletion { - /** - * A single completion for a given key. - */ + /** A single completion for a given key. */ public static final class Completion implements Comparable { /** UTF-8 bytes of the suggestion */ public final BytesRef utf8; @@ -63,53 +60,41 @@ public class FSTCompletion { } } - /** - * Default number of buckets. - */ + /** Default number of buckets. */ public static final int DEFAULT_BUCKETS = 10; /** - * An empty result. Keep this an {@link ArrayList} to keep all the returned - * lists of single type (monomorphic calls). + * An empty result. Keep this an {@link ArrayList} to keep all the returned lists of single type + * (monomorphic calls). */ private static final ArrayList EMPTY_RESULT = new ArrayList<>(); - /** - * Finite state automaton encoding all the lookup terms. See class notes for - * details. - */ + /** Finite state automaton encoding all the lookup terms. See class notes for details. */ private final FST automaton; /** - * An array of arcs leaving the root automaton state and encoding weights of - * all completions in their sub-trees. + * An array of arcs leaving the root automaton state and encoding weights of all completions in + * their sub-trees. */ private final Arc[] rootArcs; - /** - * @see #FSTCompletion(FST, boolean, boolean) - */ + /** @see #FSTCompletion(FST, boolean, boolean) */ private boolean exactFirst; - /** - * @see #FSTCompletion(FST, boolean, boolean) - */ + /** @see #FSTCompletion(FST, boolean, boolean) */ private boolean higherWeightsFirst; /** * Constructs an FSTCompletion, specifying higherWeightsFirst and exactFirst. - * @param automaton - * Automaton with completions. See {@link FSTCompletionBuilder}. - * @param higherWeightsFirst - * Return most popular suggestions first. This is the default - * behavior for this implementation. Setting it to false - * has no effect (use constant term weights to sort alphabetically - * only). - * @param exactFirst - * Find and push an exact match to the first position of the result - * list if found. + * + * @param automaton Automaton with completions. See {@link FSTCompletionBuilder}. + * @param higherWeightsFirst Return most popular suggestions first. This is the default behavior + * for this implementation. Setting it to false has no effect (use constant term + * weights to sort alphabetically only). + * @param exactFirst Find and push an exact match to the first position of the result list if + * found. */ - @SuppressWarnings({"unchecked","rawtypes"}) + @SuppressWarnings({"unchecked", "rawtypes"}) public FSTCompletion(FST automaton, boolean higherWeightsFirst, boolean exactFirst) { this.automaton = automaton; if (automaton != null) { @@ -123,17 +108,15 @@ public class FSTCompletion { /** * Defaults to higher weights first and exact first. + * * @see #FSTCompletion(FST, boolean, boolean) */ public FSTCompletion(FST automaton) { this(automaton, true, true); } - /** - * Cache the root node's output arcs starting with completions with the - * highest weights. - */ - @SuppressWarnings({"unchecked","rawtypes"}) + /** Cache the root node's output arcs starting with completions with the highest weights. */ + @SuppressWarnings({"unchecked", "rawtypes"}) private static Arc[] cacheRootArcs(FST automaton) { try { List> rootArcs = new ArrayList<>(); @@ -145,7 +128,7 @@ public class FSTCompletion { if (arc.isLast()) break; automaton.readNextArc(arc, fstReader); } - + Collections.reverse(rootArcs); // we want highest weights first. 
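      // readFirstTargetArc/readNextArc walk the root arcs in increasing label order;
      // the label here is the weight-bucket byte prepended to each term, so after the
      // reverse above the arc for the highest bucket (highest weight) sits at index 0.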
return rootArcs.toArray(new Arc[rootArcs.size()]); } catch (IOException e) { @@ -154,21 +137,14 @@ public class FSTCompletion { } /** - * Returns the first exact match by traversing root arcs, starting from the - * arc rootArcIndex. - * - * @param rootArcIndex - * The first root arc index in {@link #rootArcs} to consider when - * matching. - * - * @param utf8 - * The sequence of utf8 bytes to follow. - * - * @return Returns the bucket number of the match or -1 if no - * match was found. + * Returns the first exact match by traversing root arcs, starting from the arc rootArcIndex + * . + * + * @param rootArcIndex The first root arc index in {@link #rootArcs} to consider when matching. + * @param utf8 The sequence of utf8 bytes to follow. + * @return Returns the bucket number of the match or -1 if no match was found. */ - private int getExactMatchStartingFromRootArc( - int rootArcIndex, BytesRef utf8) { + private int getExactMatchStartingFromRootArc(int rootArcIndex, BytesRef utf8) { // Get the UTF-8 bytes representation of the input key. try { final FST.Arc scratch = new FST.Arc<>(); @@ -176,7 +152,7 @@ public class FSTCompletion { for (; rootArcIndex < rootArcs.length; rootArcIndex++) { final FST.Arc rootArc = rootArcs[rootArcIndex]; final FST.Arc arc = scratch.copyFrom(rootArc); - + // Descend into the automaton using the key as prefix. if (descendWithPrefix(arc, utf8)) { automaton.readFirstTargetArc(arc, arc, fstReader); @@ -190,20 +166,18 @@ public class FSTCompletion { // Should never happen, but anyway. throw new RuntimeException(e); } - + // No match. return -1; } - + /** * Lookup suggestions to key. - * - * @param key - * The prefix to which suggestions should be sought. - * @param num - * At most this number of suggestions will be returned. - * @return Returns the suggestions, sorted by their approximated weight first - * (decreasing) and then alphabetically (UTF-8 codepoint order). + * + * @param key The prefix to which suggestions should be sought. + * @param num At most this number of suggestions will be returned. + * @return Returns the suggestions, sorted by their approximated weight first (decreasing) and + * then alphabetically (UTF-8 codepoint order). */ public List lookup(CharSequence key, int num) { if (key.length() == 0 || automaton == null) { @@ -229,12 +203,10 @@ public class FSTCompletion { } /** - * Lookup suggestions sorted alphabetically if weights are not - * constant. This is a workaround: in general, use constant weights for - * alphabetically sorted result. + * Lookup suggestions sorted alphabetically if weights are not constant. This is a + * workaround: in general, use constant weights for alphabetically sorted result. */ - private List lookupSortedAlphabetically(BytesRef key, int num) - throws IOException { + private List lookupSortedAlphabetically(BytesRef key, int num) throws IOException { // Greedily get num results from each weight branch. List res = lookupSortedByWeight(key, num, true); @@ -248,15 +220,13 @@ public class FSTCompletion { /** * Lookup suggestions sorted by weight (descending order). - * - * @param collectAll - * If true, the routine terminates immediately when - * num suggestions have been collected. If - * false, it will collect suggestions from all weight - * arcs (needed for {@link #lookupSortedAlphabetically}. + * + * @param collectAll If true, the routine terminates immediately when num + * suggestions have been collected. If false, it will collect suggestions + * from all weight arcs (needed for {@link #lookupSortedAlphabetically}. 
*/ - private ArrayList lookupSortedByWeight(BytesRef key, - int num, boolean collectAll) throws IOException { + private ArrayList lookupSortedByWeight(BytesRef key, int num, boolean collectAll) + throws IOException { // Don't overallocate the results buffers. This also serves the purpose of // allowing the user of this class to request all matches using Integer.MAX_VALUE as // the number of results. @@ -295,20 +265,20 @@ public class FSTCompletion { } return res; } - + /** * Checks if the list of * {@link org.apache.lucene.search.suggest.Lookup.LookupResult}s already has a * key. If so, reorders that * {@link org.apache.lucene.search.suggest.Lookup.LookupResult} to the first * position. - * + * * @return Returns true if and only if list contained * key. */ private boolean checkExistingAndReorder(ArrayList list, BytesRef key) { // We assume list does not have duplicates (because of how the FST is created). - for (int i = list.size(); --i >= 0;) { + for (int i = list.size(); --i >= 0; ) { if (key.equals(list.get(i).utf8)) { // Key found. Unless already at i==0, remove it and push up front so // that the ordering @@ -319,21 +289,16 @@ public class FSTCompletion { } return false; } - + /** - * Descend along the path starting at arc and going through bytes - * in the argument. - * - * @param arc - * The starting arc. This argument is modified in-place. - * @param utf8 - * The term to descend along. - * @return If true, arc will be set to the arc - * matching last byte of term. false is - * returned if no such prefix exists. + * Descend along the path starting at arc and going through bytes in the argument. + * + * @param arc The starting arc. This argument is modified in-place. + * @param utf8 The term to descend along. + * @return If true, arc will be set to the arc matching last byte of + * term. false is returned if no such prefix exists. */ - private boolean descendWithPrefix(Arc arc, BytesRef utf8) - throws IOException { + private boolean descendWithPrefix(Arc arc, BytesRef utf8) throws IOException { final int max = utf8.offset + utf8.length; // Cannot save as instance var since multiple threads // can use FSTCompletion at once... @@ -346,16 +311,15 @@ public class FSTCompletion { } return true; } - + /** - * Recursive collect lookup results from the automaton subgraph starting at - * arc. - * - * @param num - * Maximum number of results needed (early termination). + * Recursive collect lookup results from the automaton subgraph starting at arc. + * + * @param num Maximum number of results needed (early termination). */ - private boolean collect(List res, int num, int bucket, - BytesRef output, Arc arc) throws IOException { + private boolean collect( + List res, int num, int bucket, BytesRef output, Arc arc) + throws IOException { if (output.length == output.bytes.length) { output.bytes = ArrayUtil.grow(output.bytes); } @@ -374,7 +338,7 @@ public class FSTCompletion { } output.length = save; } - + if (arc.isLast()) { break; } @@ -383,24 +347,20 @@ public class FSTCompletion { return false; } - /** - * Returns the bucket count (discretization thresholds). - */ + /** Returns the bucket count (discretization thresholds). */ public int getBucketCount() { return rootArcs.length; } /** - * Returns the bucket assigned to a given key (if found) or -1 if - * no exact match exists. + * Returns the bucket assigned to a given key (if found) or -1 if no exact match + * exists. 
*/ public int getBucket(CharSequence key) { return getExactMatchStartingFromRootArc(0, new BytesRef(key)); } - /** - * Returns the internal automaton. - */ + /** Returns the internal automaton. */ public FST getFST() { return automaton; } diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletionBuilder.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletionBuilder.java index e3237db3f74..7661933a4e7 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletionBuilder.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletionBuilder.java @@ -19,7 +19,6 @@ package org.apache.lucene.search.suggest.fst; import java.io.Closeable; import java.io.IOException; import java.util.Comparator; - import org.apache.lucene.search.suggest.InMemorySorter; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; @@ -29,125 +28,102 @@ import org.apache.lucene.util.fst.*; /** * Finite state automata based implementation of "autocomplete" functionality. - * + * *

 * <h2>Implementation details</h2>
- * 
- * <p>
- * The construction step in {@link #finalize()} works as follows:
- * <ul>
- * <li>A set of input terms and their buckets is given.</li>
- * <li>All terms in the input are prefixed with a synthetic pseudo-character
- * (code) of the weight bucket the term fell into. For example a term
- * <code>abc</code> with a discretized weight equal '1' would become
- * <code>1abc</code>.</li>
- * <li>The terms are then sorted by their raw value of UTF-8 character values
- * (including the synthetic bucket code in front).</li>
- * <li>A finite state automaton ({@link FST}) is constructed from the input. The
- * root node has arcs labeled with all possible weights. We cache all these
- * arcs, highest-weight first.</li>
- * </ul>
+ *
+ * <p>The construction step in {@link #finalize()} works as follows:
+ *
+ * <ul>
+ *   <li>A set of input terms and their buckets is given.
+ *   <li>All terms in the input are prefixed with a synthetic pseudo-character (code) of the weight
+ *       bucket the term fell into. For example a term <code>abc</code> with a discretized weight
+ *       equal '1' would become <code>1abc</code>.
+ *   <li>The terms are then sorted by their raw value of UTF-8 character values (including the
+ *       synthetic bucket code in front).
+ *   <li>A finite state automaton ({@link FST}) is constructed from the input. The root node has
+ *       arcs labeled with all possible weights. We cache all these arcs, highest-weight first.
+ * </ul>
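 *
 * <p>(Construction sketch -- an editor illustration, not part of the original javadoc;
 * the terms and bucket values are made up:
 *
 * <pre>
 * FSTCompletionBuilder builder = new FSTCompletionBuilder(); // 10 buckets by default
 * builder.add(new BytesRef("hour"), 9);    // higher bucket: suggested earlier
 * builder.add(new BytesRef("horse"), 2);
 * builder.add(new BytesRef("horizon"), 1); // bucket must be in [0, buckets)
 * FSTCompletion completion = builder.build();
 * </pre>)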

- * 
- * <p>
- * At runtime, in {@link FSTCompletion#lookup(CharSequence, int)}, 
- * the automaton is utilized as follows:
- * <ul>
- * <li>For each possible term weight encoded in the automaton (cached arcs from
- * the root above), starting with the highest one, we descend along the path of
- * the input key. If the key is not a prefix of a sequence in the automaton
- * (path ends prematurely), we exit immediately -- no completions.</li>
- * <li>Otherwise, we have found an internal automaton node that ends the key.
- * <b>The entire subautomaton (all paths) starting from this node form the key's
- * completions.</b> We start the traversal of this subautomaton. Every time we
- * reach a final state (arc), we add a single suggestion to the list of results
- * (the weight of this suggestion is constant and equal to the root path we
- * started from). The tricky part is that because automaton edges are sorted and
- * we scan depth-first, we can terminate the entire procedure as soon as we
- * collect enough suggestions the user requested.</li>
- * <li>In case the number of suggestions collected in the step above is still
- * insufficient, we proceed to the next (smaller) weight leaving the root node
- * and repeat the same algorithm again.</li>
- * </ul>
+ *
+ * <p>At runtime, in {@link FSTCompletion#lookup(CharSequence, int)}, the automaton is utilized as
+ * follows:
+ *
+ * <ul>
+ *   <li>For each possible term weight encoded in the automaton (cached arcs from the root above),
+ *       starting with the highest one, we descend along the path of the input key. If the key is
+ *       not a prefix of a sequence in the automaton (path ends prematurely), we exit immediately --
+ *       no completions.
+ *   <li>Otherwise, we have found an internal automaton node that ends the key. <b>The entire
+ *       subautomaton (all paths) starting from this node form the key's completions.</b> We start
+ *       the traversal of this subautomaton. Every time we reach a final state (arc), we add a
+ *       single suggestion to the list of results (the weight of this suggestion is constant and
+ *       equal to the root path we started from). The tricky part is that because automaton edges
+ *       are sorted and we scan depth-first, we can terminate the entire procedure as soon as we
+ *       collect enough suggestions the user requested.
+ *   <li>In case the number of suggestions collected in the step above is still insufficient, we
+ *       proceed to the next (smaller) weight leaving the root node and repeat the same algorithm
+ *       again.
+ * </ul>
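 *
 * <p>(Lookup sketch -- an editor illustration continuing the hypothetical builder example
 * above; Completion.bucket is assumed to carry the discretized weight:
 *
 * <pre>
 * List<Completion> top = completion.lookup("ho", 5); // at most 5 suggestions, best first
 * for (Completion c : top) {
 *   // c.utf8 holds the suggestion bytes, c.bucket its weight bucket
 * }
 * </pre>)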
- * 
 * <h2>Runtime behavior and performance characteristic</h2>
- * 
- * The algorithm described above is optimized for finding suggestions to short
- * prefixes in a top-weights-first order. This is probably the most common use
- * case: it allows presenting suggestions early and sorts them by the global
- * frequency (and then alphabetically).
- * 
- * <p>
- * If there is an exact match in the automaton, it is returned first on the
- * results list (even with by-weight sorting).
- * 
- * <p>
- * Note that the maximum lookup time for any prefix is the time of
- * descending to the subtree, plus traversal of the subtree up to the number of
- * requested suggestions (because they are already presorted by weight on the
- * root level and alphabetically at any node level).
- * 
- * <p>
- * To order alphabetically only (no ordering by priorities), use identical term
- * weights for all terms. Alphabetical suggestions are returned even if
- * non-constant weights are used, but the algorithm for doing this is
- * suboptimal.
- * 
- * <p>
- * "alphabetically" in any of the documentation above indicates UTF-8
- * representation order, nothing else.
- * 
- * <p>
- * <b>NOTE</b>: the FST file format is experimental and subject to suddenly
- * change, requiring you to rebuild the FST suggest index.
- * 
+ *
+ * The algorithm described above is optimized for finding suggestions to short prefixes in a
+ * top-weights-first order. This is probably the most common use case: it allows presenting
+ * suggestions early and sorts them by the global frequency (and then alphabetically).
+ *
+ * <p>If there is an exact match in the automaton, it is returned first on the results list (even
+ * with by-weight sorting).
+ *
+ * <p>Note that the maximum lookup time for any prefix is the time of descending to the
+ * subtree, plus traversal of the subtree up to the number of requested suggestions (because they
+ * are already presorted by weight on the root level and alphabetically at any node level).
+ *
+ * <p>To order alphabetically only (no ordering by priorities), use identical term weights for all
+ * terms. Alphabetical suggestions are returned even if non-constant weights are used, but the
+ * algorithm for doing this is suboptimal.
+ *
+ * <p>"alphabetically" in any of the documentation above indicates UTF-8 representation order,
+ * nothing else.
+ *
+ * <p><b>NOTE</b>: the FST file format is experimental and subject to suddenly change, requiring you
+ * to rebuild the FST suggest index.
+ *
 * @see FSTCompletion
 * @lucene.experimental
 */
public class FSTCompletionBuilder {
-  /**
-   * Default number of buckets.
-   */
+  /** Default number of buckets. */
  public static final int DEFAULT_BUCKETS = 10;

  /**
-   * The number of separate buckets for weights (discretization). The more
-   * buckets, the more fine-grained term weights (priorities) can be assigned.
-   * The speed of lookup will not decrease for prefixes which have
-   * highly-weighted completions (because these are filled-in first), but will
-   * decrease significantly for low-weighted terms (but these should be
-   * infrequent, so it is all right).
-   * 
-   * <p>

-   * The number of buckets must be within [1, 255] range.
+   * The number of separate buckets for weights (discretization). The more buckets, the more
+   * fine-grained term weights (priorities) can be assigned. The speed of lookup will not decrease
+   * for prefixes which have highly-weighted completions (because these are filled-in first), but
+   * will decrease significantly for low-weighted terms (but these should be infrequent, so it is
+   * all right).
+   *
+   * <p>

    The number of buckets must be within [1, 255] range. */ private final int buckets; - /** - * Finite state automaton encoding all the lookup terms. See class notes for - * details. - */ + /** Finite state automaton encoding all the lookup terms. See class notes for details. */ FST automaton; /** - * FST construction require re-sorting the input. This is the class that - * collects all the input entries, their weights and then provides sorted - * order. + * FST construction require re-sorting the input. This is the class that collects all the input + * entries, their weights and then provides sorted order. */ private final BytesRefSorter sorter; - - /** - * Scratch buffer for {@link #add(BytesRef, int)}. - */ + + /** Scratch buffer for {@link #add(BytesRef, int)}. */ private final BytesRefBuilder scratch = new BytesRefBuilder(); - /** - * Max tail sharing length. - */ + /** Max tail sharing length. */ private final int shareMaxTailLength; /** - * Creates an {@link FSTCompletion} with default options: 10 buckets, exact match - * promoted to first position and {@link InMemorySorter} with a comparator obtained from - * {@link Comparator#naturalOrder()}. + * Creates an {@link FSTCompletion} with default options: 10 buckets, exact match promoted to + * first position and {@link InMemorySorter} with a comparator obtained from {@link + * Comparator#naturalOrder()}. */ public FSTCompletionBuilder() { this(DEFAULT_BUCKETS, new InMemorySorter(Comparator.naturalOrder()), Integer.MAX_VALUE); @@ -155,34 +131,27 @@ public class FSTCompletionBuilder { /** * Creates an FSTCompletion with the specified options. - * @param buckets - * The number of buckets for weight discretization. Buckets are used - * in {@link #add(BytesRef, int)} and must be smaller than the number - * given here. - * - * @param sorter - * {@link BytesRefSorter} used for re-sorting input for the automaton. - * For large inputs, use on-disk sorting implementations. The sorter - * is closed automatically in {@link #build()} if it implements - * {@link Closeable}. - * - * @param shareMaxTailLength - * Max shared suffix sharing length. - * - * See the description of this parameter in {@link org.apache.lucene.util.fst.FSTCompiler.Builder}. - * In general, for very large inputs you'll want to construct a non-minimal - * automaton which will be larger, but the construction will take far less ram. - * For minimal automata, set it to {@link Integer#MAX_VALUE}. + * + * @param buckets The number of buckets for weight discretization. Buckets are used in {@link + * #add(BytesRef, int)} and must be smaller than the number given here. + * @param sorter {@link BytesRefSorter} used for re-sorting input for the automaton. For large + * inputs, use on-disk sorting implementations. The sorter is closed automatically in {@link + * #build()} if it implements {@link Closeable}. + * @param shareMaxTailLength Max shared suffix sharing length. + *

    See the description of this parameter in {@link + * org.apache.lucene.util.fst.FSTCompiler.Builder}. In general, for very large inputs you'll + * want to construct a non-minimal automaton which will be larger, but the construction will + * take far less ram. For minimal automata, set it to {@link Integer#MAX_VALUE}. */ public FSTCompletionBuilder(int buckets, BytesRefSorter sorter, int shareMaxTailLength) { if (buckets < 1 || buckets > 255) { - throw new IllegalArgumentException("Buckets must be >= 1 and <= 255: " - + buckets); + throw new IllegalArgumentException("Buckets must be >= 1 and <= 255: " + buckets); } - - if (sorter == null) throw new IllegalArgumentException( - "BytesRefSorter must not be null."); - + + if (sorter == null) { + throw new IllegalArgumentException("BytesRefSorter must not be null."); + } + this.sorter = sorter; this.buckets = buckets; this.shareMaxTailLength = shareMaxTailLength; @@ -190,22 +159,19 @@ public class FSTCompletionBuilder { /** * Appends a single suggestion and its weight to the internal buffers. - * - * @param utf8 - * The suggestion (utf8 representation) to be added. The content is - * copied and the object can be reused. - * @param bucket - * The bucket to place this suggestion in. Must be non-negative and - * smaller than the number of buckets passed in the constructor. - * Higher numbers indicate suggestions that should be presented - * before suggestions placed in smaller buckets. + * + * @param utf8 The suggestion (utf8 representation) to be added. The content is copied and the + * object can be reused. + * @param bucket The bucket to place this suggestion in. Must be non-negative and smaller than the + * number of buckets passed in the constructor. Higher numbers indicate suggestions that + * should be presented before suggestions placed in smaller buckets. */ public void add(BytesRef utf8, int bucket) throws IOException { if (bucket < 0 || bucket >= buckets) { throw new IllegalArgumentException( "Bucket outside of the allowed range [0, " + buckets + "): " + bucket); } - + scratch.grow(utf8.length + 10); scratch.clear(); scratch.append((byte) bucket); @@ -214,8 +180,8 @@ public class FSTCompletionBuilder { } /** - * Builds the final automaton from a list of added entries. This method may - * take a longer while as it needs to build the automaton. + * Builds the final automaton from a list of added entries. This method may take a longer while as + * it needs to build the automaton. */ public FSTCompletion build() throws IOException { this.automaton = buildAutomaton(sorter); @@ -227,29 +193,29 @@ public class FSTCompletionBuilder { return new FSTCompletion(automaton); } - /** - * Builds the final automaton from a list of entries. - */ + /** Builds the final automaton from a list of entries. */ private FST buildAutomaton(BytesRefSorter sorter) throws IOException { // Build the automaton. 
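    // The sorter's iterator() returns entries already in BytesRef order (bucket byte
    // first), so the compareTo check below drops consecutive duplicates in one pass;
    // NoOutputs is used because the bucket byte prepended in add(BytesRef, int) already
    // encodes the weight, leaving nothing to store as a per-term output.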
final Outputs outputs = NoOutputs.getSingleton(); final Object empty = outputs.getNoOutput(); - final FSTCompiler fstCompiler = new FSTCompiler.Builder<>(FST.INPUT_TYPE.BYTE1, outputs) - .shareMaxTailLength(shareMaxTailLength).build(); + final FSTCompiler fstCompiler = + new FSTCompiler.Builder<>(FST.INPUT_TYPE.BYTE1, outputs) + .shareMaxTailLength(shareMaxTailLength) + .build(); BytesRefBuilder scratch = new BytesRefBuilder(); BytesRef entry; final IntsRefBuilder scratchIntsRef = new IntsRefBuilder(); int count = 0; BytesRefIterator iter = sorter.iterator(); - while((entry = iter.next()) != null) { + while ((entry = iter.next()) != null) { count++; if (scratch.get().compareTo(entry) != 0) { fstCompiler.add(Util.toIntsRef(entry, scratchIntsRef), empty); scratch.copyBytes(entry); } } - + return count == 0 ? null : fstCompiler.compile(); } } diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletionLookup.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletionLookup.java index 547b3268acb..fcf72b75e1f 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletionLookup.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletionLookup.java @@ -22,7 +22,6 @@ import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Set; - import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.search.suggest.InputIterator; import org.apache.lucene.search.suggest.Lookup; @@ -48,45 +47,43 @@ import org.apache.lucene.util.fst.NoOutputs; /** * An adapter from {@link Lookup} API to {@link FSTCompletion}. - * - *

- * This adapter differs from {@link FSTCompletion} in that it attempts
- * to discretize any "weights" as passed from in {@link InputIterator#weight()}
- * to match the number of buckets. For the rationale for bucketing, see
- * {@link FSTCompletion}.
- * 
+ *
+ * <p>This adapter differs from {@link FSTCompletion} in that it attempts to discretize any
+ * "weights" as passed from in {@link InputIterator#weight()} to match the number of buckets. For
+ * the rationale for bucketing, see {@link FSTCompletion}.
+ *
 * <p><b>Note:</b>Discretization requires an additional sorting pass.
- * 
- * <p>
- * The range of weights for bucketing/ discretization is determined
- * by sorting the input by weight and then dividing into
- * equal ranges. Then, scores within each range are assigned to that bucket.
- * 
- * <p>
- * Note that this means that even large differences in weights may be lost
- * during automaton construction, but the overall distinction between "classes"
- * of weights will be preserved regardless of the distribution of weights.
- * 
- * <p>
- * For fine-grained control over which weights are assigned to which buckets,
- * use {@link FSTCompletion} directly or {@link TSTLookup}, for example.
- * 
+ *
+ * <p>The range of weights for bucketing/ discretization is determined by sorting the input by
+ * weight and then dividing into equal ranges. Then, scores within each range are assigned to that
+ * bucket.
+ *
+ * <p>Note that this means that even large differences in weights may be lost during automaton
+ * construction, but the overall distinction between "classes" of weights will be preserved
+ * regardless of the distribution of weights.
+ *
+ * <p>
    For fine-grained control over which weights are assigned to which buckets, use {@link + * FSTCompletion} directly or {@link TSTLookup}, for example. + * * @see FSTCompletion * @lucene.experimental */ public class FSTCompletionLookup extends Lookup implements Accountable { - /** - * An invalid bucket count if we're creating an object - * of this class from an existing FST. - * + /** + * An invalid bucket count if we're creating an object of this class from an existing FST. + * * @see #FSTCompletionLookup(Directory, String, FSTCompletion, boolean) */ private static int INVALID_BUCKETS_COUNT = -1; - + /** - * Shared tail length for conflating in the created automaton. Setting this - * to larger values ({@link Integer#MAX_VALUE}) will create smaller (or minimal) - * automata at the cost of RAM for keeping nodes hash in the {@link FST}. - * + * Shared tail length for conflating in the created automaton. Setting this to larger values + * ({@link Integer#MAX_VALUE}) will create smaller (or minimal) automata at the cost of RAM for + * keeping nodes hash in the {@link FST}. + * *

    Empirical pick. */ - private final static int sharedTailLength = 5; + private static final int sharedTailLength = 5; private final Directory tempDir; private final String tempFileNamePrefix; @@ -94,50 +91,42 @@ public class FSTCompletionLookup extends Lookup implements Accountable { private int buckets; private boolean exactMatchFirst; - /** - * Automaton used for completions with higher weights reordering. - */ + /** Automaton used for completions with higher weights reordering. */ private FSTCompletion higherWeightsCompletion; - /** - * Automaton used for normal completions. - */ + /** Automaton used for normal completions. */ private FSTCompletion normalCompletion; /** Number of entries the lookup was built with */ private long count = 0; - /** - * This constructor should only be used to read a previously saved suggester. - */ + /** This constructor should only be used to read a previously saved suggester. */ public FSTCompletionLookup() { this(null, null); } /** - * This constructor prepares for creating a suggested FST using the - * {@link #build(InputIterator)} method. The number of weight - * discretization buckets is set to {@link FSTCompletion#DEFAULT_BUCKETS} and - * exact matches are promoted to the top of the suggestions list. + * This constructor prepares for creating a suggested FST using the {@link #build(InputIterator)} + * method. The number of weight discretization buckets is set to {@link + * FSTCompletion#DEFAULT_BUCKETS} and exact matches are promoted to the top of the suggestions + * list. */ public FSTCompletionLookup(Directory tempDir, String tempFileNamePrefix) { this(tempDir, tempFileNamePrefix, FSTCompletion.DEFAULT_BUCKETS, true); } /** - * This constructor prepares for creating a suggested FST using the - * {@link #build(InputIterator)} method. - * - * @param buckets - * The number of weight discretization buckets (see - * {@link FSTCompletion} for details). - * - * @param exactMatchFirst - * If true exact matches are promoted to the top of the - * suggestions list. Otherwise they appear in the order of - * discretized weight and alphabetical within the bucket. + * This constructor prepares for creating a suggested FST using the {@link #build(InputIterator)} + * method. + * + * @param buckets The number of weight discretization buckets (see {@link FSTCompletion} for + * details). + * @param exactMatchFirst If true exact matches are promoted to the top of the + * suggestions list. Otherwise they appear in the order of discretized weight and alphabetical + * within the bucket. */ - public FSTCompletionLookup(Directory tempDir, String tempFileNamePrefix, int buckets, boolean exactMatchFirst) { + public FSTCompletionLookup( + Directory tempDir, String tempFileNamePrefix, int buckets, boolean exactMatchFirst) { this.buckets = buckets; this.exactMatchFirst = exactMatchFirst; this.tempDir = tempDir; @@ -146,20 +135,20 @@ public class FSTCompletionLookup extends Lookup implements Accountable { /** * This constructor takes a pre-built automaton. - * - * @param completion - * An instance of {@link FSTCompletion}. - * @param exactMatchFirst - * If true exact matches are promoted to the top of the - * suggestions list. Otherwise they appear in the order of - * discretized weight and alphabetical within the bucket. + * + * @param completion An instance of {@link FSTCompletion}. + * @param exactMatchFirst If true exact matches are promoted to the top of the + * suggestions list. 
Otherwise they appear in the order of discretized weight and alphabetical + * within the bucket. */ - public FSTCompletionLookup(Directory tempDir, String tempFileNamePrefix, FSTCompletion completion, boolean exactMatchFirst) { + public FSTCompletionLookup( + Directory tempDir, + String tempFileNamePrefix, + FSTCompletion completion, + boolean exactMatchFirst) { this(tempDir, tempFileNamePrefix, INVALID_BUCKETS_COUNT, exactMatchFirst); - this.normalCompletion = new FSTCompletion( - completion.getFST(), false, exactMatchFirst); - this.higherWeightsCompletion = new FSTCompletion( - completion.getFST(), true, exactMatchFirst); + this.normalCompletion = new FSTCompletion(completion.getFST(), false, exactMatchFirst); + this.higherWeightsCompletion = new FSTCompletion(completion.getFST(), true, exactMatchFirst); } @Override @@ -173,7 +162,8 @@ public class FSTCompletionLookup extends Lookup implements Accountable { OfflineSorter sorter = new OfflineSorter(tempDir, tempFileNamePrefix); ExternalRefSorter externalSorter = new ExternalRefSorter(sorter); - IndexOutput tempInput = tempDir.createTempOutput(tempFileNamePrefix, "input", IOContext.DEFAULT); + IndexOutput tempInput = + tempDir.createTempOutput(tempFileNamePrefix, "input", IOContext.DEFAULT); String tempSortedFileName = null; OfflineSorter.ByteSequencesWriter writer = new OfflineSorter.ByteSequencesWriter(tempInput); @@ -183,7 +173,7 @@ public class FSTCompletionLookup extends Lookup implements Accountable { // If negative floats are allowed some trickery needs to be done to find their byte order. count = 0; try { - byte [] buffer = new byte [0]; + byte[] buffer = new byte[0]; ByteArrayDataOutput output = new ByteArrayDataOutput(buffer); BytesRef spare; int inputLineCount = 0; @@ -206,10 +196,13 @@ public class FSTCompletionLookup extends Lookup implements Accountable { tempSortedFileName = sorter.sort(tempInput.getName()); tempDir.deleteFile(tempInput.getName()); - FSTCompletionBuilder builder = new FSTCompletionBuilder( - buckets, externalSorter, sharedTailLength); + FSTCompletionBuilder builder = + new FSTCompletionBuilder(buckets, externalSorter, sharedTailLength); - reader = new OfflineSorter.ByteSequencesReader(tempDir.openChecksumInput(tempSortedFileName, IOContext.READONCE), tempSortedFileName); + reader = + new OfflineSorter.ByteSequencesReader( + tempDir.openChecksumInput(tempSortedFileName, IOContext.READONCE), + tempSortedFileName); long line = 0; int previousBucket = 0; int previousScore = 0; @@ -244,25 +237,26 @@ public class FSTCompletionLookup extends Lookup implements Accountable { // The two FSTCompletions share the same automaton. 
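  // (Usage sketch -- an editor illustration, not part of the patch; "tempDir" and
  // "terms" are hypothetical:
  //
  //   FSTCompletionLookup lookup = new FSTCompletionLookup(tempDir, "suggest");
  //   lookup.build(terms); // any InputIterator of (term, weight) pairs
  //   List<LookupResult> hits = lookup.lookup("ho", null, /* higherWeightsFirst= */ true, 5);
  //
  // build() sorts the input by weight and divides it into equal ranges, so each long
  // weight collapses to one of the discretization buckets before the FST is built.)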
this.higherWeightsCompletion = builder.build(); - this.normalCompletion = new FSTCompletion( - higherWeightsCompletion.getFST(), false, exactMatchFirst); - + this.normalCompletion = + new FSTCompletion(higherWeightsCompletion.getFST(), false, exactMatchFirst); + } finally { IOUtils.closeWhileHandlingException(reader, writer, externalSorter); IOUtils.deleteFilesIgnoringExceptions(tempDir, tempInput.getName(), tempSortedFileName); } } - + /** weight -> cost */ private static int encodeWeight(long value) { if (value < Integer.MIN_VALUE || value > Integer.MAX_VALUE) { throw new UnsupportedOperationException("cannot encode value: " + value); } - return (int)value; + return (int) value; } @Override - public List lookup(CharSequence key, Set contexts, boolean higherWeightsFirst, int num) { + public List lookup( + CharSequence key, Set contexts, boolean higherWeightsFirst, int num) { if (contexts != null) { throw new IllegalArgumentException("this suggester doesn't support contexts"); } @@ -272,7 +266,7 @@ public class FSTCompletionLookup extends Lookup implements Accountable { } else { completions = normalCompletion.lookup(key, num); } - + final ArrayList results = new ArrayList<>(completions.size()); CharsRefBuilder spare = new CharsRefBuilder(); for (Completion c : completions) { @@ -283,15 +277,14 @@ public class FSTCompletionLookup extends Lookup implements Accountable { } /** - * Returns the bucket (weight) as a Long for the provided key if it exists, - * otherwise null if it does not. + * Returns the bucket (weight) as a Long for the provided key if it exists, otherwise null if it + * does not. */ public Object get(CharSequence key) { final int bucket = normalCompletion.getBucket(key); return bucket == -1 ? null : Long.valueOf(bucket); } - @Override public synchronized boolean store(DataOutput output) throws IOException { output.writeVLong(count); @@ -305,20 +298,25 @@ public class FSTCompletionLookup extends Lookup implements Accountable { @Override public synchronized boolean load(DataInput input) throws IOException { count = input.readVLong(); - this.higherWeightsCompletion = new FSTCompletion(new FST<>( - input, input, NoOutputs.getSingleton())); - this.normalCompletion = new FSTCompletion( - higherWeightsCompletion.getFST(), false, exactMatchFirst); + this.higherWeightsCompletion = + new FSTCompletion(new FST<>(input, input, NoOutputs.getSingleton())); + this.normalCompletion = + new FSTCompletion(higherWeightsCompletion.getFST(), false, exactMatchFirst); return true; } @Override public long ramBytesUsed() { - long mem = RamUsageEstimator.shallowSizeOf(this) + RamUsageEstimator.shallowSizeOf(normalCompletion) + RamUsageEstimator.shallowSizeOf(higherWeightsCompletion); + long mem = + RamUsageEstimator.shallowSizeOf(this) + + RamUsageEstimator.shallowSizeOf(normalCompletion) + + RamUsageEstimator.shallowSizeOf(higherWeightsCompletion); if (normalCompletion != null) { mem += normalCompletion.getFST().ramBytesUsed(); } - if (higherWeightsCompletion != null && (normalCompletion == null || normalCompletion.getFST() != higherWeightsCompletion.getFST())) { + if (higherWeightsCompletion != null + && (normalCompletion == null + || normalCompletion.getFST() != higherWeightsCompletion.getFST())) { // the fst should be shared between the 2 completion instances, don't count it twice mem += higherWeightsCompletion.getFST().ramBytesUsed(); } @@ -331,8 +329,11 @@ public class FSTCompletionLookup extends Lookup implements Accountable { if (normalCompletion != null) { 
resources.add(Accountables.namedAccountable("fst", normalCompletion.getFST())); } - if (higherWeightsCompletion != null && (normalCompletion == null || normalCompletion.getFST() != higherWeightsCompletion.getFST())) { - resources.add(Accountables.namedAccountable("higher weights fst", higherWeightsCompletion.getFST())); + if (higherWeightsCompletion != null + && (normalCompletion == null + || normalCompletion.getFST() != higherWeightsCompletion.getFST())) { + resources.add( + Accountables.namedAccountable("higher weights fst", higherWeightsCompletion.getFST())); } return Collections.unmodifiableList(resources); } diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/WFSTCompletionLookup.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/WFSTCompletionLookup.java index d621e24de97..1b3661bd334 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/WFSTCompletionLookup.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/WFSTCompletionLookup.java @@ -23,7 +23,6 @@ import java.util.Collections; import java.util.Comparator; import java.util.List; import java.util.Set; - import org.apache.lucene.search.suggest.InputIterator; import org.apache.lucene.search.suggest.Lookup; import org.apache.lucene.search.suggest.SortedInputIterator; @@ -40,41 +39,35 @@ import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.CharsRefBuilder; import org.apache.lucene.util.IntsRefBuilder; import org.apache.lucene.util.OfflineSorter.ByteSequencesWriter; -import org.apache.lucene.util.fst.FSTCompiler; import org.apache.lucene.util.fst.FST; import org.apache.lucene.util.fst.FST.Arc; import org.apache.lucene.util.fst.FST.BytesReader; +import org.apache.lucene.util.fst.FSTCompiler; import org.apache.lucene.util.fst.PositiveIntOutputs; import org.apache.lucene.util.fst.Util; import org.apache.lucene.util.fst.Util.Result; import org.apache.lucene.util.fst.Util.TopResults; /** - * Suggester based on a weighted FST: it first traverses the prefix, - * then walks the n shortest paths to retrieve top-ranked - * suggestions. - *

- * <b>NOTE</b>:
- * Input weights must be between 0 and {@link Integer#MAX_VALUE}, any
- * other values will be rejected.
- * 
+ * Suggester based on a weighted FST: it first traverses the prefix, then walks the <i>n</i>
+ * shortest paths to retrieve top-ranked suggestions.
+ *
+ * <p>

    NOTE: Input weights must be between 0 and {@link Integer#MAX_VALUE}, any other values + * will be rejected. + * * @lucene.experimental */ // redundant 'implements Accountable' to workaround javadocs bugs public class WFSTCompletionLookup extends Lookup implements Accountable { - - /** - * FST, weights are encoded as costs: (Integer.MAX_VALUE-weight) - */ + + /** FST, weights are encoded as costs: (Integer.MAX_VALUE-weight) */ // NOTE: like FSTSuggester, this is really a WFSA, if you want to // customize the code to add some output you should use PairOutputs. private FST fst = null; - - /** - * True if exact match suggestions should always be returned first. - */ + + /** True if exact match suggestions should always be returned first. */ private final boolean exactFirst; - + /** Number of entries the lookup was built with */ private long count = 0; @@ -82,26 +75,26 @@ public class WFSTCompletionLookup extends Lookup implements Accountable { private final String tempFileNamePrefix; /** - * Calls {@link #WFSTCompletionLookup(Directory,String,boolean) WFSTCompletionLookup(null,null,true)} + * Calls {@link #WFSTCompletionLookup(Directory,String,boolean) + * WFSTCompletionLookup(null,null,true)} */ public WFSTCompletionLookup(Directory tempDir, String tempFileNamePrefix) { this(tempDir, tempFileNamePrefix, true); } - + /** * Creates a new suggester. - * - * @param exactFirst true if suggestions that match the - * prefix exactly should always be returned first, regardless - * of score. This has no performance impact, but could result - * in low-quality suggestions. + * + * @param exactFirst true if suggestions that match the prefix exactly should always + * be returned first, regardless of score. This has no performance impact, but could result in + * low-quality suggestions. 
*/ public WFSTCompletionLookup(Directory tempDir, String tempFileNamePrefix, boolean exactFirst) { this.exactFirst = exactFirst; this.tempDir = tempDir; this.tempFileNamePrefix = tempFileNamePrefix; } - + @Override public void build(InputIterator iterator) throws IOException { if (iterator.hasPayloads()) { @@ -119,12 +112,12 @@ public class WFSTCompletionLookup extends Lookup implements Accountable { FSTCompiler fstCompiler = new FSTCompiler<>(FST.INPUT_TYPE.BYTE1, outputs); while ((scratch = iter.next()) != null) { long cost = iter.weight(); - + if (previous == null) { previous = new BytesRefBuilder(); } else if (scratch.equals(previous.get())) { continue; // for duplicate suggestions, the best weight is actually - // added + // added } Util.toIntsRef(scratch, scratchInts); fstCompiler.add(scratchInts.get(), cost); @@ -134,7 +127,6 @@ public class WFSTCompletionLookup extends Lookup implements Accountable { fst = fstCompiler.compile(); } - @Override public boolean store(DataOutput output) throws IOException { output.writeVLong(count); @@ -153,7 +145,8 @@ public class WFSTCompletionLookup extends Lookup implements Accountable { } @Override - public List lookup(CharSequence key, Set contexts, boolean onlyMorePopular, int num) { + public List lookup( + CharSequence key, Set contexts, boolean onlyMorePopular, int num) { if (contexts != null) { throw new IllegalArgumentException("this suggester doesn't support contexts"); } @@ -171,22 +164,25 @@ public class WFSTCompletionLookup extends Lookup implements Accountable { scratch.copyChars(key); int prefixLength = scratch.length(); Arc arc = new Arc<>(); - + // match the prefix portion exactly Long prefixOutput = null; try { prefixOutput = lookupPrefix(scratch.get(), arc); - } catch (IOException bogus) { throw new RuntimeException(bogus); } - + } catch (IOException bogus) { + throw new RuntimeException(bogus); + } + if (prefixOutput == null) { return Collections.emptyList(); } - + List results = new ArrayList<>(num); CharsRefBuilder spare = new CharsRefBuilder(); if (exactFirst && arc.isFinal()) { spare.copyUTF8Bytes(scratch.get()); - results.add(new LookupResult(spare.toString(), decodeWeight(prefixOutput + arc.nextFinalOutput()))); + results.add( + new LookupResult(spare.toString(), decodeWeight(prefixOutput + arc.nextFinalOutput()))); if (--num == 0) { return results; // that was quick } @@ -200,7 +196,7 @@ public class WFSTCompletionLookup extends Lookup implements Accountable { } catch (IOException bogus) { throw new RuntimeException(bogus); } - + BytesRefBuilder suffix = new BytesRefBuilder(); for (Result completion : completions) { scratch.setLength(prefixLength); @@ -212,14 +208,14 @@ public class WFSTCompletionLookup extends Lookup implements Accountable { } return results; } - - private Long lookupPrefix(BytesRef scratch, Arc arc) throws /*Bogus*/IOException { + + private Long lookupPrefix(BytesRef scratch, Arc arc) throws /*Bogus*/ IOException { assert 0 == fst.outputs.getNoOutput().longValue(); long output = 0; BytesReader bytesReader = fst.getBytesReader(); - + fst.getFirstArc(arc); - + byte[] bytes = scratch.bytes; int pos = scratch.offset; int end = pos + scratch.length; @@ -230,14 +226,11 @@ public class WFSTCompletionLookup extends Lookup implements Accountable { output += arc.output().longValue(); } } - + return output; } - - /** - * Returns the weight associated with an input string, - * or null if it does not exist. - */ + + /** Returns the weight associated with an input string, or null if it does not exist. 
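+   * (Worked example -- an editor illustration, not in the original javadoc: weights are
+   * stored as costs <code>Integer.MAX_VALUE - weight</code>, so weight 10 is stored as
+   * cost 2147483637 and decodeWeight(2147483637) yields 10 again; lower cost means
+   * higher weight, which is why the "shortest paths" search surfaces top-weighted
+   * suggestions first.)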
*/ public Object get(CharSequence key) { if (fst == null) { return null; @@ -246,36 +239,47 @@ public class WFSTCompletionLookup extends Lookup implements Accountable { Long result = null; try { result = lookupPrefix(new BytesRef(key), arc); - } catch (IOException bogus) { throw new RuntimeException(bogus); } + } catch (IOException bogus) { + throw new RuntimeException(bogus); + } if (result == null || !arc.isFinal()) { return null; } else { return Integer.valueOf(decodeWeight(result + arc.nextFinalOutput())); } } - + /** cost -> weight */ private static int decodeWeight(long encoded) { - return (int)(Integer.MAX_VALUE - encoded); + return (int) (Integer.MAX_VALUE - encoded); } - + /** weight -> cost */ private static int encodeWeight(long value) { if (value < 0 || value > Integer.MAX_VALUE) { throw new UnsupportedOperationException("cannot encode value: " + value); } - return Integer.MAX_VALUE - (int)value; + return Integer.MAX_VALUE - (int) value; } - + private static final class WFSTInputIterator extends SortedInputIterator { - WFSTInputIterator(Directory tempDir, String tempFileNamePrefix, InputIterator source) throws IOException { + WFSTInputIterator(Directory tempDir, String tempFileNamePrefix, InputIterator source) + throws IOException { super(tempDir, tempFileNamePrefix, source); assert source.hasPayloads() == false; } @Override - protected void encode(ByteSequencesWriter writer, ByteArrayDataOutput output, byte[] buffer, BytesRef spare, BytesRef payload, Set contexts, long weight) throws IOException { + protected void encode( + ByteSequencesWriter writer, + ByteArrayDataOutput output, + byte[] buffer, + BytesRef spare, + BytesRef payload, + Set contexts, + long weight) + throws IOException { if (spare.length + 4 >= buffer.length) { buffer = ArrayUtil.grow(buffer, spare.length + 4); } @@ -284,29 +288,30 @@ public class WFSTCompletionLookup extends Lookup implements Accountable { output.writeInt(encodeWeight(weight)); writer.write(buffer, 0, output.getPosition()); } - + @Override protected long decode(BytesRef scratch, ByteArrayDataInput tmpInput) { scratch.length -= 4; // int // skip suggestion: - tmpInput.reset(scratch.bytes, scratch.offset+scratch.length, 4); + tmpInput.reset(scratch.bytes, scratch.offset + scratch.length, 4); return tmpInput.readInt(); } } - - static final Comparator weightComparator = new Comparator () { - @Override - public int compare(Long left, Long right) { - return left.compareTo(right); - } - }; + + static final Comparator weightComparator = + new Comparator() { + @Override + public int compare(Long left, Long right) { + return left.compareTo(right); + } + }; /** Returns byte size of the underlying FST. */ @Override public long ramBytesUsed() { return (fst == null) ? 0 : fst.ramBytesUsed(); } - + @Override public Collection getChildResources() { if (fst == null) { diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/package-info.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/package-info.java index 24b0b4e63f4..2d7b7dbecbf 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/package-info.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/package-info.java @@ -14,8 +14,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -/** - * Finite-state based autosuggest. - */ + +/** Finite-state based autosuggest. 
*/ package org.apache.lucene.search.suggest.fst; diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellLookup.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellLookup.java index 5dce144c201..1cf3666739d 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellLookup.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellLookup.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Set; - import org.apache.lucene.search.suggest.InputIterator; import org.apache.lucene.search.suggest.Lookup; import org.apache.lucene.search.suggest.jaspell.JaspellTernarySearchTrie.TSTNode; @@ -32,9 +31,8 @@ import org.apache.lucene.util.CharsRef; import org.apache.lucene.util.CharsRefBuilder; /** - * Suggest implementation based on - * JaSpell. - * + * Suggest implementation based on JaSpell. + * * @see JaspellTernarySearchTrie * @deprecated Migrate to one of the newer suggesters which are much more RAM efficient. */ @@ -46,11 +44,12 @@ public class JaspellLookup extends Lookup implements Accountable { /** Number of entries the lookup was built with */ private long count = 0; - - /** - * Creates a new empty trie + + /** + * Creates a new empty trie + * * @see #build(InputIterator) - * */ + */ public JaspellLookup() {} @Override @@ -78,11 +77,10 @@ public class JaspellLookup extends Lookup implements Accountable { } } - /** - * Adds a new node if key already exists, - * otherwise replaces its value. - *

- * This method always returns <code>false</code>.
+ /**
+  * Adds a new node if <code>key</code> already exists, otherwise replaces its value.
+  *
+  * <p>

    This method always returns false. */ public boolean add(CharSequence key, Object value) { trie.put(key, value); @@ -90,16 +88,14 @@ public class JaspellLookup extends Lookup implements Accountable { return false; } - /** - * Returns the value for the specified key, or null - * if the key does not exist. - */ + /** Returns the value for the specified key, or null if the key does not exist. */ public Object get(CharSequence key) { return trie.get(key); } @Override - public List lookup(CharSequence key, Set contexts, boolean onlyMorePopular, int num) { + public List lookup( + CharSequence key, Set contexts, boolean onlyMorePopular, int num) { if (contexts != null) { throw new IllegalArgumentException("this suggester doesn't support contexts"); } @@ -113,13 +109,12 @@ public class JaspellLookup extends Lookup implements Accountable { } if (list == null || list.size() == 0) { return res; - } int maxCnt = Math.min(num, list.size()); if (onlyMorePopular) { LookupPriorityQueue queue = new LookupPriorityQueue(num); for (String s : list) { - long freq = ((Number)trie.get(s)).longValue(); + long freq = ((Number) trie.get(s)).longValue(); queue.insertWithOverflow(new LookupResult(new CharsRef(s), freq)); } for (LookupResult lr : queue.getResults()) { @@ -128,9 +123,9 @@ public class JaspellLookup extends Lookup implements Accountable { } else { for (int i = 0; i < maxCnt; i++) { String s = list.get(i); - long freq = ((Number)trie.get(s)).longValue(); + long freq = ((Number) trie.get(s)).longValue(); res.add(new LookupResult(new CharsRef(s), freq)); - } + } } return res; } @@ -139,7 +134,7 @@ public class JaspellLookup extends Lookup implements Accountable { private static final byte EQ_KID = 0x02; private static final byte HI_KID = 0x04; private static final byte HAS_VALUE = 0x08; - + private void readRecursively(DataInput in, TSTNode node) throws IOException { node.splitchar = in.readString().charAt(0); byte mask = in.readByte(); @@ -175,7 +170,7 @@ public class JaspellLookup extends Lookup implements Accountable { if (node.data != null) mask |= HAS_VALUE; out.writeByte(mask); if (node.data != null) { - out.writeLong(((Number)node.data).longValue()); + out.writeLong(((Number) node.data).longValue()); } writeRecursively(out, node.relatives[TSTNode.LOKID]); writeRecursively(out, node.relatives[TSTNode.EQKID]); @@ -206,7 +201,7 @@ public class JaspellLookup extends Lookup implements Accountable { public long ramBytesUsed() { return trie.ramBytesUsed(); } - + @Override public long getCount() { return count; diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellTernarySearchTrie.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellTernarySearchTrie.java index d60c6043ac1..14fe43ee2fc 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellTernarySearchTrie.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellTernarySearchTrie.java @@ -2,10 +2,10 @@ * Copyright (c) 2005 Bruno Martins * All rights reserved. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions * are met: - * 1. Redistributions of source code must retain the above copyright + * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the @@ -13,9 +13,9 @@ * 3. Neither the name of the organization nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. - * + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR @@ -37,44 +37,34 @@ import java.util.List; import java.util.Locale; import java.util.Vector; import java.util.zip.GZIPInputStream; - import org.apache.lucene.util.Accountable; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.RamUsageEstimator; /** - * Implementation of a Ternary Search Trie, a data structure for storing - * String objects that combines the compact size of a binary search - * tree with the speed of a digital search trie, and is therefore ideal for - * practical use in sorting and searching data. - * - *
- * This data structure is faster than hashing for many typical search problems, - * and supports a broader range of useful problems and operations. Ternary - * searches are faster than hashing and more powerful, too. - * </p> - * - * <p> - * The theory of ternary search trees was described at a symposium in 1997 (see - * "Fast Algorithms for Sorting and Searching Strings," by J.L. Bentley and R. - * Sedgewick, Proceedings of the 8th Annual ACM-SIAM Symposium on Discrete - * Algorithms, January 1997). Algorithms in C, Third Edition, by Robert - * Sedgewick (Addison-Wesley, 1998) provides yet another view of ternary search - * trees. - * </p> - * + * Implementation of a Ternary Search Trie, a data structure for storing String objects + * that combines the compact size of a binary search tree with the speed of a digital search trie, + * and is therefore ideal for practical use in sorting and searching data. + * + * <p>This data structure is faster than hashing for many typical search problems, and supports a + * broader range of useful problems and operations. Ternary searches are faster than hashing and + * more powerful, too. + * + * <p>
    The theory of ternary search trees was described at a symposium in 1997 (see "Fast Algorithms + * for Sorting and Searching Strings," by J.L. Bentley and R. Sedgewick, Proceedings of the 8th + * Annual ACM-SIAM Symposium on Discrete Algorithms, January 1997). Algorithms in C, Third Edition, + * by Robert Sedgewick (Addison-Wesley, 1998) provides yet another view of ternary search trees. * * @deprecated Migrate to one of the newer suggesters which are much more RAM efficient. */ @Deprecated public class JaspellTernarySearchTrie implements Accountable { - /** - * An inner class of Ternary Search Trie that represents a node in the trie. - */ + /** An inner class of Ternary Search Trie that represents a node in the trie. */ protected static final class TSTNode implements Accountable { /** Index values for accessing relatives array. */ - protected final static int PARENT = 0, LOKID = 1, EQKID = 2, HIKID = 3; + protected static final int PARENT = 0, LOKID = 1, EQKID = 2, HIKID = 3; /** The key to the node. */ protected Object data; @@ -87,11 +77,9 @@ public class JaspellTernarySearchTrie implements Accountable { /** * Constructor method. - * - *@param splitchar - * The char used in the split. - *@param parent - * The parent node. + * + * @param splitchar The char used in the split. + * @param parent The parent node. */ protected TSTNode(char splitchar, TSTNode parent) { this.splitchar = splitchar; @@ -102,7 +90,7 @@ public class JaspellTernarySearchTrie implements Accountable { public long ramBytesUsed() { long mem = RamUsageEstimator.shallowSizeOf(this) + RamUsageEstimator.shallowSizeOf(relatives); // We don't need to add parent since our parent added itself: - for (int i=1;i<4;i++) { + for (int i = 1; i < 4; i++) { TSTNode node = relatives[i]; if (node != null) { mem += node.ramBytesUsed(); @@ -114,19 +102,16 @@ public class JaspellTernarySearchTrie implements Accountable { /** * Compares characters by alphabetical order. - * - *@param cCompare2 - * The first char in the comparison. - *@param cRef - * The second char in the comparison. - *@return A negative number, 0 or a positive number if the second char is - * less, equal or greater. + * + * @param cCompare2 The first char in the comparison. + * @param cRef The second char in the comparison. + * @return A negative number, 0 or a positive number if the second char is less, equal or greater. */ private static int compareCharsAlphabetically(char cCompare2, char cRef) { return Character.toLowerCase(cCompare2) - Character.toLowerCase(cRef); } - - /* what follows is the original Jaspell code. + + /* what follows is the original Jaspell code. private static int compareCharsAlphabetically(int cCompare2, int cRef) { int cCompare = 0; if (cCompare2 >= 65) { @@ -154,82 +139,65 @@ public class JaspellTernarySearchTrie implements Accountable { } */ - /** - * The default number of values returned by the matchAlmost - * method. - */ + /** The default number of values returned by the matchAlmost method. */ private int defaultNumReturnValues = -1; - /** - * the number of differences allowed in a call to the - * matchAlmostKey method. - */ + /** the number of differences allowed in a call to the matchAlmostKey method. */ private int matchAlmostDiff; /** The base node in the trie. */ private TSTNode rootNode; - + private final Locale locale; - /** - * Constructs an empty Ternary Search Trie. - */ + /** Constructs an empty Ternary Search Trie. 
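Since the class javadoc above describes the structure only in prose, a brief usage sketch may help before the hunk continues. It assumes nothing beyond the deprecated public API declared in this file (put, get, matchPrefix, setMatchAlmostDiff, matchAlmost); the keys and weights are made up for illustration:

    import java.util.List;
    import org.apache.lucene.search.suggest.jaspell.JaspellTernarySearchTrie;

    public class TstDemo {
      public static void main(String[] args) {
        JaspellTernarySearchTrie trie = new JaspellTernarySearchTrie();
        // put() walks the lo/eq/hi links, creating intermediate nodes as needed.
        trie.put("cat", 10L);
        trie.put("cap", 4L);
        trie.put("car", 7L);
        System.out.println(trie.get("cat"));         // 10, or null for a missing key
        List prefixed = trie.matchPrefix("ca");      // raw List in this legacy API
        System.out.println(prefixed);                // [cap, car, cat], alphabetical
        trie.setMatchAlmostDiff(1);                  // allow exactly one mismatch
        System.out.println(trie.matchAlmost("cay")); // same-length near misses
      }
    }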
*/ public JaspellTernarySearchTrie() { this(Locale.ROOT); } - - /** - * Constructs an empty Ternary Search Trie, - * specifying the Locale used for lowercasing. - */ + + /** Constructs an empty Ternary Search Trie, specifying the Locale used for lowercasing. */ public JaspellTernarySearchTrie(Locale locale) { this.locale = locale; } - + // for loading void setRoot(TSTNode newRoot) { rootNode = newRoot; } - + // for saving TSTNode getRoot() { return rootNode; } /** - * Constructs a Ternary Search Trie and loads data from a Path - * into the Trie. The file is a normal text document, where each line is of - * the form word TAB float. - * - *@param file - * The Path with the data to load into the Trie. - *@exception IOException - * A problem occurred while reading the data. + * Constructs a Ternary Search Trie and loads data from a Path into the Trie. The + * file is a normal text document, where each line is of the form word TAB float. + * + * @param file The Path with the data to load into the Trie. + * @exception IOException A problem occurred while reading the data. */ public JaspellTernarySearchTrie(Path file) throws IOException { this(file, false); } /** - * Constructs a Ternary Search Trie and loads data from a File - * into the Trie. The file is a normal text document, where each line is of - * the form "word TAB float". - * - *@param file - * The File with the data to load into the Trie. - *@param compression - * If true, the file is compressed with the GZIP algorithm, and if - * false, the file is a normal text document. - *@exception IOException - * A problem occurred while reading the data. + * Constructs a Ternary Search Trie and loads data from a File into the Trie. The + * file is a normal text document, where each line is of the form "word TAB float". + * + * @param file The File with the data to load into the Trie. + * @param compression If true, the file is compressed with the GZIP algorithm, and if false, the + * file is a normal text document. + * @exception IOException A problem occurred while reading the data. */ - public JaspellTernarySearchTrie(Path file, boolean compression) - throws IOException { + public JaspellTernarySearchTrie(Path file, boolean compression) throws IOException { this(); BufferedReader in; if (compression) - in = new BufferedReader(IOUtils.getDecodingReader(new GZIPInputStream( - Files.newInputStream(file)), StandardCharsets.UTF_8)); + in = + new BufferedReader( + IOUtils.getDecodingReader( + new GZIPInputStream(Files.newInputStream(file)), StandardCharsets.UTF_8)); else in = Files.newBufferedReader(file, StandardCharsets.UTF_8); try { String word; @@ -252,8 +220,7 @@ public class JaspellTernarySearchTrie implements Accountable { int charIndex = 0; while (true) { if (currentNode == null) break; - int charComp = compareCharsAlphabetically(key.charAt(charIndex), - currentNode.splitchar); + int charComp = compareCharsAlphabetically(key.charAt(charIndex), currentNode.splitchar); if (charComp == 0) { charIndex++; if (charIndex == key.length()) { @@ -282,12 +249,11 @@ public class JaspellTernarySearchTrie implements Accountable { } /** - * Deletes the node passed in as an argument. If this node has non-null data, - * then both the node and the data will be deleted. It also deletes any other - * nodes in the trie that are no longer needed after the deletion of the node. - * - *@param nodeToDelete - * The node to delete. + * Deletes the node passed in as an argument. If this node has non-null data, then both the node + * and the data will be deleted. 
It also deletes any other nodes in the trie that are no longer + * needed after the deletion of the node. + * + * @param nodeToDelete The node to delete. */ private void deleteNode(TSTNode nodeToDelete) { if (nodeToDelete == null) { @@ -302,28 +268,24 @@ public class JaspellTernarySearchTrie implements Accountable { /** * Recursively visits each node to be deleted. - * - * To delete a node, first set its data to null, then pass it into this - * method, then pass the node returned by this method into this method (make - * sure you don't delete the data of any of the nodes returned from this - * method!) and continue in this fashion until the node returned by this - * method is null. - * - * The TSTNode instance returned by this method will be next node to be - * operated on by deleteNodeRecursion (This emulates recursive - * method call while avoiding the JVM overhead normally associated with a - * recursive method.) - * - *@param currentNode - * The node to delete. - *@return The next node to be called in deleteNodeRecursion. + * + *
<p>To delete a node, first set its data to null, then pass it into this method, then pass the + * node returned by this method into this method (make sure you don't delete the data of any of + * the nodes returned from this method!) and continue in this fashion until the node returned by + * this method is null. + * + * <p>
    The TSTNode instance returned by this method will be next node to be operated on by + * deleteNodeRecursion (This emulates recursive method call while avoiding the JVM overhead + * normally associated with a recursive method.) + * + * @param currentNode The node to delete. + * @return The next node to be called in deleteNodeRecursion. */ private TSTNode deleteNodeRecursion(TSTNode currentNode) { if (currentNode == null) { return null; } - if (currentNode.relatives[TSTNode.EQKID] != null - || currentNode.data != null) { + if (currentNode.relatives[TSTNode.EQKID] != null || currentNode.data != null) { return null; } // can't delete this node if it has a non-null eq kid or data @@ -355,10 +317,8 @@ public class JaspellTernarySearchTrie implements Accountable { currentNode.relatives[TSTNode.LOKID].relatives[TSTNode.PARENT] = currentParent; return currentParent; } - int deltaHi = currentNode.relatives[TSTNode.HIKID].splitchar - - currentNode.splitchar; - int deltaLo = currentNode.splitchar - - currentNode.relatives[TSTNode.LOKID].splitchar; + int deltaHi = currentNode.relatives[TSTNode.HIKID].splitchar - currentNode.splitchar; + int deltaLo = currentNode.splitchar - currentNode.relatives[TSTNode.LOKID].splitchar; int movingKid; TSTNode targetNode; if (deltaHi == deltaLo) { @@ -392,10 +352,9 @@ public class JaspellTernarySearchTrie implements Accountable { /** * Retrieve the object indexed by a key. - * - *@param key - * A String index. - *@return The object retrieved from the Ternary Search Trie. + * + * @param key A String index. + * @return The object retrieved from the Ternary Search Trie. */ public Object get(CharSequence key) { TSTNode node = getNode(key); @@ -406,12 +365,11 @@ public class JaspellTernarySearchTrie implements Accountable { } /** - * Retrieve the Float indexed by key, increment it by one unit - * and store the new Float. - * - *@param key - * A String index. - *@return The Float retrieved from the Ternary Search Trie. + * Retrieve the Float indexed by key, increment it by one unit and store the new + * Float. + * + * @param key A String index. + * @return The Float retrieved from the Ternary Search Trie. */ public Float getAndIncrement(String key) { String key2 = key.trim().toLowerCase(locale); @@ -431,10 +389,9 @@ public class JaspellTernarySearchTrie implements Accountable { /** * Returns the key that indexes the node argument. - * - *@param node - * The node whose index is to be calculated. - *@return The String that indexes the node argument. + * + * @param node The node whose index is to be calculated. + * @return The String that indexes the node argument. */ protected String getKey(TSTNode node) { StringBuilder getKeyBuffer = new StringBuilder(); @@ -456,28 +413,25 @@ public class JaspellTernarySearchTrie implements Accountable { } /** - * Returns the node indexed by key, or null if that node doesn't - * exist. Search begins at root node. - * - *@param key - * A String that indexes the node that is returned. - *@return The node object indexed by key. This object is an instance of an - * inner class named TernarySearchTrie.TSTNode. + * Returns the node indexed by key, or null if that node doesn't exist. Search begins + * at root node. + * + * @param key A String that indexes the node that is returned. + * @return The node object indexed by key. This object is an instance of an inner class named + * TernarySearchTrie.TSTNode. 
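The iterative contract spelled out in the deleteNodeRecursion javadoc above (clear the data, then keep feeding the returned node back in until it comes back null) is easy to misread. As a sketch only, assuming code living inside the trie class so the package-private fields and the protected getNode() are reachable, the driving loop looks like this:

    // Sketch of the caller-side loop implied by the deleteNodeRecursion
    // javadoc; not the verbatim deleteNode body.
    TSTNode node = getNode("cat");
    if (node != null) {
      node.data = null;                   // delete the data first
      while (node != null) {
        node = deleteNodeRecursion(node); // re-pass each returned node
      }
    }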
*/ public TSTNode getNode(CharSequence key) { return getNode(key, rootNode); } /** - * Returns the node indexed by key, or null if that node doesn't - * exist. The search begins at root node. - * - *@param key - * A String that indexes the node that is returned. - *@param startNode - * The top node defining the subtrie to be searched. - *@return The node object indexed by key. This object is an instance of an - * inner class named TernarySearchTrie.TSTNode. + * Returns the node indexed by key, or null if that node doesn't exist. The search + * begins at root node. + * + * @param key A String that indexes the node that is returned. + * @param startNode The top node defining the subtrie to be searched. + * @return The node object indexed by key. This object is an instance of an inner class named + * TernarySearchTrie.TSTNode. */ protected TSTNode getNode(CharSequence key, TSTNode startNode) { if (key == null || startNode == null || key.length() == 0) { @@ -489,8 +443,7 @@ public class JaspellTernarySearchTrie implements Accountable { if (currentNode == null) { return null; } - int charComp = compareCharsAlphabetically(key.charAt(charIndex), - currentNode.splitchar); + int charComp = compareCharsAlphabetically(key.charAt(charIndex), currentNode.splitchar); if (charComp == 0) { charIndex++; if (charIndex == key.length()) { @@ -506,27 +459,22 @@ public class JaspellTernarySearchTrie implements Accountable { } /** - * Returns the node indexed by key, creating that node if it doesn't exist, - * and creating any required intermediate nodes if they don't exist. - * - *@param key - * A String that indexes the node that is returned. - *@return The node object indexed by key. This object is an instance of an - * inner class named TernarySearchTrie.TSTNode. - *@exception NullPointerException - * If the key is null. - *@exception IllegalArgumentException - * If the key is an empty String. + * Returns the node indexed by key, creating that node if it doesn't exist, and creating any + * required intermediate nodes if they don't exist. + * + * @param key A String that indexes the node that is returned. + * @return The node object indexed by key. This object is an instance of an inner class named + * TernarySearchTrie.TSTNode. + * @exception NullPointerException If the key is null. + * @exception IllegalArgumentException If the key is an empty String. 
*/ - protected TSTNode getOrCreateNode(CharSequence key) throws NullPointerException, - IllegalArgumentException { + protected TSTNode getOrCreateNode(CharSequence key) + throws NullPointerException, IllegalArgumentException { if (key == null) { - throw new NullPointerException( - "attempt to get or create node with null key"); + throw new NullPointerException("attempt to get or create node with null key"); } if (key.length() == 0) { - throw new IllegalArgumentException( - "attempt to get or create node with key of zero length"); + throw new IllegalArgumentException("attempt to get or create node with key of zero length"); } if (rootNode == null) { rootNode = new TSTNode(key.charAt(0), null); @@ -534,28 +482,24 @@ public class JaspellTernarySearchTrie implements Accountable { TSTNode currentNode = rootNode; int charIndex = 0; while (true) { - int charComp = compareCharsAlphabetically(key.charAt(charIndex), - currentNode.splitchar); + int charComp = compareCharsAlphabetically(key.charAt(charIndex), currentNode.splitchar); if (charComp == 0) { charIndex++; if (charIndex == key.length()) { return currentNode; } if (currentNode.relatives[TSTNode.EQKID] == null) { - currentNode.relatives[TSTNode.EQKID] = new TSTNode(key - .charAt(charIndex), currentNode); + currentNode.relatives[TSTNode.EQKID] = new TSTNode(key.charAt(charIndex), currentNode); } currentNode = currentNode.relatives[TSTNode.EQKID]; } else if (charComp < 0) { if (currentNode.relatives[TSTNode.LOKID] == null) { - currentNode.relatives[TSTNode.LOKID] = new TSTNode(key - .charAt(charIndex), currentNode); + currentNode.relatives[TSTNode.LOKID] = new TSTNode(key.charAt(charIndex), currentNode); } currentNode = currentNode.relatives[TSTNode.LOKID]; } else { if (currentNode.relatives[TSTNode.HIKID] == null) { - currentNode.relatives[TSTNode.HIKID] = new TSTNode(key - .charAt(charIndex), currentNode); + currentNode.relatives[TSTNode.HIKID] = new TSTNode(key.charAt(charIndex), currentNode); } currentNode = currentNode.relatives[TSTNode.HIKID]; } @@ -563,128 +507,133 @@ public class JaspellTernarySearchTrie implements Accountable { } /** - * Returns a List of keys that almost match the argument key. - * Keys returned will have exactly diff characters that do not match the - * target key, where diff is equal to the last value passed in as an argument - * to the setMatchAlmostDiff method. - *
<p> - * If the matchAlmost method is called before the - * setMatchAlmostDiff method has been called for the first time, - * then diff = 0. - * - *@param key - * The target key. - *@return A List with the results. + * Returns a List of keys that almost match the argument key. Keys returned will have + * exactly diff characters that do not match the target key, where diff is equal to the last value + * passed in as an argument to the setMatchAlmostDiff method. + * + * <p>If the matchAlmost method is called before the setMatchAlmostDiff + * method has been called for the first time, then diff = 0. + * + * @param key The target key. + * @return A List with the results. */ public List matchAlmost(String key) { return matchAlmost(key, defaultNumReturnValues); } /** - * Returns a List of keys that almost match the argument key. - * Keys returned will have exactly diff characters that do not match the - * target key, where diff is equal to the last value passed in as an argument - * to the setMatchAlmostDiff method. - * <p> - * If the matchAlmost method is called before the - * setMatchAlmostDiff method has been called for the first time, - * then diff = 0. - * - *@param key - * The target key. - *@param numReturnValues - * The maximum number of values returned by this method. - *@return A List with the results + * Returns a List of keys that almost match the argument key. Keys returned will have + * exactly diff characters that do not match the target key, where diff is equal to the last value + * passed in as an argument to the setMatchAlmostDiff method. + * + * <p>
If the matchAlmost method is called before the setMatchAlmostDiff + * method has been called for the first time, then diff = 0. + * + * @param key The target key. + * @param numReturnValues The maximum number of values returned by this method. + * @return A List with the results. */ public List matchAlmost(CharSequence key, int numReturnValues) { - return matchAlmostRecursion(rootNode, 0, matchAlmostDiff, key, - ((numReturnValues < 0) ? -1 : numReturnValues), new Vector(), false); + return matchAlmostRecursion( + rootNode, + 0, + matchAlmostDiff, + key, + ((numReturnValues < 0) ? -1 : numReturnValues), + new Vector(), + false); } /** - * Recursivelly vists the nodes in order to find the ones that almost match a - * given key. - * - *@param currentNode - * The current node. - *@param charIndex - * The current char. - *@param d - * The number of differences so far. - *@param matchAlmostNumReturnValues - * The maximum number of values in the result List. - *@param matchAlmostResult2 - * The results so far. - *@param upTo - * If true all keys having up to and including matchAlmostDiff - * mismatched letters will be included in the result (including a key - * that is exactly the same as the target string) otherwise keys will - * be included in the result only if they have exactly - * matchAlmostDiff number of mismatched letters. - *@param matchAlmostKey - * The key being searched. - *@return A List with the results. + * Recursively visits the nodes in order to find the ones that almost match a given key. + * + * @param currentNode The current node. + * @param charIndex The current char. + * @param d The number of differences so far. + * @param matchAlmostNumReturnValues The maximum number of values in the result List. + * @param matchAlmostResult2 The results so far. + * @param upTo If true, all keys having up to and including matchAlmostDiff mismatched letters will + * be included in the result (including a key that is exactly the same as the target string); + * otherwise keys will be included in the result only if they have exactly matchAlmostDiff + * number of mismatched letters. + * @param matchAlmostKey The key being searched. + * @return A List with the results.
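Because the public matchAlmost entry points pass upTo = false, the "exactly diff mismatches" rule applies, so a key equal to the target is excluded. A small worked example, reusing the trie from the earlier sketch and hypothetical same-length keys:

    trie.put("bats", 1L);
    trie.put("cats", 1L);
    trie.put("rats", 1L);
    trie.setMatchAlmostDiff(1);
    // "catz" vs "cats" differs only at the last character (1 mismatch);
    // "bats" and "rats" each differ in two positions, so they are skipped.
    System.out.println(trie.matchAlmost("catz")); // [cats]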
*/ - private List matchAlmostRecursion(TSTNode currentNode, int charIndex, - int d, CharSequence matchAlmostKey, int matchAlmostNumReturnValues, - List matchAlmostResult2, boolean upTo) { + private List matchAlmostRecursion( + TSTNode currentNode, + int charIndex, + int d, + CharSequence matchAlmostKey, + int matchAlmostNumReturnValues, + List matchAlmostResult2, + boolean upTo) { if ((currentNode == null) - || (matchAlmostNumReturnValues != -1 && matchAlmostResult2.size() >= matchAlmostNumReturnValues) - || (d < 0) || (charIndex >= matchAlmostKey.length())) { + || (matchAlmostNumReturnValues != -1 + && matchAlmostResult2.size() >= matchAlmostNumReturnValues) + || (d < 0) + || (charIndex >= matchAlmostKey.length())) { return matchAlmostResult2; } - int charComp = compareCharsAlphabetically(matchAlmostKey.charAt(charIndex), - currentNode.splitchar); + int charComp = + compareCharsAlphabetically(matchAlmostKey.charAt(charIndex), currentNode.splitchar); List matchAlmostResult = matchAlmostResult2; if ((d > 0) || (charComp < 0)) { - matchAlmostResult = matchAlmostRecursion( - currentNode.relatives[TSTNode.LOKID], charIndex, d, - matchAlmostKey, matchAlmostNumReturnValues, matchAlmostResult, + matchAlmostResult = + matchAlmostRecursion( + currentNode.relatives[TSTNode.LOKID], + charIndex, + d, + matchAlmostKey, + matchAlmostNumReturnValues, + matchAlmostResult, upTo); } int nextD = (charComp == 0) ? d : d - 1; boolean cond = (upTo) ? (nextD >= 0) : (nextD == 0); - if ((matchAlmostKey.length() == charIndex + 1) && cond - && (currentNode.data != null)) { + if ((matchAlmostKey.length() == charIndex + 1) && cond && (currentNode.data != null)) { matchAlmostResult.add(getKey(currentNode)); } - matchAlmostResult = matchAlmostRecursion( - currentNode.relatives[TSTNode.EQKID], charIndex + 1, nextD, - matchAlmostKey, matchAlmostNumReturnValues, matchAlmostResult, upTo); + matchAlmostResult = + matchAlmostRecursion( + currentNode.relatives[TSTNode.EQKID], + charIndex + 1, + nextD, + matchAlmostKey, + matchAlmostNumReturnValues, + matchAlmostResult, + upTo); if ((d > 0) || (charComp > 0)) { - matchAlmostResult = matchAlmostRecursion( - currentNode.relatives[TSTNode.HIKID], charIndex, d, - matchAlmostKey, matchAlmostNumReturnValues, matchAlmostResult, + matchAlmostResult = + matchAlmostRecursion( + currentNode.relatives[TSTNode.HIKID], + charIndex, + d, + matchAlmostKey, + matchAlmostNumReturnValues, + matchAlmostResult, upTo); } return matchAlmostResult; } /** - * Returns an alphabetical List of all keys in the trie that - * begin with a given prefix. Only keys for nodes having non-null data are - * included in the List. - * - *@param prefix - * Each key returned from this method will begin with the characters - * in prefix. - *@return A List with the results. + * Returns an alphabetical List of all keys in the trie that begin with a given + * prefix. Only keys for nodes having non-null data are included in the List. + * + * @param prefix Each key returned from this method will begin with the characters in prefix. + * @return A List with the results. */ public List matchPrefix(String prefix) { return matchPrefix(prefix, defaultNumReturnValues); } /** - * Returns an alphabetical List of all keys in the trie that - * begin with a given prefix. Only keys for nodes having non-null data are - * included in the List. - * - *@param prefix - * Each key returned from this method will begin with the characters - * in prefix. - *@param numReturnValues - * The maximum number of values returned from this method. 
- *@return A List with the results + * Returns an alphabetical List of all keys in the trie that begin with a given + * prefix. Only keys for nodes having non-null data are included in the List. + * + * @param prefix Each key returned from this method will begin with the characters in prefix. + * @param numReturnValues The maximum number of values returned from this method. + * @return A List with the results */ public List matchPrefix(CharSequence prefix, int numReturnValues) { Vector sortKeysResult = new Vector<>(); @@ -695,48 +644,48 @@ public class JaspellTernarySearchTrie implements Accountable { if (startNode.data != null) { sortKeysResult.addElement(getKey(startNode)); } - return sortKeysRecursion(startNode.relatives[TSTNode.EQKID], - ((numReturnValues < 0) ? -1 : numReturnValues), sortKeysResult); + return sortKeysRecursion( + startNode.relatives[TSTNode.EQKID], + ((numReturnValues < 0) ? -1 : numReturnValues), + sortKeysResult); } /** * Returns the number of nodes in the trie that have non-null data. - * - *@return The number of nodes in the trie that have non-null data. + * + * @return The number of nodes in the trie that have non-null data. */ public int numDataNodes() { return numDataNodes(rootNode); } /** - * Returns the number of nodes in the subtrie below and including the starting - * node. The method counts only nodes that have non-null data. - * - *@param startingNode - * The top node of the subtrie. the node that defines the subtrie. - *@return The total number of nodes in the subtrie. + * Returns the number of nodes in the subtrie below and including the starting node. The method + * counts only nodes that have non-null data. + * + * @param startingNode The top node of the subtrie. the node that defines the subtrie. + * @return The total number of nodes in the subtrie. */ protected int numDataNodes(TSTNode startingNode) { return recursiveNodeCalculator(startingNode, true, 0); } /** - * Returns the total number of nodes in the trie. The method counts nodes - * whether or not they have data. - * - *@return The total number of nodes in the trie. + * Returns the total number of nodes in the trie. The method counts nodes whether or not they have + * data. + * + * @return The total number of nodes in the trie. */ public int numNodes() { return numNodes(rootNode); } /** - * Returns the total number of nodes in the subtrie below and including the - * starting Node. The method counts nodes whether or not they have data. - * - *@param startingNode - * The top node of the subtrie. The node that defines the subtrie. - *@return The total number of nodes in the subtrie. + * Returns the total number of nodes in the subtrie below and including the starting Node. The + * method counts nodes whether or not they have data. + * + * @param startingNode The top node of the subtrie. The node that defines the subtrie. + * @return The total number of nodes in the subtrie. */ protected int numNodes(TSTNode startingNode) { return recursiveNodeCalculator(startingNode, false, 0); @@ -744,11 +693,9 @@ public class JaspellTernarySearchTrie implements Accountable { /** * Stores a value in the trie. The value may be retrieved using the key. - * - *@param key - * A String that indexes the object to be stored. - *@param value - * The object to be stored in the Trie. + * + * @param key A String that indexes the object to be stored. + * @param value The object to be stored in the Trie. 
*/ public void put(CharSequence key, Object value) { getOrCreateNode(key).data = value; @@ -756,26 +703,20 @@ public class JaspellTernarySearchTrie implements Accountable { /** * Recursivelly visists each node to calculate the number of nodes. - * - *@param currentNode - * The current node. - *@param checkData - * If true we check the data to be different of null. - *@param numNodes2 - * The number of nodes so far. - *@return The number of nodes accounted. + * + * @param currentNode The current node. + * @param checkData If true we check the data to be different of null. + * @param numNodes2 The number of nodes so far. + * @return The number of nodes accounted. */ - private int recursiveNodeCalculator(TSTNode currentNode, boolean checkData, - int numNodes2) { + private int recursiveNodeCalculator(TSTNode currentNode, boolean checkData, int numNodes2) { if (currentNode == null) { return numNodes2; } - int numNodes = recursiveNodeCalculator( - currentNode.relatives[TSTNode.LOKID], checkData, numNodes2); - numNodes = recursiveNodeCalculator(currentNode.relatives[TSTNode.EQKID], - checkData, numNodes); - numNodes = recursiveNodeCalculator(currentNode.relatives[TSTNode.HIKID], - checkData, numNodes); + int numNodes = + recursiveNodeCalculator(currentNode.relatives[TSTNode.LOKID], checkData, numNodes2); + numNodes = recursiveNodeCalculator(currentNode.relatives[TSTNode.EQKID], checkData, numNodes); + numNodes = recursiveNodeCalculator(currentNode.relatives[TSTNode.HIKID], checkData, numNodes); if (checkData) { if (currentNode.data != null) { numNodes++; @@ -787,27 +728,23 @@ public class JaspellTernarySearchTrie implements Accountable { } /** - * Removes the value indexed by key. Also removes all nodes that are rendered - * unnecessary by the removal of this data. - * - *@param key - * A string that indexes the object to be removed from - * the Trie. + * Removes the value indexed by key. Also removes all nodes that are rendered unnecessary by the + * removal of this data. + * + * @param key A string that indexes the object to be removed from the Trie. */ public void remove(String key) { deleteNode(getNode(key.trim().toLowerCase(locale))); } /** - * Sets the number of characters by which words can differ from target word - * when calling the matchAlmost method. - *
<p> - * Arguments less than 0 will set the char difference to 0, and arguments - * greater than 3 will set the char difference to 3. - * - *@param diff - * The number of characters by which words can differ from target - * word. + * Sets the number of characters by which words can differ from target word when calling the + * matchAlmost method. + * + * <p>
    Arguments less than 0 will set the char difference to 0, and arguments greater than 3 will + * set the char difference to 3. + * + * @param diff The number of characters by which words can differ from target word. */ public void setMatchAlmostDiff(int diff) { if (diff < 0) { @@ -820,75 +757,66 @@ public class JaspellTernarySearchTrie implements Accountable { } /** - * Sets the default maximum number of values returned from the - * matchPrefix and matchAlmost methods. - *
<p> - * The value should be set this to -1 to get an unlimited number of return - * values. note that the methods mentioned above provide overloaded versions - * that allow you to specify the maximum number of return values, in which - * case this value is temporarily overridden. - * - **@param num - * The number of values that will be returned when calling the - * methods above. + * Sets the default maximum number of values returned from the matchPrefix and + * matchAlmost methods. + * + * <p>The value should be set to -1 to get an unlimited number of return values. Note that + * the methods mentioned above provide overloaded versions that allow you to specify the maximum + * number of return values, in which case this value is temporarily overridden. + * + * @param num The number of values that will be returned when calling the methods above. */ public void setNumReturnValues(int num) { defaultNumReturnValues = (num < 0) ? -1 : num; } /** - * Returns keys sorted in alphabetical order. This includes the start Node and - * all nodes connected to the start Node. - * <p>
- * The number of keys returned is limited to numReturnValues. To get a list - * that isn't limited in size, set numReturnValues to -1. - * - *@param startNode - * The top node defining the subtrie to be searched. - *@param numReturnValues - * The maximum number of values returned from this method. - *@return A List with the results. + * Returns keys sorted in alphabetical order. This includes the start Node and all nodes connected + * to the start Node. + * + * <p>The number of keys returned is limited to numReturnValues. To get a list that isn't limited + * in size, set numReturnValues to -1. + * + * @param startNode The top node defining the subtrie to be searched. + * @param numReturnValues The maximum number of values returned from this method. + * @return A List with the results. */ protected List sortKeys(TSTNode startNode, int numReturnValues) { - return sortKeysRecursion(startNode, ((numReturnValues < 0) ? -1 - : numReturnValues), new Vector()); + return sortKeysRecursion( + startNode, ((numReturnValues < 0) ? -1 : numReturnValues), new Vector()); } /** - * Returns keys sorted in alphabetical order. This includes the current Node - * and all nodes connected to the current Node. - * <p> - * Sorted keys will be appended to the end of the resulting List. - * The result may be empty when this method is invoked, but may not be - * null. - * - *@param currentNode - * The current node. - *@param sortKeysNumReturnValues - * The maximum number of values in the result. - *@param sortKeysResult2 - * The results so far. - *@return A List with the results. + * Returns keys sorted in alphabetical order. This includes the current Node and all nodes + * connected to the current Node. + * + * <p>
    Sorted keys will be appended to the end of the resulting List. The result may + * be empty when this method is invoked, but may not be null. + * + * @param currentNode The current node. + * @param sortKeysNumReturnValues The maximum number of values in the result. + * @param sortKeysResult2 The results so far. + * @return A List with the results. */ - private List sortKeysRecursion(TSTNode currentNode, - int sortKeysNumReturnValues, List sortKeysResult2) { + private List sortKeysRecursion( + TSTNode currentNode, int sortKeysNumReturnValues, List sortKeysResult2) { if (currentNode == null) { return sortKeysResult2; } - List sortKeysResult = sortKeysRecursion( - currentNode.relatives[TSTNode.LOKID], sortKeysNumReturnValues, - sortKeysResult2); - if (sortKeysNumReturnValues != -1 - && sortKeysResult.size() >= sortKeysNumReturnValues) { + List sortKeysResult = + sortKeysRecursion( + currentNode.relatives[TSTNode.LOKID], sortKeysNumReturnValues, sortKeysResult2); + if (sortKeysNumReturnValues != -1 && sortKeysResult.size() >= sortKeysNumReturnValues) { return sortKeysResult; } if (currentNode.data != null) { sortKeysResult.add(getKey(currentNode)); } - sortKeysResult = sortKeysRecursion(currentNode.relatives[TSTNode.EQKID], - sortKeysNumReturnValues, sortKeysResult); - return sortKeysRecursion(currentNode.relatives[TSTNode.HIKID], - sortKeysNumReturnValues, sortKeysResult); + sortKeysResult = + sortKeysRecursion( + currentNode.relatives[TSTNode.EQKID], sortKeysNumReturnValues, sortKeysResult); + return sortKeysRecursion( + currentNode.relatives[TSTNode.HIKID], sortKeysNumReturnValues, sortKeysResult); } @Override diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/package-info.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/package-info.java index bd074afcc10..e4855cec909 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/package-info.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/package-info.java @@ -14,8 +14,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -/** - * JaSpell-based autosuggest. - */ -package org.apache.lucene.search.suggest.jaspell; \ No newline at end of file + +/** JaSpell-based autosuggest. */ +package org.apache.lucene.search.suggest.jaspell; diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/package-info.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/package-info.java index a80b317201c..29bbe6397dd 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/package-info.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/package-info.java @@ -15,7 +15,5 @@ * limitations under the License. */ -/** - * Support for Autocomplete/Autosuggest - */ +/** Support for Autocomplete/Autosuggest */ package org.apache.lucene.search.suggest; diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/tst/TSTAutocomplete.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/tst/TSTAutocomplete.java index 4af7aef4c81..6aa7b5f77c2 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/tst/TSTAutocomplete.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/tst/TSTAutocomplete.java @@ -20,29 +20,23 @@ import java.util.*; /** * Ternary Search Trie implementation. 
- * + * * @see TernaryTreeNode */ public class TSTAutocomplete { - + TSTAutocomplete() {} /** - * Inserting keys in TST in the order middle,small,big (lexicographic measure) - * recursively creates a balanced tree which reduces insertion and search - * times significantly. - * - * @param tokens - * Sorted list of keys to be inserted in TST. - * @param lo - * stores the lower index of current list. - * @param hi - * stores the higher index of current list. - * @param root - * a reference object to root of TST. + * Inserting keys in TST in the order middle,small,big (lexicographic measure) recursively creates + * a balanced tree which reduces insertion and search times significantly. + * + * @param tokens Sorted list of keys to be inserted in TST. + * @param lo stores the lower index of current list. + * @param hi stores the higher index of current list. + * @param root a reference object to root of TST. */ - public void balancedTree(Object[] tokens, Object[] vals, int lo, int hi, - TernaryTreeNode root) { + public void balancedTree(Object[] tokens, Object[] vals, int lo, int hi, TernaryTreeNode root) { if (lo > hi) return; int mid = (lo + hi) / 2; root = insert(root, (String) tokens[mid], vals[mid], 0); @@ -51,20 +45,15 @@ public class TSTAutocomplete { } /** - * Inserts a key in TST creating a series of Binary Search Trees at each node. - * The key is actually stored across the eqKid of each node in a successive - * manner. - * - * @param currentNode - * a reference node where the insertion will take currently. - * @param s - * key to be inserted in TST. - * @param x - * index of character in key to be inserted currently. + * Inserts a key in TST creating a series of Binary Search Trees at each node. The key is actually + * stored across the eqKid of each node in a successive manner. + * + * @param currentNode a reference node where the insertion will take currently. + * @param s key to be inserted in TST. + * @param x index of character in key to be inserted currently. * @return currentNode The new reference to root node of TST */ - public TernaryTreeNode insert(TernaryTreeNode currentNode, CharSequence s, - Object val, int x) { + public TernaryTreeNode insert(TernaryTreeNode currentNode, CharSequence s, Object val, int x) { if (s == null || s.length() <= x) { return currentNode; } @@ -96,21 +85,15 @@ public class TSTAutocomplete { } /** - * Auto-completes a given prefix query using Depth-First Search with the end - * of prefix as source node each time finding a new leaf to get a complete key - * to be added in the suggest list. - * - * @param root - * a reference to root node of TST. - * @param s - * prefix query to be auto-completed. - * @param x - * index of current character to be searched while traversing through - * the prefix in TST. + * Auto-completes a given prefix query using Depth-First Search with the end of prefix as source + * node each time finding a new leaf to get a complete key to be added in the suggest list. + * + * @param root a reference to root node of TST. + * @param s prefix query to be auto-completed. + * @param x index of current character to be searched while traversing through the prefix in TST. * @return suggest list of auto-completed keys for the given prefix query. 
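The median-first order documented for balancedTree() above is what keeps the per-character binary search trees shallow: each sorted range contributes its middle key before either half. A sketch with hypothetical sorted input (TSTAutocomplete has a package-private constructor, so package-visible access is assumed):

    String[] tokens = {"ant", "bee", "cat", "dog", "elk"}; // already sorted
    Object[] vals = {1L, 2L, 3L, 4L, 5L};
    TernaryTreeNode root = new TernaryTreeNode();
    TSTAutocomplete tst = new TSTAutocomplete(); // package-private constructor
    tst.balancedTree(tokens, vals, 0, tokens.length - 1, root);
    // Insertion order: "cat" (middle), then "ant", "bee", then "dog", "elk",
    // so every character-level binary search tree starts from a median key.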
*/ - public ArrayList prefixCompletion(TernaryTreeNode root, - CharSequence s, int x) { + public ArrayList prefixCompletion(TernaryTreeNode root, CharSequence s, int x) { TernaryTreeNode p = root; ArrayList suggest = new ArrayList<>(); diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/tst/TSTLookup.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/tst/TSTLookup.java index d0b92476e9f..c57ddb25c57 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/tst/TSTLookup.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/tst/TSTLookup.java @@ -21,7 +21,6 @@ import java.util.ArrayList; import java.util.Comparator; import java.util.List; import java.util.Set; - import org.apache.lucene.search.suggest.InputIterator; import org.apache.lucene.search.suggest.Lookup; import org.apache.lucene.search.suggest.SortedInputIterator; @@ -33,9 +32,9 @@ import org.apache.lucene.util.CharsRefBuilder; import org.apache.lucene.util.RamUsageEstimator; /** - * Suggest implementation based on a - * Ternary Search Tree - * + * Suggest implementation based on a Ternary Search Tree + * * @see TSTAutocomplete */ public class TSTLookup extends Lookup { @@ -47,66 +46,73 @@ public class TSTLookup extends Lookup { private final Directory tempDir; private final String tempFileNamePrefix; - - /** + + /** * Creates a new TSTLookup with an empty Ternary Search Tree. + * * @see #build(InputIterator) */ public TSTLookup() { this(null, null); } - /** + /** * Creates a new TSTLookup, for building. + * * @see #build(InputIterator) */ public TSTLookup(Directory tempDir, String tempFileNamePrefix) { this.tempDir = tempDir; this.tempFileNamePrefix = tempFileNamePrefix; } - + // TODO: Review if this comparator is really needed for TST to work correctly!!! /** TST uses UTF-16 sorting, so we need a suitable BytesRef comparator to do this. */ - private final static Comparator utf8SortedAsUTF16SortOrder = (a, b) -> { - final byte[] aBytes = a.bytes; - int aUpto = a.offset; - final byte[] bBytes = b.bytes; - int bUpto = b.offset; - - final int aStop = aUpto + Math.min(a.length, b.length); + private static final Comparator utf8SortedAsUTF16SortOrder = + (a, b) -> { + final byte[] aBytes = a.bytes; + int aUpto = a.offset; + final byte[] bBytes = b.bytes; + int bUpto = b.offset; - while(aUpto < aStop) { - int aByte = aBytes[aUpto++] & 0xff; - int bByte = bBytes[bUpto++] & 0xff; + final int aStop = aUpto + Math.min(a.length, b.length); - if (aByte != bByte) { + while (aUpto < aStop) { + int aByte = aBytes[aUpto++] & 0xff; + int bByte = bBytes[bUpto++] & 0xff; - // See http://icu-project.org/docs/papers/utf16_code_point_order.html#utf-8-in-utf-16-order + if (aByte != bByte) { - // We know the terms are not equal, but, we may - // have to carefully fixup the bytes at the - // difference to match UTF16's sort order: - - // NOTE: instead of moving supplementary code points (0xee and 0xef) to the unused 0xfe and 0xff, - // we move them to the unused 0xfc and 0xfd [reserved for future 6-byte character sequences] - // this reserves 0xff for preflex's term reordering (surrogate dance), and if unicode grows such - // that 6-byte sequences are needed we have much bigger problems anyway. 
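The byte fix-up in the comparator below exists because raw UTF-8 byte order and Java's UTF-16 code unit order disagree once supplementary characters are involved. A self-contained illustration of the disagreement (not part of this suggester):

    import java.nio.charset.StandardCharsets;

    public class SortOrderDemo {
      public static void main(String[] args) {
        String bmp = "\uFFFF";          // U+FFFF, UTF-8 bytes EF BF BF
        String supp = "\uD800\uDC00";   // U+10000, UTF-8 bytes F0 90 80 80
        // UTF-16 code unit order: lead surrogate 0xD800 sorts below 0xFFFF.
        System.out.println(supp.compareTo(bmp) < 0);       // true
        byte[] a = supp.getBytes(StandardCharsets.UTF_8);
        byte[] b = bmp.getBytes(StandardCharsets.UTF_8);
        // Unsigned UTF-8 byte order: 0xF0 sorts above 0xEF -- the opposite,
        // which is why bytes >= 0xee are remapped before comparing.
        System.out.println((a[0] & 0xff) > (b[0] & 0xff)); // true
      }
    }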
- if (aByte >= 0xee && bByte >= 0xee) { - if ((aByte & 0xfe) == 0xee) { - aByte += 0xe; - } - if ((bByte&0xfe) == 0xee) { - bByte += 0xe; + // See + // http://icu-project.org/docs/papers/utf16_code_point_order.html#utf-8-in-utf-16-order + + // We know the terms are not equal, but, we may + // have to carefully fixup the bytes at the + // difference to match UTF16's sort order: + + // NOTE: instead of moving supplementary code points (0xee and 0xef) to the unused 0xfe + // and 0xff, + // we move them to the unused 0xfc and 0xfd [reserved for future 6-byte character + // sequences] + // this reserves 0xff for preflex's term reordering (surrogate dance), and if unicode + // grows such + // that 6-byte sequences are needed we have much bigger problems anyway. + if (aByte >= 0xee && bByte >= 0xee) { + if ((aByte & 0xfe) == 0xee) { + aByte += 0xe; + } + if ((bByte & 0xfe) == 0xee) { + bByte += 0xe; + } + } + return aByte - bByte; } } - return aByte - bByte; - } - } - // One is a prefix of the other, or, they are equal: - return a.length - b.length; - }; + // One is a prefix of the other, or, they are equal: + return a.length - b.length; + }; @Override public void build(InputIterator iterator) throws IOException { @@ -119,7 +125,8 @@ public class TSTLookup extends Lookup { root = new TernaryTreeNode(); // make sure it's sorted and the comparator uses UTF16 sort order - iterator = new SortedInputIterator(tempDir, tempFileNamePrefix, iterator, utf8SortedAsUTF16SortOrder); + iterator = + new SortedInputIterator(tempDir, tempFileNamePrefix, iterator, utf8SortedAsUTF16SortOrder); count = 0; ArrayList tokens = new ArrayList<>(); ArrayList vals = new ArrayList<>(); @@ -134,11 +141,10 @@ public class TSTLookup extends Lookup { autocomplete.balancedTree(tokens.toArray(), vals.toArray(), 0, tokens.size() - 1, root); } - /** - * Adds a new node if key already exists, - * otherwise replaces its value. - *
<p> - * This method always returns true. + /** + * Adds a new node if key does not already exist, otherwise replaces its value. + * + * <p>
    This method always returns true. */ public boolean add(CharSequence key, Object value) { autocomplete.insert(root, key, value, 0); @@ -146,10 +152,7 @@ public class TSTLookup extends Lookup { return true; } - /** - * Returns the value for the specified key, or null - * if the key does not exist. - */ + /** Returns the value for the specified key, or null if the key does not exist. */ public Object get(CharSequence key) { List list = autocomplete.prefixCompletion(root, key, 0); if (list == null || list.isEmpty()) { @@ -162,7 +165,7 @@ public class TSTLookup extends Lookup { } return null; } - + private static boolean charSeqEquals(CharSequence left, CharSequence right) { int len = left.length(); if (len != right.length()) { @@ -177,7 +180,8 @@ public class TSTLookup extends Lookup { } @Override - public List lookup(CharSequence key, Set contexts, boolean onlyMorePopular, int num) { + public List lookup( + CharSequence key, Set contexts, boolean onlyMorePopular, int num) { if (contexts != null) { throw new IllegalArgumentException("this suggester doesn't support contexts"); } @@ -189,9 +193,9 @@ public class TSTLookup extends Lookup { int maxCnt = Math.min(num, list.size()); if (onlyMorePopular) { LookupPriorityQueue queue = new LookupPriorityQueue(num); - + for (TernaryTreeNode ttn : list) { - queue.insertWithOverflow(new LookupResult(ttn.token, ((Number)ttn.val).longValue())); + queue.insertWithOverflow(new LookupResult(ttn.token, ((Number) ttn.val).longValue())); } for (LookupResult lr : queue.getResults()) { res.add(lr); @@ -199,12 +203,12 @@ public class TSTLookup extends Lookup { } else { for (int i = 0; i < maxCnt; i++) { TernaryTreeNode ttn = list.get(i); - res.add(new LookupResult(ttn.token, ((Number)ttn.val).longValue())); + res.add(new LookupResult(ttn.token, ((Number) ttn.val).longValue())); } } return res; } - + private static final byte LO_KID = 0x01; private static final byte EQ_KID = 0x02; private static final byte HI_KID = 0x04; @@ -248,7 +252,7 @@ public class TSTLookup extends Lookup { if (node.val != null) mask |= HAS_VALUE; out.writeByte(mask); if (node.token != null) out.writeString(node.token); - if (node.val != null) out.writeLong(((Number)node.val).longValue()); + if (node.val != null) out.writeLong(((Number) node.val).longValue()); // recurse and write kids if (node.loKid != null) { writeRecursively(out, node.loKid); @@ -285,7 +289,7 @@ public class TSTLookup extends Lookup { } return mem; } - + @Override public long getCount() { return count; diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/tst/TernaryTreeNode.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/tst/TernaryTreeNode.java index 34292fef351..7ff62f99283 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/tst/TernaryTreeNode.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/tst/TernaryTreeNode.java @@ -18,30 +18,28 @@ package org.apache.lucene.search.suggest.tst; import org.apache.lucene.util.RamUsageEstimator; -/** - * The class creates a TST node. - */ - +/** The class creates a TST node. */ public class TernaryTreeNode { - - /** Creates a new empty node */ + + /** Creates a new empty node */ public TernaryTreeNode() {} /** the character stored by a node. */ char splitchar; /** a reference object to the node containing character smaller than this node's character. */ TernaryTreeNode loKid; - /** - * a reference object to the node containing character next to this node's character as - * occurring in the inserted token. 
+ /** + * a reference object to the node containing character next to this node's character as occurring + * in the inserted token. */ TernaryTreeNode eqKid; /** a reference object to the node containing character higher than this node's character. */ TernaryTreeNode hiKid; - /** - * used by leaf nodes to store the complete tokens to be added to suggest list while + /** + * used by leaf nodes to store the complete tokens to be added to suggest list while * auto-completing the prefix. */ String token; + Object val; long sizeInBytes() { @@ -56,7 +54,10 @@ public class TernaryTreeNode { mem += hiKid.sizeInBytes(); } if (token != null) { - mem += RamUsageEstimator.shallowSizeOf(token) + RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + Character.BYTES * token.length(); + mem += + RamUsageEstimator.shallowSizeOf(token) + + RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + + Character.BYTES * token.length(); } mem += RamUsageEstimator.shallowSizeOf(val); return mem; diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/tst/package-info.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/tst/package-info.java index b136dce7caf..49a42579d4b 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/tst/package-info.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/tst/package-info.java @@ -14,8 +14,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -/** - * Ternary Search Tree based autosuggest. - */ + +/** Ternary Search Tree based autosuggest. */ package org.apache.lucene.search.suggest.tst; diff --git a/lucene/suggest/src/test/org/apache/lucene/search/spell/TestDirectSpellChecker.java b/lucene/suggest/src/test/org/apache/lucene/search/spell/TestDirectSpellChecker.java index 87f8306c818..29d1afd9f12 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/spell/TestDirectSpellChecker.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/spell/TestDirectSpellChecker.java @@ -30,14 +30,14 @@ import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; public class TestDirectSpellChecker extends LuceneTestCase { - + public void testInternalLevenshteinDistance() throws Exception { DirectSpellChecker spellchecker = new DirectSpellChecker(); Directory dir = newDirectory(); Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.KEYWORD, true); RandomIndexWriter writer = new RandomIndexWriter(random(), dir, analyzer); - String[] termsToAdd = { "metanoia", "metanoian", "metanoiai", "metanoias", "metanoi𐑍" }; + String[] termsToAdd = {"metanoia", "metanoian", "metanoiai", "metanoias", "metanoi𐑍"}; for (int i = 0; i < termsToAdd.length; i++) { Document doc = new Document(); doc.add(newTextField("repentance", termsToAdd[i], Field.Store.NO)); @@ -46,19 +46,21 @@ public class TestDirectSpellChecker extends LuceneTestCase { IndexReader ir = writer.getReader(); String misspelled = "metanoix"; - SuggestWord[] similar = spellchecker.suggestSimilar(new Term("repentance", misspelled), 4, ir, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); + SuggestWord[] similar = + spellchecker.suggestSimilar( + new Term("repentance", misspelled), 4, ir, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); assertTrue(similar.length == 4); - + StringDistance sd = spellchecker.getDistance(); assertTrue(sd instanceof LuceneLevenshteinDistance); - for(SuggestWord word : similar) { - assertTrue(word.score==sd.getDistance(word.string, misspelled)); - assertTrue(word.score==sd.getDistance(misspelled, word.string)); 
+ for (SuggestWord word : similar) { + assertTrue(word.score == sd.getDistance(word.string, misspelled)); + assertTrue(word.score == sd.getDistance(misspelled, word.string)); } - + IOUtils.close(ir, writer, dir, analyzer); } - + public void testSimpleExamples() throws Exception { DirectSpellChecker spellChecker = new DirectSpellChecker(); spellChecker.setMinQueryLength(0); @@ -74,35 +76,41 @@ public class TestDirectSpellChecker extends LuceneTestCase { IndexReader ir = writer.getReader(); - SuggestWord[] similar = spellChecker.suggestSimilar(new Term("numbers", - "fvie"), 2, ir, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); + SuggestWord[] similar = + spellChecker.suggestSimilar( + new Term("numbers", "fvie"), 2, ir, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); assertTrue(similar.length > 0); assertEquals("five", similar[0].string); - similar = spellChecker.suggestSimilar(new Term("numbers", "five"), 2, ir, - SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); + similar = + spellChecker.suggestSimilar( + new Term("numbers", "five"), 2, ir, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); if (similar.length > 0) { assertFalse(similar[0].string.equals("five")); // don't suggest a word for itself } - similar = spellChecker.suggestSimilar(new Term("numbers", "fvie"), 2, ir, - SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); + similar = + spellChecker.suggestSimilar( + new Term("numbers", "fvie"), 2, ir, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); assertTrue(similar.length > 0); assertEquals("five", similar[0].string); - similar = spellChecker.suggestSimilar(new Term("numbers", "fiv"), 2, ir, - SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); + similar = + spellChecker.suggestSimilar( + new Term("numbers", "fiv"), 2, ir, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); assertTrue(similar.length > 0); assertEquals("five", similar[0].string); - similar = spellChecker.suggestSimilar(new Term("numbers", "fives"), 2, ir, - SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); + similar = + spellChecker.suggestSimilar( + new Term("numbers", "fives"), 2, ir, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); assertTrue(similar.length > 0); assertEquals("five", similar[0].string); assertTrue(similar.length > 0); - similar = spellChecker.suggestSimilar(new Term("numbers", "fie"), 2, ir, - SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); + similar = + spellChecker.suggestSimilar( + new Term("numbers", "fie"), 2, ir, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); assertEquals("five", similar[0].string); // add some more documents @@ -116,14 +124,15 @@ public class TestDirectSpellChecker extends LuceneTestCase { ir = writer.getReader(); // look ma, no spellcheck index rebuild - similar = spellChecker.suggestSimilar(new Term("numbers", "tousand"), 10, - ir, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); - assertTrue(similar.length > 0); + similar = + spellChecker.suggestSimilar( + new Term("numbers", "tousand"), 10, ir, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); + assertTrue(similar.length > 0); assertEquals("thousand", similar[0].string); IOUtils.close(ir, writer, dir, analyzer); } - + public void testOptions() throws Exception { Directory dir = newDirectory(); Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true); @@ -138,64 +147,72 @@ public class TestDirectSpellChecker extends LuceneTestCase { writer.addDocument(doc); doc.add(newTextField("text", "fobar", Field.Store.NO)); writer.addDocument(doc); - + IndexReader ir = writer.getReader(); - + DirectSpellChecker spellChecker = new DirectSpellChecker(); spellChecker.setMaxQueryFrequency(0F); - SuggestWord[] similar = 
spellChecker.suggestSimilar(new Term("text", - "fobar"), 1, ir, SuggestMode.SUGGEST_MORE_POPULAR); + SuggestWord[] similar = + spellChecker.suggestSimilar( + new Term("text", "fobar"), 1, ir, SuggestMode.SUGGEST_MORE_POPULAR); assertEquals(0, similar.length); - + // confirm that a term shorter than minQueryLength is not spellchecked spellChecker = new DirectSpellChecker(); // reset defaults spellChecker.setMinQueryLength(5); - similar = spellChecker.suggestSimilar(new Term("text", "foba"), 1, ir, - SuggestMode.SUGGEST_MORE_POPULAR); + similar = + spellChecker.suggestSimilar( + new Term("text", "foba"), 1, ir, SuggestMode.SUGGEST_MORE_POPULAR); assertEquals(0, similar.length); // confirm that a term longer than maxQueryLength is not spellchecked spellChecker = new DirectSpellChecker(); // reset defaults spellChecker.setMaxQueryLength(5); - similar = spellChecker.suggestSimilar(new Term("text", "foobrr"), 1, ir, - SuggestMode.SUGGEST_MORE_POPULAR); + similar = + spellChecker.suggestSimilar( + new Term("text", "foobrr"), 1, ir, SuggestMode.SUGGEST_MORE_POPULAR); assertEquals(0, similar.length); - + spellChecker = new DirectSpellChecker(); // reset defaults spellChecker.setMaxEdits(1); - similar = spellChecker.suggestSimilar(new Term("text", "foobazzz"), 1, ir, - SuggestMode.SUGGEST_MORE_POPULAR); + similar = + spellChecker.suggestSimilar( + new Term("text", "foobazzz"), 1, ir, SuggestMode.SUGGEST_MORE_POPULAR); assertEquals(0, similar.length); - + spellChecker = new DirectSpellChecker(); // reset defaults spellChecker.setAccuracy(0.9F); - similar = spellChecker.suggestSimilar(new Term("text", "foobazzz"), 1, ir, - SuggestMode.SUGGEST_MORE_POPULAR); + similar = + spellChecker.suggestSimilar( + new Term("text", "foobazzz"), 1, ir, SuggestMode.SUGGEST_MORE_POPULAR); assertEquals(0, similar.length); - + spellChecker = new DirectSpellChecker(); // reset defaults spellChecker.setMinPrefix(0); - similar = spellChecker.suggestSimilar(new Term("text", "roobaz"), 1, ir, - SuggestMode.SUGGEST_MORE_POPULAR); + similar = + spellChecker.suggestSimilar( + new Term("text", "roobaz"), 1, ir, SuggestMode.SUGGEST_MORE_POPULAR); assertEquals(1, similar.length); - similar = spellChecker.suggestSimilar(new Term("text", "roobaz"), 1, ir, - SuggestMode.SUGGEST_MORE_POPULAR); + similar = + spellChecker.suggestSimilar( + new Term("text", "roobaz"), 1, ir, SuggestMode.SUGGEST_MORE_POPULAR); spellChecker = new DirectSpellChecker(); // reset defaults spellChecker.setMinPrefix(1); - similar = spellChecker.suggestSimilar(new Term("text", "roobaz"), 1, ir, - SuggestMode.SUGGEST_MORE_POPULAR); + similar = + spellChecker.suggestSimilar( + new Term("text", "roobaz"), 1, ir, SuggestMode.SUGGEST_MORE_POPULAR); assertEquals(0, similar.length); - + spellChecker = new DirectSpellChecker(); // reset defaults spellChecker.setMaxEdits(2); - similar = spellChecker.suggestSimilar(new Term("text", "fobar"), 2, ir, - SuggestMode.SUGGEST_ALWAYS); + similar = + spellChecker.suggestSimilar(new Term("text", "fobar"), 2, ir, SuggestMode.SUGGEST_ALWAYS); assertEquals(2, similar.length); - IOUtils.close(ir, writer, dir, analyzer);; + IOUtils.close(ir, writer, dir, analyzer); } - + public void testBogusField() throws Exception { DirectSpellChecker spellChecker = new DirectSpellChecker(); Directory dir = newDirectory(); @@ -210,14 +227,14 @@ public class TestDirectSpellChecker extends LuceneTestCase { IndexReader ir = writer.getReader(); - SuggestWord[] similar = spellChecker.suggestSimilar(new Term( - "bogusFieldBogusField", "fvie"), 2, ir, - 
SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); + SuggestWord[] similar = + spellChecker.suggestSimilar( + new Term("bogusFieldBogusField", "fvie"), 2, ir, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); assertEquals(0, similar.length); - + IOUtils.close(ir, writer, dir, analyzer); } - + // simple test that transpositions work, we suggest five for fvie with ed=1 public void testTransposition() throws Exception { DirectSpellChecker spellChecker = new DirectSpellChecker(); @@ -233,15 +250,15 @@ public class TestDirectSpellChecker extends LuceneTestCase { IndexReader ir = writer.getReader(); - SuggestWord[] similar = spellChecker.suggestSimilar(new Term( - "numbers", "fvie"), 1, ir, - SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); + SuggestWord[] similar = + spellChecker.suggestSimilar( + new Term("numbers", "fvie"), 1, ir, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); assertEquals(1, similar.length); assertEquals("five", similar[0].string); - + IOUtils.close(ir, writer, dir, analyzer); } - + // simple test that transpositions work, we suggest seventeen for seevntene with ed=2 public void testTransposition2() throws Exception { DirectSpellChecker spellChecker = new DirectSpellChecker(); @@ -257,12 +274,12 @@ public class TestDirectSpellChecker extends LuceneTestCase { IndexReader ir = writer.getReader(); - SuggestWord[] similar = spellChecker.suggestSimilar(new Term( - "numbers", "seevntene"), 2, ir, - SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); + SuggestWord[] similar = + spellChecker.suggestSimilar( + new Term("numbers", "seevntene"), 2, ir, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); assertEquals(1, similar.length); assertEquals("seventeen", similar[0].string); - + IOUtils.close(ir, writer, dir, analyzer); } } diff --git a/lucene/suggest/src/test/org/apache/lucene/search/spell/TestJaroWinklerDistance.java b/lucene/suggest/src/test/org/apache/lucene/search/spell/TestJaroWinklerDistance.java index 4e04b98c8c5..cb83c744276 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/spell/TestJaroWinklerDistance.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/spell/TestJaroWinklerDistance.java @@ -21,12 +21,12 @@ import org.apache.lucene.util.LuceneTestCase; public class TestJaroWinklerDistance extends LuceneTestCase { private StringDistance sd = new JaroWinklerDistance(); - + public void testGetDistance() { float d = sd.getDistance("al", "al"); assertTrue(d == 1.0f); d = sd.getDistance("martha", "marhta"); - assertTrue(d > 0.961 && d <0.962); + assertTrue(d > 0.961 && d < 0.962); d = sd.getDistance("jones", "johnson"); assertTrue(d > 0.832 && d < 0.833); d = sd.getDistance("abcvwxyz", "cabvwxyz"); @@ -42,7 +42,6 @@ public class TestJaroWinklerDistance extends LuceneTestCase { assertTrue(d1 > d2); d1 = sd.getDistance("brittney spears", "britney spears"); d2 = sd.getDistance("brittney spears", "brittney startzman"); - assertTrue(d1 > d2); + assertTrue(d1 > d2); } - -} \ No newline at end of file +} diff --git a/lucene/suggest/src/test/org/apache/lucene/search/spell/TestLevenshteinDistance.java b/lucene/suggest/src/test/org/apache/lucene/search/spell/TestLevenshteinDistance.java index 88c7587e0bf..694d685cd64 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/spell/TestLevenshteinDistance.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/spell/TestLevenshteinDistance.java @@ -21,33 +21,32 @@ import org.apache.lucene.util.LuceneTestCase; public class TestLevenshteinDistance extends LuceneTestCase { private StringDistance sd = new LevenshteinDistance(); - + public void testGetDistance() { float d = 
sd.getDistance("al", "al"); - assertEquals(d,1.0f,0.001); + assertEquals(d, 1.0f, 0.001); d = sd.getDistance("martha", "marhta"); - assertEquals(d,0.6666,0.001); + assertEquals(d, 0.6666, 0.001); d = sd.getDistance("jones", "johnson"); - assertEquals(d,0.4285,0.001); + assertEquals(d, 0.4285, 0.001); d = sd.getDistance("abcvwxyz", "cabvwxyz"); - assertEquals(d,0.75,0.001); + assertEquals(d, 0.75, 0.001); d = sd.getDistance("dwayne", "duane"); - assertEquals(d,0.666,0.001); + assertEquals(d, 0.666, 0.001); d = sd.getDistance("dixon", "dicksonx"); - assertEquals(d,0.5,0.001); + assertEquals(d, 0.5, 0.001); d = sd.getDistance("six", "ten"); - assertEquals(d,0,0.001); + assertEquals(d, 0, 0.001); float d1 = sd.getDistance("zac ephron", "zac efron"); float d2 = sd.getDistance("zac ephron", "kai ephron"); - assertEquals(d1,d2,0.001); + assertEquals(d1, d2, 0.001); d1 = sd.getDistance("brittney spears", "britney spears"); d2 = sd.getDistance("brittney spears", "brittney startzman"); assertTrue(d1 > d2); } - + public void testEmpty() throws Exception { float d = sd.getDistance("", "al"); - assertEquals(d,0.0f,0.001); + assertEquals(d, 0.0f, 0.001); } - } diff --git a/lucene/suggest/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java b/lucene/suggest/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java index 902bec8245f..f8d362ec696 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java @@ -17,7 +17,6 @@ package org.apache.lucene.search.spell; import java.io.IOException; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; @@ -32,9 +31,8 @@ import org.apache.lucene.util.BytesRefIterator; import org.apache.lucene.util.LuceneTestCase; /** - * Test case for LuceneDictionary. - * It first creates a simple index and then a couple of instances of LuceneDictionary - * on different fields and checks if all the right text comes back. + * Test case for LuceneDictionary. It first creates a simple index and then a couple of instances of + * LuceneDictionary on different fields and checks if all the right text comes back. 
*/ public class TestLuceneDictionary extends LuceneTestCase { @@ -54,19 +52,19 @@ public class TestLuceneDictionary extends LuceneTestCase { Document doc; - doc = new Document(); + doc = new Document(); doc.add(newTextField("aaa", "foo", Field.Store.YES)); writer.addDocument(doc); - doc = new Document(); + doc = new Document(); doc.add(newTextField("aaa", "foo", Field.Store.YES)); writer.addDocument(doc); - doc = new Document(); + doc = new Document(); doc.add(newTextField("contents", "Tom", Field.Store.YES)); writer.addDocument(doc); - doc = new Document(); + doc = new Document(); doc.add(newTextField("contents", "Jerry", Field.Store.YES)); writer.addDocument(doc); @@ -80,13 +78,14 @@ public class TestLuceneDictionary extends LuceneTestCase { @Override public void tearDown() throws Exception { - if (indexReader != null) + if (indexReader != null) { indexReader.close(); + } store.close(); analyzer.close(); super.tearDown(); } - + public void testFieldNonExistent() throws IOException { try { indexReader = DirectoryReader.open(store); @@ -96,7 +95,9 @@ public class TestLuceneDictionary extends LuceneTestCase { assertNull("More elements than expected", spare = it.next()); } finally { - if (indexReader != null) { indexReader.close(); } + if (indexReader != null) { + indexReader.close(); + } } } @@ -110,7 +111,9 @@ public class TestLuceneDictionary extends LuceneTestCase { assertTrue("First element isn't correct", spare.utf8ToString().equals("foo")); assertNull("More elements than expected", it.next()); } finally { - if (indexReader != null) { indexReader.close(); } + if (indexReader != null) { + indexReader.close(); + } } } @@ -136,9 +139,10 @@ public class TestLuceneDictionary extends LuceneTestCase { } assertTrue("Number of words incorrect", counter == 0); - } - finally { - if (indexReader != null) { indexReader.close(); } + } finally { + if (indexReader != null) { + indexReader.close(); + } } } @@ -151,11 +155,12 @@ public class TestLuceneDictionary extends LuceneTestCase { // just iterate through words assertEquals("First element isn't correct", "Jerry", it.next().utf8ToString()); - assertEquals("Second element isn't correct", "Tom", it.next().utf8ToString()); + assertEquals("Second element isn't correct", "Tom", it.next().utf8ToString()); assertNull("Nonexistent element is really null", it.next()); - } - finally { - if (indexReader != null) { indexReader.close(); } + } finally { + if (indexReader != null) { + indexReader.close(); + } } } @@ -169,17 +174,19 @@ public class TestLuceneDictionary extends LuceneTestCase { assertNotNull("First element doesn't exist.", spare = it.next()); assertEquals("First element isn't correct", "bar", spare.utf8ToString()); assertNull("More elements than expected", it.next()); - } - finally { - if (indexReader != null) { indexReader.close(); } + } finally { + if (indexReader != null) { + indexReader.close(); + } } } - + public void testSpellchecker() throws IOException { Directory dir = newDirectory(); SpellChecker sc = new SpellChecker(dir); indexReader = DirectoryReader.open(store); - sc.indexDictionary(new LuceneDictionary(indexReader, "contents"), newIndexWriterConfig(null), false); + sc.indexDictionary( + new LuceneDictionary(indexReader, "contents"), newIndexWriterConfig(null), false); String[] suggestions = sc.suggestSimilar("Tam", 1); assertEquals(1, suggestions.length); assertEquals("Tom", suggestions[0]); @@ -190,5 +197,4 @@ public class TestLuceneDictionary extends LuceneTestCase { sc.close(); dir.close(); } - } diff --git 
a/lucene/suggest/src/test/org/apache/lucene/search/spell/TestNGramDistance.java b/lucene/suggest/src/test/org/apache/lucene/search/spell/TestNGramDistance.java index 63ff48bf9cb..6f3f208d9a0 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/spell/TestNGramDistance.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/spell/TestNGramDistance.java @@ -20,65 +20,63 @@ import org.apache.lucene.util.LuceneTestCase; public class TestNGramDistance extends LuceneTestCase { - - public void testGetDistance1() { StringDistance nsd = new NGramDistance(1); float d = nsd.getDistance("al", "al"); - assertEquals(d,1.0f,0.001); + assertEquals(d, 1.0f, 0.001); d = nsd.getDistance("a", "a"); - assertEquals(d,1.0f,0.001); + assertEquals(d, 1.0f, 0.001); d = nsd.getDistance("b", "a"); - assertEquals(d,0.0f,0.001); + assertEquals(d, 0.0f, 0.001); d = nsd.getDistance("martha", "marhta"); - assertEquals(d,0.6666,0.001); + assertEquals(d, 0.6666, 0.001); d = nsd.getDistance("jones", "johnson"); - assertEquals(d,0.4285,0.001); + assertEquals(d, 0.4285, 0.001); d = nsd.getDistance("natural", "contrary"); - assertEquals(d,0.25,0.001); + assertEquals(d, 0.25, 0.001); d = nsd.getDistance("abcvwxyz", "cabvwxyz"); - assertEquals(d,0.75,0.001); + assertEquals(d, 0.75, 0.001); d = nsd.getDistance("dwayne", "duane"); - assertEquals(d,0.666,0.001); + assertEquals(d, 0.666, 0.001); d = nsd.getDistance("dixon", "dicksonx"); - assertEquals(d,0.5,0.001); + assertEquals(d, 0.5, 0.001); d = nsd.getDistance("six", "ten"); - assertEquals(d,0,0.001); + assertEquals(d, 0, 0.001); float d1 = nsd.getDistance("zac ephron", "zac efron"); float d2 = nsd.getDistance("zac ephron", "kai ephron"); - assertEquals(d1,d2,0.001); + assertEquals(d1, d2, 0.001); d1 = nsd.getDistance("brittney spears", "britney spears"); d2 = nsd.getDistance("brittney spears", "brittney startzman"); assertTrue(d1 > d2); d1 = nsd.getDistance("12345678", "12890678"); d2 = nsd.getDistance("12345678", "72385698"); - assertEquals(d1,d2,001); + assertEquals(d1, d2, 0.001); } - + public void testGetDistance2() { StringDistance sd = new NGramDistance(2); float d = sd.getDistance("al", "al"); - assertEquals(d,1.0f,0.001); + assertEquals(d, 1.0f, 0.001); d = sd.getDistance("a", "a"); - assertEquals(d,1.0f,0.001); + assertEquals(d, 1.0f, 0.001); d = sd.getDistance("b", "a"); - assertEquals(d,0.0f,0.001); + assertEquals(d, 0.0f, 0.001); d = sd.getDistance("a", "aa"); - assertEquals(d,0.5f,0.001); + assertEquals(d, 0.5f, 0.001); d = sd.getDistance("martha", "marhta"); - assertEquals(d,0.6666,0.001); + assertEquals(d, 0.6666, 0.001); d = sd.getDistance("jones", "johnson"); - assertEquals(d,0.4285,0.001); + assertEquals(d, 0.4285, 0.001); d = sd.getDistance("natural", "contrary"); - assertEquals(d,0.25,0.001); + assertEquals(d, 0.25, 0.001); d = sd.getDistance("abcvwxyz", "cabvwxyz"); - assertEquals(d,0.625,0.001); + assertEquals(d, 0.625, 0.001); d = sd.getDistance("dwayne", "duane"); - assertEquals(d,0.5833,0.001); + assertEquals(d, 0.5833, 0.001); d = sd.getDistance("dixon", "dicksonx"); - assertEquals(d,0.5,0.001); + assertEquals(d, 0.5, 0.001); d = sd.getDistance("six", "ten"); - assertEquals(d,0,0.001); + assertEquals(d, 0, 0.001); float d1 = sd.getDistance("zac ephron", "zac efron"); float d2 = sd.getDistance("zac ephron", "kai ephron"); assertTrue(d1 > d2); @@ -87,31 +85,31 @@ public class TestNGramDistance extends LuceneTestCase { assertTrue(d1 > d2); d1 = sd.getDistance("0012345678", "0012890678"); d2 = sd.getDistance("0012345678", "0072385698"); -
assertEquals(d1,d2,0.001); + assertEquals(d1, d2, 0.001); } - + public void testGetDistance3() { StringDistance sd = new NGramDistance(3); float d = sd.getDistance("al", "al"); - assertEquals(d,1.0f,0.001); + assertEquals(d, 1.0f, 0.001); d = sd.getDistance("a", "a"); - assertEquals(d,1.0f,0.001); + assertEquals(d, 1.0f, 0.001); d = sd.getDistance("b", "a"); - assertEquals(d,0.0f,0.001); + assertEquals(d, 0.0f, 0.001); d = sd.getDistance("martha", "marhta"); - assertEquals(d,0.7222,0.001); + assertEquals(d, 0.7222, 0.001); d = sd.getDistance("jones", "johnson"); - assertEquals(d,0.4762,0.001); + assertEquals(d, 0.4762, 0.001); d = sd.getDistance("natural", "contrary"); - assertEquals(d,0.2083,0.001); + assertEquals(d, 0.2083, 0.001); d = sd.getDistance("abcvwxyz", "cabvwxyz"); - assertEquals(d,0.5625,0.001); + assertEquals(d, 0.5625, 0.001); d = sd.getDistance("dwayne", "duane"); - assertEquals(d,0.5277,0.001); + assertEquals(d, 0.5277, 0.001); d = sd.getDistance("dixon", "dicksonx"); - assertEquals(d,0.4583,0.001); + assertEquals(d, 0.4583, 0.001); d = sd.getDistance("six", "ten"); - assertEquals(d,0,0.001); + assertEquals(d, 0, 0.001); float d1 = sd.getDistance("zac ephron", "zac efron"); float d2 = sd.getDistance("zac ephron", "kai ephron"); assertTrue(d1 > d2); @@ -126,6 +124,6 @@ public class TestNGramDistance extends LuceneTestCase { public void testEmpty() throws Exception { StringDistance nsd = new NGramDistance(1); float d = nsd.getDistance("", "al"); - assertEquals(d,0.0f,0.001); + assertEquals(d, 0.0f, 0.001); } } diff --git a/lucene/suggest/src/test/org/apache/lucene/search/spell/TestPlainTextDictionary.java b/lucene/suggest/src/test/org/apache/lucene/search/spell/TestPlainTextDictionary.java index abf2cab9d17..05f4684beac 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/spell/TestPlainTextDictionary.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/spell/TestPlainTextDictionary.java @@ -18,14 +18,10 @@ package org.apache.lucene.search.spell; import java.io.IOException; import java.io.StringReader; - import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; -/** - * Test case for PlainTextDictionary - * - */ +/** Test case for PlainTextDictionary */ public class TestPlainTextDictionary extends LuceneTestCase { public void testBuild() throws IOException { @@ -42,5 +38,4 @@ public class TestPlainTextDictionary extends LuceneTestCase { spellChecker.close(); ramDir.close(); } - } diff --git a/lucene/suggest/src/test/org/apache/lucene/search/spell/TestSpellChecker.java b/lucene/suggest/src/test/org/apache/lucene/search/spell/TestSpellChecker.java index ae6c9727926..2609b3e10ff 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/spell/TestSpellChecker.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/spell/TestSpellChecker.java @@ -25,7 +25,6 @@ import java.util.Locale; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; @@ -41,9 +40,7 @@ import org.apache.lucene.util.English; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.NamedThreadFactory; -/** - * Spell checker test case - */ +/** Spell checker test case */ public class TestSpellChecker extends LuceneTestCase { private SpellCheckerMock spellChecker; private Directory userindex, spellindex; @@ -53,8 +50,8 @@ public class 
TestSpellChecker extends LuceneTestCase { @Override public void setUp() throws Exception { super.setUp(); - - //create a user index + + // create a user index userindex = newDirectory(); analyzer = new MockAnalyzer(random()); IndexWriter writer = new IndexWriter(userindex, new IndexWriterConfig(analyzer)); @@ -62,50 +59,56 @@ public class TestSpellChecker extends LuceneTestCase { for (int i = 0; i < 1000; i++) { Document doc = new Document(); doc.add(newTextField("field1", English.intToEnglish(i), Field.Store.YES)); - doc.add(newTextField("field2", English.intToEnglish(i + 1), Field.Store.YES)); // + word thousand - doc.add(newTextField("field3", "fvei" + (i % 2 == 0 ? " five" : ""), Field.Store.YES)); // + word thousand + doc.add( + newTextField("field2", English.intToEnglish(i + 1), Field.Store.YES)); // + word thousand + doc.add( + newTextField( + "field3", "fvei" + (i % 2 == 0 ? " five" : ""), Field.Store.YES)); // + word thousand writer.addDocument(doc); } { Document doc = new Document(); doc.add(newTextField("field1", "eight", Field.Store.YES)); // "eight" in - // the index - // twice + // the index + // twice writer.addDocument(doc); } { Document doc = new Document(); - doc - .add(newTextField("field1", "twenty-one twenty-one", Field.Store.YES)); // "twenty-one" in the index thrice + doc.add( + newTextField( + "field1", + "twenty-one twenty-one", + Field.Store.YES)); // "twenty-one" in the index thrice writer.addDocument(doc); } { Document doc = new Document(); doc.add(newTextField("field1", "twenty", Field.Store.YES)); // "twenty" - // in the - // index - // twice + // in the + // index + // twice writer.addDocument(doc); } - + writer.close(); searchers = Collections.synchronizedList(new ArrayList<IndexSearcher>()); // create the spellChecker spellindex = newDirectory(); spellChecker = new SpellCheckerMock(spellindex); } - + @Override public void tearDown() throws Exception { userindex.close(); - if (!spellChecker.isClosed()) + if (!spellChecker.isClosed()) { spellChecker.close(); + } spellindex.close(); analyzer.close(); super.tearDown(); } - public void testBuild() throws IOException { IndexReader r = DirectoryReader.open(userindex); @@ -118,12 +121,12 @@ public class TestSpellChecker extends LuceneTestCase { int num_field2 = this.numdoc(); assertEquals(num_field2, num_field1 + 1); - + assertLastSearcherOpen(4); - + checkCommonSuggestions(r); checkLevenshteinSuggestions(r); - + spellChecker.setStringDistance(new JaroWinklerDistance()); spellChecker.setAccuracy(0.8f); checkCommonSuggestions(r); @@ -137,7 +140,7 @@ public class TestSpellChecker extends LuceneTestCase { similar = spellChecker.suggestSimilar("fiv", 2); assertTrue(similar.length > 0); assertEquals(similar[0], "five"); - + spellChecker.setStringDistance(new NGramDistance(2)); spellChecker.setAccuracy(0.5f); checkCommonSuggestions(r); @@ -149,119 +152,131 @@ public class TestSpellChecker extends LuceneTestCase { public void testComparator() throws Exception { IndexReader r = DirectoryReader.open(userindex); Directory compIdx = newDirectory(); - SpellChecker compareSP = new SpellCheckerMock(compIdx, new LevenshteinDistance(), new SuggestWordFrequencyComparator()); + SpellChecker compareSP = + new SpellCheckerMock( + compIdx, new LevenshteinDistance(), new SuggestWordFrequencyComparator()); addwords(r, compareSP, "field3"); - String[] similar = compareSP.suggestSimilar("fvie", 2, r, "field3", - SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); + String[] similar = + compareSP.suggestSimilar("fvie", 2, r, "field3",
SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); assertTrue(similar.length == 2); - //five and fvei have the same score, but different frequencies. + // five and fvei have the same score, but different frequencies. assertEquals("fvei", similar[0]); assertEquals("five", similar[1]); r.close(); - if (!compareSP.isClosed()) + if (!compareSP.isClosed()) { compareSP.close(); + } compIdx.close(); } - + public void testBogusField() throws Exception { IndexReader r = DirectoryReader.open(userindex); Directory compIdx = newDirectory(); - SpellChecker compareSP = new SpellCheckerMock(compIdx, new LevenshteinDistance(), new SuggestWordFrequencyComparator()); + SpellChecker compareSP = + new SpellCheckerMock( + compIdx, new LevenshteinDistance(), new SuggestWordFrequencyComparator()); addwords(r, compareSP, "field3"); - String[] similar = compareSP.suggestSimilar("fvie", 2, r, - "bogusFieldBogusField", SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); + String[] similar = + compareSP.suggestSimilar( + "fvie", 2, r, "bogusFieldBogusField", SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); assertEquals(0, similar.length); r.close(); - if (!compareSP.isClosed()) + if (!compareSP.isClosed()) { compareSP.close(); + } compIdx.close(); } - + public void testSuggestModes() throws Exception { IndexReader r = DirectoryReader.open(userindex); spellChecker.clearIndex(); addwords(r, spellChecker, "field1"); - + { - String[] similar = spellChecker.suggestSimilar("eighty", 2, r, "field1", - SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); + String[] similar = + spellChecker.suggestSimilar( + "eighty", 2, r, "field1", SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); assertEquals(1, similar.length); assertEquals("eighty", similar[0]); } - + { - String[] similar = spellChecker.suggestSimilar("eight", 2, r, "field1", - SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); + String[] similar = + spellChecker.suggestSimilar( + "eight", 2, r, "field1", SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); assertEquals(1, similar.length); assertEquals("eight", similar[0]); } - + { - String[] similar = spellChecker.suggestSimilar("eighty", 5, r, "field1", - SuggestMode.SUGGEST_MORE_POPULAR); + String[] similar = + spellChecker.suggestSimilar("eighty", 5, r, "field1", SuggestMode.SUGGEST_MORE_POPULAR); assertEquals(5, similar.length); assertEquals("eight", similar[0]); } - + { - String[] similar = spellChecker.suggestSimilar("twenty", 5, r, "field1", - SuggestMode.SUGGEST_MORE_POPULAR); + String[] similar = + spellChecker.suggestSimilar("twenty", 5, r, "field1", SuggestMode.SUGGEST_MORE_POPULAR); assertEquals(1, similar.length); assertEquals("twenty-one", similar[0]); } - + { - String[] similar = spellChecker.suggestSimilar("eight", 5, r, "field1", - SuggestMode.SUGGEST_MORE_POPULAR); + String[] similar = + spellChecker.suggestSimilar("eight", 5, r, "field1", SuggestMode.SUGGEST_MORE_POPULAR); assertEquals(0, similar.length); } - + { - String[] similar = spellChecker.suggestSimilar("eighty", 5, r, "field1", - SuggestMode.SUGGEST_ALWAYS); + String[] similar = + spellChecker.suggestSimilar("eighty", 5, r, "field1", SuggestMode.SUGGEST_ALWAYS); assertEquals(5, similar.length); assertEquals("eight", similar[0]); } - + { - String[] similar = spellChecker.suggestSimilar("eight", 5, r, "field1", - SuggestMode.SUGGEST_ALWAYS); + String[] similar = + spellChecker.suggestSimilar("eight", 5, r, "field1", SuggestMode.SUGGEST_ALWAYS); assertEquals(5, similar.length); assertEquals("eighty", similar[0]); } r.close(); } + private void checkCommonSuggestions(IndexReader r) throws IOException { String[] similar = 
spellChecker.suggestSimilar("fvie", 2); assertTrue(similar.length > 0); assertEquals(similar[0], "five"); - + similar = spellChecker.suggestSimilar("five", 2); if (similar.length > 0) { assertFalse(similar[0].equals("five")); // don't suggest a word for itself } - + similar = spellChecker.suggestSimilar("fiv", 2); assertTrue(similar.length > 0); assertEquals(similar[0], "five"); - + similar = spellChecker.suggestSimilar("fives", 2); assertTrue(similar.length > 0); assertEquals(similar[0], "five"); - + assertTrue(similar.length > 0); similar = spellChecker.suggestSimilar("fie", 2); assertEquals(similar[0], "five"); - + // test restraint to a field - similar = spellChecker.suggestSimilar("tousand", 10, r, "field1", - SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); + similar = + spellChecker.suggestSimilar( + "tousand", 10, r, "field1", SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); assertEquals(0, similar.length); // there isn't the term thousand in the field field1 - similar = spellChecker.suggestSimilar("tousand", 10, r, "field2", - SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); + similar = + spellChecker.suggestSimilar( + "tousand", 10, r, "field2", SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); assertEquals(1, similar.length); // there is the term thousand in the field field2 } @@ -273,7 +288,7 @@ public class TestSpellChecker extends LuceneTestCase { similar = spellChecker.suggestSimilar("five", 2); assertEquals(1, similar.length); - assertEquals(similar[0], "nine"); // don't suggest a word for itself + assertEquals(similar[0], "nine"); // don't suggest a word for itself similar = spellChecker.suggestSimilar("fiv", 2); assertEquals(1, similar.length); @@ -292,20 +307,22 @@ public class TestSpellChecker extends LuceneTestCase { assertEquals(2, similar.length); assertEquals(similar[0], "five"); assertEquals(similar[1], "nine"); - + similar = spellChecker.suggestSimilar("fi", 2); assertEquals(1, similar.length); assertEquals(similar[0], "five"); // test restraint to a field - similar = spellChecker.suggestSimilar("tousand", 10, r, "field1", - SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); + similar = + spellChecker.suggestSimilar( + "tousand", 10, r, "field1", SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); assertEquals(0, similar.length); // there isn't the term thousand in the field field1 - similar = spellChecker.suggestSimilar("tousand", 10, r, "field2", - SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); + similar = + spellChecker.suggestSimilar( + "tousand", 10, r, "field2", SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); assertEquals(1, similar.length); // there is the term thousand in the field field2 - + similar = spellChecker.suggestSimilar("onety", 2); assertEquals(2, similar.length); assertEquals(similar[0], "ninety"); @@ -320,7 +337,7 @@ public class TestSpellChecker extends LuceneTestCase { assertEquals(similar[0], "one"); assertEquals(similar[1], "ninety"); } - + private void checkNGramSuggestions() throws IOException { String[] similar = spellChecker.suggestSimilar("onety", 2); assertEquals(2, similar.length); @@ -332,18 +349,18 @@ public class TestSpellChecker extends LuceneTestCase { long time = System.currentTimeMillis(); sc.indexDictionary(new LuceneDictionary(r, field), newIndexWriterConfig(null), false); time = System.currentTimeMillis() - time; - //System.out.println("time to build " + field + ": " + time); + // System.out.println("time to build " + field + ": " + time); } private int numdoc() throws IOException { IndexReader rs = DirectoryReader.open(spellindex); int num = rs.numDocs(); assertTrue(num != 0); - 
//System.out.println("num docs: " + num); + // System.out.println("num docs: " + num); rs.close(); return num; } - + public void testClose() throws IOException { IndexReader r = DirectoryReader.open(userindex); spellChecker.clearIndex(); @@ -358,33 +375,44 @@ public class TestSpellChecker extends LuceneTestCase { spellChecker.close(); assertSearchersClosed(); - expectThrows(AlreadyClosedException.class, () -> { - spellChecker.close(); - }); + expectThrows( + AlreadyClosedException.class, + () -> { + spellChecker.close(); + }); - expectThrows(AlreadyClosedException.class, () -> { - checkCommonSuggestions(r); - }); - - expectThrows(AlreadyClosedException.class, () -> { - spellChecker.clearIndex(); - }); - - expectThrows(AlreadyClosedException.class, () -> { - spellChecker.indexDictionary(new LuceneDictionary(r, field), newIndexWriterConfig(null), false); - }); - - expectThrows(AlreadyClosedException.class, () -> { - spellChecker.setSpellIndex(spellindex); - }); + expectThrows( + AlreadyClosedException.class, + () -> { + checkCommonSuggestions(r); + }); + + expectThrows( + AlreadyClosedException.class, + () -> { + spellChecker.clearIndex(); + }); + + expectThrows( + AlreadyClosedException.class, + () -> { + spellChecker.indexDictionary( + new LuceneDictionary(r, field), newIndexWriterConfig(null), false); + }); + + expectThrows( + AlreadyClosedException.class, + () -> { + spellChecker.setSpellIndex(spellindex); + }); assertEquals(4, searchers.size()); assertSearchersClosed(); r.close(); } - + /* - * tests if the internally shared indexsearcher is correctly closed + * tests if the internally shared indexsearcher is correctly closed * when the spellchecker is concurrently accessed and closed. */ public void testConcurrentAccess() throws IOException, InterruptedException { @@ -400,31 +428,34 @@ public class TestSpellChecker extends LuceneTestCase { int num_field2 = this.numdoc(); assertEquals(num_field2, num_field1 + 1); int numThreads = 5 + random().nextInt(5); - ExecutorService executor = Executors.newFixedThreadPool(numThreads, new NamedThreadFactory("testConcurrentAccess")); + ExecutorService executor = + Executors.newFixedThreadPool(numThreads, new NamedThreadFactory("testConcurrentAccess")); SpellCheckWorker[] workers = new SpellCheckWorker[numThreads]; for (int i = 0; i < numThreads; i++) { SpellCheckWorker spellCheckWorker = new SpellCheckWorker(r); executor.execute(spellCheckWorker); workers[i] = spellCheckWorker; - } int iterations = 5 + random().nextInt(5); for (int i = 0; i < iterations; i++) { Thread.sleep(100); // concurrently reset the spell index spellChecker.setSpellIndex(this.spellindex); - // for debug - prints the internal open searchers + // for debug - prints the internal open searchers // showSearchersOpen(); } - + spellChecker.close(); executor.shutdown(); // wait for 60 seconds - usually this is very fast but coverage runs could take quite long executor.awaitTermination(60L, TimeUnit.SECONDS); - + for (int i = 0; i < workers.length; i++) { assertFalse(String.format(Locale.ROOT, "worker thread %d failed", i), workers[i].failed); - assertTrue(String.format(Locale.ROOT, "worker thread %d is still running but should be terminated", i), workers[i].terminated); + assertTrue( + String.format( + Locale.ROOT, "worker thread %d is still running but should be terminated", i), + workers[i].terminated); } // 4 searchers more than iterations // 1. 
at creation @@ -434,21 +465,23 @@ public class TestSpellChecker extends LuceneTestCase { assertSearchersClosed(); r.close(); } - + private void assertLastSearcherOpen(int numSearchers) { assertEquals(numSearchers, searchers.size()); IndexSearcher[] searcherArray = searchers.toArray(new IndexSearcher[0]); for (int i = 0; i < searcherArray.length; i++) { if (i == searcherArray.length - 1) { - assertTrue("expected last searcher open but was closed", + assertTrue( + "expected last searcher open but was closed", searcherArray[i].getIndexReader().getRefCount() > 0); } else { - assertFalse("expected closed searcher but was open - Index: " + i, + assertFalse( + "expected closed searcher but was open - Index: " + i, searcherArray[i].getIndexReader().getRefCount() > 0); } } } - + private void assertSearchersClosed() { for (IndexSearcher searcher : searchers) { assertEquals(0, searcher.getIndexReader().getRefCount()); @@ -456,26 +489,25 @@ } // For debug -// private void showSearchersOpen() { -// int count = 0; -// for (IndexSearcher searcher : searchers) { -// if(searcher.getIndexReader().getRefCount() > 0) -// ++count; -// } -// System.out.println(count); -// } + // private void showSearchersOpen() { + // int count = 0; + // for (IndexSearcher searcher : searchers) { + // if(searcher.getIndexReader().getRefCount() > 0) + // ++count; + // } + // System.out.println(count); + // } - private class SpellCheckWorker implements Runnable { private final IndexReader reader; volatile boolean terminated = false; volatile boolean failed = false; - + SpellCheckWorker(IndexReader reader) { super(); this.reader = reader; } - + @Override public void run() { try { @@ -483,10 +515,10 @@ public class TestSpellChecker extends LuceneTestCase { try { checkCommonSuggestions(reader); } catch (AlreadyClosedException e) { - + return; } catch (Throwable e) { - + e.printStackTrace(); failed = true; return; @@ -496,20 +528,20 @@ public class TestSpellChecker extends LuceneTestCase { terminated = true; } } - } - + class SpellCheckerMock extends SpellChecker { public SpellCheckerMock(Directory spellIndex) throws IOException { super(spellIndex); } - public SpellCheckerMock(Directory spellIndex, StringDistance sd) - throws IOException { + public SpellCheckerMock(Directory spellIndex, StringDistance sd) throws IOException { super(spellIndex, sd); } - public SpellCheckerMock(Directory spellIndex, StringDistance sd, Comparator<SuggestWord> comparator) throws IOException { + public SpellCheckerMock( + Directory spellIndex, StringDistance sd, Comparator<SuggestWord> comparator) + throws IOException { super(spellIndex, sd, comparator); } @@ -520,5 +552,4 @@ return searcher; } } - } diff --git a/lucene/suggest/src/test/org/apache/lucene/search/spell/TestWordBreakSpellChecker.java b/lucene/suggest/src/test/org/apache/lucene/search/spell/TestWordBreakSpellChecker.java index f27543467ea..458aad2c7b9 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/spell/TestWordBreakSpellChecker.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/spell/TestWordBreakSpellChecker.java @@ -19,7 +19,6 @@ package org.apache.lucene.search.spell; import java.util.ArrayList; import java.util.List; import java.util.regex.Pattern; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; @@ -40,7 +39,7 @@ import org.junit.Assert; public class TestWordBreakSpellChecker extends
LuceneTestCase { private Directory dir; private Analyzer analyzer; - + @Override public void setUp() throws Exception { super.setUp(); @@ -54,7 +53,7 @@ public class TestWordBreakSpellChecker extends LuceneTestCase { doc.add(newTextField("numbers", num, Field.Store.NO)); writer.addDocument(doc); } - + { Document doc = new Document(); doc.add(newTextField("numbers", "thou hast sand betwixt thy toes", Field.Store.NO)); @@ -70,100 +69,106 @@ public class TestWordBreakSpellChecker extends LuceneTestCase { doc.add(newTextField("numbers", "tres y cinco", Field.Store.NO)); writer.addDocument(doc); } - + writer.commit(); writer.close(); } - + @Override public void tearDown() throws Exception { IOUtils.close(dir, analyzer); super.tearDown(); - } + } public void testCombiningWords() throws Exception { IndexReader ir = DirectoryReader.open(dir); WordBreakSpellChecker wbsp = new WordBreakSpellChecker(); - - { - Term[] terms = { - new Term("numbers", "one"), - new Term("numbers", "hun"), - new Term("numbers", "dred"), - new Term("numbers", "eight"), - new Term("numbers", "y"), - new Term("numbers", "eight"), + + { + Term[] terms = { + new Term("numbers", "one"), + new Term("numbers", "hun"), + new Term("numbers", "dred"), + new Term("numbers", "eight"), + new Term("numbers", "y"), + new Term("numbers", "eight"), }; wbsp.setMaxChanges(3); wbsp.setMaxCombineWordLength(20); wbsp.setMinSuggestionFrequency(1); - CombineSuggestion[] cs = wbsp.suggestWordCombinations(terms, 10, ir, SuggestMode.SUGGEST_ALWAYS); - Assert.assertTrue(cs.length==5); - - Assert.assertTrue(cs[0].originalTermIndexes.length==2); - Assert.assertTrue(cs[0].originalTermIndexes[0]==1); - Assert.assertTrue(cs[0].originalTermIndexes[1]==2); + CombineSuggestion[] cs = + wbsp.suggestWordCombinations(terms, 10, ir, SuggestMode.SUGGEST_ALWAYS); + Assert.assertTrue(cs.length == 5); + + Assert.assertTrue(cs[0].originalTermIndexes.length == 2); + Assert.assertTrue(cs[0].originalTermIndexes[0] == 1); + Assert.assertTrue(cs[0].originalTermIndexes[1] == 2); Assert.assertTrue(cs[0].suggestion.string.equals("hundred")); - Assert.assertTrue(cs[0].suggestion.score==1); - - Assert.assertTrue(cs[1].originalTermIndexes.length==2); - Assert.assertTrue(cs[1].originalTermIndexes[0]==3); - Assert.assertTrue(cs[1].originalTermIndexes[1]==4); + Assert.assertTrue(cs[0].suggestion.score == 1); + + Assert.assertTrue(cs[1].originalTermIndexes.length == 2); + Assert.assertTrue(cs[1].originalTermIndexes[0] == 3); + Assert.assertTrue(cs[1].originalTermIndexes[1] == 4); Assert.assertTrue(cs[1].suggestion.string.equals("eighty")); - Assert.assertTrue(cs[1].suggestion.score==1); - - Assert.assertTrue(cs[2].originalTermIndexes.length==2); - Assert.assertTrue(cs[2].originalTermIndexes[0]==4); - Assert.assertTrue(cs[2].originalTermIndexes[1]==5); + Assert.assertTrue(cs[1].suggestion.score == 1); + + Assert.assertTrue(cs[2].originalTermIndexes.length == 2); + Assert.assertTrue(cs[2].originalTermIndexes[0] == 4); + Assert.assertTrue(cs[2].originalTermIndexes[1] == 5); Assert.assertTrue(cs[2].suggestion.string.equals("yeight")); - Assert.assertTrue(cs[2].suggestion.score==1); - - for(int i=3 ; i<5 ; i++) { - Assert.assertTrue(cs[i].originalTermIndexes.length==3); - Assert.assertTrue(cs[i].suggestion.score==2); + Assert.assertTrue(cs[2].suggestion.score == 1); + + for (int i = 3; i < 5; i++) { + Assert.assertTrue(cs[i].originalTermIndexes.length == 3); + Assert.assertTrue(cs[i].suggestion.score == 2); Assert.assertTrue( - (cs[i].originalTermIndexes[0]==1 && - 
cs[i].originalTermIndexes[1]==2 && - cs[i].originalTermIndexes[2]==3 && - cs[i].suggestion.string.equals("hundredeight")) || - (cs[i].originalTermIndexes[0]==3 && - cs[i].originalTermIndexes[1]==4 && - cs[i].originalTermIndexes[2]==5 && - cs[i].suggestion.string.equals("eightyeight")) - ); - } - + (cs[i].originalTermIndexes[0] == 1 + && cs[i].originalTermIndexes[1] == 2 + && cs[i].originalTermIndexes[2] == 3 + && cs[i].suggestion.string.equals("hundredeight")) + || (cs[i].originalTermIndexes[0] == 3 + && cs[i].originalTermIndexes[1] == 4 + && cs[i].originalTermIndexes[2] == 5 + && cs[i].suggestion.string.equals("eightyeight"))); + } + cs = wbsp.suggestWordCombinations(terms, 5, ir, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); - Assert.assertTrue(cs.length==2); - Assert.assertTrue(cs[0].originalTermIndexes.length==2); - Assert.assertTrue(cs[0].suggestion.score==1); - Assert.assertTrue(cs[0].originalTermIndexes[0]==1); - Assert.assertTrue(cs[0].originalTermIndexes[1]==2); + Assert.assertTrue(cs.length == 2); + Assert.assertTrue(cs[0].originalTermIndexes.length == 2); + Assert.assertTrue(cs[0].suggestion.score == 1); + Assert.assertTrue(cs[0].originalTermIndexes[0] == 1); + Assert.assertTrue(cs[0].originalTermIndexes[1] == 2); Assert.assertTrue(cs[0].suggestion.string.equals("hundred")); - Assert.assertTrue(cs[0].suggestion.score==1); - - Assert.assertTrue(cs[1].originalTermIndexes.length==3); - Assert.assertTrue(cs[1].suggestion.score==2); + Assert.assertTrue(cs[0].suggestion.score == 1); + + Assert.assertTrue(cs[1].originalTermIndexes.length == 3); + Assert.assertTrue(cs[1].suggestion.score == 2); Assert.assertTrue(cs[1].originalTermIndexes[0] == 1); Assert.assertTrue(cs[1].originalTermIndexes[1] == 2); Assert.assertTrue(cs[1].originalTermIndexes[2] == 3); Assert.assertTrue(cs[1].suggestion.string.equals("hundredeight")); } ir.close(); - } - + } + public void testBreakingWords() throws Exception { IndexReader ir = DirectoryReader.open(dir); WordBreakSpellChecker wbsp = new WordBreakSpellChecker(); - + { Term term = new Term("numbers", "ninetynine"); wbsp.setMaxChanges(1); wbsp.setMinBreakWordLength(1); wbsp.setMinSuggestionFrequency(1); - SuggestWord[][] sw = wbsp.suggestWordBreaks(term, 5, ir, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX, BreakSuggestionSortMethod.NUM_CHANGES_THEN_MAX_FREQUENCY); - Assert.assertTrue(sw.length==1); - Assert.assertTrue(sw[0].length==2); + SuggestWord[][] sw = + wbsp.suggestWordBreaks( + term, + 5, + ir, + SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX, + BreakSuggestionSortMethod.NUM_CHANGES_THEN_MAX_FREQUENCY); + Assert.assertTrue(sw.length == 1); + Assert.assertTrue(sw[0].length == 2); Assert.assertTrue(sw[0][0].string.equals("ninety")); Assert.assertTrue(sw[0][1].string.equals("nine")); Assert.assertTrue(sw[0][0].score == 1); @@ -174,88 +179,135 @@ public class TestWordBreakSpellChecker extends LuceneTestCase { wbsp.setMaxChanges(1); wbsp.setMinBreakWordLength(1); wbsp.setMinSuggestionFrequency(1); - SuggestWord[][] sw = wbsp.suggestWordBreaks(term, 2, ir, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX, BreakSuggestionSortMethod.NUM_CHANGES_THEN_MAX_FREQUENCY); - Assert.assertTrue(sw.length==1); - Assert.assertTrue(sw[0].length==2); + SuggestWord[][] sw = + wbsp.suggestWordBreaks( + term, + 2, + ir, + SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX, + BreakSuggestionSortMethod.NUM_CHANGES_THEN_MAX_FREQUENCY); + Assert.assertTrue(sw.length == 1); + Assert.assertTrue(sw[0].length == 2); Assert.assertTrue(sw[0][0].string.equals("one")); Assert.assertTrue(sw[0][1].string.equals("thousand")); 
Assert.assertTrue(sw[0][0].score == 1); Assert.assertTrue(sw[0][1].score == 1); - + wbsp.setMaxChanges(2); wbsp.setMinSuggestionFrequency(1); - sw = wbsp.suggestWordBreaks(term, 1, ir, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX, BreakSuggestionSortMethod.NUM_CHANGES_THEN_MAX_FREQUENCY); - Assert.assertTrue(sw.length==1); - Assert.assertTrue(sw[0].length==2); - + sw = + wbsp.suggestWordBreaks( + term, + 1, + ir, + SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX, + BreakSuggestionSortMethod.NUM_CHANGES_THEN_MAX_FREQUENCY); + Assert.assertTrue(sw.length == 1); + Assert.assertTrue(sw[0].length == 2); + wbsp.setMaxChanges(2); wbsp.setMinSuggestionFrequency(2); - sw = wbsp.suggestWordBreaks(term, 2, ir, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX, BreakSuggestionSortMethod.NUM_CHANGES_THEN_MAX_FREQUENCY); - Assert.assertTrue(sw.length==1); - Assert.assertTrue(sw[0].length==2); - + sw = + wbsp.suggestWordBreaks( + term, + 2, + ir, + SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX, + BreakSuggestionSortMethod.NUM_CHANGES_THEN_MAX_FREQUENCY); + Assert.assertTrue(sw.length == 1); + Assert.assertTrue(sw[0].length == 2); + wbsp.setMaxChanges(2); wbsp.setMinSuggestionFrequency(1); - sw = wbsp.suggestWordBreaks(term, 2, ir, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX, BreakSuggestionSortMethod.NUM_CHANGES_THEN_MAX_FREQUENCY); - Assert.assertTrue(sw.length==2); - Assert.assertTrue(sw[0].length==2); + sw = + wbsp.suggestWordBreaks( + term, + 2, + ir, + SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX, + BreakSuggestionSortMethod.NUM_CHANGES_THEN_MAX_FREQUENCY); + Assert.assertTrue(sw.length == 2); + Assert.assertTrue(sw[0].length == 2); Assert.assertTrue(sw[0][0].string.equals("one")); Assert.assertTrue(sw[0][1].string.equals("thousand")); Assert.assertTrue(sw[0][0].score == 1); Assert.assertTrue(sw[0][1].score == 1); - Assert.assertTrue(sw[0][1].freq>1); - Assert.assertTrue(sw[0][0].freq>sw[0][1].freq); - Assert.assertTrue(sw[1].length==3); + Assert.assertTrue(sw[0][1].freq > 1); + Assert.assertTrue(sw[0][0].freq > sw[0][1].freq); + Assert.assertTrue(sw[1].length == 3); Assert.assertTrue(sw[1][0].string.equals("one")); Assert.assertTrue(sw[1][1].string.equals("thou")); Assert.assertTrue(sw[1][2].string.equals("sand")); Assert.assertTrue(sw[1][0].score == 2); Assert.assertTrue(sw[1][1].score == 2); Assert.assertTrue(sw[1][2].score == 2); - Assert.assertTrue(sw[1][0].freq>1); - Assert.assertTrue(sw[1][1].freq==1); - Assert.assertTrue(sw[1][2].freq==1); + Assert.assertTrue(sw[1][0].freq > 1); + Assert.assertTrue(sw[1][1].freq == 1); + Assert.assertTrue(sw[1][2].freq == 1); } { Term term = new Term("numbers", "onethousandonehundredeleven"); wbsp.setMaxChanges(3); wbsp.setMinBreakWordLength(1); wbsp.setMinSuggestionFrequency(1); - SuggestWord[][] sw = wbsp.suggestWordBreaks(term, 5, ir, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX, BreakSuggestionSortMethod.NUM_CHANGES_THEN_MAX_FREQUENCY); - Assert.assertTrue(sw.length==0); - + SuggestWord[][] sw = + wbsp.suggestWordBreaks( + term, + 5, + ir, + SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX, + BreakSuggestionSortMethod.NUM_CHANGES_THEN_MAX_FREQUENCY); + Assert.assertTrue(sw.length == 0); + wbsp.setMaxChanges(4); - sw = wbsp.suggestWordBreaks(term, 5, ir, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX, BreakSuggestionSortMethod.NUM_CHANGES_THEN_MAX_FREQUENCY); - Assert.assertTrue(sw.length==1); - Assert.assertTrue(sw[0].length==5); - + sw = + wbsp.suggestWordBreaks( + term, + 5, + ir, + SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX, + BreakSuggestionSortMethod.NUM_CHANGES_THEN_MAX_FREQUENCY); + Assert.assertTrue(sw.length == 1); + 
Assert.assertTrue(sw[0].length == 5); + wbsp.setMaxChanges(5); - sw = wbsp.suggestWordBreaks(term, 5, ir, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX, BreakSuggestionSortMethod.NUM_CHANGES_THEN_MAX_FREQUENCY); - Assert.assertTrue(sw.length==2); - Assert.assertTrue(sw[0].length==5); + sw = + wbsp.suggestWordBreaks( + term, + 5, + ir, + SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX, + BreakSuggestionSortMethod.NUM_CHANGES_THEN_MAX_FREQUENCY); + Assert.assertTrue(sw.length == 2); + Assert.assertTrue(sw[0].length == 5); Assert.assertTrue(sw[0][1].string.equals("thousand")); - Assert.assertTrue(sw[1].length==6); + Assert.assertTrue(sw[1].length == 6); Assert.assertTrue(sw[1][1].string.equals("thou")); Assert.assertTrue(sw[1][2].string.equals("sand")); } { - //make sure we can handle 2-char codepoints + // make sure we can handle 2-char codepoints Term term = new Term("numbers", "\uD864\uDC79"); wbsp.setMaxChanges(1); wbsp.setMinBreakWordLength(1); wbsp.setMinSuggestionFrequency(1); - SuggestWord[][] sw = wbsp.suggestWordBreaks(term, 5, ir, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX, BreakSuggestionSortMethod.NUM_CHANGES_THEN_MAX_FREQUENCY); - Assert.assertTrue(sw.length==0); + SuggestWord[][] sw = + wbsp.suggestWordBreaks( + term, + 5, + ir, + SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX, + BreakSuggestionSortMethod.NUM_CHANGES_THEN_MAX_FREQUENCY); + Assert.assertTrue(sw.length == 0); } - + ir.close(); } public void testRandom() throws Exception { - int numDocs = TestUtil.nextInt(random(), (10 * RANDOM_MULTIPLIER), - (100 * RANDOM_MULTIPLIER)); + int numDocs = TestUtil.nextInt(random(), (10 * RANDOM_MULTIPLIER), (100 * RANDOM_MULTIPLIER)); IndexReader ir = null; - + Directory dir = newDirectory(); Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); RandomIndexWriter writer = new RandomIndexWriter(random(), dir, analyzer); @@ -275,21 +327,19 @@ public class TestWordBreakSpellChecker extends LuceneTestCase { } originals.add(orig); int totalLength = orig.codePointCount(0, orig.length()); - int breakAt = orig.offsetByCodePoints(0, - TestUtil.nextInt(random(), 1, totalLength - 1)); + int breakAt = orig.offsetByCodePoints(0, TestUtil.nextInt(random(), 1, totalLength - 1)); String[] broken = new String[2]; broken[0] = orig.substring(0, breakAt); broken[1] = orig.substring(breakAt); breaks.add(broken); Document doc = new Document(); - doc.add(newTextField("random_break", broken[0] + " " + broken[1], - Field.Store.NO)); + doc.add(newTextField("random_break", broken[0] + " " + broken[1], Field.Store.NO)); doc.add(newTextField("random_combine", orig, Field.Store.NO)); writer.addDocument(doc); } writer.commit(); writer.close(); - + ir = DirectoryReader.open(dir); WordBreakSpellChecker wbsp = new WordBreakSpellChecker(); wbsp.setMaxChanges(1); @@ -302,10 +352,14 @@ public class TestWordBreakSpellChecker extends LuceneTestCase { String right = breaks.get(i)[1]; { Term term = new Term("random_break", orig); - - SuggestWord[][] sw = wbsp.suggestWordBreaks(term, originals.size(), - ir, SuggestMode.SUGGEST_ALWAYS, - BreakSuggestionSortMethod.NUM_CHANGES_THEN_MAX_FREQUENCY); + + SuggestWord[][] sw = + wbsp.suggestWordBreaks( + term, + originals.size(), + ir, + SuggestMode.SUGGEST_ALWAYS, + BreakSuggestionSortMethod.NUM_CHANGES_THEN_MAX_FREQUENCY); boolean failed = true; for (SuggestWord[] sw1 : sw) { Assert.assertTrue(sw1.length == 2); @@ -313,14 +367,19 @@ public class TestWordBreakSpellChecker extends LuceneTestCase { failed = false; } } - Assert.assertFalse("Failed getting break suggestions\n >Original: " - 
+ orig + "\n >Left: " + left + "\n >Right: " + right, failed); + Assert.assertFalse( + "Failed getting break suggestions\n >Original: " + + orig + + "\n >Left: " + + left + + "\n >Right: " + + right, + failed); } { - Term[] terms = {new Term("random_combine", left), - new Term("random_combine", right)}; - CombineSuggestion[] cs = wbsp.suggestWordCombinations(terms, - originals.size(), ir, SuggestMode.SUGGEST_ALWAYS); + Term[] terms = {new Term("random_combine", left), new Term("random_combine", right)}; + CombineSuggestion[] cs = + wbsp.suggestWordCombinations(terms, originals.size(), ir, SuggestMode.SUGGEST_ALWAYS); boolean failed = true; for (CombineSuggestion cs1 : cs) { Assert.assertTrue(cs1.originalTermIndexes.length == 2); @@ -328,21 +387,25 @@ public class TestWordBreakSpellChecker extends LuceneTestCase { failed = false; } } - Assert.assertFalse("Failed getting combine suggestions\n >Original: " - + orig + "\n >Left: " + left + "\n >Right: " + right, failed); + Assert.assertFalse( + "Failed getting combine suggestions\n >Original: " + + orig + + "\n >Left: " + + left + + "\n >Right: " + + right, + failed); } } IOUtils.close(ir, dir, analyzer); } - - private static final Pattern mockTokenizerWhitespacePattern = Pattern - .compile("[ \\t\\r\\n]"); - + + private static final Pattern mockTokenizerWhitespacePattern = Pattern.compile("[ \\t\\r\\n]"); + private boolean goodTestString(String s) { - if (s.codePointCount(0, s.length()) < 2 - || mockTokenizerWhitespacePattern.matcher(s).find()) { + if (s.codePointCount(0, s.length()) < 2 || mockTokenizerWhitespacePattern.matcher(s).find()) { return false; } return true; } - } +} diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/Average.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/Average.java index 6a79d90ce1e..2c2c5334322 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/Average.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/Average.java @@ -16,55 +16,39 @@ */ package org.apache.lucene.search.suggest; - import java.util.List; import java.util.Locale; -/** - * Average with standard deviation. - */ -final class Average -{ - /** - * Average (in milliseconds). - */ - public final double avg; +/** Average with standard deviation. */ +final class Average { + /** Average (in milliseconds). */ + public final double avg; - /** - * Standard deviation (in milliseconds). - */ - public final double stddev; + /** Standard deviation (in milliseconds). 
*/ + public final double stddev; - /** - * - */ - Average(double avg, double stddev) - { - this.avg = avg; - this.stddev = stddev; + /** Creates an Average with the given mean and standard deviation. */ + Average(double avg, double stddev) { + this.avg = avg; + this.stddev = stddev; + } + + @Override + public String toString() { + return String.format(Locale.ROOT, "%.0f [+- %.2f]", avg, stddev); + } + + static Average from(List<Double> values) { + double sum = 0; + double sumSquares = 0; + + for (double l : values) { + sum += l; + sumSquares += l * l; } - @Override - public String toString() - { - return String.format(Locale.ROOT, "%.0f [+- %.2f]", - avg, stddev); - } - - static Average from(List<Double> values) - { - double sum = 0; - double sumSquares = 0; - - for (double l : values) - { - sum += l; - sumSquares += l * l; - } - - double avg = sum / (double) values.size(); - return new Average( - (sum / (double) values.size()), - Math.sqrt(sumSquares / (double) values.size() - avg * avg)); - } -} \ No newline at end of file + double avg = sum / (double) values.size(); + return new Average( + (sum / (double) values.size()), Math.sqrt(sumSquares / (double) values.size() - avg * avg)); + } +} diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentDictionaryTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentDictionaryTest.java index 01028a287da..715278a6ff5 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentDictionaryTest.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentDictionaryTest.java @@ -26,7 +26,6 @@ import java.util.List; import java.util.Map; import java.util.Random; import java.util.Set; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; @@ -47,14 +46,15 @@ import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; import org.junit.Test; -// See: https://issues.apache.org/jira/browse/SOLR-12028 Tests cannot remove files on Windows machines occasionally +// See: https://issues.apache.org/jira/browse/SOLR-12028 Tests cannot remove files on Windows +// machines occasionally public class DocumentDictionaryTest extends LuceneTestCase { - + static final String FIELD_NAME = "f1"; static final String WEIGHT_FIELD_NAME = "w1"; static final String PAYLOAD_FIELD_NAME = "p1"; static final String CONTEXT_FIELD_NAME = "c1"; - + @Test public void testEmptyReader() throws IOException { Directory dir = newDirectory(); @@ -66,16 +66,17 @@ public class DocumentDictionaryTest extends LuceneTestCase { writer.commit(); writer.close(); IndexReader ir = DirectoryReader.open(dir); - Dictionary dictionary = new DocumentDictionary(ir, FIELD_NAME, WEIGHT_FIELD_NAME, PAYLOAD_FIELD_NAME); + Dictionary dictionary = + new DocumentDictionary(ir, FIELD_NAME, WEIGHT_FIELD_NAME, PAYLOAD_FIELD_NAME); InputIterator inputIterator = dictionary.getEntryIterator(); assertNull(inputIterator.next()); assertEquals(inputIterator.weight(), 0); assertNull(inputIterator.payload()); - + IOUtils.close(ir, analyzer, dir); } - + @Test public void testBasic() throws IOException { Directory dir = newDirectory(); @@ -83,33 +84,37 @@ public class DocumentDictionaryTest extends LuceneTestCase { IndexWriterConfig iwc = newIndexWriterConfig(analyzer); iwc.setMergePolicy(newLogMergePolicy()); RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc); - Map.Entry<List<String>, Map<String, Document>> res = generateIndexDocuments(atLeast(1000), false); + Map.Entry<List<String>, Map<String, Document>> res = + generateIndexDocuments(atLeast(1000), false); Map<String, Document>
docs = res.getValue(); List invalidDocTerms = res.getKey(); - for(Document doc: docs.values()) { + for (Document doc : docs.values()) { writer.addDocument(doc); } writer.commit(); writer.close(); IndexReader ir = DirectoryReader.open(dir); - Dictionary dictionary = new DocumentDictionary(ir, FIELD_NAME, WEIGHT_FIELD_NAME, PAYLOAD_FIELD_NAME); + Dictionary dictionary = + new DocumentDictionary(ir, FIELD_NAME, WEIGHT_FIELD_NAME, PAYLOAD_FIELD_NAME); InputIterator inputIterator = dictionary.getEntryIterator(); BytesRef f; - while((f = inputIterator.next())!=null) { + while ((f = inputIterator.next()) != null) { Document doc = docs.remove(f.utf8ToString()); assertTrue(f.equals(new BytesRef(doc.get(FIELD_NAME)))); IndexableField weightField = doc.getField(WEIGHT_FIELD_NAME); - assertEquals(inputIterator.weight(), (weightField != null) ? weightField.numericValue().longValue() : 0); + assertEquals( + inputIterator.weight(), + (weightField != null) ? weightField.numericValue().longValue() : 0); IndexableField payloadField = doc.getField(PAYLOAD_FIELD_NAME); if (payloadField == null) assertTrue(inputIterator.payload().length == 0); else assertEquals(inputIterator.payload(), payloadField.binaryValue()); } - + for (String invalidTerm : invalidDocTerms) { assertNotNull(docs.remove(invalidTerm)); } assertTrue(docs.isEmpty()); - + IOUtils.close(ir, analyzer, dir); } @@ -133,7 +138,8 @@ public class DocumentDictionaryTest extends LuceneTestCase { writer.close(); IndexReader ir = DirectoryReader.open(dir); - // Even though the payload field is missing, the dictionary iterator should not skip the document + // Even though the payload field is missing, the dictionary iterator should not skip the + // document // because the payload field is optional. Dictionary dictionaryOptionalPayload = new DocumentDictionary(ir, FIELD_NAME, WEIGHT_FIELD_NAME, PAYLOAD_FIELD_NAME); @@ -147,7 +153,7 @@ public class DocumentDictionaryTest extends LuceneTestCase { assertTrue(inputIterator.payload().length == 0); IOUtils.close(ir, analyzer, dir); } - + @Test public void testWithoutPayload() throws IOException { Directory dir = newDirectory(); @@ -155,10 +161,11 @@ public class DocumentDictionaryTest extends LuceneTestCase { IndexWriterConfig iwc = newIndexWriterConfig(analyzer); iwc.setMergePolicy(newLogMergePolicy()); RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc); - Map.Entry, Map> res = generateIndexDocuments(atLeast(1000), false); + Map.Entry, Map> res = + generateIndexDocuments(atLeast(1000), false); Map docs = res.getValue(); List invalidDocTerms = res.getKey(); - for(Document doc: docs.values()) { + for (Document doc : docs.values()) { writer.addDocument(doc); } writer.commit(); @@ -167,23 +174,25 @@ public class DocumentDictionaryTest extends LuceneTestCase { Dictionary dictionary = new DocumentDictionary(ir, FIELD_NAME, WEIGHT_FIELD_NAME); InputIterator inputIterator = dictionary.getEntryIterator(); BytesRef f; - while((f = inputIterator.next())!=null) { + while ((f = inputIterator.next()) != null) { Document doc = docs.remove(f.utf8ToString()); assertTrue(f.equals(new BytesRef(doc.get(FIELD_NAME)))); IndexableField weightField = doc.getField(WEIGHT_FIELD_NAME); - assertEquals(inputIterator.weight(), (weightField != null) ? weightField.numericValue().longValue() : 0); + assertEquals( + inputIterator.weight(), + (weightField != null) ? 
weightField.numericValue().longValue() : 0); assertNull(inputIterator.payload()); } - + for (String invalidTerm : invalidDocTerms) { assertNotNull(docs.remove(invalidTerm)); } - + assertTrue(docs.isEmpty()); - + IOUtils.close(ir, analyzer, dir); } - + @Test public void testWithContexts() throws IOException { Directory dir = newDirectory(); @@ -191,23 +200,28 @@ public class DocumentDictionaryTest extends LuceneTestCase { IndexWriterConfig iwc = newIndexWriterConfig(analyzer); iwc.setMergePolicy(newLogMergePolicy()); RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc); - Map.Entry, Map> res = generateIndexDocuments(atLeast(1000), true); + Map.Entry, Map> res = + generateIndexDocuments(atLeast(1000), true); Map docs = res.getValue(); List invalidDocTerms = res.getKey(); - for(Document doc: docs.values()) { + for (Document doc : docs.values()) { writer.addDocument(doc); } writer.commit(); writer.close(); IndexReader ir = DirectoryReader.open(dir); - Dictionary dictionary = new DocumentDictionary(ir, FIELD_NAME, WEIGHT_FIELD_NAME, PAYLOAD_FIELD_NAME, CONTEXT_FIELD_NAME); + Dictionary dictionary = + new DocumentDictionary( + ir, FIELD_NAME, WEIGHT_FIELD_NAME, PAYLOAD_FIELD_NAME, CONTEXT_FIELD_NAME); InputIterator inputIterator = dictionary.getEntryIterator(); BytesRef f; - while((f = inputIterator.next())!=null) { + while ((f = inputIterator.next()) != null) { Document doc = docs.remove(f.utf8ToString()); assertTrue(f.equals(new BytesRef(doc.get(FIELD_NAME)))); IndexableField weightField = doc.getField(WEIGHT_FIELD_NAME); - assertEquals(inputIterator.weight(), (weightField != null) ? weightField.numericValue().longValue() : 0); + assertEquals( + inputIterator.weight(), + (weightField != null) ? weightField.numericValue().longValue() : 0); IndexableField payloadField = doc.getField(PAYLOAD_FIELD_NAME); if (payloadField == null) assertTrue(inputIterator.payload().length == 0); else assertEquals(inputIterator.payload(), payloadField.binaryValue()); @@ -218,15 +232,15 @@ public class DocumentDictionaryTest extends LuceneTestCase { } assertEquals(oriCtxs.size(), contextSet.size()); } - + for (String invalidTerm : invalidDocTerms) { assertNotNull(docs.remove(invalidTerm)); } assertTrue(docs.isEmpty()); - + IOUtils.close(ir, analyzer, dir); } - + @Test public void testWithDeletions() throws IOException { Directory dir = newDirectory(); @@ -234,53 +248,56 @@ public class DocumentDictionaryTest extends LuceneTestCase { IndexWriterConfig iwc = newIndexWriterConfig(analyzer); iwc.setMergePolicy(newLogMergePolicy()); RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc); - Map.Entry, Map> res = generateIndexDocuments(atLeast(1000), false); + Map.Entry, Map> res = + generateIndexDocuments(atLeast(1000), false); Map docs = res.getValue(); List invalidDocTerms = res.getKey(); Random rand = random(); List termsToDel = new ArrayList<>(); - for(Document doc : docs.values()) { + for (Document doc : docs.values()) { IndexableField f = doc.getField(FIELD_NAME); - if(rand.nextBoolean() && f != null && !invalidDocTerms.contains(f.stringValue())) { + if (rand.nextBoolean() && f != null && !invalidDocTerms.contains(f.stringValue())) { termsToDel.add(doc.get(FIELD_NAME)); } writer.addDocument(doc); } writer.commit(); - + Term[] delTerms = new Term[termsToDel.size()]; - for(int i=0; i < termsToDel.size() ; i++) { + for (int i = 0; i < termsToDel.size(); i++) { delTerms[i] = new Term(FIELD_NAME, termsToDel.get(i)); } - - for(Term delTerm: delTerms) { - writer.deleteDocuments(delTerm); + + 
for (Term delTerm : delTerms) { + writer.deleteDocuments(delTerm); } writer.commit(); writer.close(); - - for(String termToDel: termsToDel) { - assertTrue(null!=docs.remove(termToDel)); + + for (String termToDel : termsToDel) { + assertTrue(null != docs.remove(termToDel)); } - + IndexReader ir = DirectoryReader.open(dir); assertEquals(ir.numDocs(), docs.size()); Dictionary dictionary = new DocumentDictionary(ir, FIELD_NAME, WEIGHT_FIELD_NAME); InputIterator inputIterator = dictionary.getEntryIterator(); BytesRef f; - while((f = inputIterator.next())!=null) { + while ((f = inputIterator.next()) != null) { Document doc = docs.remove(f.utf8ToString()); assertTrue(f.equals(new BytesRef(doc.get(FIELD_NAME)))); IndexableField weightField = doc.getField(WEIGHT_FIELD_NAME); - assertEquals(inputIterator.weight(), (weightField != null) ? weightField.numericValue().longValue() : 0); + assertEquals( + inputIterator.weight(), + (weightField != null) ? weightField.numericValue().longValue() : 0); assertNull(inputIterator.payload()); } - + for (String invalidTerm : invalidDocTerms) { assertNotNull(docs.remove(invalidTerm)); } assertTrue(docs.isEmpty()); - + IOUtils.close(ir, analyzer, dir); } @@ -297,11 +314,13 @@ public class DocumentDictionaryTest extends LuceneTestCase { writer.close(); IndexReader ir = DirectoryReader.open(dir); - Dictionary dictionary = new DocumentDictionary(ir, FIELD_NAME, WEIGHT_FIELD_NAME, PAYLOAD_FIELD_NAME, CONTEXT_FIELD_NAME); + Dictionary dictionary = + new DocumentDictionary( + ir, FIELD_NAME, WEIGHT_FIELD_NAME, PAYLOAD_FIELD_NAME, CONTEXT_FIELD_NAME); InputIterator inputIterator = dictionary.getEntryIterator(); BytesRef f; Iterator suggestionsIter = suggestions.iterator(); - while((f = inputIterator.next())!=null) { + while ((f = inputIterator.next()) != null) { Suggestion nextSuggestion = suggestionsIter.next(); assertTrue(f.equals(nextSuggestion.term)); long weight = nextSuggestion.weight; @@ -314,10 +333,11 @@ public class DocumentDictionaryTest extends LuceneTestCase { } /** Returns Pair(list of invalid document terms, Map of document term -> document) */ - private Map.Entry, Map> generateIndexDocuments(int ndocs, boolean requiresContexts) { + private Map.Entry, Map> generateIndexDocuments( + int ndocs, boolean requiresContexts) { Map docs = new HashMap<>(); List invalidDocTerms = new ArrayList<>(); - for(int i = 0; i < ndocs ; i++) { + for (int i = 0; i < ndocs; i++) { Document doc = new Document(); boolean invalidDoc = false; Field field = null; @@ -338,7 +358,7 @@ public class DocumentDictionaryTest extends LuceneTestCase { if (requiresContexts || usually()) { if (usually()) { for (int j = 0; j < atLeast(2); j++) { - doc.add(new StoredField(CONTEXT_FIELD_NAME, new BytesRef("context_" + i + "_"+ j))); + doc.add(new StoredField(CONTEXT_FIELD_NAME, new BytesRef("context_" + i + "_" + j))); } } // we should allow entries without context @@ -346,15 +366,16 @@ public class DocumentDictionaryTest extends LuceneTestCase { // usually have valid weight field in document if (usually()) { - Field weight = (rarely()) ? - new StoredField(WEIGHT_FIELD_NAME, 100d + i) : - new NumericDocValuesField(WEIGHT_FIELD_NAME, 100 + i); + Field weight = + (rarely()) + ? new StoredField(WEIGHT_FIELD_NAME, 100d + i) + : new NumericDocValuesField(WEIGHT_FIELD_NAME, 100 + i); doc.add(weight); } String term = null; if (invalidDoc) { - term = (field!=null) ? field.stringValue() : "invalid_" + i; + term = (field != null) ? 
field.stringValue() : "invalid_" + i; invalidDocTerms.add(term); } else { term = field.stringValue(); @@ -365,15 +386,16 @@ public class DocumentDictionaryTest extends LuceneTestCase { return new SimpleEntry<>(invalidDocTerms, docs); } - private List indexMultiValuedDocuments(int numDocs, RandomIndexWriter writer) throws IOException { + private List indexMultiValuedDocuments(int numDocs, RandomIndexWriter writer) + throws IOException { List suggestionList = new ArrayList<>(numDocs); - for(int i=0; i contextValues = new HashSet<>(); - long numericValue = -1; //-1 for missing weight + long numericValue = -1; // -1 for missing weight BytesRef term; payloadValue = new BytesRef("payload_" + i); @@ -387,7 +409,7 @@ public class DocumentDictionaryTest extends LuceneTestCase { } int numContexts = atLeast(1); - for (int j=0; j contexts; private BytesRef term; } - - } diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentValueSourceDictionaryTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentValueSourceDictionaryTest.java index 3a2d87782a1..cb1c9920813 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentValueSourceDictionaryTest.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentValueSourceDictionaryTest.java @@ -24,7 +24,6 @@ import java.util.List; import java.util.Map; import java.util.Random; import java.util.Set; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; @@ -51,14 +50,14 @@ import org.apache.lucene.util.LuceneTestCase; import org.junit.Test; public class DocumentValueSourceDictionaryTest extends LuceneTestCase { - + static final String FIELD_NAME = "f1"; static final String WEIGHT_FIELD_NAME_1 = "w1"; static final String WEIGHT_FIELD_NAME_2 = "w2"; static final String WEIGHT_FIELD_NAME_3 = "w3"; static final String PAYLOAD_FIELD_NAME = "p1"; static final String CONTEXTS_FIELD_NAME = "c1"; - + @Test public void testValueSourceEmptyReader() throws IOException { Directory dir = newDirectory(); @@ -70,7 +69,9 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase { writer.commit(); writer.close(); IndexReader ir = DirectoryReader.open(dir); - Dictionary dictionary = new DocumentValueSourceDictionary(ir, FIELD_NAME, LongValuesSource.constant(10), PAYLOAD_FIELD_NAME); + Dictionary dictionary = + new DocumentValueSourceDictionary( + ir, FIELD_NAME, LongValuesSource.constant(10), PAYLOAD_FIELD_NAME); InputIterator inputIterator = dictionary.getEntryIterator(); assertNull(inputIterator.next()); @@ -91,7 +92,9 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase { writer.commit(); writer.close(); IndexReader ir = DirectoryReader.open(dir); - Dictionary dictionary = new DocumentValueSourceDictionary(ir, FIELD_NAME, LongValuesSource.constant(10), PAYLOAD_FIELD_NAME); + Dictionary dictionary = + new DocumentValueSourceDictionary( + ir, FIELD_NAME, LongValuesSource.constant(10), PAYLOAD_FIELD_NAME); InputIterator inputIterator = dictionary.getEntryIterator(); assertNull(inputIterator.next()); @@ -100,7 +103,7 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase { IOUtils.close(ir, analyzer, dir); } - + @Test public void testValueSourceBasic() throws IOException { Directory dir = newDirectory(); @@ -109,7 +112,7 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase { iwc.setMergePolicy(newLogMergePolicy()); RandomIndexWriter writer = new 
RandomIndexWriter(random(), dir, iwc); Map docs = generateIndexDocuments(atLeast(100)); - for(Document doc: docs.values()) { + for (Document doc : docs.values()) { writer.addDocument(doc); } writer.commit(); @@ -117,10 +120,11 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase { IndexReader ir = DirectoryReader.open(dir); LongValuesSource s = sum(WEIGHT_FIELD_NAME_1, WEIGHT_FIELD_NAME_2, WEIGHT_FIELD_NAME_3); - Dictionary dictionary = new DocumentValueSourceDictionary(ir, FIELD_NAME, s, PAYLOAD_FIELD_NAME); + Dictionary dictionary = + new DocumentValueSourceDictionary(ir, FIELD_NAME, s, PAYLOAD_FIELD_NAME); InputIterator inputIterator = dictionary.getEntryIterator(); BytesRef f; - while((f = inputIterator.next())!=null) { + while ((f = inputIterator.next()) != null) { Document doc = docs.remove(f.utf8ToString()); long w1 = doc.getField(WEIGHT_FIELD_NAME_1).numericValue().longValue(); long w2 = doc.getField(WEIGHT_FIELD_NAME_2).numericValue().longValue(); @@ -208,18 +212,20 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase { iwc.setMergePolicy(newLogMergePolicy()); RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc); Map docs = generateIndexDocuments(atLeast(100)); - for(Document doc: docs.values()) { + for (Document doc : docs.values()) { writer.addDocument(doc); } writer.commit(); writer.close(); IndexReader ir = DirectoryReader.open(dir); - LongValuesSource sumValueSource = sum(WEIGHT_FIELD_NAME_1, WEIGHT_FIELD_NAME_2, WEIGHT_FIELD_NAME_3); - Dictionary dictionary = new DocumentValueSourceDictionary(ir, FIELD_NAME, sumValueSource, PAYLOAD_FIELD_NAME); + LongValuesSource sumValueSource = + sum(WEIGHT_FIELD_NAME_1, WEIGHT_FIELD_NAME_2, WEIGHT_FIELD_NAME_3); + Dictionary dictionary = + new DocumentValueSourceDictionary(ir, FIELD_NAME, sumValueSource, PAYLOAD_FIELD_NAME); InputIterator inputIterator = dictionary.getEntryIterator(); BytesRef f; - while((f = inputIterator.next())!=null) { + while ((f = inputIterator.next()) != null) { Document doc = docs.remove(f.utf8ToString()); long w1 = doc.getField(WEIGHT_FIELD_NAME_1).numericValue().longValue(); long w2 = doc.getField(WEIGHT_FIELD_NAME_2).numericValue().longValue(); @@ -233,7 +239,7 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase { assertTrue(docs.isEmpty()); IOUtils.close(ir, analyzer, dir); } - + @Test public void testValueSourceWithContext() throws IOException { Directory dir = newDirectory(); @@ -242,7 +248,7 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase { iwc.setMergePolicy(newLogMergePolicy()); RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc); Map docs = generateIndexDocuments(atLeast(100)); - for(Document doc: docs.values()) { + for (Document doc : docs.values()) { writer.addDocument(doc); } writer.commit(); @@ -250,10 +256,12 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase { IndexReader ir = DirectoryReader.open(dir); LongValuesSource s = sum(WEIGHT_FIELD_NAME_1, WEIGHT_FIELD_NAME_2, WEIGHT_FIELD_NAME_3); - Dictionary dictionary = new DocumentValueSourceDictionary(ir, FIELD_NAME, s, PAYLOAD_FIELD_NAME, CONTEXTS_FIELD_NAME); + Dictionary dictionary = + new DocumentValueSourceDictionary( + ir, FIELD_NAME, s, PAYLOAD_FIELD_NAME, CONTEXTS_FIELD_NAME); InputIterator inputIterator = dictionary.getEntryIterator(); BytesRef f; - while((f = inputIterator.next())!=null) { + while ((f = inputIterator.next()) != null) { Document doc = docs.remove(f.utf8ToString()); long w1 = 
doc.getField(WEIGHT_FIELD_NAME_1).numericValue().longValue(); long w2 = doc.getField(WEIGHT_FIELD_NAME_2).numericValue().longValue(); @@ -264,7 +272,7 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase { if (payloadField == null) assertTrue(inputIterator.payload().length == 0); else assertEquals(inputIterator.payload(), payloadField.binaryValue()); Set originalCtxs = new HashSet<>(); - for (IndexableField ctxf: doc.getFields(CONTEXTS_FIELD_NAME)) { + for (IndexableField ctxf : doc.getFields(CONTEXTS_FIELD_NAME)) { originalCtxs.add(ctxf.binaryValue()); } assertEquals(originalCtxs, inputIterator.contexts()); @@ -281,7 +289,7 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase { iwc.setMergePolicy(newLogMergePolicy()); RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc); Map docs = generateIndexDocuments(atLeast(100)); - for(Document doc: docs.values()) { + for (Document doc : docs.values()) { writer.addDocument(doc); } writer.commit(); @@ -289,10 +297,12 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase { IndexReader ir = DirectoryReader.open(dir); LongValuesSource sumValues = sum(WEIGHT_FIELD_NAME_1, WEIGHT_FIELD_NAME_2, WEIGHT_FIELD_NAME_3); - Dictionary dictionary = new DocumentValueSourceDictionary(ir, FIELD_NAME, sumValues, PAYLOAD_FIELD_NAME, CONTEXTS_FIELD_NAME); + Dictionary dictionary = + new DocumentValueSourceDictionary( + ir, FIELD_NAME, sumValues, PAYLOAD_FIELD_NAME, CONTEXTS_FIELD_NAME); InputIterator inputIterator = dictionary.getEntryIterator(); BytesRef f; - while((f = inputIterator.next())!=null) { + while ((f = inputIterator.next()) != null) { Document doc = docs.remove(f.utf8ToString()); long w1 = doc.getField(WEIGHT_FIELD_NAME_1).numericValue().longValue(); long w2 = doc.getField(WEIGHT_FIELD_NAME_2).numericValue().longValue(); @@ -303,7 +313,7 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase { if (payloadField == null) assertTrue(inputIterator.payload().length == 0); else assertEquals(inputIterator.payload(), payloadField.binaryValue()); Set originalCtxs = new HashSet<>(); - for (IndexableField ctxf: doc.getFields(CONTEXTS_FIELD_NAME)) { + for (IndexableField ctxf : doc.getFields(CONTEXTS_FIELD_NAME)) { originalCtxs.add(ctxf.binaryValue()); } assertEquals(originalCtxs, inputIterator.contexts()); @@ -320,7 +330,7 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase { iwc.setMergePolicy(newLogMergePolicy()); RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc); Map docs = generateIndexDocuments(atLeast(100)); - for(Document doc: docs.values()) { + for (Document doc : docs.values()) { writer.addDocument(doc); } writer.commit(); @@ -331,7 +341,7 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase { Dictionary dictionary = new DocumentValueSourceDictionary(ir, FIELD_NAME, s); InputIterator inputIterator = dictionary.getEntryIterator(); BytesRef f; - while((f = inputIterator.next())!=null) { + while ((f = inputIterator.next()) != null) { Document doc = docs.remove(f.utf8ToString()); long w1 = doc.getField(WEIGHT_FIELD_NAME_1).numericValue().longValue(); long w2 = doc.getField(WEIGHT_FIELD_NAME_2).numericValue().longValue(); @@ -352,7 +362,7 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase { iwc.setMergePolicy(newLogMergePolicy()); RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc); Map docs = generateIndexDocuments(atLeast(100)); - for(Document doc: docs.values()) { + for 
(Document doc : docs.values()) { writer.addDocument(doc); } writer.commit(); @@ -363,7 +373,7 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase { Dictionary dictionary = new DocumentValueSourceDictionary(ir, FIELD_NAME, sumValues); InputIterator inputIterator = dictionary.getEntryIterator(); BytesRef f; - while((f = inputIterator.next())!=null) { + while ((f = inputIterator.next()) != null) { Document doc = docs.remove(f.utf8ToString()); long w1 = doc.getField(WEIGHT_FIELD_NAME_1).numericValue().longValue(); long w2 = doc.getField(WEIGHT_FIELD_NAME_2).numericValue().longValue(); @@ -375,7 +385,7 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase { assertTrue(docs.isEmpty()); IOUtils.close(ir, analyzer, dir); } - + @Test public void testValueSourceWithDeletions() throws IOException { Directory dir = newDirectory(); @@ -386,42 +396,43 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase { Map docs = generateIndexDocuments(atLeast(100)); Random rand = random(); List termsToDel = new ArrayList<>(); - for(Document doc : docs.values()) { - if(rand.nextBoolean() && termsToDel.size() < docs.size()-1) { + for (Document doc : docs.values()) { + if (rand.nextBoolean() && termsToDel.size() < docs.size() - 1) { termsToDel.add(doc.get(FIELD_NAME)); } writer.addDocument(doc); } writer.commit(); - + Term[] delTerms = new Term[termsToDel.size()]; - for(int i=0; i < termsToDel.size() ; i++) { + for (int i = 0; i < termsToDel.size(); i++) { delTerms[i] = new Term(FIELD_NAME, termsToDel.get(i)); } - - for(Term delTerm: delTerms) { - writer.deleteDocuments(delTerm); + + for (Term delTerm : delTerms) { + writer.deleteDocuments(delTerm); } writer.commit(); writer.close(); - - for(String termToDel: termsToDel) { - assertTrue(null!=docs.remove(termToDel)); + + for (String termToDel : termsToDel) { + assertTrue(null != docs.remove(termToDel)); } - + IndexReader ir = DirectoryReader.open(dir); assertTrue("NumDocs should be > 0 but was " + ir.numDocs(), ir.numDocs() > 0); assertEquals(ir.numDocs(), docs.size()); LongValuesSource s = sum(WEIGHT_FIELD_NAME_1, WEIGHT_FIELD_NAME_2); - Dictionary dictionary = new DocumentValueSourceDictionary(ir, FIELD_NAME, s, PAYLOAD_FIELD_NAME); + Dictionary dictionary = + new DocumentValueSourceDictionary(ir, FIELD_NAME, s, PAYLOAD_FIELD_NAME); InputIterator inputIterator = dictionary.getEntryIterator(); BytesRef f; - while((f = inputIterator.next())!=null) { + while ((f = inputIterator.next()) != null) { Document doc = docs.remove(f.utf8ToString()); long w1 = doc.getField(WEIGHT_FIELD_NAME_1).numericValue().longValue(); long w2 = doc.getField(WEIGHT_FIELD_NAME_2).numericValue().longValue(); assertTrue(f.equals(new BytesRef(doc.get(FIELD_NAME)))); - assertEquals(inputIterator.weight(), w2+w1); + assertEquals(inputIterator.weight(), w2 + w1); IndexableField payloadField = doc.getField(PAYLOAD_FIELD_NAME); if (payloadField == null) assertTrue(inputIterator.payload().length == 0); else assertEquals(inputIterator.payload(), payloadField.binaryValue()); @@ -440,8 +451,8 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase { Map docs = generateIndexDocuments(atLeast(100)); Random rand = random(); List termsToDel = new ArrayList<>(); - for(Document doc : docs.values()) { - if(rand.nextBoolean() && termsToDel.size() < docs.size()-1) { + for (Document doc : docs.values()) { + if (rand.nextBoolean() && termsToDel.size() < docs.size() - 1) { termsToDel.add(doc.get(FIELD_NAME)); } writer.addDocument(doc); @@ 
-449,33 +460,34 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase { writer.commit(); Term[] delTerms = new Term[termsToDel.size()]; - for(int i=0; i < termsToDel.size() ; i++) { + for (int i = 0; i < termsToDel.size(); i++) { delTerms[i] = new Term(FIELD_NAME, termsToDel.get(i)); } - for(Term delTerm: delTerms) { + for (Term delTerm : delTerms) { writer.deleteDocuments(delTerm); } writer.commit(); writer.close(); - for(String termToDel: termsToDel) { - assertTrue(null!=docs.remove(termToDel)); + for (String termToDel : termsToDel) { + assertTrue(null != docs.remove(termToDel)); } IndexReader ir = DirectoryReader.open(dir); assertTrue("NumDocs should be > 0 but was " + ir.numDocs(), ir.numDocs() > 0); assertEquals(ir.numDocs(), docs.size()); LongValuesSource sumValues = sum(WEIGHT_FIELD_NAME_1, WEIGHT_FIELD_NAME_2); - Dictionary dictionary = new DocumentValueSourceDictionary(ir, FIELD_NAME, sumValues, PAYLOAD_FIELD_NAME); + Dictionary dictionary = + new DocumentValueSourceDictionary(ir, FIELD_NAME, sumValues, PAYLOAD_FIELD_NAME); InputIterator inputIterator = dictionary.getEntryIterator(); BytesRef f; - while((f = inputIterator.next())!=null) { + while ((f = inputIterator.next()) != null) { Document doc = docs.remove(f.utf8ToString()); long w1 = doc.getField(WEIGHT_FIELD_NAME_1).numericValue().longValue(); long w2 = doc.getField(WEIGHT_FIELD_NAME_2).numericValue().longValue(); assertTrue(f.equals(new BytesRef(doc.get(FIELD_NAME)))); - assertEquals(inputIterator.weight(), w2+w1); + assertEquals(inputIterator.weight(), w2 + w1); IndexableField payloadField = doc.getField(PAYLOAD_FIELD_NAME); if (payloadField == null) assertTrue(inputIterator.payload().length == 0); else assertEquals(inputIterator.payload(), payloadField.binaryValue()); @@ -483,7 +495,7 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase { assertTrue(docs.isEmpty()); IOUtils.close(ir, analyzer, dir); } - + @Test public void testWithValueSource() throws IOException { Directory dir = newDirectory(); @@ -492,17 +504,19 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase { iwc.setMergePolicy(newLogMergePolicy()); RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc); Map docs = generateIndexDocuments(atLeast(100)); - for(Document doc: docs.values()) { + for (Document doc : docs.values()) { writer.addDocument(doc); } writer.commit(); writer.close(); IndexReader ir = DirectoryReader.open(dir); - Dictionary dictionary = new DocumentValueSourceDictionary(ir, FIELD_NAME, LongValuesSource.constant(10), PAYLOAD_FIELD_NAME); + Dictionary dictionary = + new DocumentValueSourceDictionary( + ir, FIELD_NAME, LongValuesSource.constant(10), PAYLOAD_FIELD_NAME); InputIterator inputIterator = dictionary.getEntryIterator(); BytesRef f; - while((f = inputIterator.next())!=null) { + while ((f = inputIterator.next()) != null) { Document doc = docs.remove(f.utf8ToString()); assertTrue(f.equals(new BytesRef(doc.get(FIELD_NAME)))); assertEquals(inputIterator.weight(), 10); @@ -522,17 +536,19 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase { iwc.setMergePolicy(newLogMergePolicy()); RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc); Map docs = generateIndexDocuments(atLeast(100)); - for(Document doc: docs.values()) { + for (Document doc : docs.values()) { writer.addDocument(doc); } writer.commit(); writer.close(); IndexReader ir = DirectoryReader.open(dir); - Dictionary dictionary = new DocumentValueSourceDictionary(ir, FIELD_NAME, 
LongValuesSource.constant(10), PAYLOAD_FIELD_NAME); + Dictionary dictionary = + new DocumentValueSourceDictionary( + ir, FIELD_NAME, LongValuesSource.constant(10), PAYLOAD_FIELD_NAME); InputIterator inputIterator = dictionary.getEntryIterator(); BytesRef f; - while((f = inputIterator.next())!=null) { + while ((f = inputIterator.next()) != null) { Document doc = docs.remove(f.utf8ToString()); assertTrue(f.equals(new BytesRef(doc.get(FIELD_NAME)))); assertEquals(inputIterator.weight(), 10); @@ -546,12 +562,12 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase { private Map generateIndexDocuments(int ndocs) { Map docs = new HashMap<>(); - for(int i = 0; i < ndocs ; i++) { + for (int i = 0; i < ndocs; i++) { Field field = new TextField(FIELD_NAME, "field_" + i, Field.Store.YES); Field weight1 = new NumericDocValuesField(WEIGHT_FIELD_NAME_1, 10 + i); Field weight2 = new NumericDocValuesField(WEIGHT_FIELD_NAME_2, 20 + i); Field weight3 = new NumericDocValuesField(WEIGHT_FIELD_NAME_3, 30 + i); - Field contexts = new StoredField(CONTEXTS_FIELD_NAME, new BytesRef("ctx_" + i + "_0")); + Field contexts = new StoredField(CONTEXTS_FIELD_NAME, new BytesRef("ctx_" + i + "_0")); Document doc = new Document(); doc.add(field); // even if payload is not required usually have it @@ -563,7 +579,7 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase { doc.add(weight2); doc.add(weight3); doc.add(contexts); - for(int j = 1; j < atLeast(3); j++) { + for (int j = 1; j < atLeast(3); j++) { contexts.setBytesValue(new BytesRef("ctx_" + i + "_" + j)); doc.add(contexts); } diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/FileDictionaryTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/FileDictionaryTest.java index bf021ead8b7..c1bf8836683 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/FileDictionaryTest.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/FileDictionaryTest.java @@ -24,16 +24,15 @@ import java.util.AbstractMap.SimpleEntry; import java.util.ArrayList; import java.util.List; import java.util.Map; - import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; import org.junit.Test; - public class FileDictionaryTest extends LuceneTestCase { - - private Map.Entry, String> generateFileEntry(String fieldDelimiter, boolean hasWeight, boolean hasPayload) { + + private Map.Entry, String> generateFileEntry( + String fieldDelimiter, boolean hasWeight, boolean hasPayload) { List entryValues = new ArrayList<>(); StringBuilder sb = new StringBuilder(); String term = TestUtil.randomSimpleString(random(), 1, 300); @@ -54,33 +53,40 @@ public class FileDictionaryTest extends LuceneTestCase { sb.append("\n"); return new SimpleEntry<>(entryValues, sb.toString()); } - - private Map.Entry>,String> generateFileInput(int count, String fieldDelimiter, boolean hasWeights, boolean hasPayloads) { + + private Map.Entry>, String> generateFileInput( + int count, String fieldDelimiter, boolean hasWeights, boolean hasPayloads) { List> entries = new ArrayList<>(); StringBuilder sb = new StringBuilder(); boolean hasPayload = hasPayloads; for (int i = 0; i < count; i++) { if (hasPayloads) { - hasPayload = (i==0) ? true : random().nextBoolean(); - } - Map.Entry, String> entrySet = generateFileEntry(fieldDelimiter, (!hasPayloads && hasWeights) ? random().nextBoolean() : hasWeights, hasPayload); + hasPayload = (i == 0) ? 
true : random().nextBoolean(); + } + Map.Entry, String> entrySet = + generateFileEntry( + fieldDelimiter, + (!hasPayloads && hasWeights) ? random().nextBoolean() : hasWeights, + hasPayload); entries.add(entrySet.getKey()); sb.append(entrySet.getValue()); } return new SimpleEntry<>(entries, sb.toString()); } - + @Test public void testFileWithTerm() throws IOException { - Map.Entry>,String> fileInput = generateFileInput(atLeast(100), FileDictionary.DEFAULT_FIELD_DELIMITER, false, false); - InputStream inputReader = new ByteArrayInputStream(fileInput.getValue().getBytes(StandardCharsets.UTF_8)); + Map.Entry>, String> fileInput = + generateFileInput(atLeast(100), FileDictionary.DEFAULT_FIELD_DELIMITER, false, false); + InputStream inputReader = + new ByteArrayInputStream(fileInput.getValue().getBytes(StandardCharsets.UTF_8)); FileDictionary dictionary = new FileDictionary(inputReader); List> entries = fileInput.getKey(); InputIterator inputIter = dictionary.getEntryIterator(); assertFalse(inputIter.hasPayloads()); BytesRef term; int count = 0; - while((term = inputIter.next()) != null) { + while ((term = inputIter.next()) != null) { assertTrue(entries.size() > count); List entry = entries.get(count); assertTrue(entry.size() >= 1); // at least a term @@ -91,18 +97,20 @@ public class FileDictionaryTest extends LuceneTestCase { } assertEquals(count, entries.size()); } - + @Test public void testFileWithWeight() throws IOException { - Map.Entry>,String> fileInput = generateFileInput(atLeast(100), FileDictionary.DEFAULT_FIELD_DELIMITER, true, false); - InputStream inputReader = new ByteArrayInputStream(fileInput.getValue().getBytes(StandardCharsets.UTF_8)); + Map.Entry>, String> fileInput = + generateFileInput(atLeast(100), FileDictionary.DEFAULT_FIELD_DELIMITER, true, false); + InputStream inputReader = + new ByteArrayInputStream(fileInput.getValue().getBytes(StandardCharsets.UTF_8)); FileDictionary dictionary = new FileDictionary(inputReader); List> entries = fileInput.getKey(); InputIterator inputIter = dictionary.getEntryIterator(); assertFalse(inputIter.hasPayloads()); BytesRef term; int count = 0; - while((term = inputIter.next()) != null) { + while ((term = inputIter.next()) != null) { assertTrue(entries.size() > count); List entry = entries.get(count); assertTrue(entry.size() >= 1); // at least a term @@ -113,18 +121,20 @@ public class FileDictionaryTest extends LuceneTestCase { } assertEquals(count, entries.size()); } - + @Test public void testFileWithWeightAndPayload() throws IOException { - Map.Entry>,String> fileInput = generateFileInput(atLeast(100), FileDictionary.DEFAULT_FIELD_DELIMITER, true, true); - InputStream inputReader = new ByteArrayInputStream(fileInput.getValue().getBytes(StandardCharsets.UTF_8)); + Map.Entry>, String> fileInput = + generateFileInput(atLeast(100), FileDictionary.DEFAULT_FIELD_DELIMITER, true, true); + InputStream inputReader = + new ByteArrayInputStream(fileInput.getValue().getBytes(StandardCharsets.UTF_8)); FileDictionary dictionary = new FileDictionary(inputReader); List> entries = fileInput.getKey(); InputIterator inputIter = dictionary.getEntryIterator(); assertTrue(inputIter.hasPayloads()); BytesRef term; int count = 0; - while((term = inputIter.next()) != null) { + while ((term = inputIter.next()) != null) { assertTrue(entries.size() > count); List entry = entries.get(count); assertTrue(entry.size() >= 2); // at least term and weight @@ -139,18 +149,20 @@ public class FileDictionaryTest extends LuceneTestCase { } assertEquals(count, entries.size()); } 
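// The generateFileEntry/generateFileInput helpers above emit one entry per line
// in the form term[<delim>weight[<delim>payload]]; FileDictionary.DEFAULT_FIELD_DELIMITER
// is a tab. A minimal sketch of consuming such input (the two sample entries are
// invented for illustration):
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.lucene.search.suggest.FileDictionary;
import org.apache.lucene.search.suggest.InputIterator;
import org.apache.lucene.util.BytesRef;

final class FileDictionarySketch {
  public static void main(String[] args) throws IOException {
    String data = "apple\t10\tfruit\nankle\t3\tbody part\n";
    FileDictionary dict =
        new FileDictionary(new ByteArrayInputStream(data.getBytes(StandardCharsets.UTF_8)));
    InputIterator it = dict.getEntryIterator();
    BytesRef term;
    while ((term = it.next()) != null) { // same consumption loop as the tests above
      System.out.println(
          term.utf8ToString() + " weight=" + it.weight() + " payload=" + it.payload().utf8ToString());
    }
  }
}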
- + @Test public void testFileWithOneEntry() throws IOException { - Map.Entry>,String> fileInput = generateFileInput(1, FileDictionary.DEFAULT_FIELD_DELIMITER, true, true); - InputStream inputReader = new ByteArrayInputStream(fileInput.getValue().getBytes(StandardCharsets.UTF_8)); + Map.Entry>, String> fileInput = + generateFileInput(1, FileDictionary.DEFAULT_FIELD_DELIMITER, true, true); + InputStream inputReader = + new ByteArrayInputStream(fileInput.getValue().getBytes(StandardCharsets.UTF_8)); FileDictionary dictionary = new FileDictionary(inputReader); List> entries = fileInput.getKey(); InputIterator inputIter = dictionary.getEntryIterator(); assertTrue(inputIter.hasPayloads()); BytesRef term; int count = 0; - while((term = inputIter.next()) != null) { + while ((term = inputIter.next()) != null) { assertTrue(entries.size() > count); List entry = entries.get(count); assertTrue(entry.size() >= 2); // at least term and weight @@ -165,19 +177,20 @@ public class FileDictionaryTest extends LuceneTestCase { } assertEquals(count, entries.size()); } - - + @Test public void testFileWithDifferentDelimiter() throws IOException { - Map.Entry>,String> fileInput = generateFileInput(atLeast(100), " , ", true, true); - InputStream inputReader = new ByteArrayInputStream(fileInput.getValue().getBytes(StandardCharsets.UTF_8)); + Map.Entry>, String> fileInput = + generateFileInput(atLeast(100), " , ", true, true); + InputStream inputReader = + new ByteArrayInputStream(fileInput.getValue().getBytes(StandardCharsets.UTF_8)); FileDictionary dictionary = new FileDictionary(inputReader, " , "); List> entries = fileInput.getKey(); InputIterator inputIter = dictionary.getEntryIterator(); assertTrue(inputIter.hasPayloads()); BytesRef term; int count = 0; - while((term = inputIter.next()) != null) { + while ((term = inputIter.next()) != null) { assertTrue(entries.size() > count); List entry = entries.get(count); assertTrue(entry.size() >= 2); // at least term and weight @@ -192,5 +205,4 @@ public class FileDictionaryTest extends LuceneTestCase { } assertEquals(count, entries.size()); } - } diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/Input.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/Input.java index 5cb07902e0b..ef739a41e72 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/Input.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/Input.java @@ -17,7 +17,6 @@ package org.apache.lucene.search.suggest; import java.util.Set; - import org.apache.lucene.util.BytesRef; /** corresponds to {@link InputIterator}'s entries */ @@ -32,27 +31,27 @@ public final class Input { public Input(BytesRef term, long v, BytesRef payload) { this(term, v, payload, true, null, false); } - + public Input(String term, long v, BytesRef payload) { this(new BytesRef(term), v, payload); } - + public Input(BytesRef term, long v, Set contexts) { this(term, v, null, false, contexts, true); } - + public Input(String term, long v, Set contexts) { this(new BytesRef(term), v, null, false, contexts, true); } - + public Input(BytesRef term, long v) { this(term, v, null, false, null, false); } - + public Input(String term, long v) { this(new BytesRef(term), v, null, false, null, false); } - + public Input(String term, int v, BytesRef payload, Set contexts) { this(new BytesRef(term), v, payload, true, contexts, true); } @@ -60,10 +59,13 @@ public final class Input { public Input(BytesRef term, long v, BytesRef payload, Set contexts) { this(term, v, payload, true, contexts, 
true); } - - - public Input(BytesRef term, long v, BytesRef payload, boolean hasPayloads, Set contexts, + public Input( + BytesRef term, + long v, + BytesRef payload, + boolean hasPayloads, + Set contexts, boolean hasContexts) { this.term = term; this.v = v; @@ -72,12 +74,12 @@ public final class Input { this.contexts = contexts; this.hasContexts = hasContexts; } - + public boolean hasContexts() { return hasContexts; } - + public boolean hasPayloads() { return hasPayloads; } -} \ No newline at end of file +} diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/InputArrayIterator.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/InputArrayIterator.java index a62e887ccbe..8ed4ef95112 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/InputArrayIterator.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/InputArrayIterator.java @@ -19,13 +19,10 @@ package org.apache.lucene.search.suggest; import java.util.Arrays; import java.util.Iterator; import java.util.Set; - import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; -/** - * A {@link InputIterator} over a sequence of {@link Input}s. - */ +/** A {@link InputIterator} over a sequence of {@link Input}s. */ public final class InputArrayIterator implements InputIterator { private final Iterator i; private final boolean hasPayloads; @@ -50,10 +47,11 @@ public final class InputArrayIterator implements InputIterator { public InputArrayIterator(Input[] i) { this(Arrays.asList(i)); } + public InputArrayIterator(Iterable i) { this(i.iterator()); } - + @Override public long weight() { return current.v; @@ -61,7 +59,7 @@ public final class InputArrayIterator implements InputIterator { @Override public BytesRef next() { - if (i.hasNext() || (first && current!=null)) { + if (i.hasNext() || (first && current != null)) { if (first) { first = false; } else { diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/LookupBenchmarkTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/LookupBenchmarkTest.java index 5e5a4d552fc..c6b407b889d 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/LookupBenchmarkTest.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/LookupBenchmarkTest.java @@ -29,7 +29,6 @@ import java.util.List; import java.util.Locale; import java.util.Random; import java.util.concurrent.Callable; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; @@ -48,60 +47,50 @@ import org.apache.lucene.util.*; import org.junit.BeforeClass; import org.junit.Ignore; -/** - * Benchmarks tests for implementations of {@link Lookup} interface. - */ +/** Benchmarks tests for implementations of {@link Lookup} interface. 
*/ @Ignore("COMMENT ME TO RUN BENCHMARKS!") public class LookupBenchmarkTest extends LuceneTestCase { - @SuppressWarnings({"unchecked","deprecation"}) - private final List> benchmarkClasses = Arrays.asList( - FuzzySuggester.class, - AnalyzingSuggester.class, - AnalyzingInfixSuggester.class, - JaspellLookup.class, - TSTLookup.class, - FSTCompletionLookup.class, - WFSTCompletionLookup.class, - BlendedInfixSuggester.class, - FreeTextSuggester.class - ); + @SuppressWarnings({"unchecked", "deprecation"}) + private final List> benchmarkClasses = + Arrays.asList( + FuzzySuggester.class, + AnalyzingSuggester.class, + AnalyzingInfixSuggester.class, + JaspellLookup.class, + TSTLookup.class, + FSTCompletionLookup.class, + WFSTCompletionLookup.class, + BlendedInfixSuggester.class, + FreeTextSuggester.class); - private final static int rounds = 15; - private final static int warmup = 5; + private static final int rounds = 15; + private static final int warmup = 5; private final int num = 7; private final boolean onlyMorePopular = false; - private final static Random random = new Random(0xdeadbeef); + private static final Random random = new Random(0xdeadbeef); - /** - * Input term/weight pairs. - */ - private static Input [] dictionaryInput; + /** Input term/weight pairs. */ + private static Input[] dictionaryInput; - /** - * Benchmark term/weight pairs (randomized order). - */ + /** Benchmark term/weight pairs (randomized order). */ private static List benchmarkInput; - /** - * Loads terms and frequencies from Wikipedia (cached). - */ + /** Loads terms and frequencies from Wikipedia (cached). */ @BeforeClass public static void setup() throws Exception { assert false : "disable assertions before running benchmarks!"; List input = readTop50KWiki(); Collections.shuffle(input, random); - LookupBenchmarkTest.dictionaryInput = input.toArray(new Input [input.size()]); + LookupBenchmarkTest.dictionaryInput = input.toArray(new Input[input.size()]); Collections.shuffle(input, random); LookupBenchmarkTest.benchmarkInput = input; } static final Charset UTF_8 = StandardCharsets.UTF_8; - /** - * Collect the multilingual input for benchmarks/ tests. - */ + /** Collect the multilingual input for benchmarks/ tests. */ public static List readTop50KWiki() throws Exception { List input = new ArrayList<>(); URL resource = LookupBenchmarkTest.class.getResource("Top50KWiki.utf8"); @@ -120,51 +109,50 @@ public class LookupBenchmarkTest extends LuceneTestCase { return input; } - /** - * Test construction time. - */ + /** Test construction time. */ public void testConstructionTime() throws Exception { System.err.println("-- construction time"); for (final Class cls : benchmarkClasses) { - BenchmarkResult result = measure(new Callable() { - @Override - public Integer call() throws Exception { - final Lookup lookup = buildLookup(cls, dictionaryInput); - return lookup.hashCode(); - } - }); + BenchmarkResult result = + measure( + new Callable() { + @Override + public Integer call() throws Exception { + final Lookup lookup = buildLookup(cls, dictionaryInput); + return lookup.hashCode(); + } + }); System.err.println( - String.format(Locale.ROOT, "%-15s input: %d, time[ms]: %s", + String.format( + Locale.ROOT, + "%-15s input: %d, time[ms]: %s", cls.getSimpleName(), dictionaryInput.length, result.average.toString())); } } - /** - * Test memory required for the storage. - */ + /** Test memory required for the storage. 
*/ public void testStorageNeeds() throws Exception { System.err.println("-- RAM consumption"); for (Class cls : benchmarkClasses) { Lookup lookup = buildLookup(cls, dictionaryInput); long sizeInBytes = lookup.ramBytesUsed(); System.err.println( - String.format(Locale.ROOT, "%-15s size[B]:%,13d", - lookup.getClass().getSimpleName(), - sizeInBytes)); + String.format( + Locale.ROOT, "%-15s size[B]:%,13d", lookup.getClass().getSimpleName(), sizeInBytes)); } } - /** - * Create {@link Lookup} instance and populate it. - */ + /** Create {@link Lookup} instance and populate it. */ private Lookup buildLookup(Class cls, Input[] input) throws Exception { Lookup lookup = null; - if (cls == TSTLookup.class || cls == FSTCompletionLookup.class || cls == WFSTCompletionLookup.class) { - Constructor ctor = cls.getConstructor(Directory.class, String.class); - lookup = ctor.newInstance(FSDirectory.open(createTempDir("LookupBenchmarkTest")), "test"); + if (cls == TSTLookup.class + || cls == FSTCompletionLookup.class + || cls == WFSTCompletionLookup.class) { + Constructor ctor = cls.getConstructor(Directory.class, String.class); + lookup = ctor.newInstance(FSDirectory.open(createTempDir("LookupBenchmarkTest")), "test"); } else { try { lookup = cls.getConstructor().newInstance(); @@ -174,9 +162,12 @@ public class LookupBenchmarkTest extends LuceneTestCase { Constructor ctor = cls.getConstructor(Directory.class, Analyzer.class); lookup = ctor.newInstance(FSDirectory.open(createTempDir("LookupBenchmarkTest")), a); } else if (cls == AnalyzingSuggester.class) { - lookup = new AnalyzingSuggester(FSDirectory.open(createTempDir("LookupBenchmarkTest")), "test", a); + lookup = + new AnalyzingSuggester( + FSDirectory.open(createTempDir("LookupBenchmarkTest")), "test", a); } else if (cls == FuzzySuggester.class) { - lookup = new FuzzySuggester(FSDirectory.open(createTempDir("LookupBenchmarkTest")), "test", a); + lookup = + new FuzzySuggester(FSDirectory.open(createTempDir("LookupBenchmarkTest")), "test", a); } else { Constructor ctor = cls.getConstructor(Analyzer.class); lookup = ctor.newInstance(a); @@ -187,41 +178,39 @@ public class LookupBenchmarkTest extends LuceneTestCase { return lookup; } - /** - * Test performance of lookup on full hits. - */ + /** Test performance of lookup on full hits. */ public void testPerformanceOnFullHits() throws Exception { final int minPrefixLen = 100; final int maxPrefixLen = 200; runPerformanceTest(minPrefixLen, maxPrefixLen, num, onlyMorePopular); } - /** - * Test performance of lookup on longer term prefixes (6-9 letters or shorter). - */ + /** Test performance of lookup on longer term prefixes (6-9 letters or shorter). */ public void testPerformanceOnPrefixes6_9() throws Exception { final int minPrefixLen = 6; final int maxPrefixLen = 9; runPerformanceTest(minPrefixLen, maxPrefixLen, num, onlyMorePopular); } - /** - * Test performance of lookup on short term prefixes (2-4 letters or shorter). - */ + /** Test performance of lookup on short term prefixes (2-4 letters or shorter). */ public void testPerformanceOnPrefixes2_4() throws Exception { final int minPrefixLen = 2; final int maxPrefixLen = 4; runPerformanceTest(minPrefixLen, maxPrefixLen, num, onlyMorePopular); } - /** - * Run the actual benchmark. 
- */ - public void runPerformanceTest(final int minPrefixLen, final int maxPrefixLen, - final int num, final boolean onlyMorePopular) throws Exception { - System.err.println(String.format(Locale.ROOT, - "-- prefixes: %d-%d, num: %d, onlyMorePopular: %s", - minPrefixLen, maxPrefixLen, num, onlyMorePopular)); + /** Run the actual benchmark. */ + public void runPerformanceTest( + final int minPrefixLen, final int maxPrefixLen, final int num, final boolean onlyMorePopular) + throws Exception { + System.err.println( + String.format( + Locale.ROOT, + "-- prefixes: %d-%d, num: %d, onlyMorePopular: %s", + minPrefixLen, + maxPrefixLen, + num, + onlyMorePopular)); for (Class cls : benchmarkClasses) { final Lookup lookup = buildLookup(cls, dictionaryInput); @@ -229,24 +218,31 @@ public class LookupBenchmarkTest extends LuceneTestCase { final List input = new ArrayList<>(benchmarkInput.size()); for (Input tf : benchmarkInput) { String s = tf.term.utf8ToString(); - String sub = s.substring(0, Math.min(s.length(), - minPrefixLen + random.nextInt(maxPrefixLen - minPrefixLen + 1))); + String sub = + s.substring( + 0, + Math.min( + s.length(), minPrefixLen + random.nextInt(maxPrefixLen - minPrefixLen + 1))); input.add(sub); } - BenchmarkResult result = measure(new Callable() { - @Override - public Integer call() throws Exception { - int v = 0; - for (String term : input) { - v += lookup.lookup(term, onlyMorePopular, num).size(); - } - return v; - } - }); + BenchmarkResult result = + measure( + new Callable() { + @Override + public Integer call() throws Exception { + int v = 0; + for (String term : input) { + v += lookup.lookup(term, onlyMorePopular, num).size(); + } + return v; + } + }); System.err.println( - String.format(Locale.ROOT, "%-15s queries: %d, time[ms]: %s, ~kQPS: %.0f", + String.format( + Locale.ROOT, + "%-15s queries: %d, time[ms]: %s, ~kQPS: %.0f", lookup.getClass().getSimpleName(), input.size(), result.average.toString(), @@ -254,24 +250,21 @@ public class LookupBenchmarkTest extends LuceneTestCase { } } - /** - * Do the measurements. - */ + /** Do the measurements. 
*/ private BenchmarkResult measure(Callable callable) { final double NANOS_PER_MS = 1000000; try { List times = new ArrayList<>(); for (int i = 0; i < warmup + rounds; i++) { - final long start = System.nanoTime(); - guard = callable.call().intValue(); - times.add((System.nanoTime() - start) / NANOS_PER_MS); + final long start = System.nanoTime(); + guard = callable.call().intValue(); + times.add((System.nanoTime() - start) / NANOS_PER_MS); } return new BenchmarkResult(times, warmup, rounds); } catch (Exception e) { e.printStackTrace(); throw new RuntimeException(e); - } } diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/PersistenceTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/PersistenceTest.java index 9f4c6877e64..731e69d1e86 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/PersistenceTest.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/PersistenceTest.java @@ -20,7 +20,6 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.List; import java.util.Random; - import org.apache.lucene.search.suggest.Lookup.LookupResult; import org.apache.lucene.search.suggest.fst.FSTCompletionLookup; import org.apache.lucene.search.suggest.jaspell.JaspellLookup; @@ -30,22 +29,24 @@ import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; public class PersistenceTest extends LuceneTestCase { - public final String[] keys = new String[] { - "one", - "two", - "three", - "four", - "oneness", - "onerous", - "onesimus", - "twofold", - "twonk", - "thrive", - "through", - "threat", - "foundation", - "fourier", - "fourty"}; + public final String[] keys = + new String[] { + "one", + "two", + "three", + "four", + "oneness", + "onerous", + "onesimus", + "twofold", + "twonk", + "thrive", + "through", + "threat", + "foundation", + "fourier", + "fourty" + }; public void testTSTPersistence() throws Exception { runTest(TSTLookup.class, true); @@ -60,11 +61,12 @@ public class PersistenceTest extends LuceneTestCase { runTest(FSTCompletionLookup.class, false); } - private Directory getDirectory() { + private Directory getDirectory() { return newDirectory(); } - private void runTest(Class lookupClass, boolean supportsExactWeights) throws Exception { + private void runTest(Class lookupClass, boolean supportsExactWeights) + throws Exception { // Add all input keys. Lookup lookup; @@ -77,8 +79,9 @@ public class PersistenceTest extends LuceneTestCase { lookup = lookupClass.getConstructor().newInstance(); } Input[] keys = new Input[this.keys.length]; - for (int i = 0; i < keys.length; i++) + for (int i = 0; i < keys.length; i++) { keys[i] = new Input(this.keys[i], i); + } lookup.build(new InputArrayIterator(keys)); // Store the suggester. 
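// Re the measure(...) loop in LookupBenchmarkTest above: every iteration is timed,
// warmup rounds are presumably discarded later by BenchmarkResult, and the callable's
// result is written to a field ("guard") so the JIT cannot dead-code-eliminate the
// measured work. A standalone sketch of the same pattern (class and field names assumed):
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;

final class TimingSketch {
  static volatile int guard; // consuming the result defeats dead-code elimination

  static List<Double> time(Callable<Integer> task, int warmup, int rounds) throws Exception {
    final double NANOS_PER_MS = 1_000_000d;
    List<Double> times = new ArrayList<>();
    for (int i = 0; i < warmup + rounds; i++) {
      long start = System.nanoTime();
      guard = task.call();
      times.add((System.nanoTime() - start) / NANOS_PER_MS);
    }
    return times.subList(warmup, times.size()); // keep only the measured rounds
  }
}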
@@ -93,12 +96,13 @@ public class PersistenceTest extends LuceneTestCase { Random random = random(); long previous = Long.MIN_VALUE; for (Input k : keys) { - List list = lookup.lookup(TestUtil.bytesToCharSequence(k.term, random), false, 1); + List list = + lookup.lookup(TestUtil.bytesToCharSequence(k.term, random), false, 1); assertEquals(1, list.size()); LookupResult lookupResult = list.get(0); assertNotNull(k.term.utf8ToString(), lookupResult.key); - if (supportsExactWeights) { + if (supportsExactWeights) { assertEquals(k.term.utf8ToString(), k.v, lookupResult.value); } else { assertTrue(lookupResult.value + ">=" + previous, lookupResult.value >= previous); diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/TestInputIterator.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/TestInputIterator.java index ecb77bdb42d..af989a9b607 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/TestInputIterator.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/TestInputIterator.java @@ -23,7 +23,6 @@ import java.util.Map; import java.util.Random; import java.util.Set; import java.util.TreeMap; - import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; @@ -40,15 +39,16 @@ public class TestInputIterator extends LuceneTestCase { assertNull(wrapper.next()); } } - + public void testTerms() throws Exception { Random random = random(); int num = atLeast(10000); - + TreeMap> sorted = new TreeMap<>(); TreeMap sortedWithoutPayload = new TreeMap<>(); TreeMap>> sortedWithContext = new TreeMap<>(); - TreeMap>>> sortedWithPayloadAndContext = new TreeMap<>(); + TreeMap>>> + sortedWithPayloadAndContext = new TreeMap<>(); Input[] unsorted = new Input[num]; Input[] unsortedWithoutPayload = new Input[num]; Input[] unsortedWithContexts = new Input[num]; @@ -61,7 +61,7 @@ public class TestInputIterator extends LuceneTestCase { do { key = new BytesRef(TestUtil.randomUnicodeString(random)); payload = new BytesRef(TestUtil.randomUnicodeString(random)); - for(int j = 0; j < atLeast(2); j++) { + for (int j = 0; j < atLeast(2); j++) { ctxs.add(new BytesRef(TestUtil.randomUnicodeString(random))); } } while (sorted.containsKey(key)); @@ -69,31 +69,36 @@ public class TestInputIterator extends LuceneTestCase { sortedWithoutPayload.put(key, value); sorted.put(key, new SimpleEntry<>(value, payload)); sortedWithContext.put(key, new SimpleEntry<>(value, ctxs)); - sortedWithPayloadAndContext.put(key, new SimpleEntry<>(value, new SimpleEntry<>(payload, ctxs))); + sortedWithPayloadAndContext.put( + key, new SimpleEntry<>(value, new SimpleEntry<>(payload, ctxs))); unsorted[i] = new Input(key, value, payload); unsortedWithoutPayload[i] = new Input(key, value); unsortedWithContexts[i] = new Input(key, value, ctxs); unsortedWithPayloadAndContext[i] = new Input(key, value, payload, ctxs); } - + // test the sorted iterator wrapper with payloads try (Directory tempDir = getDirectory()) { - InputIterator wrapper = new SortedInputIterator(tempDir, "sorted", new InputArrayIterator(unsorted)); - Iterator>> expected = sorted.entrySet().iterator(); + InputIterator wrapper = + new SortedInputIterator(tempDir, "sorted", new InputArrayIterator(unsorted)); + Iterator>> expected = + sorted.entrySet().iterator(); while (expected.hasNext()) { - Map.Entry> entry = expected.next(); - + Map.Entry> entry = expected.next(); + assertEquals(entry.getKey(), wrapper.next()); assertEquals(entry.getValue().getKey().longValue(), 
wrapper.weight()); assertEquals(entry.getValue().getValue(), wrapper.payload()); } assertNull(wrapper.next()); } - + // test the sorted iterator wrapper with contexts try (Directory tempDir = getDirectory()) { - InputIterator wrapper = new SortedInputIterator(tempDir, "sorted", new InputArrayIterator(unsortedWithContexts)); - Iterator>>> actualEntries = sortedWithContext.entrySet().iterator(); + InputIterator wrapper = + new SortedInputIterator(tempDir, "sorted", new InputArrayIterator(unsortedWithContexts)); + Iterator>>> actualEntries = + sortedWithContext.entrySet().iterator(); while (actualEntries.hasNext()) { Map.Entry>> entry = actualEntries.next(); assertEquals(entry.getKey(), wrapper.next()); @@ -106,10 +111,14 @@ public class TestInputIterator extends LuceneTestCase { // test the sorted iterator wrapper with contexts and payload try (Directory tempDir = getDirectory()) { - InputIterator wrapper = new SortedInputIterator(tempDir, "sorter", new InputArrayIterator(unsortedWithPayloadAndContext)); - Iterator>>>> expectedPayloadContextEntries = sortedWithPayloadAndContext.entrySet().iterator(); + InputIterator wrapper = + new SortedInputIterator( + tempDir, "sorter", new InputArrayIterator(unsortedWithPayloadAndContext)); + Iterator>>>> + expectedPayloadContextEntries = sortedWithPayloadAndContext.entrySet().iterator(); while (expectedPayloadContextEntries.hasNext()) { - Map.Entry>>> entry = expectedPayloadContextEntries.next(); + Map.Entry>>> entry = + expectedPayloadContextEntries.next(); assertEquals(entry.getKey(), wrapper.next()); assertEquals(entry.getValue().getKey().longValue(), wrapper.weight()); Set actualCtxs = entry.getValue().getValue().getValue(); @@ -133,20 +142,24 @@ public class TestInputIterator extends LuceneTestCase { // test the sorted iterator wrapper without payloads try (Directory tempDir = getDirectory()) { - InputIterator wrapperWithoutPayload = new SortedInputIterator(tempDir, "sorted", new InputArrayIterator(unsortedWithoutPayload)); - Iterator> expectedWithoutPayload = sortedWithoutPayload.entrySet().iterator(); + InputIterator wrapperWithoutPayload = + new SortedInputIterator( + tempDir, "sorted", new InputArrayIterator(unsortedWithoutPayload)); + Iterator> expectedWithoutPayload = + sortedWithoutPayload.entrySet().iterator(); while (expectedWithoutPayload.hasNext()) { Map.Entry entry = expectedWithoutPayload.next(); - + assertEquals(entry.getKey(), wrapperWithoutPayload.next()); assertEquals(entry.getValue().longValue(), wrapperWithoutPayload.weight()); assertNull(wrapperWithoutPayload.payload()); } assertNull(wrapperWithoutPayload.next()); } - + // test the unsorted iterator wrapper without payloads - InputIterator wrapperWithoutPayload = new UnsortedInputIterator(new InputArrayIterator(unsortedWithoutPayload)); + InputIterator wrapperWithoutPayload = + new UnsortedInputIterator(new InputArrayIterator(unsortedWithoutPayload)); TreeMap actualWithoutPayload = new TreeMap<>(); while ((key = wrapperWithoutPayload.next()) != null) { long value = wrapperWithoutPayload.weight(); @@ -155,18 +168,20 @@ public class TestInputIterator extends LuceneTestCase { } assertEquals(sortedWithoutPayload, actualWithoutPayload); } - + public static long asLong(BytesRef b) { - return (((long) asIntInternal(b, b.offset) << 32) | asIntInternal(b, - b.offset + 4) & 0xFFFFFFFFL); + return (((long) asIntInternal(b, b.offset) << 32) + | asIntInternal(b, b.offset + 4) & 0xFFFFFFFFL); } private static int asIntInternal(BytesRef b, int pos) { - return ((b.bytes[pos++] & 0xFF) << 24) | 
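// On asLong(...) just above: the first eight bytes are read big-endian, high int
// first. Java's `&` binds tighter than `|`, so the 0xFFFFFFFFL mask strips sign
// extension from the low int before the OR. Standalone sketch of the same packing:
static long packBigEndian(byte[] bytes) {
  int hi = ((bytes[0] & 0xFF) << 24) | ((bytes[1] & 0xFF) << 16)
      | ((bytes[2] & 0xFF) << 8) | (bytes[3] & 0xFF);
  int lo = ((bytes[4] & 0xFF) << 24) | ((bytes[5] & 0xFF) << 16)
      | ((bytes[6] & 0xFF) << 8) | (bytes[7] & 0xFF);
  return ((long) hi << 32) | (lo & 0xFFFFFFFFL);
  // e.g. {0,0,0,1, 0,0,0,2} -> (1L << 32) | 2 = 4294967298L
}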
((b.bytes[pos++] & 0xFF) << 16) - | ((b.bytes[pos++] & 0xFF) << 8) | (b.bytes[pos] & 0xFF); + return ((b.bytes[pos++] & 0xFF) << 24) + | ((b.bytes[pos++] & 0xFF) << 16) + | ((b.bytes[pos++] & 0xFF) << 8) + | (b.bytes[pos] & 0xFF); } - private Directory getDirectory() { + private Directory getDirectory() { return newDirectory(); } } diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggesterTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggesterTest.java index bf1085d3bf8..869e8820f0a 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggesterTest.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggesterTest.java @@ -26,7 +26,6 @@ import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.MockAnalyzer; @@ -53,16 +52,18 @@ import org.junit.Test; public class AnalyzingInfixSuggesterTest extends LuceneTestCase { public void testBasic() throws Exception { - Input keys[] = new Input[] { - new Input("lend me your ear", 8, new BytesRef("foobar")), - new Input("a penny saved is a penny earned", 10, new BytesRef("foobaz")), - }; + Input keys[] = + new Input[] { + new Input("lend me your ear", 8, new BytesRef("foobar")), + new Input("a penny saved is a penny earned", 10, new BytesRef("foobaz")), + }; Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(newDirectory(), a, a, 3, false); suggester.build(new InputArrayIterator(keys)); - List results = suggester.lookup(TestUtil.stringToCharSequence("ear", random()), 10, true, true); + List results = + suggester.lookup(TestUtil.stringToCharSequence("ear", random()), 10, true, true); assertEquals(2, results.size()); assertEquals("a penny saved is a penny earned", results.get(0).key); assertEquals("a penny saved is a penny earned", results.get(0).highlightKey); @@ -94,67 +95,80 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { assertEquals("a penny saved is a penny earned", results.get(0).highlightKey); assertEquals(10, results.get(0).value); assertEquals(new BytesRef("foobaz"), results.get(0).payload); - - results = suggester.lookup(TestUtil.stringToCharSequence("money penny", random()), 10, false, true); + + results = + suggester.lookup(TestUtil.stringToCharSequence("money penny", random()), 10, false, true); assertEquals(1, results.size()); assertEquals("a penny saved is a penny earned", results.get(0).key); assertEquals("a penny saved is a penny earned", results.get(0).highlightKey); assertEquals(10, results.get(0).value); assertEquals(new BytesRef("foobaz"), results.get(0).payload); - - results = suggester.lookup(TestUtil.stringToCharSequence("penny ea", random()), 10, false, true); + + results = + suggester.lookup(TestUtil.stringToCharSequence("penny ea", random()), 10, false, true); assertEquals(2, results.size()); assertEquals("a penny saved is a penny earned", results.get(0).key); - assertEquals("a penny saved is a penny earned", results.get(0).highlightKey); + assertEquals( + "a penny saved is a penny earned", results.get(0).highlightKey); assertEquals("lend me your ear", results.get(1).key); assertEquals("lend me your ear", results.get(1).highlightKey); - - results = 
suggester.lookup(TestUtil.stringToCharSequence("money penny", random()), 10, false, false); + + results = + suggester.lookup(TestUtil.stringToCharSequence("money penny", random()), 10, false, false); assertEquals(1, results.size()); assertEquals("a penny saved is a penny earned", results.get(0).key); assertNull(results.get(0).highlightKey); - + testConstructorDefaults(suggester, keys, a, true, true); testConstructorDefaults(suggester, keys, a, true, false); testConstructorDefaults(suggester, keys, a, false, false); testConstructorDefaults(suggester, keys, a, false, true); - + suggester.close(); a.close(); } - private void testConstructorDefaults(AnalyzingInfixSuggester suggester, Input[] keys, Analyzer a, - boolean allTermsRequired, boolean highlight) throws IOException { - AnalyzingInfixSuggester suggester2 = new AnalyzingInfixSuggester(newDirectory(), a, a, 3, false, allTermsRequired, highlight); + private void testConstructorDefaults( + AnalyzingInfixSuggester suggester, + Input[] keys, + Analyzer a, + boolean allTermsRequired, + boolean highlight) + throws IOException { + AnalyzingInfixSuggester suggester2 = + new AnalyzingInfixSuggester(newDirectory(), a, a, 3, false, allTermsRequired, highlight); suggester2.build(new InputArrayIterator(keys)); - + CharSequence key = TestUtil.stringToCharSequence("penny ea", random()); - + List results1 = suggester.lookup(key, 10, allTermsRequired, highlight); List results2 = suggester2.lookup(key, false, 10); assertEquals(results1.size(), results2.size()); assertEquals(results1.get(0).key, results2.get(0).key); assertEquals(results1.get(0).highlightKey, results2.get(0).highlightKey); - + suggester2.close(); } public void testAfterLoad() throws Exception { - Input keys[] = new Input[] { - new Input("lend me your ear", 8, new BytesRef("foobar")), - new Input("a penny saved is a penny earned", 10, new BytesRef("foobaz")), - }; + Input keys[] = + new Input[] { + new Input("lend me your ear", 8, new BytesRef("foobar")), + new Input("a penny saved is a penny earned", 10, new BytesRef("foobaz")), + }; Path tempDir = createTempDir("AnalyzingInfixSuggesterTest"); Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); - AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(newFSDirectory(tempDir), a, a, 3, false); + AnalyzingInfixSuggester suggester = + new AnalyzingInfixSuggester(newFSDirectory(tempDir), a, a, 3, false); suggester.build(new InputArrayIterator(keys)); assertEquals(2, suggester.getCount()); suggester.close(); suggester = new AnalyzingInfixSuggester(newFSDirectory(tempDir), a, a, 3, false); - List results = suggester.lookup(TestUtil.stringToCharSequence("ear", random()), 10, true, true); + List results = + suggester.lookup(TestUtil.stringToCharSequence("ear", random()), 10, true, true); assertEquals(2, results.size()); assertEquals("a penny saved is a penny earned", results.get(0).key); assertEquals("a penny saved is a penny earned", results.get(0).highlightKey); @@ -165,14 +179,12 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { a.close(); } - /** Used to return highlighted result; see {@link - * LookupResult#highlightKey} */ + /** Used to return highlighted result; see {@link LookupResult#highlightKey} */ private static final class LookupHighlightFragment { /** Portion of text for this fragment. */ public final String text; - /** True if this text matched a part of the user's - * query. */ + /** True if this text matched a part of the user's query. 
*/ public final boolean isHit; /** Sole constructor. */ @@ -189,58 +201,72 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { @SuppressWarnings("unchecked") public void testHighlightAsObject() throws Exception { - Input keys[] = new Input[] { - new Input("a penny saved is a penny earned", 10, new BytesRef("foobaz")), - }; + Input keys[] = + new Input[] { + new Input("a penny saved is a penny earned", 10, new BytesRef("foobaz")), + }; Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); - AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(newDirectory(), a, a, 3, false) { - @Override - protected Object highlight(String text, Set matchedTokens, String prefixToken) throws IOException { - try (TokenStream ts = queryAnalyzer.tokenStream("text", new StringReader(text))) { - CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class); - OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class); - ts.reset(); - List fragments = new ArrayList<>(); - int upto = 0; - while (ts.incrementToken()) { - String token = termAtt.toString(); - int startOffset = offsetAtt.startOffset(); - int endOffset = offsetAtt.endOffset(); - if (upto < startOffset) { - fragments.add(new LookupHighlightFragment(text.substring(upto, startOffset), false)); - upto = startOffset; - } else if (upto > startOffset) { - continue; - } - - if (matchedTokens.contains(token)) { - // Token matches. - fragments.add(new LookupHighlightFragment(text.substring(startOffset, endOffset), true)); - upto = endOffset; - } else if (prefixToken != null && token.startsWith(prefixToken)) { - fragments.add(new LookupHighlightFragment(text.substring(startOffset, startOffset+prefixToken.length()), true)); - if (prefixToken.length() < token.length()) { - fragments.add(new LookupHighlightFragment(text.substring(startOffset+prefixToken.length(), startOffset+token.length()), false)); + AnalyzingInfixSuggester suggester = + new AnalyzingInfixSuggester(newDirectory(), a, a, 3, false) { + @Override + protected Object highlight(String text, Set matchedTokens, String prefixToken) + throws IOException { + try (TokenStream ts = queryAnalyzer.tokenStream("text", new StringReader(text))) { + CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class); + OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class); + ts.reset(); + List fragments = new ArrayList<>(); + int upto = 0; + while (ts.incrementToken()) { + String token = termAtt.toString(); + int startOffset = offsetAtt.startOffset(); + int endOffset = offsetAtt.endOffset(); + if (upto < startOffset) { + fragments.add( + new LookupHighlightFragment(text.substring(upto, startOffset), false)); + upto = startOffset; + } else if (upto > startOffset) { + continue; + } + + if (matchedTokens.contains(token)) { + // Token matches. 
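
For orientation, the override above returns a structured highlight rather than a pre-rendered string: the lookup result's highlightKey ends up being the List of LookupHighlightFragment built here. A minimal consumption sketch (illustrative code, not part of the patch; only the fragment fields shown above are assumed):

  @SuppressWarnings("unchecked")
  static String render(Object highlightKey) {
    // The test casts highlightKey the same way; hit fragments get <b> markup.
    StringBuilder sb = new StringBuilder();
    for (LookupHighlightFragment f : (List<LookupHighlightFragment>) highlightKey) {
      sb.append(f.isHit ? "<b>" + f.text + "</b>" : f.text);
    }
    return sb.toString();
  }
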
+ fragments.add( + new LookupHighlightFragment(text.substring(startOffset, endOffset), true)); + upto = endOffset; + } else if (prefixToken != null && token.startsWith(prefixToken)) { + fragments.add( + new LookupHighlightFragment( + text.substring(startOffset, startOffset + prefixToken.length()), true)); + if (prefixToken.length() < token.length()) { + fragments.add( + new LookupHighlightFragment( + text.substring( + startOffset + prefixToken.length(), startOffset + token.length()), + false)); + } + upto = endOffset; } - upto = endOffset; } + ts.end(); + int endOffset = offsetAtt.endOffset(); + if (upto < endOffset) { + fragments.add(new LookupHighlightFragment(text.substring(upto), false)); + } + + return fragments; } - ts.end(); - int endOffset = offsetAtt.endOffset(); - if (upto < endOffset) { - fragments.add(new LookupHighlightFragment(text.substring(upto), false)); - } - - return fragments; } - } - }; + }; suggester.build(new InputArrayIterator(keys)); - List results = suggester.lookup(TestUtil.stringToCharSequence("ear", random()), 10, true, true); + List results = + suggester.lookup(TestUtil.stringToCharSequence("ear", random()), 10, true, true); assertEquals(1, results.size()); - assertEquals("a penny saved is a penny earned", toString((List) results.get(0).highlightKey)); + assertEquals( + "a penny saved is a penny earned", + toString((List) results.get(0).highlightKey)); assertEquals(10, results.get(0).value); assertEquals(new BytesRef("foobaz"), results.get(0).payload); suggester.close(); @@ -249,7 +275,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { public String toString(List fragments) { StringBuilder sb = new StringBuilder(); - for(LookupHighlightFragment fragment : fragments) { + for (LookupHighlightFragment fragment : fragments) { if (fragment.isHit) { sb.append(""); } @@ -263,22 +289,25 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { } public void testRandomMinPrefixLength() throws Exception { - Input keys[] = new Input[] { - new Input("lend me your ear", 8, new BytesRef("foobar")), - new Input("a penny saved is a penny earned", 10, new BytesRef("foobaz")), - }; + Input keys[] = + new Input[] { + new Input("lend me your ear", 8, new BytesRef("foobar")), + new Input("a penny saved is a penny earned", 10, new BytesRef("foobaz")), + }; Path tempDir = createTempDir("AnalyzingInfixSuggesterTest"); Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); int minPrefixLength = random().nextInt(10); - AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(newFSDirectory(tempDir), a, a, minPrefixLength, false); + AnalyzingInfixSuggester suggester = + new AnalyzingInfixSuggester(newFSDirectory(tempDir), a, a, minPrefixLength, false); suggester.build(new InputArrayIterator(keys)); - for(int i=0;i<2;i++) { - for(int j=0;j<2;j++) { + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { boolean doHighlight = j == 0; - List results = suggester.lookup(TestUtil.stringToCharSequence("ear", random()), 10, true, doHighlight); + List results = + suggester.lookup(TestUtil.stringToCharSequence("ear", random()), 10, true, doHighlight); assertEquals(2, results.size()); assertEquals("a penny saved is a penny earned", results.get(0).key); if (doHighlight) { @@ -293,7 +322,9 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { assertEquals(8, results.get(1).value); assertEquals(new BytesRef("foobar"), results.get(1).payload); - results = suggester.lookup(TestUtil.stringToCharSequence("ear ", random()), 
10, true, doHighlight); + results = + suggester.lookup( + TestUtil.stringToCharSequence("ear ", random()), 10, true, doHighlight); assertEquals(1, results.size()); assertEquals("lend me your ear", results.get(0).key); if (doHighlight) { @@ -302,20 +333,24 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { assertEquals(8, results.get(0).value); assertEquals(new BytesRef("foobar"), results.get(0).payload); - results = suggester.lookup(TestUtil.stringToCharSequence("pen", random()), 10, true, doHighlight); + results = + suggester.lookup(TestUtil.stringToCharSequence("pen", random()), 10, true, doHighlight); assertEquals(1, results.size()); assertEquals("a penny saved is a penny earned", results.get(0).key); if (doHighlight) { - assertEquals("a penny saved is a penny earned", results.get(0).highlightKey); + assertEquals( + "a penny saved is a penny earned", results.get(0).highlightKey); } assertEquals(10, results.get(0).value); assertEquals(new BytesRef("foobaz"), results.get(0).payload); - results = suggester.lookup(TestUtil.stringToCharSequence("p", random()), 10, true, doHighlight); + results = + suggester.lookup(TestUtil.stringToCharSequence("p", random()), 10, true, doHighlight); assertEquals(1, results.size()); assertEquals("a penny saved is a penny earned", results.get(0).key); if (doHighlight) { - assertEquals("a penny saved is a penny earned", results.get(0).highlightKey); + assertEquals( + "a penny saved is a penny earned", results.get(0).highlightKey); } assertEquals(10, results.get(0).value); assertEquals(new BytesRef("foobaz"), results.get(0).payload); @@ -323,21 +358,24 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { // Make sure things still work after close and reopen: suggester.close(); - suggester = new AnalyzingInfixSuggester(newFSDirectory(tempDir), a, a, minPrefixLength, false); + suggester = + new AnalyzingInfixSuggester(newFSDirectory(tempDir), a, a, minPrefixLength, false); } suggester.close(); a.close(); } public void testHighlight() throws Exception { - Input keys[] = new Input[] { - new Input("a penny saved is a penny earned", 10, new BytesRef("foobaz")), - }; + Input keys[] = + new Input[] { + new Input("a penny saved is a penny earned", 10, new BytesRef("foobaz")), + }; Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(newDirectory(), a, a, 3, false); suggester.build(new InputArrayIterator(keys)); - List results = suggester.lookup(TestUtil.stringToCharSequence("penn", random()), 10, true, true); + List results = + suggester.lookup(TestUtil.stringToCharSequence("penn", random()), 10, true, true); assertEquals(1, results.size()); assertEquals("a penny saved is a penny earned", results.get(0).key); assertEquals("a penny saved is a penny earned", results.get(0).highlightKey); @@ -346,14 +384,16 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { } public void testHighlightCaseChange() throws Exception { - Input keys[] = new Input[] { - new Input("a Penny saved is a penny earned", 10, new BytesRef("foobaz")), - }; + Input keys[] = + new Input[] { + new Input("a Penny saved is a penny earned", 10, new BytesRef("foobaz")), + }; Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true); AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(newDirectory(), a, a, 3, false); suggester.build(new InputArrayIterator(keys)); - List results = suggester.lookup(TestUtil.stringToCharSequence("penn", random()), 10, 
true, true); + List results = + suggester.lookup(TestUtil.stringToCharSequence("penn", random()), 10, true, true); assertEquals(1, results.size()); assertEquals("a Penny saved is a penny earned", results.get(0).key); assertEquals("a Penny saved is a penny earned", results.get(0).highlightKey); @@ -361,14 +401,16 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { // Try again, but overriding addPrefixMatch to highlight // the entire hit: - suggester = new AnalyzingInfixSuggester(newDirectory(), a, a, 3, false) { - @Override - protected void addPrefixMatch(StringBuilder sb, String surface, String analyzed, String prefixToken) { - sb.append(""); - sb.append(surface); - sb.append(""); - } - }; + suggester = + new AnalyzingInfixSuggester(newDirectory(), a, a, 3, false) { + @Override + protected void addPrefixMatch( + StringBuilder sb, String surface, String analyzed, String prefixToken) { + sb.append(""); + sb.append(surface); + sb.append(""); + } + }; suggester.build(new InputArrayIterator(keys)); results = suggester.lookup(TestUtil.stringToCharSequence("penn", random()), 10, true, true); assertEquals(1, results.size()); @@ -379,9 +421,10 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { } public void testDoubleClose() throws Exception { - Input keys[] = new Input[] { - new Input("a penny saved is a penny earned", 10, new BytesRef("foobaz")), - }; + Input keys[] = + new Input[] { + new Input("a penny saved is a penny earned", 10, new BytesRef("foobaz")), + }; Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(newDirectory(), a, a, 3, false); @@ -393,32 +436,35 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { public void testSuggestStopFilter() throws Exception { final CharArraySet stopWords = StopFilter.makeStopSet("a"); - Analyzer indexAnalyzer = new Analyzer() { - @Override - protected TokenStreamComponents createComponents(String fieldName) { - MockTokenizer tokens = new MockTokenizer(); - return new TokenStreamComponents(tokens, - new StopFilter(tokens, stopWords)); - } - }; + Analyzer indexAnalyzer = + new Analyzer() { + @Override + protected TokenStreamComponents createComponents(String fieldName) { + MockTokenizer tokens = new MockTokenizer(); + return new TokenStreamComponents(tokens, new StopFilter(tokens, stopWords)); + } + }; - Analyzer queryAnalyzer = new Analyzer() { - @Override - protected TokenStreamComponents createComponents(String fieldName) { - MockTokenizer tokens = new MockTokenizer(); - return new TokenStreamComponents(tokens, - new SuggestStopFilter(tokens, stopWords)); - } - }; + Analyzer queryAnalyzer = + new Analyzer() { + @Override + protected TokenStreamComponents createComponents(String fieldName) { + MockTokenizer tokens = new MockTokenizer(); + return new TokenStreamComponents(tokens, new SuggestStopFilter(tokens, stopWords)); + } + }; - AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(newDirectory(), indexAnalyzer, queryAnalyzer, 3, false); + AnalyzingInfixSuggester suggester = + new AnalyzingInfixSuggester(newDirectory(), indexAnalyzer, queryAnalyzer, 3, false); - Input keys[] = new Input[] { - new Input("a bob for apples", 10, new BytesRef("foobaz")), - }; + Input keys[] = + new Input[] { + new Input("a bob for apples", 10, new BytesRef("foobaz")), + }; suggester.build(new InputArrayIterator(keys)); - List results = suggester.lookup(TestUtil.stringToCharSequence("a", random()), 10, true, true); + List 
results = + suggester.lookup(TestUtil.stringToCharSequence("a", random()), 10, true, true); assertEquals(1, results.size()); assertEquals("a bob for apples", results.get(0).key); assertEquals("a bob for apples", results.get(0).highlightKey); @@ -430,10 +476,12 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(newDirectory(), a, a, 3, false); suggester.build(new InputArrayIterator(new Input[0])); - suggester.add(new BytesRef("a penny saved is a penny earned"), null, 10, new BytesRef("foobaz")); + suggester.add( + new BytesRef("a penny saved is a penny earned"), null, 10, new BytesRef("foobaz")); suggester.add(new BytesRef("lend me your ear"), null, 8, new BytesRef("foobar")); suggester.refresh(); - List results = suggester.lookup(TestUtil.stringToCharSequence("ear", random()), 10, true, true); + List results = + suggester.lookup(TestUtil.stringToCharSequence("ear", random()), 10, true, true); assertEquals(2, results.size()); assertEquals("a penny saved is a penny earned", results.get(0).key); assertEquals("a penny saved is a penny earned", results.get(0).highlightKey); @@ -477,7 +525,8 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { suggester.add(new BytesRef("the pen is pretty"), null, 10, new BytesRef("foobaz")); suggester.refresh(); - List results = suggester.lookup(TestUtil.stringToCharSequence("pen p", random()), 10, true, true); + List results = + suggester.lookup(TestUtil.stringToCharSequence("pen p", random()), 10, true, true); assertEquals(1, results.size()); assertEquals("the pen is pretty", results.get(0).key); assertEquals("the pen is pretty", results.get(0).highlightKey); @@ -489,9 +538,9 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { private static String randomText() { int numWords = TestUtil.nextInt(random(), 1, 4); - + StringBuilder b = new StringBuilder(); - for(int i=0;i 0) { b.append(' '); } @@ -530,8 +579,8 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { // simultaneous lookups while adding/updating to // see if there are any thread hazards: try { - suggester.lookup(TestUtil.stringToCharSequence(query, random()), - topN, allTermsRequired, doHilite); + suggester.lookup( + TestUtil.stringToCharSequence(query, random()), topN, allTermsRequired, doHilite); } catch (IOException ioe) { throw new RuntimeException(ioe); } @@ -548,7 +597,8 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { System.out.println(" minPrefixChars=" + minPrefixChars); } - AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(newFSDirectory(tempDir), a, a, minPrefixChars, false); + AnalyzingInfixSuggester suggester = + new AnalyzingInfixSuggester(newFSDirectory(tempDir), a, a, minPrefixChars, false); // Initial suggester built with nothing: suggester.build(new InputArrayIterator(new Input[0])); @@ -565,7 +615,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { List inputs = new ArrayList<>(); List pendingUpdates = new ArrayList<>(); - for(int iter=0;iter { if (a1.v > b.v) { return -1; @@ -716,24 +776,26 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { int topN = TestUtil.nextInt(random(), 1, expected.size()); - List actual = suggester.lookup(TestUtil.stringToCharSequence(query, random()), topN, allTermsRequired, doHilite); + List actual = + suggester.lookup( + TestUtil.stringToCharSequence(query, random()), topN, 
allTermsRequired, doHilite); int expectedCount = Math.min(topN, expected.size()); if (VERBOSE) { System.out.println(" expected:"); - for(int i=0;i"); b.append(inputTerm); @@ -797,15 +860,17 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { } public void testBasicNRT() throws Exception { - Input keys[] = new Input[] { - new Input("lend me your ear", 8, new BytesRef("foobar")), - }; + Input keys[] = + new Input[] { + new Input("lend me your ear", 8, new BytesRef("foobar")), + }; Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(newDirectory(), a, a, 3, false); suggester.build(new InputArrayIterator(keys)); - List results = suggester.lookup(TestUtil.stringToCharSequence("ear", random()), 10, true, true); + List results = + suggester.lookup(TestUtil.stringToCharSequence("ear", random()), 10, true, true); assertEquals(1, results.size()); assertEquals("lend me your ear", results.get(0).key); assertEquals("lend me your ear", results.get(0).highlightKey); @@ -813,7 +878,8 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { assertEquals(new BytesRef("foobar"), results.get(0).payload); // Add a new suggestion: - suggester.add(new BytesRef("a penny saved is a penny earned"), null, 10, new BytesRef("foobaz")); + suggester.add( + new BytesRef("a penny saved is a penny earned"), null, 10, new BytesRef("foobaz")); // Must refresh to see any newly added suggestions: suggester.refresh(); @@ -872,37 +938,45 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { } public void testNRTWithParallelAdds() throws IOException, InterruptedException { - String[] keys = new String[] {"python", "java", "c", "scala", "ruby", "clojure", "erlang", "go", "swift", "lisp"}; + String[] keys = + new String[] { + "python", "java", "c", "scala", "ruby", "clojure", "erlang", "go", "swift", "lisp" + }; Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); Path tempDir = createTempDir("AIS_NRT_PERSIST_TEST"); - AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(newFSDirectory(tempDir), a, a, 3, false); + AnalyzingInfixSuggester suggester = + new AnalyzingInfixSuggester(newFSDirectory(tempDir), a, a, 3, false); Thread[] multiAddThreads = new Thread[10]; // Cannot call refresh on an suggester when no docs are added to the index - expectThrows(IllegalStateException.class, () -> { - suggester.refresh(); - }); + expectThrows( + IllegalStateException.class, + () -> { + suggester.refresh(); + }); - for(int i=0; i<10; i++) { + for (int i = 0; i < 10; i++) { multiAddThreads[i] = new Thread(new IndexDocument(suggester, keys[i])); } - for(int i=0; i<10; i++) { + for (int i = 0; i < 10; i++) { multiAddThreads[i].start(); } - //Make sure all threads have completed indexing - for(int i=0; i<10; i++) { + // Make sure all threads have completed indexing + for (int i = 0; i < 10; i++) { multiAddThreads[i].join(); } suggester.refresh(); - List results = suggester.lookup(TestUtil.stringToCharSequence("python", random()), 10, true, false); + List results = + suggester.lookup(TestUtil.stringToCharSequence("python", random()), 10, true, false); assertEquals(1, results.size()); assertEquals("python", results.get(0).key); - //Test if the index is getting persisted correctly and can be reopened. + // Test if the index is getting persisted correctly and can be reopened. 
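
Taken together, the NRT assertions above boil down to a short add/refresh/commit lifecycle. A condensed sketch (names `dir` and `a` are assumed; every call appears in the test itself):

  AnalyzingInfixSuggester s = new AnalyzingInfixSuggester(dir, a, a, 3, false);
  s.build(new InputArrayIterator(new Input[0])); // may start empty
  s.add(new BytesRef("lend me your ear"), null, 8, new BytesRef("foobar"));
  s.refresh(); // additions become visible to lookup() only after refresh()
  List<Lookup.LookupResult> hits = s.lookup("ear", 10, true, false);
  s.commit(); // persist, so a new suggester over the same directory sees the data
  s.close();
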
suggester.commit(); suggester.close(); - AnalyzingInfixSuggester suggester2 = new AnalyzingInfixSuggester(newFSDirectory(tempDir), a, a, 3, false); + AnalyzingInfixSuggester suggester2 = + new AnalyzingInfixSuggester(newFSDirectory(tempDir), a, a, 3, false); results = suggester2.lookup(TestUtil.stringToCharSequence("python", random()), 10, true, false); assertEquals(1, results.size()); assertEquals("python", results.get(0).key); @@ -932,7 +1006,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { private Set asSet(String... values) { HashSet result = new HashSet<>(); - for(String value : values) { + for (String value : values) { result.add(new BytesRef(value)); } @@ -941,7 +1015,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { private Set asSet(byte[]... values) { HashSet result = new HashSet<>(); - for(byte[] value : values) { + for (byte[] value : values) { result.add(new BytesRef(value)); } @@ -950,14 +1024,16 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { // LUCENE-5528 and LUCENE-6464 public void testBasicContext() throws Exception { - Input keys[] = new Input[] { - new Input("lend me your ear", 8, new BytesRef("foobar"), asSet("foo", "bar")), - new Input("a penny saved is a penny earned", 10, new BytesRef("foobaz"), asSet("foo", "baz")) - }; + Input keys[] = + new Input[] { + new Input("lend me your ear", 8, new BytesRef("foobar"), asSet("foo", "bar")), + new Input( + "a penny saved is a penny earned", 10, new BytesRef("foobaz"), asSet("foo", "baz")) + }; Path tempDir = createTempDir("analyzingInfixContext"); - for(int iter=0;iter<2;iter++) { + for (int iter = 0; iter < 2; iter++) { AnalyzingInfixSuggester suggester; Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); if (iter == 0) { @@ -969,7 +1045,8 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { } // No context provided, all results returned - List results = suggester.lookup(TestUtil.stringToCharSequence("ear", random()), 10, true, true); + List results = + suggester.lookup(TestUtil.stringToCharSequence("ear", random()), 10, true, true); assertEquals(2, results.size()); LookupResult result = results.get(0); assertEquals("a penny saved is a penny earned", result.key); @@ -992,7 +1069,9 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { assertTrue(result.contexts.contains(new BytesRef("bar"))); // Both have "foo" context: - results = suggester.lookup(TestUtil.stringToCharSequence("ear", random()), asSet("foo"), 10, true, true); + results = + suggester.lookup( + TestUtil.stringToCharSequence("ear", random()), asSet("foo"), 10, true, true); assertEquals(2, results.size()); result = results.get(0); @@ -1016,7 +1095,9 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { assertTrue(result.contexts.contains(new BytesRef("bar"))); // Only one has "bar" context: - results = suggester.lookup(TestUtil.stringToCharSequence("ear", random()), asSet("bar"), 10, true, true); + results = + suggester.lookup( + TestUtil.stringToCharSequence("ear", random()), asSet("bar"), 10, true, true); assertEquals(1, results.size()); result = results.get(0); @@ -1032,13 +1113,17 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { // None do not have "foo" context: Map contextInfo = new HashMap<>(); contextInfo.put(new BytesRef("foo"), BooleanClause.Occur.MUST_NOT); - results = suggester.lookup(TestUtil.stringToCharSequence("ear", random()), contextInfo, 10, true, true); + results = + suggester.lookup( + 
TestUtil.stringToCharSequence("ear", random()), contextInfo, 10, true, true); assertEquals(0, results.size()); // Only one does not have "bar" context: contextInfo.clear(); contextInfo.put(new BytesRef("bar"), BooleanClause.Occur.MUST_NOT); - results = suggester.lookup(TestUtil.stringToCharSequence("ear", random()), contextInfo, 10, true, true); + results = + suggester.lookup( + TestUtil.stringToCharSequence("ear", random()), contextInfo, 10, true, true); assertEquals(1, results.size()); result = results.get(0); @@ -1052,7 +1137,9 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { assertTrue(result.contexts.contains(new BytesRef("baz"))); // Both have "foo" or "bar" context: - results = suggester.lookup(TestUtil.stringToCharSequence("ear", random()), asSet("foo", "bar"), 10, true, true); + results = + suggester.lookup( + TestUtil.stringToCharSequence("ear", random()), asSet("foo", "bar"), 10, true, true); assertEquals(2, results.size()); result = results.get(0); @@ -1076,7 +1163,9 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { assertTrue(result.contexts.contains(new BytesRef("bar"))); // Both have "bar" or "baz" context: - results = suggester.lookup(TestUtil.stringToCharSequence("ear", random()), asSet("bar", "baz"), 10, true, true); + results = + suggester.lookup( + TestUtil.stringToCharSequence("ear", random()), asSet("bar", "baz"), 10, true, true); assertEquals(2, results.size()); result = results.get(0); @@ -1103,7 +1192,9 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { contextInfo.clear(); contextInfo.put(new BytesRef("foo"), BooleanClause.Occur.MUST); contextInfo.put(new BytesRef("bar"), BooleanClause.Occur.MUST); - results = suggester.lookup(TestUtil.stringToCharSequence("ear", random()), contextInfo, 10, true, true); + results = + suggester.lookup( + TestUtil.stringToCharSequence("ear", random()), contextInfo, 10, true, true); assertEquals(1, results.size()); result = results.get(0); @@ -1120,21 +1211,27 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { contextInfo.clear(); contextInfo.put(new BytesRef("bar"), BooleanClause.Occur.MUST); contextInfo.put(new BytesRef("baz"), BooleanClause.Occur.MUST); - results = suggester.lookup(TestUtil.stringToCharSequence("ear", random()), contextInfo, 10, true, true); + results = + suggester.lookup( + TestUtil.stringToCharSequence("ear", random()), contextInfo, 10, true, true); assertEquals(0, results.size()); // None do not have "foo" and do not have "bar" context: contextInfo.clear(); contextInfo.put(new BytesRef("foo"), BooleanClause.Occur.MUST_NOT); contextInfo.put(new BytesRef("bar"), BooleanClause.Occur.MUST_NOT); - results = suggester.lookup(TestUtil.stringToCharSequence("ear", random()), contextInfo, 10, true, true); + results = + suggester.lookup( + TestUtil.stringToCharSequence("ear", random()), contextInfo, 10, true, true); assertEquals(0, results.size()); // Both do not have "bar" and do not have "baz" context: contextInfo.clear(); contextInfo.put(new BytesRef("bar"), BooleanClause.Occur.MUST_NOT); contextInfo.put(new BytesRef("baz"), BooleanClause.Occur.MUST_NOT); - results = suggester.lookup(TestUtil.stringToCharSequence("ear", random()), asSet("bar", "baz"), 10, true, true); + results = + suggester.lookup( + TestUtil.stringToCharSequence("ear", random()), asSet("bar", "baz"), 10, true, true); assertEquals(2, results.size()); result = results.get(0); @@ -1161,7 +1258,9 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { 
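
The context-filtering cases in this test use two equivalent entry points, sketched below under the assumption of an already-built suggester (both signatures appear verbatim in the surrounding hunks):

  Map<BytesRef, BooleanClause.Occur> ctx = new HashMap<>();
  ctx.put(new BytesRef("foo"), BooleanClause.Occur.MUST); // must carry context "foo"
  ctx.put(new BytesRef("bar"), BooleanClause.Occur.MUST_NOT); // must not carry "bar"
  List<Lookup.LookupResult> hits = suggester.lookup("ear", ctx, 10, true, true);

  // Equivalent, via an explicit filter query (LUCENE-6464):
  BooleanQuery.Builder filter = new BooleanQuery.Builder();
  suggester.addContextToQuery(filter, new BytesRef("foo"), BooleanClause.Occur.MUST);
  suggester.addContextToQuery(filter, new BytesRef("bar"), BooleanClause.Occur.MUST_NOT);
  hits = suggester.lookup("ear", filter.build(), 10, true, true);
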
contextInfo.clear(); contextInfo.put(new BytesRef("foo"), BooleanClause.Occur.MUST); contextInfo.put(new BytesRef("bar"), BooleanClause.Occur.MUST_NOT); - results = suggester.lookup(TestUtil.stringToCharSequence("ear", random()), contextInfo, 10, true, true); + results = + suggester.lookup( + TestUtil.stringToCharSequence("ear", random()), contextInfo, 10, true, true); assertEquals(1, results.size()); result = results.get(0); @@ -1173,15 +1272,18 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { assertEquals(2, result.contexts.size()); assertTrue(result.contexts.contains(new BytesRef("foo"))); assertTrue(result.contexts.contains(new BytesRef("baz"))); - - //LUCENE-6464 Using the advanced context filtering by query. - //Note that this is just a sanity test as all the above tests run through the filter by query method + + // LUCENE-6464 Using the advanced context filtering by query. + // Note that this is just a sanity test as all the above tests run through the filter by query + // method BooleanQuery.Builder query = new BooleanQuery.Builder(); suggester.addContextToQuery(query, new BytesRef("foo"), BooleanClause.Occur.MUST); suggester.addContextToQuery(query, new BytesRef("bar"), BooleanClause.Occur.MUST_NOT); - results = suggester.lookup(TestUtil.stringToCharSequence("ear", random()), query.build(), 10, true, true); + results = + suggester.lookup( + TestUtil.stringToCharSequence("ear", random()), query.build(), 10, true, true); assertEquals(1, results.size()); - + suggester.close(); a.close(); } @@ -1204,7 +1306,8 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { a.close(); } - private String pfmToString(AnalyzingInfixSuggester suggester, String surface, String prefix) throws IOException { + private String pfmToString(AnalyzingInfixSuggester suggester, String surface, String prefix) + throws IOException { StringBuilder sb = new StringBuilder(); suggester.addPrefixMatch(sb, surface, "", prefix); return sb.toString(); @@ -1216,14 +1319,19 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { byte[] context3 = new byte[1]; context3[0] = (byte) 0xff; - Input keys[] = new Input[] { - new Input("lend me your ear", 8, new BytesRef("foobar"), asSet(context1, context2)), - new Input("a penny saved is a penny earned", 10, new BytesRef("foobaz"), asSet(context1, context3)) - }; + Input keys[] = + new Input[] { + new Input("lend me your ear", 8, new BytesRef("foobar"), asSet(context1, context2)), + new Input( + "a penny saved is a penny earned", + 10, + new BytesRef("foobaz"), + asSet(context1, context3)) + }; Path tempDir = createTempDir("analyzingInfixContext"); - for(int iter=0;iter<2;iter++) { + for (int iter = 0; iter < 2; iter++) { AnalyzingInfixSuggester suggester; Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); if (iter == 0) { @@ -1235,7 +1343,9 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { } // Both have context1: - List results = suggester.lookup(TestUtil.stringToCharSequence("ear", random()), asSet(context1), 10, true, true); + List results = + suggester.lookup( + TestUtil.stringToCharSequence("ear", random()), asSet(context1), 10, true, true); assertEquals(2, results.size()); LookupResult result = results.get(0); @@ -1265,18 +1375,22 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { public void testContextNotAllTermsRequired() throws Exception { - Input keys[] = new Input[] { - new Input("lend me your ear", 8, new BytesRef("foobar"), asSet("foo", "bar")), - new 
Input("a penny saved is a penny earned", 10, new BytesRef("foobaz"), asSet("foo", "baz")) - }; + Input keys[] = + new Input[] { + new Input("lend me your ear", 8, new BytesRef("foobar"), asSet("foo", "bar")), + new Input( + "a penny saved is a penny earned", 10, new BytesRef("foobaz"), asSet("foo", "baz")) + }; Path tempDir = createTempDir("analyzingInfixContext"); Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); - AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(newFSDirectory(tempDir), a, a, 3, false); + AnalyzingInfixSuggester suggester = + new AnalyzingInfixSuggester(newFSDirectory(tempDir), a, a, 3, false); suggester.build(new InputArrayIterator(keys)); // No context provided, all results returned - List results = suggester.lookup(TestUtil.stringToCharSequence("ear", random()), 10, false, true); + List results = + suggester.lookup(TestUtil.stringToCharSequence("ear", random()), 10, false, true); assertEquals(2, results.size()); LookupResult result = results.get(0); assertEquals("a penny saved is a penny earned", result.key); @@ -1299,7 +1413,9 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { assertTrue(result.contexts.contains(new BytesRef("bar"))); // Both have "foo" context: - results = suggester.lookup(TestUtil.stringToCharSequence("ear", random()), asSet("foo"), 10, false, true); + results = + suggester.lookup( + TestUtil.stringToCharSequence("ear", random()), asSet("foo"), 10, false, true); assertEquals(2, results.size()); result = results.get(0); @@ -1323,7 +1439,9 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { assertTrue(result.contexts.contains(new BytesRef("bar"))); // Only one has "foo" context and len - results = suggester.lookup(TestUtil.stringToCharSequence("len", random()), asSet("foo"), 10, false, true); + results = + suggester.lookup( + TestUtil.stringToCharSequence("len", random()), asSet("foo"), 10, false, true); assertEquals(1, results.size()); result = results.get(0); @@ -1338,79 +1456,115 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { suggester.close(); } - + public void testCloseIndexWriterOnBuild() throws Exception { class MyAnalyzingInfixSuggester extends AnalyzingInfixSuggester { - public MyAnalyzingInfixSuggester(Directory dir, Analyzer indexAnalyzer, Analyzer queryAnalyzer, - int minPrefixChars, boolean commitOnBuild, boolean allTermsRequired, - boolean highlight, boolean closeIndexWriterOnBuild) throws IOException { - super(dir, indexAnalyzer, queryAnalyzer, minPrefixChars, commitOnBuild, - allTermsRequired, highlight, closeIndexWriterOnBuild); + public MyAnalyzingInfixSuggester( + Directory dir, + Analyzer indexAnalyzer, + Analyzer queryAnalyzer, + int minPrefixChars, + boolean commitOnBuild, + boolean allTermsRequired, + boolean highlight, + boolean closeIndexWriterOnBuild) + throws IOException { + super( + dir, + indexAnalyzer, + queryAnalyzer, + minPrefixChars, + commitOnBuild, + allTermsRequired, + highlight, + closeIndexWriterOnBuild); } + public IndexWriter getIndexWriter() { return writer; - } + } + public SearcherManager getSearcherManager() { return searcherMgr; } } - // After build(), when closeIndexWriterOnBuild = true: - // * The IndexWriter should be null + // After build(), when closeIndexWriterOnBuild = true: + // * The IndexWriter should be null // * The SearcherManager should be non-null - // * SearcherManager's IndexWriter reference should be closed + // * SearcherManager's IndexWriter reference should be closed // (as evidenced by 
maybeRefreshBlocking() throwing AlreadyClosedException) Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); Path tempDir = createTempDir("analyzingInfixContext"); - final MyAnalyzingInfixSuggester suggester = new MyAnalyzingInfixSuggester(newFSDirectory(tempDir), a, a, 3, false, - AnalyzingInfixSuggester.DEFAULT_ALL_TERMS_REQUIRED, AnalyzingInfixSuggester.DEFAULT_HIGHLIGHT, true); + final MyAnalyzingInfixSuggester suggester = + new MyAnalyzingInfixSuggester( + newFSDirectory(tempDir), + a, + a, + 3, + false, + AnalyzingInfixSuggester.DEFAULT_ALL_TERMS_REQUIRED, + AnalyzingInfixSuggester.DEFAULT_HIGHLIGHT, + true); suggester.build(new InputArrayIterator(sharedInputs)); assertNull(suggester.getIndexWriter()); assertNotNull(suggester.getSearcherManager()); - expectThrows(AlreadyClosedException.class, () -> suggester.getSearcherManager().maybeRefreshBlocking()); - + expectThrows( + AlreadyClosedException.class, () -> suggester.getSearcherManager().maybeRefreshBlocking()); + suggester.close(); // After instantiating from an already-built suggester dir: // * The IndexWriter should be null // * The SearcherManager should be non-null - final MyAnalyzingInfixSuggester suggester2 = new MyAnalyzingInfixSuggester(newFSDirectory(tempDir), a, a, 3, false, - AnalyzingInfixSuggester.DEFAULT_ALL_TERMS_REQUIRED, AnalyzingInfixSuggester.DEFAULT_HIGHLIGHT, true); + final MyAnalyzingInfixSuggester suggester2 = + new MyAnalyzingInfixSuggester( + newFSDirectory(tempDir), + a, + a, + 3, + false, + AnalyzingInfixSuggester.DEFAULT_ALL_TERMS_REQUIRED, + AnalyzingInfixSuggester.DEFAULT_HIGHLIGHT, + true); assertNull(suggester2.getIndexWriter()); assertNotNull(suggester2.getSearcherManager()); suggester2.close(); a.close(); } - + public void testCommitAfterBuild() throws Exception { - performOperationWithAllOptionCombinations(suggester -> { - suggester.build(new InputArrayIterator(sharedInputs)); - suggester.commit(); - }); + performOperationWithAllOptionCombinations( + suggester -> { + suggester.build(new InputArrayIterator(sharedInputs)); + suggester.commit(); + }); } public void testRefreshAfterBuild() throws Exception { - performOperationWithAllOptionCombinations(suggester -> { - suggester.build(new InputArrayIterator(sharedInputs)); - suggester.refresh(); - }); + performOperationWithAllOptionCombinations( + suggester -> { + suggester.build(new InputArrayIterator(sharedInputs)); + suggester.refresh(); + }); } - + public void testDisallowCommitBeforeBuild() throws Exception { - performOperationWithAllOptionCombinations - (suggester -> expectThrows(IllegalStateException.class, suggester::commit)); + performOperationWithAllOptionCombinations( + suggester -> expectThrows(IllegalStateException.class, suggester::commit)); } public void testDisallowRefreshBeforeBuild() throws Exception { - performOperationWithAllOptionCombinations - (suggester -> expectThrows(IllegalStateException.class, suggester::refresh)); + performOperationWithAllOptionCombinations( + suggester -> expectThrows(IllegalStateException.class, suggester::refresh)); } - private Input sharedInputs[] = new Input[] { - new Input("lend me your ear", 8, new BytesRef("foobar")), - new Input("a penny saved is a penny earned", 10, new BytesRef("foobaz")), - }; + private Input sharedInputs[] = + new Input[] { + new Input("lend me your ear", 8, new BytesRef("foobar")), + new Input("a penny saved is a penny earned", 10, new BytesRef("foobaz")), + }; private interface SuggesterOperation { void operate(AnalyzingInfixSuggester suggester) throws 
Exception; @@ -1420,7 +1574,8 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { * Perform the given operation on suggesters constructed with all combinations of options * commitOnBuild and closeIndexWriterOnBuild, including defaults. */ - private void performOperationWithAllOptionCombinations(SuggesterOperation operation) throws Exception { + private void performOperationWithAllOptionCombinations(SuggesterOperation operation) + throws Exception { Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(newDirectory(), a); @@ -1435,23 +1590,55 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { operation.operate(suggester); suggester.close(); - suggester = new AnalyzingInfixSuggester(newDirectory(), a, a, 3, true, - AnalyzingInfixSuggester.DEFAULT_ALL_TERMS_REQUIRED, AnalyzingInfixSuggester.DEFAULT_HIGHLIGHT, true); + suggester = + new AnalyzingInfixSuggester( + newDirectory(), + a, + a, + 3, + true, + AnalyzingInfixSuggester.DEFAULT_ALL_TERMS_REQUIRED, + AnalyzingInfixSuggester.DEFAULT_HIGHLIGHT, + true); operation.operate(suggester); suggester.close(); - suggester = new AnalyzingInfixSuggester(newDirectory(), a, a, 3, true, - AnalyzingInfixSuggester.DEFAULT_ALL_TERMS_REQUIRED, AnalyzingInfixSuggester.DEFAULT_HIGHLIGHT, false); + suggester = + new AnalyzingInfixSuggester( + newDirectory(), + a, + a, + 3, + true, + AnalyzingInfixSuggester.DEFAULT_ALL_TERMS_REQUIRED, + AnalyzingInfixSuggester.DEFAULT_HIGHLIGHT, + false); operation.operate(suggester); suggester.close(); - suggester = new AnalyzingInfixSuggester(newDirectory(), a, a, 3, false, - AnalyzingInfixSuggester.DEFAULT_ALL_TERMS_REQUIRED, AnalyzingInfixSuggester.DEFAULT_HIGHLIGHT, true); + suggester = + new AnalyzingInfixSuggester( + newDirectory(), + a, + a, + 3, + false, + AnalyzingInfixSuggester.DEFAULT_ALL_TERMS_REQUIRED, + AnalyzingInfixSuggester.DEFAULT_HIGHLIGHT, + true); operation.operate(suggester); suggester.close(); - suggester = new AnalyzingInfixSuggester(newDirectory(), a, a, 3, false, - AnalyzingInfixSuggester.DEFAULT_ALL_TERMS_REQUIRED, AnalyzingInfixSuggester.DEFAULT_HIGHLIGHT, false); + suggester = + new AnalyzingInfixSuggester( + newDirectory(), + a, + a, + 3, + false, + AnalyzingInfixSuggester.DEFAULT_ALL_TERMS_REQUIRED, + AnalyzingInfixSuggester.DEFAULT_HIGHLIGHT, + false); operation.operate(suggester); suggester.close(); diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggesterTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggesterTest.java index 9b1240c211f..bbdd76ac53b 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggesterTest.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggesterTest.java @@ -31,7 +31,6 @@ import java.util.List; import java.util.Map; import java.util.Set; import java.util.TreeSet; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.CannedBinaryTokenStream; import org.apache.lucene.analysis.CannedBinaryTokenStream.BinaryToken; @@ -62,35 +61,36 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { /** this is basically the WFST test ported to KeywordAnalyzer. 
so it acts the same */ public void testKeyword() throws Exception { - Iterable keys = shuffle( - new Input("foo", 50), - new Input("bar", 10), - new Input("barbar", 10), - new Input("barbar", 12), - new Input("barbara", 6), - new Input("bar", 5), - new Input("barbara", 1) - ); + Iterable keys = + shuffle( + new Input("foo", 50), + new Input("bar", 10), + new Input("barbar", 10), + new Input("barbar", 12), + new Input("barbara", 6), + new Input("bar", 5), + new Input("barbara", 1)); Directory tempDir = getDirectory(); Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.KEYWORD, false); AnalyzingSuggester suggester = new AnalyzingSuggester(tempDir, "suggest", analyzer); suggester.build(new InputArrayIterator(keys)); - + // top N of 2, but only foo is available - List results = suggester.lookup(TestUtil.stringToCharSequence("f", random()), false, 2); + List results = + suggester.lookup(TestUtil.stringToCharSequence("f", random()), false, 2); assertEquals(1, results.size()); assertEquals("foo", results.get(0).key.toString()); assertEquals(50, results.get(0).value, 0.01F); - + // top N of 1 for 'bar': we return this even though // barbar is higher because exactFirst is enabled: results = suggester.lookup(TestUtil.stringToCharSequence("bar", random()), false, 1); assertEquals(1, results.size()); assertEquals("bar", results.get(0).key.toString()); assertEquals(10, results.get(0).value, 0.01F); - + // top N Of 2 for 'b' results = suggester.lookup(TestUtil.stringToCharSequence("b", random()), false, 2); assertEquals(2, results.size()); @@ -98,7 +98,7 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { assertEquals(12, results.get(0).value, 0.01F); assertEquals("bar", results.get(1).key.toString()); assertEquals(10, results.get(1).value, 0.01F); - + // top N of 3 for 'ba' results = suggester.lookup(TestUtil.stringToCharSequence("ba", random()), false, 3); assertEquals(3, results.size()); @@ -108,31 +108,33 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { assertEquals(10, results.get(1).value, 0.01F); assertEquals("barbara", results.get(2).key.toString()); assertEquals(6, results.get(2).value, 0.01F); - + IOUtils.close(analyzer, tempDir); } - + public void testKeywordWithPayloads() throws Exception { - Iterable keys = shuffle( - new Input("foo", 50, new BytesRef("hello")), - new Input("bar", 10, new BytesRef("goodbye")), - new Input("barbar", 12, new BytesRef("thank you")), - new Input("bar", 9, new BytesRef("should be deduplicated")), - new Input("bar", 8, new BytesRef("should also be deduplicated")), - new Input("barbara", 6, new BytesRef("for all the fish"))); - + Iterable keys = + shuffle( + new Input("foo", 50, new BytesRef("hello")), + new Input("bar", 10, new BytesRef("goodbye")), + new Input("barbar", 12, new BytesRef("thank you")), + new Input("bar", 9, new BytesRef("should be deduplicated")), + new Input("bar", 8, new BytesRef("should also be deduplicated")), + new Input("barbara", 6, new BytesRef("for all the fish"))); + Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.KEYWORD, false); Directory tempDir = getDirectory(); AnalyzingSuggester suggester = new AnalyzingSuggester(tempDir, "suggest", analyzer); suggester.build(new InputArrayIterator(keys)); for (int i = 0; i < 2; i++) { // top N of 2, but only foo is available - List results = suggester.lookup(TestUtil.stringToCharSequence("f", random()), false, 2); + List results = + suggester.lookup(TestUtil.stringToCharSequence("f", random()), false, 2); assertEquals(1, results.size()); 
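
The exact-first ordering asserted above ("bar" beats the higher-weighted "barbar" at topN=1) is controlled by the option flags passed to the constructor, as used later in this file. A sketch with assumed names `dir` and `analyzer`; the trailing-argument comments reflect a reading of the constructor, not the patch:

  AnalyzingSuggester s =
      new AnalyzingSuggester(
          dir, "suggest", analyzer, analyzer,
          AnalyzingSuggester.EXACT_FIRST | AnalyzingSuggester.PRESERVE_SEP,
          256, // max surface forms per analyzed form
          -1, // max graph expansions (unbounded)
          true); // preserve position increments
  s.build(new InputArrayIterator(keys)); // keys as in the test above
  List<Lookup.LookupResult> hits = s.lookup("bar", false, 1); // exact match first
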
assertEquals("foo", results.get(0).key.toString()); assertEquals(50, results.get(0).value, 0.01F); assertEquals(new BytesRef("hello"), results.get(0).payload); - + // top N of 1 for 'bar': we return this even though // barbar is higher because exactFirst is enabled: results = suggester.lookup(TestUtil.stringToCharSequence("bar", random()), false, 1); @@ -140,7 +142,7 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { assertEquals("bar", results.get(0).key.toString()); assertEquals(10, results.get(0).value, 0.01F); assertEquals(new BytesRef("goodbye"), results.get(0).payload); - + // top N Of 2 for 'b' results = suggester.lookup(TestUtil.stringToCharSequence("b", random()), false, 2); assertEquals(2, results.size()); @@ -150,7 +152,7 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { assertEquals("bar", results.get(1).key.toString()); assertEquals(10, results.get(1).value, 0.01F); assertEquals(new BytesRef("goodbye"), results.get(1).payload); - + // top N of 3 for 'ba' results = suggester.lookup(TestUtil.stringToCharSequence("ba", random()), false, 3); assertEquals(3, results.size()); @@ -166,12 +168,12 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { } IOUtils.close(analyzer, tempDir); } - + public void testRandomRealisticKeys() throws IOException { LineFileDocs lineFile = new LineFileDocs(random()); Map mapping = new HashMap<>(); List keys = new ArrayList<>(); - + int howMany = atLeast(100); // this might bring up duplicates for (int i = 0; i < howMany; i++) { Document nextDoc = lineFile.nextDoc(); @@ -188,21 +190,31 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { Analyzer queryAnalyzer = new MockAnalyzer(random()); Directory tempDir = getDirectory(); - AnalyzingSuggester analyzingSuggester = new AnalyzingSuggester(tempDir, "suggest", indexAnalyzer, queryAnalyzer, - AnalyzingSuggester.EXACT_FIRST | AnalyzingSuggester.PRESERVE_SEP, 256, -1, random().nextBoolean()); + AnalyzingSuggester analyzingSuggester = + new AnalyzingSuggester( + tempDir, + "suggest", + indexAnalyzer, + queryAnalyzer, + AnalyzingSuggester.EXACT_FIRST | AnalyzingSuggester.PRESERVE_SEP, + 256, + -1, + random().nextBoolean()); boolean doPayloads = random().nextBoolean(); if (doPayloads) { List keysAndPayloads = new ArrayList<>(); for (Input termFreq : keys) { - keysAndPayloads.add(new Input(termFreq.term, termFreq.v, new BytesRef(Long.toString(termFreq.v)))); + keysAndPayloads.add( + new Input(termFreq.term, termFreq.v, new BytesRef(Long.toString(termFreq.v)))); } analyzingSuggester.build(new InputArrayIterator(keysAndPayloads)); } else { - analyzingSuggester.build(new InputArrayIterator(keys)); + analyzingSuggester.build(new InputArrayIterator(keys)); } - + for (Input termFreq : keys) { - List lookup = analyzingSuggester.lookup(termFreq.term.utf8ToString(), false, keys.size()); + List lookup = + analyzingSuggester.lookup(termFreq.term.utf8ToString(), false, keys.size()); for (LookupResult lookupResult : lookup) { assertEquals(mapping.get(lookupResult.key), Long.valueOf(lookupResult.value)); if (doPayloads) { @@ -212,24 +224,33 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { } } } - + IOUtils.close(lineFile, indexAnalyzer, queryAnalyzer, tempDir); } - + // TODO: more tests - /** - * basic "standardanalyzer" test with stopword removal - */ + /** basic "standardanalyzer" test with stopword removal */ public void testStandard() throws Exception { - final String input = "the ghost of christmas past the"; // trailing stopword there just to perturb 
possible bugs - Input keys[] = new Input[] { - new Input(input, 50), - }; + final String input = + "the ghost of christmas past the"; // trailing stopword there just to perturb possible bugs + Input keys[] = + new Input[] { + new Input(input, 50), + }; Directory tempDir = getDirectory(); - Analyzer standard = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET); - AnalyzingSuggester suggester = new AnalyzingSuggester(tempDir, "suggest", standard, standard, - AnalyzingSuggester.EXACT_FIRST | AnalyzingSuggester.PRESERVE_SEP, 256, -1, false); + Analyzer standard = + new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET); + AnalyzingSuggester suggester = + new AnalyzingSuggester( + tempDir, + "suggest", + standard, + standard, + AnalyzingSuggester.EXACT_FIRST | AnalyzingSuggester.PRESERVE_SEP, + 256, + -1, + false); suggester.build(new InputArrayIterator(keys)); List results; @@ -241,7 +262,8 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { assertEquals(50, results.get(0).value, 0.01F); // prefix of input stopping part way through christmas - results = suggester.lookup(TestUtil.stringToCharSequence("the ghost of chris", random()), false, 1); + results = + suggester.lookup(TestUtil.stringToCharSequence("the ghost of chris", random()), false, 1); assertEquals(1, results.size()); assertEquals(input, results.get(0).key.toString()); assertEquals(50, results.get(0).value, 0.01F); @@ -259,7 +281,9 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { assertEquals(50, results.get(0).value, 0.01F); // trailing stopword "the" - results = suggester.lookup(TestUtil.stringToCharSequence("ghost christmas past the", random()), false, 1); + results = + suggester.lookup( + TestUtil.stringToCharSequence("ghost christmas past the", random()), false, 1); assertEquals(1, results.size()); assertEquals(input, results.get(0).key.toString()); assertEquals(50, results.get(0).value, 0.01F); @@ -268,7 +292,8 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { } public void testEmpty() throws Exception { - Analyzer standard = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET); + Analyzer standard = + new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET); Directory tempDir = getDirectory(); AnalyzingSuggester suggester = new AnalyzingSuggester(tempDir, "suggest", standard); suggester.build(new InputArrayIterator(new Input[0])); @@ -279,22 +304,24 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { } public void testNoSeps() throws Exception { - Input[] keys = new Input[] { - new Input("ab cd", 0), - new Input("abcd", 1), - }; + Input[] keys = + new Input[] { + new Input("ab cd", 0), new Input("abcd", 1), + }; int options = 0; Analyzer a = new MockAnalyzer(random()); Directory tempDir = getDirectory(); - AnalyzingSuggester suggester = new AnalyzingSuggester(tempDir, "suggest", a, a, options, 256, -1, true); + AnalyzingSuggester suggester = + new AnalyzingSuggester(tempDir, "suggest", a, a, options, 256, -1, true); suggester.build(new InputArrayIterator(keys)); // TODO: would be nice if "ab " would allow the test to // pass, and more generally if the analyzer can know - // that the user's current query has ended at a word, + // that the user's current query has ended at a word, // but, analyzers don't produce SEP tokens! 
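
testNoSeps above runs with options == 0, i.e. PRESERVE_SEP disabled, so token separators are not encoded in the analyzed form and a spaced prefix can complete into an unspaced key. Condensed sketch (same inputs as the test; `dir` and `a` assumed):

  int options = 0; // PRESERVE_SEP intentionally not set
  AnalyzingSuggester s = new AnalyzingSuggester(dir, "suggest", a, a, options, 256, -1, true);
  s.build(new InputArrayIterator(new Input[] {new Input("ab cd", 0), new Input("abcd", 1)}));
  // "ab c" matches both "ab cd" and "abcd", hence the size-2 assertion below:
  List<Lookup.LookupResult> r = s.lookup("ab c", false, 2);
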
- List r = suggester.lookup(TestUtil.stringToCharSequence("ab c", random()), false, 2); + List r = + suggester.lookup(TestUtil.stringToCharSequence("ab c", random()), false, 2); assertEquals(2, r.size()); // With no PRESERVE_SEPS specified, "ab c" should also @@ -362,30 +389,30 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { public void testGraphDups() throws Exception { - final Analyzer analyzer = new MultiCannedAnalyzer( - new CannedTokenStream( - token("wifi",1,1), - token("hotspot",0,2), - token("network",1,1), - token("is",1,1), - token("slow",1,1)), - new CannedTokenStream( - token("wi",1,1), - token("hotspot",0,3), - token("fi",1,1), - token("network",1,1), - token("is",1,1), - token("fast",1,1)), - new CannedTokenStream( - token("wifi",1,1), - token("hotspot",0,2), - token("network",1,1))); + final Analyzer analyzer = + new MultiCannedAnalyzer( + new CannedTokenStream( + token("wifi", 1, 1), + token("hotspot", 0, 2), + token("network", 1, 1), + token("is", 1, 1), + token("slow", 1, 1)), + new CannedTokenStream( + token("wi", 1, 1), + token("hotspot", 0, 3), + token("fi", 1, 1), + token("network", 1, 1), + token("is", 1, 1), + token("fast", 1, 1)), + new CannedTokenStream( + token("wifi", 1, 1), token("hotspot", 0, 2), token("network", 1, 1))); - Input keys[] = new Input[] { - new Input("wifi network is slow", 50), - new Input("wi fi network is fast", 10), - }; - //AnalyzingSuggester suggester = new AnalyzingSuggester(analyzer, AnalyzingSuggester.EXACT_FIRST, 256, -1); + Input keys[] = + new Input[] { + new Input("wifi network is slow", 50), new Input("wi fi network is fast", 10), + }; + // AnalyzingSuggester suggester = new AnalyzingSuggester(analyzer, + // AnalyzingSuggester.EXACT_FIRST, 256, -1); Directory tempDir = getDirectory(); AnalyzingSuggester suggester = new AnalyzingSuggester(tempDir, "suggest", analyzer); suggester.build(new InputArrayIterator(keys)); @@ -408,26 +435,19 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { // final SynonymMap map = b.build(); // The Analyzer below mimics the functionality of the SynonymAnalyzer - // using the above map, so that the suggest module does not need a dependency on the + // using the above map, so that the suggest module does not need a dependency on the // synonym module - final Analyzer analyzer = new MultiCannedAnalyzer( - new CannedTokenStream( - token("ab", 1, 1), - token("ba", 0, 1), - token("xc", 1, 1)), - new CannedTokenStream( - token("ba", 1, 1), - token("xd", 1, 1)), - new CannedTokenStream( - token("ab",1,1), - token("ba",0,1), - token("x",1,1))); + final Analyzer analyzer = + new MultiCannedAnalyzer( + new CannedTokenStream(token("ab", 1, 1), token("ba", 0, 1), token("xc", 1, 1)), + new CannedTokenStream(token("ba", 1, 1), token("xd", 1, 1)), + new CannedTokenStream(token("ab", 1, 1), token("ba", 0, 1), token("x", 1, 1))); - Input keys[] = new Input[] { - new Input("ab xc", 50), - new Input("ba xd", 50), - }; + Input keys[] = + new Input[] { + new Input("ab xc", 50), new Input("ba xd", 50), + }; Directory tempDir = getDirectory(); AnalyzingSuggester suggester = new AnalyzingSuggester(tempDir, "suggest", analyzer); suggester.build(new InputArrayIterator(keys)); @@ -455,15 +475,15 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { final TermToBytesRefAttribute termBytesAtt = ts.addAttribute(TermToBytesRefAttribute.class); final PositionIncrementAttribute posIncAtt = ts.addAttribute(PositionIncrementAttribute.class); final PositionLengthAttribute posLengthAtt = 
ts.addAttribute(PositionLengthAttribute.class); - + while(ts.incrementToken()) { termBytesAtt.fillBytesRef(); - System.out.println(String.format("%s,%s,%s", termBytesAtt.getBytesRef().utf8ToString(), posIncAtt.getPositionIncrement(), posLengthAtt.getPositionLength())); + System.out.println(String.format("%s,%s,%s", termBytesAtt.getBytesRef().utf8ToString(), posIncAtt.getPositionIncrement(), posLengthAtt.getPositionLength())); } ts.end(); ts.close(); - } - */ + } + */ private Analyzer getUnusualAnalyzer() { // First three calls just returns "a", then returns ["a","b"], then "a" again @@ -481,19 +501,19 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { Analyzer a = getUnusualAnalyzer(); int options = AnalyzingSuggester.EXACT_FIRST | AnalyzingSuggester.PRESERVE_SEP; Directory tempDir = getDirectory(); - AnalyzingSuggester suggester = new AnalyzingSuggester(tempDir, "suggest", a, a, options, 256, -1, true); - suggester.build(new InputArrayIterator(new Input[] { - new Input("x y", 1), - new Input("x y z", 3), - new Input("x", 2), - new Input("z z z", 20), - })); + AnalyzingSuggester suggester = + new AnalyzingSuggester(tempDir, "suggest", a, a, options, 256, -1, true); + suggester.build( + new InputArrayIterator( + new Input[] { + new Input("x y", 1), new Input("x y z", 3), new Input("x", 2), new Input("z z z", 20), + })); - //System.out.println("ALL: " + suggester.lookup("x y", false, 6)); + // System.out.println("ALL: " + suggester.lookup("x y", false, 6)); - for(int topN=1;topN<6;topN++) { + for (int topN = 1; topN < 6; topN++) { List results = suggester.lookup("x y", false, topN); - //System.out.println("topN=" + topN + " " + results); + // System.out.println("topN=" + topN + " " + results); assertEquals(Math.min(topN, 4), results.size()); @@ -522,16 +542,17 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { Analyzer a = getUnusualAnalyzer(); Directory tempDir = getDirectory(); - AnalyzingSuggester suggester = new AnalyzingSuggester(tempDir, "suggest", a, a, AnalyzingSuggester.PRESERVE_SEP, 256, -1, true); + AnalyzingSuggester suggester = + new AnalyzingSuggester( + tempDir, "suggest", a, a, AnalyzingSuggester.PRESERVE_SEP, 256, -1, true); - suggester.build(new InputArrayIterator(new Input[] { - new Input("x y", 1), - new Input("x y z", 3), - new Input("x", 2), - new Input("z z z", 20), - })); + suggester.build( + new InputArrayIterator( + new Input[] { + new Input("x y", 1), new Input("x y z", 3), new Input("x", 2), new Input("z z z", 20), + })); - for(int topN=1;topN<6;topN++) { + for (int topN = 1; topN < 6; topN++) { List results = suggester.lookup("p", false, topN); assertEquals(Math.min(topN, 4), results.size()); @@ -546,7 +567,7 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { if (topN > 2) { assertEquals("x", results.get(2).key); assertEquals(2, results.get(2).value); - + if (topN > 3) { assertEquals("x y", results.get(3).key); assertEquals(1, results.get(3).value); @@ -556,7 +577,7 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { } IOUtils.close(a, tempDir); } - + // Holds surface form separately: private static class TermFreq2 implements Comparable { public final String surfaceForm; @@ -593,13 +614,14 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { } static boolean isStopChar(char ch, int numStopChars) { - //System.out.println("IS? " + ch + ": " + (ch - 'a') + ": " + ((ch - 'a') < numStopChars)); + // System.out.println("IS? 
" + ch + ": " + (ch - 'a') + ": " + ((ch - 'a') < numStopChars)); return (ch - 'a') < numStopChars; } // Like StopFilter: private static class TokenEater extends TokenFilter { - private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class); + private final PositionIncrementAttribute posIncrAtt = + addAttribute(PositionIncrementAttribute.class); private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); private final int numStopChars; private final boolean preserveHoles; @@ -631,7 +653,7 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { first = false; } posIncrAtt.setPositionIncrement(posInc); - //System.out.println("RETURN term=" + termAtt + " numStopChars=" + numStopChars); + // System.out.println("RETURN term=" + termAtt + " numStopChars=" + numStopChars); return true; } if (preserveHoles) { @@ -654,8 +676,12 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { @Override public TokenStreamComponents createComponents(String fieldName) { - MockTokenizer tokenizer = new MockTokenizer(MockUTF16TermAttributeImpl.UTF16_TERM_ATTRIBUTE_FACTORY, - MockTokenizer.WHITESPACE, false, MockTokenizer.DEFAULT_MAX_TOKEN_LENGTH); + MockTokenizer tokenizer = + new MockTokenizer( + MockUTF16TermAttributeImpl.UTF16_TERM_ATTRIBUTE_FACTORY, + MockTokenizer.WHITESPACE, + false, + MockTokenizer.DEFAULT_MAX_TOKEN_LENGTH); tokenizer.setEnableChecks(true); TokenStream next; if (numStopChars != 0) { @@ -673,11 +699,11 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { public void testRandom() throws Exception { int numQueries = atLeast(200); - + final List slowCompletor = new ArrayList<>(); final TreeSet allPrefixes = new TreeSet<>(); final Set seen = new HashSet<>(); - + boolean doPayloads = random().nextBoolean(); Input[] keys = null; @@ -694,18 +720,26 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { final boolean preserveHoles = random().nextBoolean(); if (VERBOSE) { - System.out.println("TEST: " + numQueries + " words; preserveSep=" + preserveSep + " numStopChars=" + numStopChars + " preserveHoles=" + preserveHoles); + System.out.println( + "TEST: " + + numQueries + + " words; preserveSep=" + + preserveSep + + " numStopChars=" + + numStopChars + + " preserveHoles=" + + preserveHoles); } - + for (int i = 0; i < numQueries; i++) { int numTokens = TestUtil.nextInt(random(), 1, 4); String key; String analyzedKey; - while(true) { + while (true) { key = ""; analyzedKey = ""; boolean lastRemoved = false; - for(int token=0;token < numTokens;token++) { + for (int token = 0; token < numTokens; token++) { String s; while (true) { // TODO: would be nice to fix this slowCompletor/comparator to @@ -715,7 +749,9 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { if (token > 0) { key += " "; } - if (preserveSep && analyzedKey.length() > 0 && analyzedKey.charAt(analyzedKey.length()-1) != SEP) { + if (preserveSep + && analyzedKey.length() > 0 + && analyzedKey.charAt(analyzedKey.length() - 1) != SEP) { analyzedKey += SEP; } key += s; @@ -750,7 +786,7 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { allPrefixes.add(key.substring(0, j)); } // we can probably do Integer.MAX_VALUE here, but why worry. 
- int weight = random().nextInt(1<<24); + int weight = random().nextInt(1 << 24); BytesRef payload; if (doPayloads) { byte[] bytes = new byte[random().nextInt(10)]; @@ -770,15 +806,29 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { // altering the test: List sorted = new ArrayList<>(slowCompletor); Collections.sort(sorted); - for(TermFreq2 ent : sorted) { - System.out.println(" surface='" + ent.surfaceForm + "' analyzed='" + ent.analyzedForm + "' weight=" + ent.weight); + for (TermFreq2 ent : sorted) { + System.out.println( + " surface='" + + ent.surfaceForm + + "' analyzed='" + + ent.analyzedForm + + "' weight=" + + ent.weight); } } Analyzer a = new MockTokenEatingAnalyzer(numStopChars, preserveHoles); Directory tempDir = getDirectory(); - AnalyzingSuggester suggester = new AnalyzingSuggester(tempDir, "suggest", a, a, - preserveSep ? AnalyzingSuggester.PRESERVE_SEP : 0, 256, -1, true); + AnalyzingSuggester suggester = + new AnalyzingSuggester( + tempDir, + "suggest", + a, + a, + preserveSep ? AnalyzingSuggester.PRESERVE_SEP : 0, + 256, + -1, + true); if (doPayloads) { suggester.build(new InputArrayIterator(shuffle(payloadKeys))); } else { @@ -792,7 +842,8 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { } final int topN = TestUtil.nextInt(random(), 1, 10); - List r = suggester.lookup(TestUtil.stringToCharSequence(prefix, random()), false, topN); + List r = + suggester.lookup(TestUtil.stringToCharSequence(prefix, random()), false, topN); // 2. go thru whole set to find suggestions: List matches = new ArrayList<>(); @@ -801,9 +852,9 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { String[] tokens = prefix.split(" "); StringBuilder builder = new StringBuilder(); boolean lastRemoved = false; - for(int i=0;i 0 && !builder.toString().endsWith(""+SEP)) { + if (preserveSep && builder.length() > 0 && !builder.toString().endsWith("" + SEP)) { builder.append(SEP); } @@ -855,17 +906,19 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { assertTrue(numStopChars > 0 || matches.size() > 0); if (matches.size() > 1) { - Collections.sort(matches, new Comparator() { - @Override - public int compare(TermFreq2 left, TermFreq2 right) { - int cmp = Float.compare(right.weight, left.weight); - if (cmp == 0) { - return left.analyzedForm.compareTo(right.analyzedForm); - } else { - return cmp; + Collections.sort( + matches, + new Comparator() { + @Override + public int compare(TermFreq2 left, TermFreq2 right) { + int cmp = Float.compare(right.weight, left.weight); + if (cmp == 0) { + return left.analyzedForm.compareTo(right.analyzedForm); + } else { + return cmp; + } } - } - }); + }); } if (matches.size() > topN) { @@ -874,20 +927,20 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { if (VERBOSE) { System.out.println(" expected:"); - for(TermFreq2 lr : matches) { + for (TermFreq2 lr : matches) { System.out.println(" key=" + lr.surfaceForm + " weight=" + lr.weight); } System.out.println(" actual:"); - for(LookupResult lr : r) { + for (LookupResult lr : r) { System.out.println(" key=" + lr.key + " weight=" + lr.value); } } assertEquals(matches.size(), r.size()); - for(int hit=0;hit results = suggester.lookup("a", false, 5); assertEquals(2, results.size()); @@ -917,14 +971,18 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { public void testQueueExhaustion() throws Exception { Analyzer a = new MockAnalyzer(random()); Directory tempDir = getDirectory(); - AnalyzingSuggester suggester = new AnalyzingSuggester(tempDir, "suggest", a, a, 
AnalyzingSuggester.EXACT_FIRST, 256, -1, true); + AnalyzingSuggester suggester = + new AnalyzingSuggester( + tempDir, "suggest", a, a, AnalyzingSuggester.EXACT_FIRST, 256, -1, true); - suggester.build(new InputArrayIterator(new Input[] { - new Input("a", 2), - new Input("a b c", 3), - new Input("a c a", 1), - new Input("a c b", 1), - })); + suggester.build( + new InputArrayIterator( + new Input[] { + new Input("a", 2), + new Input("a b c", 3), + new Input("a c a", 1), + new Input("a c b", 1), + })); suggester.lookup("a", false, 4); IOUtils.close(a, tempDir); @@ -935,13 +993,15 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { Analyzer a = new MockAnalyzer(random()); Directory tempDir = getDirectory(); - AnalyzingSuggester suggester = new AnalyzingSuggester(tempDir, "suggest", a, a, AnalyzingSuggester.EXACT_FIRST, 256, -1, true); + AnalyzingSuggester suggester = + new AnalyzingSuggester( + tempDir, "suggest", a, a, AnalyzingSuggester.EXACT_FIRST, 256, -1, true); - suggester.build(new InputArrayIterator(new Input[] { - new Input("a", 5), - new Input("a b", 3), - new Input("a c", 4), - })); + suggester.build( + new InputArrayIterator( + new Input[] { + new Input("a", 5), new Input("a b", 3), new Input("a c", 4), + })); assertEquals(3, suggester.getCount()); List results = suggester.lookup("a", false, 3); @@ -979,22 +1039,23 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { } public void testDupSurfaceFormsMissingResults() throws Exception { - Analyzer a = new Analyzer() { - @Override - protected TokenStreamComponents createComponents(String fieldName) { - return new TokenStreamComponents(r -> {}, new CannedTokenStream( - token("hairy", 1, 1), - token("smelly", 0, 1), - token("dog", 1, 1))); - } - }; + Analyzer a = + new Analyzer() { + @Override + protected TokenStreamComponents createComponents(String fieldName) { + return new TokenStreamComponents( + r -> {}, + new CannedTokenStream( + token("hairy", 1, 1), token("smelly", 0, 1), token("dog", 1, 1))); + } + }; Directory tempDir = getDirectory(); - AnalyzingSuggester suggester = new AnalyzingSuggester(tempDir, "suggest", a, a, 0, 256, -1, true); + AnalyzingSuggester suggester = + new AnalyzingSuggester(tempDir, "suggest", a, a, 0, 256, -1, true); - suggester.build(new InputArrayIterator(shuffle( - new Input("hambone", 6), - new Input("nellie", 5)))); + suggester.build( + new InputArrayIterator(shuffle(new Input("hambone", 6), new Input("nellie", 5)))); List results = suggester.lookup("nellie", false, 2); assertEquals(2, results.size()); @@ -1026,23 +1087,23 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { } public void testDupSurfaceFormsMissingResults2() throws Exception { - Analyzer a = new MultiCannedAnalyzer( - new CannedTokenStream( - token("p", 1, 1), - token("q", 1, 1), - token("r", 0, 1), - token("s", 0, 1)), - new CannedTokenStream(token("p", 1, 1)), - new CannedTokenStream(token("p", 1, 1)), - new CannedTokenStream(token("p", 1, 1))); + Analyzer a = + new MultiCannedAnalyzer( + new CannedTokenStream( + token("p", 1, 1), token("q", 1, 1), token("r", 0, 1), token("s", 0, 1)), + new CannedTokenStream(token("p", 1, 1)), + new CannedTokenStream(token("p", 1, 1)), + new CannedTokenStream(token("p", 1, 1))); Directory tempDir = getDirectory(); - AnalyzingSuggester suggester = new AnalyzingSuggester(tempDir, "suggest", a, a, 0, 256, -1, true); + AnalyzingSuggester suggester = + new AnalyzingSuggester(tempDir, "suggest", a, a, 0, 256, -1, true); - suggester.build(new InputArrayIterator(new Input[] { 
- new Input("a", 6), - new Input("b", 5), - })); + suggester.build( + new InputArrayIterator( + new Input[] { + new Input("a", 6), new Input("b", 5), + })); List results = suggester.lookup("a", false, 2); assertEquals(2, results.size()); @@ -1074,14 +1135,15 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { } /** - * Adds 50 random keys, that all analyze to the same thing (dog), with the same cost, - * and checks that they come back in surface-form order. + * Adds 50 random keys, that all analyze to the same thing (dog), with the same cost, and checks + * that they come back in surface-form order. */ public void testTieBreakOnSurfaceForm() throws Exception { Analyzer a = new MultiCannedAnalyzer(new CannedTokenStream(token("dog", 1, 1))); Directory tempDir = getDirectory(); - AnalyzingSuggester suggester = new AnalyzingSuggester(tempDir, "suggest", a, a, 0, 256, -1, true); + AnalyzingSuggester suggester = + new AnalyzingSuggester(tempDir, "suggest", a, a, 0, 256, -1, true); // make 50 inputs all with the same cost of 1, random strings Input[] inputs = new Input[100]; @@ -1107,45 +1169,53 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { List results = suggester.lookup("", false, 50); assertEquals(50, results.size()); for (int i = 1; i < 50; i++) { - String previous = results.get(i-1).toString(); + String previous = results.get(i - 1).toString(); String current = results.get(i).toString(); - assertTrue("surface forms out of order: previous=" + previous + ",current=" + current, - current.compareTo(previous) >= 0); + assertTrue( + "surface forms out of order: previous=" + previous + ",current=" + current, + current.compareTo(previous) >= 0); } IOUtils.close(a, tempDir); } public void test0ByteKeys() throws Exception { - final Analyzer a = new MultiCannedAnalyzer(true, - new CannedBinaryTokenStream(token(new BytesRef(new byte[] {0x0, 0x0, 0x0}))), - new CannedBinaryTokenStream(token(new BytesRef(new byte[] {0x0, 0x0}))), - new CannedBinaryTokenStream(token(new BytesRef(new byte[] {0x0, 0x0, 0x0}))), - new CannedBinaryTokenStream(token(new BytesRef(new byte[] {0x0, 0x0}))) - ); + final Analyzer a = + new MultiCannedAnalyzer( + true, + new CannedBinaryTokenStream(token(new BytesRef(new byte[] {0x0, 0x0, 0x0}))), + new CannedBinaryTokenStream(token(new BytesRef(new byte[] {0x0, 0x0}))), + new CannedBinaryTokenStream(token(new BytesRef(new byte[] {0x0, 0x0, 0x0}))), + new CannedBinaryTokenStream(token(new BytesRef(new byte[] {0x0, 0x0})))); Directory tempDir = getDirectory(); - AnalyzingSuggester suggester = new AnalyzingSuggester(tempDir, "suggest", a, a, 0, 256, -1, true); + AnalyzingSuggester suggester = + new AnalyzingSuggester(tempDir, "suggest", a, a, 0, 256, -1, true); + + suggester.build( + new InputArrayIterator( + new Input[] { + new Input("a a", 50), new Input("a b", 50), + })); - suggester.build(new InputArrayIterator(new Input[] { - new Input("a a", 50), - new Input("a b", 50), - })); - IOUtils.close(a, tempDir); } public void testDupSurfaceFormsMissingResults3() throws Exception { Analyzer a = new MockAnalyzer(random()); Directory tempDir = getDirectory(); - AnalyzingSuggester suggester = new AnalyzingSuggester(tempDir, "suggest", a, a, AnalyzingSuggester.PRESERVE_SEP, 256, -1, true); - suggester.build(new InputArrayIterator(new Input[] { - new Input("a a", 7), - new Input("a a", 7), - new Input("a c", 6), - new Input("a c", 3), - new Input("a b", 5), - })); + AnalyzingSuggester suggester = + new AnalyzingSuggester( + tempDir, "suggest", a, a, 
AnalyzingSuggester.PRESERVE_SEP, 256, -1, true); + suggester.build( + new InputArrayIterator( + new Input[] { + new Input("a a", 7), + new Input("a a", 7), + new Input("a c", 6), + new Input("a c", 3), + new Input("a b", 5), + })); assertEquals("[a a/7, a c/6, a b/5]", suggester.lookup("a", false, 3).toString()); IOUtils.close(tempDir, a); } @@ -1153,11 +1223,14 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { public void testEndingSpace() throws Exception { Analyzer a = new MockAnalyzer(random()); Directory tempDir = getDirectory(); - AnalyzingSuggester suggester = new AnalyzingSuggester(tempDir, "suggest", a, a, AnalyzingSuggester.PRESERVE_SEP, 256, -1, true); - suggester.build(new InputArrayIterator(new Input[] { - new Input("i love lucy", 7), - new Input("isla de muerta", 8), - })); + AnalyzingSuggester suggester = + new AnalyzingSuggester( + tempDir, "suggest", a, a, AnalyzingSuggester.PRESERVE_SEP, 256, -1, true); + suggester.build( + new InputArrayIterator( + new Input[] { + new Input("i love lucy", 7), new Input("isla de muerta", 8), + })); assertEquals("[isla de muerta/8, i love lucy/7]", suggester.lookup("i", false, 3).toString()); assertEquals("[i love lucy/7]", suggester.lookup("i ", false, 3).toString()); IOUtils.close(a, tempDir); @@ -1165,40 +1238,48 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { public void testTooManyExpansions() throws Exception { - final Analyzer a = new Analyzer() { - @Override - protected TokenStreamComponents createComponents(String fieldName) { - return new TokenStreamComponents(r -> {}, new CannedTokenStream( - new Token("a", 0, 1), - new Token("b", 0, 0, 1))); - } - }; + final Analyzer a = + new Analyzer() { + @Override + protected TokenStreamComponents createComponents(String fieldName) { + return new TokenStreamComponents( + r -> {}, new CannedTokenStream(new Token("a", 0, 1), new Token("b", 0, 0, 1))); + } + }; Directory tempDir = getDirectory(); - AnalyzingSuggester suggester = new AnalyzingSuggester(tempDir, "suggest", a, a, 0, 256, 1, true); + AnalyzingSuggester suggester = + new AnalyzingSuggester(tempDir, "suggest", a, a, 0, 256, 1, true); suggester.build(new InputArrayIterator(new Input[] {new Input("a", 1)})); assertEquals("[a/1]", suggester.lookup("a", false, 1).toString()); IOUtils.close(a, tempDir); } - + public void testIllegalLookupArgument() throws Exception { Analyzer a = new MockAnalyzer(random()); Directory tempDir = getDirectory(); - AnalyzingSuggester suggester = new AnalyzingSuggester(tempDir, "suggest", a, a, 0, 256, -1, true); - suggester.build(new InputArrayIterator(new Input[] { - new Input("а где Люси?", 7), - })); - expectThrows(IllegalArgumentException.class, () -> { - suggester.lookup("а\u001E", false, 3); - }); - expectThrows(IllegalArgumentException.class, () -> { - suggester.lookup("а\u001F", false, 3); - }); + AnalyzingSuggester suggester = + new AnalyzingSuggester(tempDir, "suggest", a, a, 0, 256, -1, true); + suggester.build( + new InputArrayIterator( + new Input[] { + new Input("а где Люси?", 7), + })); + expectThrows( + IllegalArgumentException.class, + () -> { + suggester.lookup("а\u001E", false, 3); + }); + expectThrows( + IllegalArgumentException.class, + () -> { + suggester.lookup("а\u001F", false, 3); + }); IOUtils.close(a, tempDir); } - static final Iterable shuffle(Input...values) { + static final Iterable shuffle(Input... 
values) { final List asList = new ArrayList<>(values.length); for (Input value : values) { asList.add(value); @@ -1213,15 +1294,17 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { Directory tempDir = getDirectory(); AnalyzingSuggester suggester = new AnalyzingSuggester(tempDir, "suggest", a); String bigString = TestUtil.randomSimpleString(random(), 30000, 30000); - IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> { - suggester.build(new InputArrayIterator(new Input[] { - new Input(bigString, 7)})); - }); + IllegalArgumentException ex = + expectThrows( + IllegalArgumentException.class, + () -> { + suggester.build(new InputArrayIterator(new Input[] {new Input(bigString, 7)})); + }); assertTrue(ex.getMessage().contains("input automaton is too large")); IOUtils.close(a, tempDir); } - private Directory getDirectory() { + private Directory getDirectory() { return newDirectory(); } } diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggesterTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggesterTest.java index 296e40452d2..8717dbe2424 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggesterTest.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggesterTest.java @@ -21,7 +21,6 @@ import java.nio.file.Path; import java.util.List; import java.util.Map; import java.util.Set; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.MockAnalyzer; @@ -38,64 +37,63 @@ import org.apache.lucene.util.TestUtil; public class BlendedInfixSuggesterTest extends LuceneTestCase { - - /** - * Test the weight transformation depending on the position - * of the matching term. - */ + /** Test the weight transformation depending on the position of the matching term. */ public void testBlendedSort() throws IOException { BytesRef payload = new BytesRef("star"); - Input keys[] = new Input[]{ - new Input("star wars: episode v - the empire strikes back", 8, payload) - }; + Input keys[] = + new Input[] {new Input("star wars: episode v - the empire strikes back", 8, payload)}; BlendedInfixSuggester suggester = getBlendedInfixSuggester(keys); assertSuggestionsRanking(payload, suggester); } /** - * Test to validate the suggestions ranking according to the position coefficient, - * even if the weight associated to the suggestion is unitary. + * Test to validate the suggestions ranking according to the position coefficient, even if the + * weight associated to the suggestion is unitary. */ - public void testBlendedSort_fieldWeightUnitary_shouldRankSuggestionsByPositionMatch() throws IOException { + public void testBlendedSort_fieldWeightUnitary_shouldRankSuggestionsByPositionMatch() + throws IOException { BytesRef payload = new BytesRef("star"); - Input keys[] = new Input[]{ - new Input("star wars: episode v - the empire strikes back", 1, payload) - }; + Input keys[] = + new Input[] {new Input("star wars: episode v - the empire strikes back", 1, payload)}; BlendedInfixSuggester suggester = getBlendedInfixSuggester(keys); assertSuggestionsRanking(payload, suggester); } /** - * Test to validate the suggestions ranking according to the position coefficient, - * even if the weight associated to the suggestion is zero. 
+ * Test to validate the suggestions ranking according to the position coefficient, even if the + * weight associated to the suggestion is zero. */ - public void testBlendedSort_fieldWeightZero_shouldRankSuggestionsByPositionMatch() throws IOException { + public void testBlendedSort_fieldWeightZero_shouldRankSuggestionsByPositionMatch() + throws IOException { BytesRef payload = new BytesRef("star"); - Input keys[] = new Input[]{ - new Input("star wars: episode v - the empire strikes back", 0, payload) - }; + Input keys[] = + new Input[] {new Input("star wars: episode v - the empire strikes back", 0, payload)}; BlendedInfixSuggester suggester = getBlendedInfixSuggester(keys); assertSuggestionsRanking(payload, suggester); } /** - * Test to validate the suggestions ranking according to the position coefficient, - * even if the weight associated to the suggestion is very big, no overflow should happen. + * Test to validate the suggestions ranking according to the position coefficient, even if the + * weight associated to the suggestion is very big, no overflow should happen. */ - public void testBlendedSort_fieldWeightLongMax_shouldRankSuggestionsByPositionMatchWithNoOverflow() throws IOException { + public void + testBlendedSort_fieldWeightLongMax_shouldRankSuggestionsByPositionMatchWithNoOverflow() + throws IOException { BytesRef payload = new BytesRef("star"); - Input keys[] = new Input[]{ - new Input("star wars: episode v - the empire strikes back", Long.MAX_VALUE, payload) - }; + Input keys[] = + new Input[] { + new Input("star wars: episode v - the empire strikes back", Long.MAX_VALUE, payload) + }; BlendedInfixSuggester suggester = getBlendedInfixSuggester(keys); assertSuggestionsRanking(payload, suggester); } - private void assertSuggestionsRanking(BytesRef payload, BlendedInfixSuggester suggester) throws IOException { + private void assertSuggestionsRanking(BytesRef payload, BlendedInfixSuggester suggester) + throws IOException { // we query for star wars and check that the weight // is smaller when we search for tokens that are far from the beginning @@ -118,25 +116,26 @@ public class BlendedInfixSuggesterTest extends LuceneTestCase { Path tempDir = createTempDir("BlendedInfixSuggesterTest"); Analyzer a = new StandardAnalyzer(CharArraySet.EMPTY_SET); - BlendedInfixSuggester suggester = new BlendedInfixSuggester(newFSDirectory(tempDir), a, a, - AnalyzingInfixSuggester.DEFAULT_MIN_PREFIX_CHARS, - BlendedInfixSuggester.BlenderType.POSITION_LINEAR, - BlendedInfixSuggester.DEFAULT_NUM_FACTOR, false); + BlendedInfixSuggester suggester = + new BlendedInfixSuggester( + newFSDirectory(tempDir), + a, + a, + AnalyzingInfixSuggester.DEFAULT_MIN_PREFIX_CHARS, + BlendedInfixSuggester.BlenderType.POSITION_LINEAR, + BlendedInfixSuggester.DEFAULT_NUM_FACTOR, + false); suggester.build(new InputArrayIterator(keys)); return suggester; } - /** - * Verify the different flavours of the blender types - */ + /** Verify the different flavours of the blender types */ public void testBlendingType() throws IOException { BytesRef pl = new BytesRef("lake"); long w = 20; - Input keys[] = new Input[]{ - new Input("top of the lake", w, pl) - }; + Input keys[] = new Input[] {new Input("top of the lake", w, pl)}; Path tempDir = createTempDir("BlendedInfixSuggesterTest"); Analyzer a = new StandardAnalyzer(CharArraySet.EMPTY_SET); @@ -152,9 +151,15 @@ public class BlendedInfixSuggesterTest extends LuceneTestCase { suggester.close(); // BlenderType.RECIPROCAL is using 1/(1+p) * w where w is weight and p the position of 
the word - suggester = new BlendedInfixSuggester(newFSDirectory(tempDir), a, a, - AnalyzingInfixSuggester.DEFAULT_MIN_PREFIX_CHARS, - BlendedInfixSuggester.BlenderType.POSITION_RECIPROCAL, 1, false); + suggester = + new BlendedInfixSuggester( + newFSDirectory(tempDir), + a, + a, + AnalyzingInfixSuggester.DEFAULT_MIN_PREFIX_CHARS, + BlendedInfixSuggester.BlenderType.POSITION_RECIPROCAL, + 1, + false); suggester.build(new InputArrayIterator(keys)); assertEquals(w, getInResults(suggester, "top", pl, 1)); @@ -162,10 +167,20 @@ public class BlendedInfixSuggesterTest extends LuceneTestCase { assertEquals((int) (w * 1 / (1 + 3)), getInResults(suggester, "lake", pl, 1)); suggester.close(); - // BlenderType.EXPONENTIAL_RECIPROCAL is using 1/(pow(1+p, exponent)) * w where w is weight and p the position of the word - suggester = new BlendedInfixSuggester(newFSDirectory(tempDir), a, a, - AnalyzingInfixSuggester.DEFAULT_MIN_PREFIX_CHARS, - BlendedInfixSuggester.BlenderType.POSITION_EXPONENTIAL_RECIPROCAL, 1, 4.0, false, true, false); + // BlenderType.EXPONENTIAL_RECIPROCAL is using 1/(pow(1+p, exponent)) * w where w is weight and + // p the position of the word + suggester = + new BlendedInfixSuggester( + newFSDirectory(tempDir), + a, + a, + AnalyzingInfixSuggester.DEFAULT_MIN_PREFIX_CHARS, + BlendedInfixSuggester.BlenderType.POSITION_EXPONENTIAL_RECIPROCAL, + 1, + 4.0, + false, + true, + false); suggester.build(new InputArrayIterator(keys)); @@ -186,23 +201,29 @@ public class BlendedInfixSuggesterTest extends LuceneTestCase { BytesRef star = new BytesRef("star"); BytesRef ret = new BytesRef("ret"); - Input keys[] = new Input[]{ - new Input("top of the lake", 18, lake), - new Input("star wars: episode v - the empire strikes back", 12, star), - new Input("the returned", 10, ret), - }; + Input keys[] = + new Input[] { + new Input("top of the lake", 18, lake), + new Input("star wars: episode v - the empire strikes back", 12, star), + new Input("the returned", 10, ret), + }; Path tempDir = createTempDir("BlendedInfixSuggesterTest"); Analyzer a = new StandardAnalyzer(CharArraySet.EMPTY_SET); // if factor is small, we don't get the expected element - BlendedInfixSuggester suggester = new BlendedInfixSuggester(newFSDirectory(tempDir), a, a, - AnalyzingInfixSuggester.DEFAULT_MIN_PREFIX_CHARS, - BlendedInfixSuggester.BlenderType.POSITION_RECIPROCAL, 1, false); + BlendedInfixSuggester suggester = + new BlendedInfixSuggester( + newFSDirectory(tempDir), + a, + a, + AnalyzingInfixSuggester.DEFAULT_MIN_PREFIX_CHARS, + BlendedInfixSuggester.BlenderType.POSITION_RECIPROCAL, + 1, + false); suggester.build(new InputArrayIterator(keys)); - // we don't find it for in the 2 first assertEquals(2, suggester.lookup("the", 2, true, false).size()); long w0 = getInResults(suggester, "the", ret, 2); @@ -216,9 +237,15 @@ public class BlendedInfixSuggesterTest extends LuceneTestCase { suggester.close(); // if we increase the factor we have it - suggester = new BlendedInfixSuggester(newFSDirectory(tempDir), a, a, - AnalyzingInfixSuggester.DEFAULT_MIN_PREFIX_CHARS, - BlendedInfixSuggester.BlenderType.POSITION_RECIPROCAL, 2, false); + suggester = + new BlendedInfixSuggester( + newFSDirectory(tempDir), + a, + a, + AnalyzingInfixSuggester.DEFAULT_MIN_PREFIX_CHARS, + BlendedInfixSuggester.BlenderType.POSITION_RECIPROCAL, + 2, + false); suggester.build(new InputArrayIterator(keys)); // we have it @@ -232,16 +259,12 @@ public class BlendedInfixSuggesterTest extends LuceneTestCase { suggester.close(); } - /** - * Handle trailing spaces that 
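[Editor's note] The two reciprocal blender formulas named in the comments above, written out as plain Java so the assertions are easy to check by hand. This is an illustrative sketch, not patch code; w is the stored weight, p is the 0-based position of the first matching token, and the (long) casts mirror the test's integer truncation.

// POSITION_RECIPROCAL: 1/(1+p) * w. For "lake" in "top of the lake", p == 3,
// so a stored weight of 20 blends to 20 / 4 == 5 -- exactly the
// (int) (w * 1 / (1 + 3)) asserted above.
static long positionReciprocal(long w, int p) {
  return (long) (w * (1.0 / (1 + p)));
}

// POSITION_EXPONENTIAL_RECIPROCAL: 1/((1+p)^exponent) * w; the test passes
// exponent = 4.0 to the constructor.
static long positionExponentialReciprocal(long w, int p, double exponent) {
  return (long) (w * (1.0 / Math.pow(1 + p, exponent)));
}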
result in no prefix token LUCENE-6093 - */ + /** Handle trailing spaces that result in no prefix token LUCENE-6093 */ public void testNullPrefixToken() throws IOException { BytesRef payload = new BytesRef("lake"); - Input keys[] = new Input[]{ - new Input("top of the lake", 8, payload) - }; + Input keys[] = new Input[] {new Input("top of the lake", 8, payload)}; BlendedInfixSuggester suggester = getBlendedInfixSuggester(keys); @@ -254,71 +277,72 @@ public class BlendedInfixSuggesterTest extends LuceneTestCase { public void testBlendedInfixSuggesterDedupsOnWeightTitleAndPayload() throws Exception { - //exactly same inputs - Input[] inputDocuments = new Input[] { - new Input("lend me your ear", 7, new BytesRef("uid1")), - new Input("lend me your ear", 7, new BytesRef("uid1")), - }; + // exactly same inputs + Input[] inputDocuments = + new Input[] { + new Input("lend me your ear", 7, new BytesRef("uid1")), + new Input("lend me your ear", 7, new BytesRef("uid1")), + }; duplicateCheck(inputDocuments, 1); // inputs differ on payload - inputDocuments = new Input[] { - new Input("lend me your ear", 7, new BytesRef("uid1")), - new Input("lend me your ear", 7, new BytesRef("uid2")), - }; + inputDocuments = + new Input[] { + new Input("lend me your ear", 7, new BytesRef("uid1")), + new Input("lend me your ear", 7, new BytesRef("uid2")), + }; duplicateCheck(inputDocuments, 2); - //exactly same input without payloads - inputDocuments = new Input[] { - new Input("lend me your ear", 7), - new Input("lend me your ear", 7), - }; + // exactly same input without payloads + inputDocuments = + new Input[] { + new Input("lend me your ear", 7), new Input("lend me your ear", 7), + }; duplicateCheck(inputDocuments, 1); - //Same input with first has payloads, second does not - inputDocuments = new Input[] { - new Input("lend me your ear", 7, new BytesRef("uid1")), - new Input("lend me your ear", 7), - }; + // Same input with first has payloads, second does not + inputDocuments = + new Input[] { + new Input("lend me your ear", 7, new BytesRef("uid1")), new Input("lend me your ear", 7), + }; duplicateCheck(inputDocuments, 2); - /**same input, first not having a payload, the second having payload - * we would expect 2 entries out but we are getting only 1 because - * the InputArrayIterator#hasPayloads() returns false because the first - * item has no payload, therefore, when ingested, none of the 2 input has payload and become 1 + /** + * same input, first not having a payload, the second having payload we would expect 2 entries + * out but we are getting only 1 because the InputArrayIterator#hasPayloads() returns false + * because the first item has no payload, therefore, when ingested, none of the 2 input has + * payload and become 1 */ - inputDocuments = new Input[] { - new Input("lend me your ear", 7), - new Input("lend me your ear", 7, new BytesRef("uid2")), - }; + inputDocuments = + new Input[] { + new Input("lend me your ear", 7), new Input("lend me your ear", 7, new BytesRef("uid2")), + }; List results = duplicateCheck(inputDocuments, 1); assertNull(results.get(0).payload); - - //exactly same inputs but different weight - inputDocuments = new Input[] { - new Input("lend me your ear", 1, new BytesRef("uid1")), - new Input("lend me your ear", 7, new BytesRef("uid1")), - }; + // exactly same inputs but different weight + inputDocuments = + new Input[] { + new Input("lend me your ear", 1, new BytesRef("uid1")), + new Input("lend me your ear", 7, new BytesRef("uid1")), + }; duplicateCheck(inputDocuments, 2); - 
//exactly same inputs but different text - inputDocuments = new Input[] { - new Input("lend me your earings", 7, new BytesRef("uid1")), - new Input("lend me your ear", 7, new BytesRef("uid1")), - }; + // exactly same inputs but different text + inputDocuments = + new Input[] { + new Input("lend me your earings", 7, new BytesRef("uid1")), + new Input("lend me your ear", 7, new BytesRef("uid1")), + }; duplicateCheck(inputDocuments, 2); - } - public void testSuggesterCountForAllLookups() throws IOException { - - Input keys[] = new Input[]{ - new Input("lend me your ears", 1), - new Input("as you sow so shall you reap", 1), - }; + Input keys[] = + new Input[] { + new Input("lend me your ears", 1), new Input("as you sow so shall you reap", 1), + }; Path tempDir = createTempDir("BlendedInfixSuggesterTest"); Analyzer a = new StandardAnalyzer(CharArraySet.EMPTY_SET); @@ -327,7 +351,6 @@ public class BlendedInfixSuggesterTest extends LuceneTestCase { BlendedInfixSuggester suggester = new BlendedInfixSuggester(newFSDirectory(tempDir), a); suggester.build(new InputArrayIterator(keys)); - String term = "you"; List responses = suggester.lookup(term, false, 1); @@ -336,70 +359,67 @@ public class BlendedInfixSuggesterTest extends LuceneTestCase { responses = suggester.lookup(term, false, 2); assertEquals(2, responses.size()); - responses = suggester.lookup(term, 1, false, false); assertEquals(1, responses.size()); responses = suggester.lookup(term, 2, false, false); assertEquals(2, responses.size()); - responses = suggester.lookup(term, (Map) null, 1, false, false); assertEquals(1, responses.size()); responses = suggester.lookup(term, (Map) null, 2, false, false); assertEquals(2, responses.size()); - responses = suggester.lookup(term, (Set) null, 1, false, false); assertEquals(1, responses.size()); responses = suggester.lookup(term, (Set) null, 2, false, false); assertEquals(2, responses.size()); - responses = suggester.lookup(term, null, false, 1); assertEquals(1, responses.size()); responses = suggester.lookup(term, null, false, 2); assertEquals(2, responses.size()); - responses = suggester.lookup(term, (BooleanQuery) null, 1, false, false); assertEquals(1, responses.size()); responses = suggester.lookup(term, (BooleanQuery) null, 2, false, false); assertEquals(2, responses.size()); - suggester.close(); - } - - public void /*testT*/rying() throws IOException { + public void /*testT*/ rying() throws IOException { BytesRef lake = new BytesRef("lake"); BytesRef star = new BytesRef("star"); BytesRef ret = new BytesRef("ret"); - Input keys[] = new Input[]{ - new Input("top of the lake", 15, lake), - new Input("star wars: episode v - the empire strikes back", 12, star), - new Input("the returned", 10, ret), - }; + Input keys[] = + new Input[] { + new Input("top of the lake", 15, lake), + new Input("star wars: episode v - the empire strikes back", 12, star), + new Input("the returned", 10, ret), + }; Path tempDir = createTempDir("BlendedInfixSuggesterTest"); Analyzer a = new StandardAnalyzer(CharArraySet.EMPTY_SET); // if factor is small, we don't get the expected element - BlendedInfixSuggester suggester = new BlendedInfixSuggester(newFSDirectory(tempDir), a, a, - AnalyzingInfixSuggester.DEFAULT_MIN_PREFIX_CHARS, - BlendedInfixSuggester.BlenderType.POSITION_RECIPROCAL, - BlendedInfixSuggester.DEFAULT_NUM_FACTOR, false); + BlendedInfixSuggester suggester = + new BlendedInfixSuggester( + newFSDirectory(tempDir), + a, + a, + AnalyzingInfixSuggester.DEFAULT_MIN_PREFIX_CHARS, + 
BlendedInfixSuggester.BlenderType.POSITION_RECIPROCAL, + BlendedInfixSuggester.DEFAULT_NUM_FACTOR, + false); suggester.build(new InputArrayIterator(keys)); - List responses = suggester.lookup("the", 4, true, false); for (Lookup.LookupResult response : responses) { @@ -409,7 +429,9 @@ public class BlendedInfixSuggesterTest extends LuceneTestCase { suggester.close(); } - private static long getInResults(BlendedInfixSuggester suggester, String prefix, BytesRef payload, int num) throws IOException { + private static long getInResults( + BlendedInfixSuggester suggester, String prefix, BytesRef payload, int num) + throws IOException { List responses = suggester.lookup(prefix, num, true, false); @@ -422,16 +444,25 @@ public class BlendedInfixSuggesterTest extends LuceneTestCase { return -1; } - private List duplicateCheck(Input[] inputs, int expectedSuggestionCount) throws IOException { + private List duplicateCheck(Input[] inputs, int expectedSuggestionCount) + throws IOException { Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); - BlendedInfixSuggester suggester = new BlendedInfixSuggester(newDirectory(), a, a, AnalyzingInfixSuggester.DEFAULT_MIN_PREFIX_CHARS, - BlendedInfixSuggester.BlenderType.POSITION_RECIPROCAL,10, false); + BlendedInfixSuggester suggester = + new BlendedInfixSuggester( + newDirectory(), + a, + a, + AnalyzingInfixSuggester.DEFAULT_MIN_PREFIX_CHARS, + BlendedInfixSuggester.BlenderType.POSITION_RECIPROCAL, + 10, + false); InputArrayIterator inputArrayIterator = new InputArrayIterator(inputs); suggester.build(inputArrayIterator); - List results = suggester.lookup(TestUtil.stringToCharSequence("ear", random()), 10, true, true); + List results = + suggester.lookup(TestUtil.stringToCharSequence("ear", random()), 10, true, true); assertEquals(expectedSuggestionCount, results.size()); suggester.close(); @@ -439,5 +470,4 @@ public class BlendedInfixSuggesterTest extends LuceneTestCase { return results; } - } diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/FuzzySuggesterTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/FuzzySuggesterTest.java index 003beb5a1d7..ea63a1dd121 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/FuzzySuggesterTest.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/FuzzySuggesterTest.java @@ -25,7 +25,6 @@ import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.TreeSet; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.CannedTokenStream; import org.apache.lucene.analysis.MockAnalyzer; @@ -52,7 +51,7 @@ import org.apache.lucene.util.automaton.FiniteStringsIterator; import org.apache.lucene.util.fst.Util; public class FuzzySuggesterTest extends LuceneTestCase { - + public void testRandomEdits() throws IOException { List keys = new ArrayList<>(); int numTerms = atLeast(100); @@ -62,20 +61,34 @@ public class FuzzySuggesterTest extends LuceneTestCase { keys.add(new Input("foo bar boo far", 12)); MockAnalyzer analyzer = new MockAnalyzer(random(), MockTokenizer.KEYWORD, false); Directory tempDir = getDirectory(); - FuzzySuggester suggester = new FuzzySuggester(tempDir, "fuzzy", analyzer, analyzer, FuzzySuggester.EXACT_FIRST | FuzzySuggester.PRESERVE_SEP, 256, -1, true, FuzzySuggester.DEFAULT_MAX_EDITS, FuzzySuggester.DEFAULT_TRANSPOSITIONS, - 0, FuzzySuggester.DEFAULT_MIN_FUZZY_LENGTH, FuzzySuggester.DEFAULT_UNICODE_AWARE); + FuzzySuggester suggester = + new 
FuzzySuggester( + tempDir, + "fuzzy", + analyzer, + analyzer, + FuzzySuggester.EXACT_FIRST | FuzzySuggester.PRESERVE_SEP, + 256, + -1, + true, + FuzzySuggester.DEFAULT_MAX_EDITS, + FuzzySuggester.DEFAULT_TRANSPOSITIONS, + 0, + FuzzySuggester.DEFAULT_MIN_FUZZY_LENGTH, + FuzzySuggester.DEFAULT_UNICODE_AWARE); suggester.build(new InputArrayIterator(keys)); int numIters = atLeast(10); for (int i = 0; i < numIters; i++) { String addRandomEdit = addRandomEdit("foo bar boo", FuzzySuggester.DEFAULT_NON_FUZZY_PREFIX); - List results = suggester.lookup(TestUtil.stringToCharSequence(addRandomEdit, random()), false, 2); + List results = + suggester.lookup(TestUtil.stringToCharSequence(addRandomEdit, random()), false, 2); assertEquals(addRandomEdit, 1, results.size()); assertEquals("foo bar boo far", results.get(0).key.toString()); - assertEquals(12, results.get(0).value, 0.01F); + assertEquals(12, results.get(0).value, 0.01F); } IOUtils.close(analyzer, tempDir); } - + public void testNonLatinRandomEdits() throws IOException { List keys = new ArrayList<>(); int numTerms = atLeast(100); @@ -85,13 +98,27 @@ public class FuzzySuggesterTest extends LuceneTestCase { keys.add(new Input("фуу бар буу фар", 12)); MockAnalyzer analyzer = new MockAnalyzer(random(), MockTokenizer.KEYWORD, false); Directory tempDir = getDirectory(); - FuzzySuggester suggester = new FuzzySuggester(tempDir, "fuzzy",analyzer, analyzer, FuzzySuggester.EXACT_FIRST | FuzzySuggester.PRESERVE_SEP, 256, -1, true, FuzzySuggester.DEFAULT_MAX_EDITS, FuzzySuggester.DEFAULT_TRANSPOSITIONS, - 0, FuzzySuggester.DEFAULT_MIN_FUZZY_LENGTH, true); + FuzzySuggester suggester = + new FuzzySuggester( + tempDir, + "fuzzy", + analyzer, + analyzer, + FuzzySuggester.EXACT_FIRST | FuzzySuggester.PRESERVE_SEP, + 256, + -1, + true, + FuzzySuggester.DEFAULT_MAX_EDITS, + FuzzySuggester.DEFAULT_TRANSPOSITIONS, + 0, + FuzzySuggester.DEFAULT_MIN_FUZZY_LENGTH, + true); suggester.build(new InputArrayIterator(keys)); int numIters = atLeast(10); for (int i = 0; i < numIters; i++) { String addRandomEdit = addRandomEdit("фуу бар буу", 0); - List results = suggester.lookup(TestUtil.stringToCharSequence(addRandomEdit, random()), false, 2); + List results = + suggester.lookup(TestUtil.stringToCharSequence(addRandomEdit, random()), false, 2); assertEquals(addRandomEdit, 1, results.size()); assertEquals("фуу бар буу фар", results.get(0).key.toString()); assertEquals(12, results.get(0).value, 0.01F); @@ -101,60 +128,62 @@ public class FuzzySuggesterTest extends LuceneTestCase { /** this is basically the WFST test ported to KeywordAnalyzer. 
so it acts the same */ public void testKeyword() throws Exception { - Input keys[] = new Input[] { - new Input("foo", 50), - new Input("bar", 10), - new Input("barbar", 12), - new Input("barbara", 6) - }; - + Input keys[] = + new Input[] { + new Input("foo", 50), + new Input("bar", 10), + new Input("barbar", 12), + new Input("barbara", 6) + }; + Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.KEYWORD, false); Directory tempDir = getDirectory(); - FuzzySuggester suggester = new FuzzySuggester(tempDir, "fuzzy",analyzer); + FuzzySuggester suggester = new FuzzySuggester(tempDir, "fuzzy", analyzer); suggester.build(new InputArrayIterator(keys)); - - List<LookupResult> results = suggester.lookup(TestUtil.stringToCharSequence("bariar", random()), false, 2); + + List<LookupResult> results = + suggester.lookup(TestUtil.stringToCharSequence("bariar", random()), false, 2); assertEquals(2, results.size()); assertEquals("barbar", results.get(0).key.toString()); assertEquals(12, results.get(0).value, 0.01F); - + results = suggester.lookup(TestUtil.stringToCharSequence("barbr", random()), false, 2); assertEquals(2, results.size()); assertEquals("barbar", results.get(0).key.toString()); assertEquals(12, results.get(0).value, 0.01F); - + results = suggester.lookup(TestUtil.stringToCharSequence("barbara", random()), false, 2); assertEquals(2, results.size()); assertEquals("barbara", results.get(0).key.toString()); assertEquals(6, results.get(0).value, 0.01F); - + results = suggester.lookup(TestUtil.stringToCharSequence("barbar", random()), false, 2); assertEquals(2, results.size()); assertEquals("barbar", results.get(0).key.toString()); assertEquals(12, results.get(0).value, 0.01F); assertEquals("barbara", results.get(1).key.toString()); assertEquals(6, results.get(1).value, 0.01F); - + results = suggester.lookup(TestUtil.stringToCharSequence("barbaa", random()), false, 2); assertEquals(2, results.size()); assertEquals("barbar", results.get(0).key.toString()); assertEquals(12, results.get(0).value, 0.01F); assertEquals("barbara", results.get(1).key.toString()); assertEquals(6, results.get(1).value, 0.01F); - + // top N of 2, but only foo is available results = suggester.lookup(TestUtil.stringToCharSequence("f", random()), false, 2); assertEquals(1, results.size()); assertEquals("foo", results.get(0).key.toString()); assertEquals(50, results.get(0).value, 0.01F); - + // top N of 1 for 'bar': we return this even though // barbar is higher because exactFirst is enabled: results = suggester.lookup(TestUtil.stringToCharSequence("bar", random()), false, 1); assertEquals(1, results.size()); assertEquals("bar", results.get(0).key.toString()); assertEquals(10, results.get(0).value, 0.01F); - + // top N of 2 for 'b' results = suggester.lookup(TestUtil.stringToCharSequence("b", random()), false, 2); assertEquals(2, results.size()); @@ -162,7 +191,7 @@ public class FuzzySuggesterTest extends LuceneTestCase { assertEquals(12, results.get(0).value, 0.01F); assertEquals("bar", results.get(1).key.toString()); assertEquals(10, results.get(1).value, 0.01F); - + // top N of 3 for 'ba' results = suggester.lookup(TestUtil.stringToCharSequence("ba", random()), false, 3); assertEquals(3, results.size()); @@ -172,25 +201,39 @@ public class FuzzySuggesterTest extends LuceneTestCase { assertEquals(10, results.get(1).value, 0.01F); assertEquals("barbara", results.get(2).key.toString()); assertEquals(6, results.get(2).value, 0.01F); - + IOUtils.close(analyzer, tempDir); } - - /** - * basic "standardanalyzer" test with stopword removal - */ + + /**
basic "standardanalyzer" test with stopword removal */ public void testStandard() throws Exception { - Input keys[] = new Input[] { - new Input("the ghost of christmas past", 50), - }; - - Analyzer standard = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET); + Input keys[] = + new Input[] { + new Input("the ghost of christmas past", 50), + }; + + Analyzer standard = + new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET); Directory tempDir = getDirectory(); - FuzzySuggester suggester = new FuzzySuggester(tempDir, "fuzzy", standard, standard, AnalyzingSuggester.EXACT_FIRST | AnalyzingSuggester.PRESERVE_SEP, 256, -1, false, FuzzySuggester.DEFAULT_MAX_EDITS, FuzzySuggester.DEFAULT_TRANSPOSITIONS, - FuzzySuggester.DEFAULT_NON_FUZZY_PREFIX, FuzzySuggester.DEFAULT_MIN_FUZZY_LENGTH, FuzzySuggester.DEFAULT_UNICODE_AWARE); + FuzzySuggester suggester = + new FuzzySuggester( + tempDir, + "fuzzy", + standard, + standard, + AnalyzingSuggester.EXACT_FIRST | AnalyzingSuggester.PRESERVE_SEP, + 256, + -1, + false, + FuzzySuggester.DEFAULT_MAX_EDITS, + FuzzySuggester.DEFAULT_TRANSPOSITIONS, + FuzzySuggester.DEFAULT_NON_FUZZY_PREFIX, + FuzzySuggester.DEFAULT_MIN_FUZZY_LENGTH, + FuzzySuggester.DEFAULT_UNICODE_AWARE); suggester.build(new InputArrayIterator(keys)); - - List results = suggester.lookup(TestUtil.stringToCharSequence("the ghost of chris", random()), false, 1); + + List results = + suggester.lookup(TestUtil.stringToCharSequence("the ghost of chris", random()), false, 1); assertEquals(1, results.size()); assertEquals("the ghost of christmas past", results.get(0).key.toString()); assertEquals(50, results.get(0).value, 0.01F); @@ -206,27 +249,29 @@ public class FuzzySuggesterTest extends LuceneTestCase { assertEquals(1, results.size()); assertEquals("the ghost of christmas past", results.get(0).key.toString()); assertEquals(50, results.get(0).value, 0.01F); - + IOUtils.close(standard, tempDir); } public void testNoSeps() throws Exception { - Input[] keys = new Input[] { - new Input("ab cd", 0), - new Input("abcd", 1), - }; + Input[] keys = + new Input[] { + new Input("ab cd", 0), new Input("abcd", 1), + }; int options = 0; Analyzer a = new MockAnalyzer(random()); Directory tempDir = getDirectory(); - FuzzySuggester suggester = new FuzzySuggester(tempDir, "fuzzy",a, a, options, 256, -1, true, 1, true, 1, 3, false); + FuzzySuggester suggester = + new FuzzySuggester(tempDir, "fuzzy", a, a, options, 256, -1, true, 1, true, 1, 3, false); suggester.build(new InputArrayIterator(keys)); // TODO: would be nice if "ab " would allow the test to // pass, and more generally if the analyzer can know - // that the user's current query has ended at a word, + // that the user's current query has ended at a word, // but, analyzers don't produce SEP tokens! 
- List r = suggester.lookup(TestUtil.stringToCharSequence("ab c", random()), false, 2); + List r = + suggester.lookup(TestUtil.stringToCharSequence("ab c", random()), false, 2); assertEquals(2, r.size()); // With no PRESERVE_SEPS specified, "ab c" should also @@ -238,33 +283,32 @@ public class FuzzySuggesterTest extends LuceneTestCase { public void testGraphDups() throws Exception { - final Analyzer analyzer = new AnalyzingSuggesterTest.MultiCannedAnalyzer( - new CannedTokenStream( - token("wifi", 1, 1), - token("hotspot", 0, 2), - token("network", 1, 1), - token("is", 1, 1), - token("slow", 1, 1)), - new CannedTokenStream( - token("wi", 1, 1), - token("hotspot", 0, 3), - token("fi", 1, 1), - token("network", 1, 1), - token("is", 1, 1), - token("fast", 1, 1)), - new CannedTokenStream( - token("wifi", 1, 1), - token("hotspot",0,2), - token("network",1,1))); + final Analyzer analyzer = + new AnalyzingSuggesterTest.MultiCannedAnalyzer( + new CannedTokenStream( + token("wifi", 1, 1), + token("hotspot", 0, 2), + token("network", 1, 1), + token("is", 1, 1), + token("slow", 1, 1)), + new CannedTokenStream( + token("wi", 1, 1), + token("hotspot", 0, 3), + token("fi", 1, 1), + token("network", 1, 1), + token("is", 1, 1), + token("fast", 1, 1)), + new CannedTokenStream( + token("wifi", 1, 1), token("hotspot", 0, 2), token("network", 1, 1))); - Input keys[] = new Input[] { - new Input("wifi network is slow", 50), - new Input("wi fi network is fast", 10), - }; + Input keys[] = + new Input[] { + new Input("wifi network is slow", 50), new Input("wi fi network is fast", 10), + }; Directory tempDir = getDirectory(); FuzzySuggester suggester = new FuzzySuggester(tempDir, "fuzzy", analyzer); suggester.build(new InputArrayIterator(keys)); - + List results = suggester.lookup("wifi network", false, 10); if (VERBOSE) { System.out.println("Results: " + results); @@ -295,26 +339,19 @@ public class FuzzySuggesterTest extends LuceneTestCase { // final SynonymMap map = b.build(); // The Analyzer below mimics the functionality of the SynonymAnalyzer - // using the above map, so that the suggest module does not need a dependency on the - // synonym module + // using the above map, so that the suggest module does not need a dependency on the + // synonym module - final Analyzer analyzer = new AnalyzingSuggesterTest.MultiCannedAnalyzer( - new CannedTokenStream( - token("ab", 1, 1), - token("ba", 0, 1), - token("xc", 1, 1)), - new CannedTokenStream( - token("ba", 1, 1), - token("xd", 1, 1)), - new CannedTokenStream( - token("ab", 1, 1), - token("ba", 0, 1), - token("x", 1, 1))); + final Analyzer analyzer = + new AnalyzingSuggesterTest.MultiCannedAnalyzer( + new CannedTokenStream(token("ab", 1, 1), token("ba", 0, 1), token("xc", 1, 1)), + new CannedTokenStream(token("ba", 1, 1), token("xd", 1, 1)), + new CannedTokenStream(token("ab", 1, 1), token("ba", 0, 1), token("x", 1, 1))); - Input keys[] = new Input[] { - new Input("ab xc", 50), - new Input("ba xd", 50), - }; + Input keys[] = + new Input[] { + new Input("ab xc", 50), new Input("ba xd", 50), + }; Directory tempDir = getDirectory(); FuzzySuggester suggester = new FuzzySuggester(tempDir, "fuzzy", analyzer); suggester.build(new InputArrayIterator(keys)); @@ -338,14 +375,14 @@ public class FuzzySuggesterTest extends LuceneTestCase { final TermToBytesRefAttribute termBytesAtt = ts.addAttribute(TermToBytesRefAttribute.class); final PositionIncrementAttribute posIncAtt = ts.addAttribute(PositionIncrementAttribute.class); final PositionLengthAttribute posLengthAtt = 
ts.addAttribute(PositionLengthAttribute.class); - + while(ts.incrementToken()) { termBytesAtt.fillBytesRef(); - System.out.println(String.format("%s,%s,%s", termBytesAtt.getBytesRef().utf8ToString(), posIncAtt.getPositionIncrement(), posLengthAtt.getPositionLength())); + System.out.println(String.format("%s,%s,%s", termBytesAtt.getBytesRef().utf8ToString(), posIncAtt.getPositionIncrement(), posLengthAtt.getPositionLength())); } ts.end(); ts.close(); - } + } */ private Analyzer getUnusualAnalyzer() { @@ -363,19 +400,32 @@ public class FuzzySuggesterTest extends LuceneTestCase { Analyzer a = getUnusualAnalyzer(); Directory tempDir = getDirectory(); - FuzzySuggester suggester = new FuzzySuggester(tempDir, "fuzzy", a, a, AnalyzingSuggester.EXACT_FIRST | AnalyzingSuggester.PRESERVE_SEP, 256, -1, true, 1, true, 1, 3, false); - suggester.build(new InputArrayIterator(new Input[] { - new Input("x y", 1), - new Input("x y z", 3), - new Input("x", 2), - new Input("z z z", 20), - })); + FuzzySuggester suggester = + new FuzzySuggester( + tempDir, + "fuzzy", + a, + a, + AnalyzingSuggester.EXACT_FIRST | AnalyzingSuggester.PRESERVE_SEP, + 256, + -1, + true, + 1, + true, + 1, + 3, + false); + suggester.build( + new InputArrayIterator( + new Input[] { + new Input("x y", 1), new Input("x y z", 3), new Input("x", 2), new Input("z z z", 20), + })); - //System.out.println("ALL: " + suggester.lookup("x y", false, 6)); + // System.out.println("ALL: " + suggester.lookup("x y", false, 6)); - for(int topN=1;topN<6;topN++) { + for (int topN = 1; topN < 6; topN++) { List results = suggester.lookup("x y", false, topN); - //System.out.println("topN=" + topN + " " + results); + // System.out.println("topN=" + topN + " " + results); assertEquals(Math.min(topN, 4), results.size()); @@ -404,16 +454,29 @@ public class FuzzySuggesterTest extends LuceneTestCase { Analyzer a = getUnusualAnalyzer(); Directory tempDir = getDirectory(); - FuzzySuggester suggester = new FuzzySuggester(tempDir, "fuzzy", a, a, AnalyzingSuggester.PRESERVE_SEP, 256, -1, true, 1, true, 1, 3, false); + FuzzySuggester suggester = + new FuzzySuggester( + tempDir, + "fuzzy", + a, + a, + AnalyzingSuggester.PRESERVE_SEP, + 256, + -1, + true, + 1, + true, + 1, + 3, + false); - suggester.build(new InputArrayIterator(new Input[] { - new Input("x y", 1), - new Input("x y z", 3), - new Input("x", 2), - new Input("z z z", 20), - })); + suggester.build( + new InputArrayIterator( + new Input[] { + new Input("x y", 1), new Input("x y z", 3), new Input("x", 2), new Input("z z z", 20), + })); - for(int topN=1;topN<6;topN++) { + for (int topN = 1; topN < 6; topN++) { List results = suggester.lookup("p", false, topN); assertEquals(Math.min(topN, 4), results.size()); @@ -428,7 +491,7 @@ public class FuzzySuggesterTest extends LuceneTestCase { if (topN > 2) { assertEquals("x", results.get(2).key); assertEquals(2, results.get(2).value); - + if (topN > 3) { assertEquals("x y", results.get(3).key); assertEquals(1, results.get(3).value); @@ -438,7 +501,7 @@ public class FuzzySuggesterTest extends LuceneTestCase { } IOUtils.close(a, tempDir); } - + // Holds surface form separately: private static class TermFreqPayload2 implements Comparable { public final String surfaceForm; @@ -468,13 +531,14 @@ public class FuzzySuggesterTest extends LuceneTestCase { } static boolean isStopChar(char ch, int numStopChars) { - //System.out.println("IS? " + ch + ": " + (ch - 'a') + ": " + ((ch - 'a') < numStopChars)); + // System.out.println("IS? 
" + ch + ": " + (ch - 'a') + ": " + ((ch - 'a') < numStopChars)); return (ch - 'a') < numStopChars; } // Like StopFilter: private static class TokenEater extends TokenFilter { - private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class); + private final PositionIncrementAttribute posIncrAtt = + addAttribute(PositionIncrementAttribute.class); private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); private final int numStopChars; private final boolean preserveHoles; @@ -506,7 +570,7 @@ public class FuzzySuggesterTest extends LuceneTestCase { first = false; } posIncrAtt.setPositionIncrement(posInc); - //System.out.println("RETURN term=" + termAtt + " numStopChars=" + numStopChars); + // System.out.println("RETURN term=" + termAtt + " numStopChars=" + numStopChars); return true; } if (preserveHoles) { @@ -529,7 +593,9 @@ public class FuzzySuggesterTest extends LuceneTestCase { @Override public TokenStreamComponents createComponents(String fieldName) { - MockTokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false, MockTokenizer.DEFAULT_MAX_TOKEN_LENGTH); + MockTokenizer tokenizer = + new MockTokenizer( + MockTokenizer.WHITESPACE, false, MockTokenizer.DEFAULT_MAX_TOKEN_LENGTH); tokenizer.setEnableChecks(true); TokenStream next; if (numStopChars != 0) { @@ -545,11 +611,11 @@ public class FuzzySuggesterTest extends LuceneTestCase { public void testRandom() throws Exception { int numQueries = atLeast(20); - + final List slowCompletor = new ArrayList<>(); final TreeSet allPrefixes = new TreeSet<>(); final Set seen = new HashSet<>(); - + Input[] keys = new Input[numQueries]; boolean preserveSep = random().nextBoolean(); @@ -559,18 +625,28 @@ public class FuzzySuggesterTest extends LuceneTestCase { final boolean preserveHoles = random().nextBoolean(); if (VERBOSE) { - System.out.println("TEST: " + numQueries + " words; preserveSep=" + preserveSep + " ; unicodeAware=" + unicodeAware + " numStopChars=" + numStopChars + " preserveHoles=" + preserveHoles); + System.out.println( + "TEST: " + + numQueries + + " words; preserveSep=" + + preserveSep + + " ; unicodeAware=" + + unicodeAware + + " numStopChars=" + + numStopChars + + " preserveHoles=" + + preserveHoles); } - + for (int i = 0; i < numQueries; i++) { int numTokens = TestUtil.nextInt(random(), 1, 4); String key; String analyzedKey; - while(true) { + while (true) { key = ""; analyzedKey = ""; boolean lastRemoved = false; - for(int token=0;token < numTokens;token++) { + for (int token = 0; token < numTokens; token++) { String s; while (true) { // TODO: would be nice to fix this slowCompletor/comparator to @@ -580,7 +656,13 @@ public class FuzzySuggesterTest extends LuceneTestCase { if (token > 0) { key += " "; } - if (preserveSep && analyzedKey.length() > 0 && (unicodeAware ? analyzedKey.codePointAt(analyzedKey.codePointCount(0, analyzedKey.length())-1) != ' ' : analyzedKey.charAt(analyzedKey.length()-1) != ' ')) { + if (preserveSep + && analyzedKey.length() > 0 + && (unicodeAware + ? analyzedKey.codePointAt( + analyzedKey.codePointCount(0, analyzedKey.length()) - 1) + != ' ' + : analyzedKey.charAt(analyzedKey.length() - 1) != ' ')) { analyzedKey += " "; } key += s; @@ -615,7 +697,7 @@ public class FuzzySuggesterTest extends LuceneTestCase { allPrefixes.add(key.substring(0, j)); } // we can probably do Integer.MAX_VALUE here, but why worry. 
- int weight = random().nextInt(1<<24); + int weight = random().nextInt(1 << 24); keys[i] = new Input(key, weight); slowCompletor.add(new TermFreqPayload2(key, analyzedKey, weight)); @@ -626,15 +708,34 @@ public class FuzzySuggesterTest extends LuceneTestCase { // altering the test: List sorted = new ArrayList<>(slowCompletor); Collections.sort(sorted); - for(TermFreqPayload2 ent : sorted) { - System.out.println(" surface='" + ent.surfaceForm + " analyzed='" + ent.analyzedForm + "' weight=" + ent.weight); + for (TermFreqPayload2 ent : sorted) { + System.out.println( + " surface='" + + ent.surfaceForm + + " analyzed='" + + ent.analyzedForm + + "' weight=" + + ent.weight); } } Analyzer a = new MockTokenEatingAnalyzer(numStopChars, preserveHoles); Directory tempDir = getDirectory(); - FuzzySuggester suggester = new FuzzySuggester(tempDir, "fuzzy",a, a, - preserveSep ? AnalyzingSuggester.PRESERVE_SEP : 0, 256, -1, true, 1, false, 1, 3, unicodeAware); + FuzzySuggester suggester = + new FuzzySuggester( + tempDir, + "fuzzy", + a, + a, + preserveSep ? AnalyzingSuggester.PRESERVE_SEP : 0, + 256, + -1, + true, + 1, + false, + 1, + 3, + unicodeAware); suggester.build(new InputArrayIterator(keys)); for (String prefix : allPrefixes) { @@ -644,7 +745,8 @@ public class FuzzySuggesterTest extends LuceneTestCase { } final int topN = TestUtil.nextInt(random(), 1, 10); - List r = suggester.lookup(TestUtil.stringToCharSequence(prefix, random()), false, topN); + List r = + suggester.lookup(TestUtil.stringToCharSequence(prefix, random()), false, topN); // 2. go thru whole set to find suggestions: List matches = new ArrayList<>(); @@ -653,7 +755,7 @@ public class FuzzySuggesterTest extends LuceneTestCase { String[] tokens = prefix.split(" "); StringBuilder builder = new StringBuilder(); boolean lastRemoved = false; - for(int i=0;i 0 && !builder.toString().endsWith(" ")) { builder.append(' '); @@ -703,7 +805,9 @@ public class FuzzySuggesterTest extends LuceneTestCase { // us the "answer key" (ie maybe we have a bug in // suggester.toLevA ...) ... but testRandom2() fixes // this: - Automaton automaton = suggester.convertAutomaton(suggester.toLevenshteinAutomata(suggester.toLookupAutomaton(analyzedKey))); + Automaton automaton = + suggester.convertAutomaton( + suggester.toLevenshteinAutomata(suggester.toLookupAutomaton(analyzedKey))); assertTrue(automaton.isDeterministic()); // TODO: could be faster... 
but it's slowCompletor for a reason @@ -712,7 +816,7 @@ public class FuzzySuggesterTest extends LuceneTestCase { spare.copyChars(e.analyzedForm); FiniteStringsIterator finiteStrings = new FiniteStringsIterator(suggester.toAutomaton(spare.get(), tokenStreamToAutomaton)); - for (IntsRef string; (string = finiteStrings.next()) != null;) { + for (IntsRef string; (string = finiteStrings.next()) != null; ) { int p = 0; BytesRef ref = Util.toBytesRef(string, spare); boolean added = false; @@ -729,24 +833,26 @@ public class FuzzySuggesterTest extends LuceneTestCase { } if (!added && automaton.isAccept(p)) { matches.add(new LookupResult(e.surfaceForm, e.weight)); - } + } } } assertTrue(numStopChars > 0 || matches.size() > 0); if (matches.size() > 1) { - Collections.sort(matches, new Comparator() { - @Override - public int compare(LookupResult left, LookupResult right) { - int cmp = Float.compare(right.value, left.value); - if (cmp == 0) { - return left.compareTo(right); - } else { - return cmp; + Collections.sort( + matches, + new Comparator() { + @Override + public int compare(LookupResult left, LookupResult right) { + int cmp = Float.compare(right.value, left.value); + if (cmp == 0) { + return left.compareTo(right); + } else { + return cmp; + } } - } - }); + }); } if (matches.size() > topN) { @@ -755,20 +861,21 @@ public class FuzzySuggesterTest extends LuceneTestCase { if (VERBOSE) { System.out.println(" expected:"); - for(LookupResult lr : matches) { + for (LookupResult lr : matches) { System.out.println(" key=" + lr.key + " weight=" + lr.value); } System.out.println(" actual:"); - for(LookupResult lr : r) { + for (LookupResult lr : r) { System.out.println(" key=" + lr.key + " weight=" + lr.value); } } - + assertEquals(prefix + " " + topN, matches.size(), r.size()); - for(int hit=0;hit keys = Arrays.asList(new Input[] { - new Input("a", 40), - new Input("a ", 50), - new Input(" a", 60), - }); + List keys = + Arrays.asList( + new Input[] { + new Input("a", 40), new Input("a ", 50), new Input(" a", 60), + }); Collections.shuffle(keys, random()); suggester.build(new InputArrayIterator(keys)); @@ -801,14 +909,18 @@ public class FuzzySuggesterTest extends LuceneTestCase { public void testEditSeps() throws Exception { Analyzer a = new MockAnalyzer(random()); Directory tempDir = getDirectory(); - FuzzySuggester suggester = new FuzzySuggester(tempDir, "fuzzy", a, a, FuzzySuggester.PRESERVE_SEP, 2, -1, true, 2, true, 1, 3, false); + FuzzySuggester suggester = + new FuzzySuggester( + tempDir, "fuzzy", a, a, FuzzySuggester.PRESERVE_SEP, 2, -1, true, 2, true, 1, 3, false); - List keys = Arrays.asList(new Input[] { - new Input("foo bar", 40), - new Input("foo bar baz", 50), - new Input("barbaz", 60), - new Input("barbazfoo", 10), - }); + List keys = + Arrays.asList( + new Input[] { + new Input("foo bar", 40), + new Input("foo bar baz", 50), + new Input("barbaz", 60), + new Input("barbazfoo", 10), + }); Collections.shuffle(keys, random()); suggester.build(new InputArrayIterator(keys)); @@ -819,20 +931,20 @@ public class FuzzySuggesterTest extends LuceneTestCase { assertEquals("[barbazfoo/10]", suggester.lookup("bar baz foo", false, 5).toString()); IOUtils.close(a, tempDir); } - + @SuppressWarnings("fallthrough") private static String addRandomEdit(String string, int prefixLength) { char[] input = string.toCharArray(); StringBuilder builder = new StringBuilder(); for (int i = 0; i < input.length; i++) { - if (i >= prefixLength && random().nextBoolean() && i < input.length-1) { - switch(random().nextInt(4)) { 
+ if (i >= prefixLength && random().nextBoolean() && i < input.length - 1) { + switch (random().nextInt(4)) { case 3: - if (i < input.length-1) { + if (i < input.length - 1) { // Transpose input[i] and input[1+i]: - builder.append(input[i+1]); + builder.append(input[i + 1]); builder.append(input[i]); - for(int j=i+2;j answers = new ArrayList<>(); final Set seen = new HashSet<>(); - for(int i=0;i() { - @Override - public int compare(Input a, Input b) { - return a.term.compareTo(b.term); - } - }); + Collections.sort( + answers, + new Comparator() { + @Override + public int compare(Input a, Input b) { + return a.term.compareTo(b.term); + } + }); if (VERBOSE) { System.out.println("\nTEST: targets"); - for(Input tf : answers) { + for (Input tf : answers) { System.out.println(" " + tf.term.utf8ToString() + " freq=" + tf.v); } } @@ -917,32 +1031,55 @@ public class FuzzySuggesterTest extends LuceneTestCase { // TODO: test graph analyzers // TODO: test exactFirst / preserveSep permutations Directory tempDir = getDirectory(); - FuzzySuggester suggest = new FuzzySuggester(tempDir, "fuzzy", a, a, 0, 256, -1, true, maxEdits, transpositions, prefixLen, prefixLen, false); + FuzzySuggester suggest = + new FuzzySuggester( + tempDir, + "fuzzy", + a, + a, + 0, + 256, + -1, + true, + maxEdits, + transpositions, + prefixLen, + prefixLen, + false); if (VERBOSE) { - System.out.println("TEST: maxEdits=" + maxEdits + " prefixLen=" + prefixLen + " transpositions=" + transpositions + " num=" + NUM); + System.out.println( + "TEST: maxEdits=" + + maxEdits + + " prefixLen=" + + prefixLen + + " transpositions=" + + transpositions + + " num=" + + NUM); } Collections.shuffle(answers, random()); suggest.build(new InputArrayIterator(answers.toArray(new Input[answers.size()]))); final int ITERS = atLeast(100); - for(int iter=0;iter actual = suggest.lookup(frag, false, NUM); if (VERBOSE) { System.out.println(" actual: " + actual.size()); - for(LookupResult c : actual) { + for (LookupResult c : actual) { System.out.println(" " + c); } } @@ -950,13 +1087,13 @@ public class FuzzySuggesterTest extends LuceneTestCase { Collections.sort(actual, new CompareByCostThenAlpha()); final int limit = Math.min(expected.size(), actual.size()); - for(int ans=0;ans slowFuzzyMatch(int prefixLen, int maxEdits, boolean allowTransposition, List answers, String frag) { + private List slowFuzzyMatch( + int prefixLen, int maxEdits, boolean allowTransposition, List answers, String frag) { final List results = new ArrayList<>(); final int fragLen = frag.length(); - for(Input tf : answers) { - //System.out.println(" check s=" + tf.term.utf8ToString()); + for (Input tf : answers) { + // System.out.println(" check s=" + tf.term.utf8ToString()); boolean prefixMatches = true; - for(int i=0;i 1 && j > 1 && targetPoints.ints[i-1] == otherPoints.ints[j-2] && targetPoints.ints[i-2] == otherPoints.ints[j-1]) { - d[i][j] = Math.min(d[i][j], d[i-2][j-2] + cost); + if (allowTransposition + && i > 1 + && j > 1 + && targetPoints.ints[i - 1] == otherPoints.ints[j - 2] + && targetPoints.ints[i - 2] == otherPoints.ints[j - 1]) { + d[i][j] = Math.min(d[i][j], d[i - 2][j - 2] + cost); } } } - + return d[n][m]; } - + private static IntsRef toIntsRef(String s) { IntsRef ref = new IntsRef(s.length()); // worst case int utf16Len = s.length(); @@ -1134,7 +1276,7 @@ public class FuzzySuggesterTest extends LuceneTestCase { return ref; } - private Directory getDirectory() { + private Directory getDirectory() { return newDirectory(); } } diff --git 
a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestFreeTextSuggester.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestFreeTextSuggester.java index 530a4c3ce3e..e6f64dec2f8 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestFreeTextSuggester.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestFreeTextSuggester.java @@ -30,7 +30,6 @@ import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Set; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.MockAnalyzer; @@ -51,33 +50,30 @@ import org.junit.Ignore; public class TestFreeTextSuggester extends LuceneTestCase { public void testBasic() throws Exception { - Iterable keys = AnalyzingSuggesterTest.shuffle( - new Input("foo bar baz blah", 50), - new Input("boo foo bar foo bee", 20) - ); + Iterable keys = + AnalyzingSuggesterTest.shuffle( + new Input("foo bar baz blah", 50), new Input("boo foo bar foo bee", 20)); Analyzer a = new MockAnalyzer(random()); FreeTextSuggester sug = new FreeTextSuggester(a, a, 2, (byte) 0x20); sug.build(new InputArrayIterator(keys)); assertEquals(2, sug.getCount()); - for(int i=0;i<2;i++) { + for (int i = 0; i < 2; i++) { // Uses bigram model and unigram backoff: - assertEquals("foo bar/0.67 foo bee/0.33 baz/0.04 blah/0.04 boo/0.04", - toString(sug.lookup("foo b", 10))); + assertEquals( + "foo bar/0.67 foo bee/0.33 baz/0.04 blah/0.04 boo/0.04", + toString(sug.lookup("foo b", 10))); // Uses only bigram model: - assertEquals("foo bar/0.67 foo bee/0.33", - toString(sug.lookup("foo ", 10))); + assertEquals("foo bar/0.67 foo bee/0.33", toString(sug.lookup("foo ", 10))); // Uses only unigram model: - assertEquals("foo/0.33", - toString(sug.lookup("foo", 10))); + assertEquals("foo/0.33", toString(sug.lookup("foo", 10))); // Uses only unigram model: - assertEquals("bar/0.22 baz/0.11 bee/0.11 blah/0.11 boo/0.11", - toString(sug.lookup("b", 10))); + assertEquals("bar/0.22 baz/0.11 bee/0.11 blah/0.11 boo/0.11", toString(sug.lookup("b", 10))); // Try again after save/load: Path tmpDir = createTempDir("FreeTextSuggesterTest"); @@ -100,14 +96,14 @@ public class TestFreeTextSuggester extends LuceneTestCase { public void testIllegalByteDuringBuild() throws Exception { // Default separator is INFORMATION SEPARATOR TWO // (0x1e), so no input token is allowed to contain it - Iterable keys = AnalyzingSuggesterTest.shuffle( - new Input("foo\u001ebar baz", 50) - ); + Iterable keys = AnalyzingSuggesterTest.shuffle(new Input("foo\u001ebar baz", 50)); Analyzer analyzer = new MockAnalyzer(random()); FreeTextSuggester sug = new FreeTextSuggester(analyzer); - expectThrows(IllegalArgumentException.class, () -> { - sug.build(new InputArrayIterator(keys)); - }); + expectThrows( + IllegalArgumentException.class, + () -> { + sug.build(new InputArrayIterator(keys)); + }); analyzer.close(); } @@ -115,79 +111,81 @@ public class TestFreeTextSuggester extends LuceneTestCase { public void testIllegalByteDuringQuery() throws Exception { // Default separator is INFORMATION SEPARATOR TWO // (0x1e), so no input token is allowed to contain it - Iterable keys = AnalyzingSuggesterTest.shuffle( - new Input("foo bar baz", 50) - ); + Iterable keys = AnalyzingSuggesterTest.shuffle(new Input("foo bar baz", 50)); Analyzer analyzer = new MockAnalyzer(random()); FreeTextSuggester sug = new FreeTextSuggester(analyzer); sug.build(new 
InputArrayIterator(keys)); - expectThrows(IllegalArgumentException.class, () -> { - sug.lookup("foo\u001eb", 10); - }); + expectThrows( + IllegalArgumentException.class, + () -> { + sug.lookup("foo\u001eb", 10); + }); analyzer.close(); } @Ignore public void testWiki() throws Exception { - final LineFileDocs lfd = new LineFileDocs(null, "/lucenedata/enwiki/enwiki-20120502-lines-1k.txt"); + final LineFileDocs lfd = + new LineFileDocs(null, "/lucenedata/enwiki/enwiki-20120502-lines-1k.txt"); // Skip header: lfd.nextDoc(); Analyzer analyzer = new MockAnalyzer(random()); FreeTextSuggester sug = new FreeTextSuggester(analyzer); - sug.build(new InputIterator() { + sug.build( + new InputIterator() { - private int count; + private int count; - @Override - public long weight() { - return 1; - } - - @Override - public BytesRef next() { - Document doc; - try { - doc = lfd.nextDoc(); - } catch (IOException ioe) { - throw new RuntimeException(ioe); + @Override + public long weight() { + return 1; } - if (doc == null) { + + @Override + public BytesRef next() { + Document doc; + try { + doc = lfd.nextDoc(); + } catch (IOException ioe) { + throw new RuntimeException(ioe); + } + if (doc == null) { + return null; + } + if (count++ == 10000) { + return null; + } + return new BytesRef(doc.get("body")); + } + + @Override + public BytesRef payload() { return null; } - if (count++ == 10000) { + + @Override + public boolean hasPayloads() { + return false; + } + + @Override + public Set contexts() { return null; } - return new BytesRef(doc.get("body")); - } - @Override - public BytesRef payload() { - return null; - } - - @Override - public boolean hasPayloads() { - return false; - } - - @Override - public Set contexts() { - return null; - } - - @Override - public boolean hasContexts() { - return false; - } - }); + @Override + public boolean hasContexts() { + return false; + } + }); if (VERBOSE) { System.out.println(sug.ramBytesUsed() + " bytes"); List results = sug.lookup("general r", 10); System.out.println("results:"); - for(LookupResult result : results) { + for (LookupResult result : results) { System.out.println(" " + result); } } @@ -197,43 +195,38 @@ public class TestFreeTextSuggester extends LuceneTestCase { // Make sure you can suggest based only on unigram model: public void testUnigrams() throws Exception { - Iterable keys = AnalyzingSuggesterTest.shuffle( - new Input("foo bar baz blah boo foo bar foo bee", 50) - ); + Iterable keys = + AnalyzingSuggesterTest.shuffle(new Input("foo bar baz blah boo foo bar foo bee", 50)); Analyzer a = new MockAnalyzer(random()); FreeTextSuggester sug = new FreeTextSuggester(a, a, 1, (byte) 0x20); sug.build(new InputArrayIterator(keys)); // Sorts first by count, descending, second by term, ascending - assertEquals("bar/0.22 baz/0.11 bee/0.11 blah/0.11 boo/0.11", - toString(sug.lookup("b", 10))); + assertEquals("bar/0.22 baz/0.11 bee/0.11 blah/0.11 boo/0.11", toString(sug.lookup("b", 10))); a.close(); } // Make sure the last token is not duplicated public void testNoDupsAcrossGrams() throws Exception { - Iterable keys = AnalyzingSuggesterTest.shuffle( - new Input("foo bar bar bar bar", 50) - ); + Iterable keys = AnalyzingSuggesterTest.shuffle(new Input("foo bar bar bar bar", 50)); Analyzer a = new MockAnalyzer(random()); FreeTextSuggester sug = new FreeTextSuggester(a, a, 2, (byte) 0x20); sug.build(new InputArrayIterator(keys)); - assertEquals("foo bar/1.00", - toString(sug.lookup("foo b", 10))); + assertEquals("foo bar/1.00", toString(sug.lookup("foo b", 10))); a.close(); 
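The expected scores in these FreeTextSuggester tests can be checked by hand. For testBasic the corpus is "foo bar baz blah" plus "boo foo bar foo bee", i.e. 9 unigrams in total with count(foo) = 3, count(foo bar) = 2 and count(foo bee) = 1. The bigram model therefore gives foo bar = 2/3 ≈ 0.67 and foo bee = 1/3 ≈ 0.33, while unigram candidates are discounted by the 0.4 backoff factor, e.g. baz = 0.4 × 1/9 ≈ 0.04; a pure unigram lookup such as "b" yields bar = 2/9 ≈ 0.22 and baz = 1/9 ≈ 0.11, matching the asserted strings.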
} // Lookup of just empty string produces unicode only matches: public void testEmptyString() throws Exception { - Iterable keys = AnalyzingSuggesterTest.shuffle( - new Input("foo bar bar bar bar", 50) - ); + Iterable keys = AnalyzingSuggesterTest.shuffle(new Input("foo bar bar bar bar", 50)); Analyzer a = new MockAnalyzer(random()); FreeTextSuggester sug = new FreeTextSuggester(a, a, 2, (byte) 0x20); sug.build(new InputArrayIterator(keys)); - expectThrows(IllegalArgumentException.class, () -> { - sug.lookup("", 10); - }); + expectThrows( + IllegalArgumentException.class, + () -> { + sug.lookup("", 10); + }); a.close(); } @@ -242,27 +235,24 @@ public class TestFreeTextSuggester extends LuceneTestCase { // we should properly predict from that: public void testEndingHole() throws Exception { // Just deletes "of" - Analyzer a = new Analyzer() { - @Override - public TokenStreamComponents createComponents(String field) { - Tokenizer tokenizer = new MockTokenizer(); - CharArraySet stopSet = StopFilter.makeStopSet("of"); - return new TokenStreamComponents(tokenizer, new StopFilter(tokenizer, stopSet)); - } - }; + Analyzer a = + new Analyzer() { + @Override + public TokenStreamComponents createComponents(String field) { + Tokenizer tokenizer = new MockTokenizer(); + CharArraySet stopSet = StopFilter.makeStopSet("of"); + return new TokenStreamComponents(tokenizer, new StopFilter(tokenizer, stopSet)); + } + }; - Iterable keys = AnalyzingSuggesterTest.shuffle( - new Input("wizard of oz", 50) - ); + Iterable keys = AnalyzingSuggesterTest.shuffle(new Input("wizard of oz", 50)); FreeTextSuggester sug = new FreeTextSuggester(a, a, 3, (byte) 0x20); sug.build(new InputArrayIterator(keys)); - assertEquals("wizard _ oz/1.00", - toString(sug.lookup("wizard of", 10))); + assertEquals("wizard _ oz/1.00", toString(sug.lookup("wizard of", 10))); // Falls back to unigram model, with backoff 0.4 times // prop 0.5: - assertEquals("oz/0.20", - toString(sug.lookup("wizard o", 10))); + assertEquals("oz/0.20", toString(sug.lookup("wizard o", 10))); a.close(); } @@ -271,38 +261,37 @@ public class TestFreeTextSuggester extends LuceneTestCase { // does not produce e.g. 
a hole only "_ _" token: public void testTwoEndingHoles() throws Exception { // Just deletes "of" - Analyzer a = new Analyzer() { - @Override - public TokenStreamComponents createComponents(String field) { - Tokenizer tokenizer = new MockTokenizer(); - CharArraySet stopSet = StopFilter.makeStopSet("of"); - return new TokenStreamComponents(tokenizer, new StopFilter(tokenizer, stopSet)); - } - }; + Analyzer a = + new Analyzer() { + @Override + public TokenStreamComponents createComponents(String field) { + Tokenizer tokenizer = new MockTokenizer(); + CharArraySet stopSet = StopFilter.makeStopSet("of"); + return new TokenStreamComponents(tokenizer, new StopFilter(tokenizer, stopSet)); + } + }; - Iterable keys = AnalyzingSuggesterTest.shuffle( - new Input("wizard of of oz", 50) - ); + Iterable keys = AnalyzingSuggesterTest.shuffle(new Input("wizard of of oz", 50)); FreeTextSuggester sug = new FreeTextSuggester(a, a, 3, (byte) 0x20); sug.build(new InputArrayIterator(keys)); - assertEquals("", - toString(sug.lookup("wizard of of", 10))); + assertEquals("", toString(sug.lookup("wizard of of", 10))); a.close(); } - private static Comparator byScoreThenKey = new Comparator() { - @Override - public int compare(LookupResult a, LookupResult b) { - if (a.value > b.value) { - return -1; - } else if (a.value < b.value) { - return 1; - } else { - // Tie break by UTF16 sort order: - return ((String) a.key).compareTo((String) b.key); - } - } - }; + private static Comparator byScoreThenKey = + new Comparator() { + @Override + public int compare(LookupResult a, LookupResult b) { + if (a.value > b.value) { + return -1; + } else if (a.value < b.value) { + return 1; + } else { + // Tie break by UTF16 sort order: + return ((String) a.key).compareTo((String) b.key); + } + } + }; public void testRandom() throws IOException { String[] terms = new String[TestUtil.nextInt(random(), 2, 10)]; @@ -320,12 +309,12 @@ public class TestFreeTextSuggester extends LuceneTestCase { int numDocs = atLeast(10); long totTokens = 0; final String[][] docs = new String[numDocs][]; - for(int i=0;i contexts() { - return null; - } + @Override + public Set contexts() { + return null; + } - @Override - public boolean hasContexts() { - return false; - } - }); + @Override + public boolean hasContexts() { + return false; + } + }); // Build inefficient but hopefully correct model: - List> gramCounts = new ArrayList<>(grams); - for(int gram=0;gram> gramCounts = new ArrayList<>(grams); + for (int gram = 0; gram < grams; gram++) { if (VERBOSE) { System.out.println("TEST: build model for gram=" + gram); } - Map model = new HashMap<>(); + Map model = new HashMap<>(); gramCounts.add(model); - for(String[] doc : docs) { - for(int i=0;i i) { b.append(' '); } @@ -421,9 +412,9 @@ public class TestFreeTextSuggester extends LuceneTestCase { } int lookups = atLeast(100); - for(int iter=0;iter=0;i--) { + for (int i = grams - 1; i >= 0; i--) { if (VERBOSE) { System.out.println(" grams=" + i); } - if (tokens.length < i+1) { + if (tokens.length < i + 1) { // Don't have enough tokens to use this model if (VERBOSE) { System.out.println(" skip"); @@ -472,7 +463,7 @@ public class TestFreeTextSuggester extends LuceneTestCase { continue; } - if (i == 0 && tokens[tokens.length-1].length() == 0) { + if (i == 0 && tokens[tokens.length - 1].length() == 0) { // Never suggest unigrams from empty string: if (VERBOSE) { System.out.println(" skip unigram priors only"); @@ -482,7 +473,7 @@ public class TestFreeTextSuggester extends LuceneTestCase { // Build up "context" ngram: 
b = new StringBuilder(); - for(int j=tokens.length-i-1;j { - createFactory("bogusArg", "bogusValue"); - }); + IllegalArgumentException expected = + expectThrows( + IllegalArgumentException.class, + () -> { + createFactory("bogusArg", "bogusValue"); + }); assertTrue(expected.getMessage().contains("Unknown parameters")); } /** Test that bogus arguments result in exception */ public void testBogusFormats() throws Exception { - IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> { - createFactory("words", "stop-snowball.txt", - "format", "bogus"); - }); + IllegalArgumentException expected = + expectThrows( + IllegalArgumentException.class, + () -> { + createFactory("words", "stop-snowball.txt", "format", "bogus"); + }); String msg = expected.getMessage(); assertTrue(msg, msg.contains("Unknown")); assertTrue(msg, msg.contains("format")); assertTrue(msg, msg.contains("bogus")); - - expected = expectThrows(IllegalArgumentException.class, () -> { - createFactory( - // implicit default words file - "format", "bogus"); - }); + + expected = + expectThrows( + IllegalArgumentException.class, + () -> { + createFactory( + // implicit default words file + "format", "bogus"); + }); msg = expected.getMessage(); assertTrue(msg, msg.contains("can not be specified")); assertTrue(msg, msg.contains("format")); assertTrue(msg, msg.contains("bogus")); - } + } - private SuggestStopFilterFactory createFactory(String ... params) throws IOException { - if(params.length%2 != 0) { + private SuggestStopFilterFactory createFactory(String... params) throws IOException { + if (params.length % 2 != 0) { throw new IllegalArgumentException("invalid keysAndValues map"); } - Map args = new HashMap<>(params.length/2); - for(int i=0; i args = new HashMap<>(params.length / 2); + for (int i = 0; i < params.length; i += 2) { + String previous = args.put(params[i], params[i + 1]); assertNull("duplicate values for key: " + params[i], previous); } args.put("luceneMatchVersion", Version.LATEST.toString()); diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestContextQuery.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestContextQuery.java index c25b44d5706..43c7b95a375 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestContextQuery.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestContextQuery.java @@ -16,12 +16,15 @@ */ package org.apache.lucene.search.suggest.document; +import static org.apache.lucene.search.suggest.document.TestSuggestField.Entry; +import static org.apache.lucene.search.suggest.document.TestSuggestField.assertSuggestions; +import static org.apache.lucene.search.suggest.document.TestSuggestField.iwcWithSuggestField; + import java.util.ArrayList; import java.util.Comparator; import java.util.HashSet; import java.util.List; import java.util.Set; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; @@ -36,10 +39,6 @@ import org.junit.After; import org.junit.Before; import org.junit.Test; -import static org.apache.lucene.search.suggest.document.TestSuggestField.Entry; -import static org.apache.lucene.search.suggest.document.TestSuggestField.assertSuggestions; -import static org.apache.lucene.search.suggest.document.TestSuggestField.iwcWithSuggestField; - public class TestContextQuery extends LuceneTestCase { public Directory dir; @@ -55,17 +54,23 @@ public class TestContextQuery extends 
LuceneTestCase { @Test public void testIllegalInnerQuery() throws Exception { - IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> { - new ContextQuery(new ContextQuery( - new PrefixCompletionQuery(new MockAnalyzer(random()), new Term("suggest_field", "sugg")))); - }); + IllegalArgumentException expected = + expectThrows( + IllegalArgumentException.class, + () -> { + new ContextQuery( + new ContextQuery( + new PrefixCompletionQuery( + new MockAnalyzer(random()), new Term("suggest_field", "sugg")))); + }); assertTrue(expected.getMessage().contains(ContextQuery.class.getSimpleName())); } @Test public void testSimpleContextQuery() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); + RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); Document document = new Document(); document.add(new ContextSuggestField("suggest_field", "suggestion1", 8, "type1")); @@ -83,18 +88,19 @@ public class TestContextQuery extends LuceneTestCase { DirectoryReader reader = iw.getReader(); SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader); - ContextQuery query = new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg"))); + ContextQuery query = + new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg"))); query.addContext("type1", 1); query.addContext("type2", 2); query.addContext("type3", 3); query.addContext("type4", 4); TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 5, false); - assertSuggestions(suggest, + assertSuggestions( + suggest, new Entry("suggestion4", "type4", 5 * 4), new Entry("suggestion3", "type3", 6 * 3), new Entry("suggestion2", "type2", 7 * 2), - new Entry("suggestion1", "type1", 8 * 1) - ); + new Entry("suggestion1", "type1", 8 * 1)); reader.close(); iw.close(); @@ -103,7 +109,8 @@ public class TestContextQuery extends LuceneTestCase { @Test public void testContextQueryOnSuggestField() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); + RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); Document document = new Document(); document.add(new SuggestField("suggest_field", "abc", 3)); @@ -121,10 +128,14 @@ public class TestContextQuery extends LuceneTestCase { DirectoryReader reader = iw.getReader(); SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader); - ContextQuery query = new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "ab"))); - IllegalStateException expected = expectThrows(IllegalStateException.class, () -> { - suggestIndexSearcher.suggest(query, 4, false); - }); + ContextQuery query = + new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "ab"))); + IllegalStateException expected = + expectThrows( + IllegalStateException.class, + () -> { + suggestIndexSearcher.suggest(query, 4, false); + }); assertTrue(expected.getMessage().contains("SuggestField")); reader.close(); @@ -134,7 +145,8 @@ public class TestContextQuery extends LuceneTestCase { @Test public void testNonExactContextQuery() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, 
iwcWithSuggestField(analyzer, "suggest_field")); + RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); Document document = new Document(); document.add(new ContextSuggestField("suggest_field", "suggestion1", 4, "type1")); @@ -152,10 +164,12 @@ public class TestContextQuery extends LuceneTestCase { DirectoryReader reader = iw.getReader(); SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader); - ContextQuery query = new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg"))); + ContextQuery query = + new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg"))); query.addContext("type", 1, false); TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 5, false); - assertSuggestions(suggest, + assertSuggestions( + suggest, new Entry("suggestion1", "type1", 4), new Entry("suggestion2", "type2", 3), new Entry("suggestion3", "type3", 2), @@ -168,7 +182,8 @@ public class TestContextQuery extends LuceneTestCase { @Test public void testContextPrecedenceBoost() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); + RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); Document document = new Document(); document.add(new ContextSuggestField("suggest_field", "suggestion1", 4, "typetype")); @@ -181,14 +196,15 @@ public class TestContextQuery extends LuceneTestCase { DirectoryReader reader = iw.getReader(); SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader); - ContextQuery query = new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg"))); + ContextQuery query = + new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg"))); query.addContext("type", 1); query.addContext("typetype", 2); TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 5, false); - assertSuggestions(suggest, + assertSuggestions( + suggest, new Entry("suggestion1", "typetype", 4 * 2), - new Entry("suggestion2", "type", 3 * 1) - ); + new Entry("suggestion2", "type", 3 * 1)); reader.close(); iw.close(); @@ -197,7 +213,8 @@ public class TestContextQuery extends LuceneTestCase { @Test public void testEmptyContext() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); + RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); Document document = new Document(); document.add(new ContextSuggestField("suggest_field", "suggestion_no_ctx", 4)); @@ -213,11 +230,11 @@ public class TestContextQuery extends LuceneTestCase { DirectoryReader reader = iw.getReader(); SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader); - ContextQuery query = new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg"))); + ContextQuery query = + new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg"))); TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 5, false); - assertSuggestions(suggest, - new Entry("suggestion_no_ctx", null, 4), - new Entry("suggestion", "type4", 1)); + assertSuggestions( + suggest, new Entry("suggestion_no_ctx", null, 4), new Entry("suggestion", "type4", 1)); 
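Two rules drive the expected orderings in these ContextQuery tests: a context boost multiplies the indexed suggestion weight, and addContext(ctx, boost, false) matches contexts by prefix rather than exactly. Restated against the tests above, using the tests' own names:

    // Exact context: suggestion4 was indexed with weight 5 under "type4",
    // so query.addContext("type4", 4) yields a score of 5 * 4 = 20.
    query.addContext("type4", 4);
    // Non-exact context: "type" prefix-matches type1..type4 with boost 1,
    // which is why testNonExactContextQuery sees the raw weights 4, 3, 2, 1.
    query.addContext("type", 1, false);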
reader.close(); iw.close(); @@ -226,7 +243,8 @@ public class TestContextQuery extends LuceneTestCase { @Test public void testEmptyContextWithBoosts() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); + RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); Document document = new Document(); document.add(new ContextSuggestField("suggest_field", "suggestion1", 4)); @@ -245,16 +263,17 @@ public class TestContextQuery extends LuceneTestCase { DirectoryReader reader = iw.getReader(); SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader); - ContextQuery query = new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg"))); + ContextQuery query = + new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg"))); query.addContext("type4", 10); query.addAllContexts(); TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 5, false); - assertSuggestions(suggest, + assertSuggestions( + suggest, new Entry("suggestion4", "type4", 1 * 10), new Entry("suggestion1", null, 4), new Entry("suggestion2", null, 3), - new Entry("suggestion3", null, 2) - ); + new Entry("suggestion3", null, 2)); reader.close(); iw.close(); } @@ -262,10 +281,12 @@ public class TestContextQuery extends LuceneTestCase { @Test public void testSameSuggestionMultipleContext() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); + RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); Document document = new Document(); - document.add(new ContextSuggestField("suggest_field", "suggestion", 4, "type1", "type2", "type3")); + document.add( + new ContextSuggestField("suggest_field", "suggestion", 4, "type1", "type2", "type3")); iw.addDocument(document); document = new Document(); @@ -278,18 +299,19 @@ public class TestContextQuery extends LuceneTestCase { DirectoryReader reader = iw.getReader(); SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader); - ContextQuery query = new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg"))); + ContextQuery query = + new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg"))); query.addContext("type1", 10); query.addContext("type2", 2); query.addContext("type3", 3); query.addContext("type4", 4); TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 5, false); - assertSuggestions(suggest, + assertSuggestions( + suggest, new Entry("suggestion", "type1", 4 * 10), new Entry("suggestion", "type3", 4 * 3), new Entry("suggestion", "type2", 4 * 2), - new Entry("suggestion", "type4", 1 * 4) - ); + new Entry("suggestion", "type4", 1 * 4)); reader.close(); iw.close(); @@ -298,7 +320,8 @@ public class TestContextQuery extends LuceneTestCase { @Test public void testMixedContextQuery() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); + RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); Document document = new Document(); document.add(new ContextSuggestField("suggest_field", "suggestion1", 4, "type1")); @@ 
-316,17 +339,18 @@ public class TestContextQuery extends LuceneTestCase { DirectoryReader reader = iw.getReader(); SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader); - ContextQuery query = new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg"))); + ContextQuery query = + new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg"))); query.addContext("type1", 7); query.addContext("type2", 6); query.addAllContexts(); TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 5, false); - assertSuggestions(suggest, + assertSuggestions( + suggest, new Entry("suggestion1", "type1", 4 * 7), new Entry("suggestion2", "type2", 3 * 6), new Entry("suggestion3", "type3", 2), - new Entry("suggestion4", "type4", 1) - ); + new Entry("suggestion4", "type4", 1)); reader.close(); iw.close(); @@ -335,7 +359,8 @@ public class TestContextQuery extends LuceneTestCase { @Test public void testFilteringContextQuery() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); + RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); Document document = new Document(); document.add(new ContextSuggestField("suggest_field", "suggestion1", 4, "type1")); @@ -353,14 +378,15 @@ public class TestContextQuery extends LuceneTestCase { DirectoryReader reader = iw.getReader(); SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader); - ContextQuery query = new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg"))); + ContextQuery query = + new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg"))); query.addContext("type3", 3); query.addContext("type4", 4); TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 5, false); - assertSuggestions(suggest, + assertSuggestions( + suggest, new Entry("suggestion3", "type3", 2 * 3), - new Entry("suggestion4", "type4", 1 * 4) - ); + new Entry("suggestion4", "type4", 1 * 4)); reader.close(); iw.close(); @@ -369,7 +395,8 @@ public class TestContextQuery extends LuceneTestCase { @Test public void testContextQueryRewrite() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); + RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); Document document = new Document(); document.add(new ContextSuggestField("suggest_field", "suggestion1", 4, "type1")); @@ -389,7 +416,8 @@ public class TestContextQuery extends LuceneTestCase { SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader); CompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg")); TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 5, false); - assertSuggestions(suggest, + assertSuggestions( + suggest, new Entry("suggestion1", "type1", 4), new Entry("suggestion2", "type2", 3), new Entry("suggestion3", "type3", 2), @@ -402,7 +430,8 @@ public class TestContextQuery extends LuceneTestCase { @Test public void testMultiContextQuery() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); + RandomIndexWriter iw = + new 
RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); Document document = new Document(); document.add(new ContextSuggestField("suggest_field", "suggestion1", 8, "type1", "type3")); @@ -420,13 +449,15 @@ public class TestContextQuery extends LuceneTestCase { DirectoryReader reader = iw.getReader(); SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader); - ContextQuery query = new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg"))); + ContextQuery query = + new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg"))); query.addContext("type1", 1); query.addContext("type2", 2); query.addContext("type3", 3); query.addContext("type4", 4); TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 5, false); - assertSuggestions(suggest, + assertSuggestions( + suggest, new Entry("suggestion1", "type3", 8 * 3), new Entry("suggestion4", "type4", 5 * 4), new Entry("suggestion3", "type3", 6 * 3), @@ -440,7 +471,8 @@ public class TestContextQuery extends LuceneTestCase { @Test public void testAllContextQuery() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); + RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); Document document = new Document(); document.add(new ContextSuggestField("suggest_field", "suggestion1", 4, "type1")); @@ -458,9 +490,11 @@ public class TestContextQuery extends LuceneTestCase { DirectoryReader reader = iw.getReader(); SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader); - ContextQuery query = new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg"))); + ContextQuery query = + new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg"))); TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 4, false); - assertSuggestions(suggest, + assertSuggestions( + suggest, new Entry("suggestion1", "type1", 4), new Entry("suggestion2", "type2", 3), new Entry("suggestion3", "type3", 2), @@ -473,7 +507,8 @@ public class TestContextQuery extends LuceneTestCase { @Test public void testRandomContextQueryScoring() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); - try(RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"))) { + try (RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"))) { int numSuggestions = atLeast(20); int numContexts = atLeast(5); @@ -501,21 +536,25 @@ public class TestContextQuery extends LuceneTestCase { } Entry[] expectedResults = expectedEntries.toArray(new Entry[expectedEntries.size()]); - ArrayUtil.introSort(expectedResults, new Comparator() { - @Override - public int compare(Entry o1, Entry o2) { - int cmp = Float.compare(o2.value, o1.value); - if (cmp != 0) { - return cmp; - } else { - return o1.output.compareTo(o2.output); - } - } - }); + ArrayUtil.introSort( + expectedResults, + new Comparator() { + @Override + public int compare(Entry o1, Entry o2) { + int cmp = Float.compare(o2.value, o1.value); + if (cmp != 0) { + return cmp; + } else { + return o1.output.compareTo(o2.output); + } + } + }); - try(DirectoryReader reader = iw.getReader()) { + try (DirectoryReader reader = iw.getReader()) { SuggestIndexSearcher suggestIndexSearcher = new 
SuggestIndexSearcher(reader); - ContextQuery query = new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg"))); + ContextQuery query = + new ContextQuery( + new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg"))); for (int i = 0; i < contexts.size(); i++) { query.addContext(contexts.get(i), i + 1); } diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestContextSuggestField.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestContextSuggestField.java index 1814b5dfb4e..39a4b8eda4e 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestContextSuggestField.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestContextSuggestField.java @@ -16,8 +16,12 @@ */ package org.apache.lucene.search.suggest.document; -import java.io.ByteArrayOutputStream; +import static org.apache.lucene.analysis.BaseTokenStreamTestCase.assertTokenStreamContents; +import static org.apache.lucene.search.suggest.document.TestSuggestField.Entry; +import static org.apache.lucene.search.suggest.document.TestSuggestField.assertSuggestions; +import static org.apache.lucene.search.suggest.document.TestSuggestField.iwcWithSuggestField; +import java.io.ByteArrayOutputStream; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.TokenStream; @@ -36,11 +40,6 @@ import org.junit.After; import org.junit.Before; import org.junit.Test; -import static org.apache.lucene.analysis.BaseTokenStreamTestCase.assertTokenStreamContents; -import static org.apache.lucene.search.suggest.document.TestSuggestField.Entry; -import static org.apache.lucene.search.suggest.document.TestSuggestField.assertSuggestions; -import static org.apache.lucene.search.suggest.document.TestSuggestField.iwcWithSuggestField; - public class TestContextSuggestField extends LuceneTestCase { public Directory dir; @@ -57,9 +56,12 @@ public class TestContextSuggestField extends LuceneTestCase { @Test public void testEmptySuggestion() throws Exception { - IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> { - new ContextSuggestField("suggest_field", "", 1, "type1"); - }); + IllegalArgumentException expected = + expectThrows( + IllegalArgumentException.class, + () -> { + new ContextSuggestField("suggest_field", "", 1, "type1"); + }); assertTrue(expected.getMessage().contains("value")); } @@ -71,24 +73,34 @@ public class TestContextSuggestField extends LuceneTestCase { Analyzer analyzer = new MockAnalyzer(random()); Document document = new Document(); - try (RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "name"))) { + try (RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "name"))) { // exception should be thrown for context value containing CONTEXT_SEPARATOR - IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> { - document.add(new ContextSuggestField("name", "sugg", 1, charsRefBuilder.toString())); - iw.addDocument(document); - iw.commit(); - }); + IllegalArgumentException expected = + expectThrows( + IllegalArgumentException.class, + () -> { + document.add( + new ContextSuggestField("name", "sugg", 1, charsRefBuilder.toString())); + iw.addDocument(document); + iw.commit(); + }); assertTrue(expected.getMessage().contains("[0x1d]")); } document.clear(); - try (RandomIndexWriter iw = new 
RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "name"))) { + try (RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "name"))) { // exception should be thrown for context value containing CONTEXT_SEPARATOR - IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> { - document.add(new ContextSuggestField("name", charsRefBuilder.toString(), 1, "sugg")); - iw.addDocument(document); - iw.commit(false); - }); + IllegalArgumentException expected = + expectThrows( + IllegalArgumentException.class, + () -> { + document.add( + new ContextSuggestField("name", charsRefBuilder.toString(), 1, "sugg")); + iw.addDocument(document); + iw.commit(false); + }); assertTrue(expected.getMessage().contains("[0x1d]")); } } @@ -96,7 +108,8 @@ public class TestContextSuggestField extends LuceneTestCase { @Test public void testTokenStream() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); - ContextSuggestField field = new ContextSuggestField("field", "input", 1, "context1", "context2"); + ContextSuggestField field = + new ContextSuggestField("field", "input", 1, "context1", "context2"); BytesRef surfaceForm = new BytesRef("input"); ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); try (OutputStreamDataOutput output = new OutputStreamDataOutput(byteArrayOutputStream)) { @@ -119,12 +132,31 @@ public class TestContextSuggestField extends LuceneTestCase { builder.append((char) ConcatenateGraphFilter.SEP_LABEL); builder.append("input"); expectedOutputs[1] = builder.toCharsRef().toString(); - TokenStream stream = new TestSuggestField.PayloadAttrToTypeAttrFilter(field.tokenStream(analyzer, null)); - assertTokenStreamContents(stream, expectedOutputs, null, null, new String[]{payload.utf8ToString(), payload.utf8ToString()}, new int[]{1, 0}, null, null); + TokenStream stream = + new TestSuggestField.PayloadAttrToTypeAttrFilter(field.tokenStream(analyzer, null)); + assertTokenStreamContents( + stream, + expectedOutputs, + null, + null, + new String[] {payload.utf8ToString(), payload.utf8ToString()}, + new int[] {1, 0}, + null, + null); CompletionAnalyzer completionAnalyzer = new CompletionAnalyzer(analyzer); - stream = new TestSuggestField.PayloadAttrToTypeAttrFilter(field.tokenStream(completionAnalyzer, null)); - assertTokenStreamContents(stream, expectedOutputs, null, null, new String[]{payload.utf8ToString(), payload.utf8ToString()}, new int[]{1, 0}, null, null); + stream = + new TestSuggestField.PayloadAttrToTypeAttrFilter( + field.tokenStream(completionAnalyzer, null)); + assertTokenStreamContents( + stream, + expectedOutputs, + null, + null, + new String[] {payload.utf8ToString(), payload.utf8ToString()}, + new int[] {1, 0}, + null, + null); } @Test @@ -134,13 +166,16 @@ public class TestContextSuggestField extends LuceneTestCase { document.add(new SuggestField("suggest_field", "suggestion1", 4)); document.add(new ContextSuggestField("suggest_field", "suggestion2", 3)); - try (RandomIndexWriter iw = new RandomIndexWriter(random(), dir, - iwcWithSuggestField(analyzer, "suggest_field"))) { + try (RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"))) { // mixing suggest field types for same field name should error out - IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> { - iw.addDocument(document); - iw.commit(false); - }); + IllegalArgumentException expected = + expectThrows( + 
IllegalArgumentException.class, + () -> { + iw.addDocument(document); + iw.commit(false); + }); assertTrue(expected.getMessage().contains("mixed types")); } } @@ -148,8 +183,9 @@ public class TestContextSuggestField extends LuceneTestCase { @Test public void testWithSuggestFields() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, - iwcWithSuggestField(analyzer, "suggest_field", "context_suggest_field")); + RandomIndexWriter iw = + new RandomIndexWriter( + random(), dir, iwcWithSuggestField(analyzer, "suggest_field", "context_suggest_field")); Document document = new Document(); document.add(new SuggestField("suggest_field", "suggestion1", 4)); @@ -174,7 +210,8 @@ public class TestContextSuggestField extends LuceneTestCase { CompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg")); TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 10, false); - assertSuggestions(suggest, + assertSuggestions( + suggest, new Entry("suggestion1", 4), new Entry("suggestion2", 3), new Entry("suggestion3", 2), @@ -182,7 +219,8 @@ public class TestContextSuggestField extends LuceneTestCase { query = new PrefixCompletionQuery(analyzer, new Term("context_suggest_field", "sugg")); suggest = suggestIndexSearcher.suggest(query, 10, false); - assertSuggestions(suggest, + assertSuggestions( + suggest, new Entry("suggestion1", "type1", 4), new Entry("suggestion2", "type2", 3), new Entry("suggestion3", "type3", 2), @@ -194,8 +232,11 @@ public class TestContextSuggestField extends LuceneTestCase { @Test public void testCompletionAnalyzer() throws Exception { - CompletionAnalyzer completionAnalyzer = new CompletionAnalyzer(new StandardAnalyzer(), true, true); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(completionAnalyzer, "suggest_field")); + CompletionAnalyzer completionAnalyzer = + new CompletionAnalyzer(new StandardAnalyzer(), true, true); + RandomIndexWriter iw = + new RandomIndexWriter( + random(), dir, iwcWithSuggestField(completionAnalyzer, "suggest_field")); Document document = new Document(); document.add(new ContextSuggestField("suggest_field", "suggestion1", 4, "type1")); @@ -212,17 +253,19 @@ public class TestContextSuggestField extends LuceneTestCase { DirectoryReader reader = iw.getReader(); SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader); - ContextQuery query = new ContextQuery(new PrefixCompletionQuery(completionAnalyzer, new Term("suggest_field", "sugg"))); + ContextQuery query = + new ContextQuery( + new PrefixCompletionQuery(completionAnalyzer, new Term("suggest_field", "sugg"))); TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 4, false); - assertSuggestions(suggest, + assertSuggestions( + suggest, new Entry("suggestion1", "type1", 4), new Entry("suggestion2", "type2", 3), new Entry("suggestion3", "type3", 2), new Entry("suggestion4", "type4", 1)); query.addContext("type1"); suggest = suggestIndexSearcher.suggest(query, 4, false); - assertSuggestions(suggest, - new Entry("suggestion1", "type1", 4)); + assertSuggestions(suggest, new Entry("suggestion1", "type1", 4)); reader.close(); iw.close(); } diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestFuzzyCompletionQuery.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestFuzzyCompletionQuery.java index ae93de4462a..e8a5531d5a5 100644 --- 
a/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestFuzzyCompletionQuery.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestFuzzyCompletionQuery.java @@ -16,6 +16,10 @@ */ package org.apache.lucene.search.suggest.document; +import static org.apache.lucene.search.suggest.document.TestSuggestField.Entry; +import static org.apache.lucene.search.suggest.document.TestSuggestField.assertSuggestions; +import static org.apache.lucene.search.suggest.document.TestSuggestField.iwcWithSuggestField; + import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; @@ -28,10 +32,6 @@ import org.junit.After; import org.junit.Before; import org.junit.Test; -import static org.apache.lucene.search.suggest.document.TestSuggestField.Entry; -import static org.apache.lucene.search.suggest.document.TestSuggestField.assertSuggestions; -import static org.apache.lucene.search.suggest.document.TestSuggestField.iwcWithSuggestField; - public class TestFuzzyCompletionQuery extends LuceneTestCase { public Directory dir; @@ -48,7 +48,8 @@ public class TestFuzzyCompletionQuery extends LuceneTestCase { @Test public void testFuzzyQuery() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); + RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); Document document = new Document(); document.add(new SuggestField("suggest_field", "suggestion", 2)); @@ -67,12 +68,12 @@ public class TestFuzzyCompletionQuery extends LuceneTestCase { SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader); CompletionQuery query = new FuzzyCompletionQuery(analyzer, new Term("suggest_field", "sugg")); TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 4, false); - assertSuggestions(suggest, + assertSuggestions( + suggest, new Entry("suaggestion", 4 * 2), new Entry("suggestion", 2 * 3), new Entry("sugfoo", 1 * 3), - new Entry("ssuggestion", 1 * 1) - ); + new Entry("ssuggestion", 1 * 1)); reader.close(); iw.close(); @@ -81,7 +82,8 @@ public class TestFuzzyCompletionQuery extends LuceneTestCase { @Test public void testFuzzyContextQuery() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); + RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); Document document = new Document(); document.add(new ContextSuggestField("suggest_field", "sduggestion", 1, "type1")); @@ -100,15 +102,16 @@ public class TestFuzzyCompletionQuery extends LuceneTestCase { DirectoryReader reader = iw.getReader(); SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader); - CompletionQuery query = new ContextQuery(new FuzzyCompletionQuery(analyzer, new Term("suggest_field", "sugge"))); + CompletionQuery query = + new ContextQuery(new FuzzyCompletionQuery(analyzer, new Term("suggest_field", "sugge"))); TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 5, false); - assertSuggestions(suggest, + assertSuggestions( + suggest, new Entry("suggdestion", "type4", 4), new Entry("suggestion", "type4", 4), new Entry("sugdgestion", "type3", 3), new Entry("sudggestion", "type2", 2), - new Entry("sduggestion", "type1", 1) - ); + new Entry("sduggestion", 
"type1", 1)); reader.close(); iw.close(); @@ -117,7 +120,8 @@ public class TestFuzzyCompletionQuery extends LuceneTestCase { @Test public void testFuzzyFilteredContextQuery() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); + RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); Document document = new Document(); document.add(new ContextSuggestField("suggest_field", "sduggestion", 1, "type1")); @@ -136,15 +140,16 @@ public class TestFuzzyCompletionQuery extends LuceneTestCase { DirectoryReader reader = iw.getReader(); SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader); - CompletionQuery fuzzyQuery = new FuzzyCompletionQuery(analyzer, new Term("suggest_field", "sugge")); + CompletionQuery fuzzyQuery = + new FuzzyCompletionQuery(analyzer, new Term("suggest_field", "sugge")); ContextQuery contextQuery = new ContextQuery(fuzzyQuery); contextQuery.addContext("type1", 6); contextQuery.addContext("type3", 2); TopSuggestDocs suggest = suggestIndexSearcher.suggest(contextQuery, 5, false); - assertSuggestions(suggest, + assertSuggestions( + suggest, new Entry("sduggestion", "type1", 1 * (1 + 6)), - new Entry("sugdgestion", "type3", 1 * (3 + 2)) - ); + new Entry("sugdgestion", "type3", 1 * (3 + 2))); reader.close(); iw.close(); diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestPrefixCompletionQuery.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestPrefixCompletionQuery.java index 4339f991ffa..172f43b285c 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestPrefixCompletionQuery.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestPrefixCompletionQuery.java @@ -16,9 +16,14 @@ */ package org.apache.lucene.search.suggest.document; +import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS; +import static org.apache.lucene.search.suggest.document.TestSuggestField.Entry; +import static org.apache.lucene.search.suggest.document.TestSuggestField.assertSuggestions; +import static org.apache.lucene.search.suggest.document.TestSuggestField.iwcWithSuggestField; +import static org.hamcrest.core.IsEqual.equalTo; + import java.io.IOException; import java.util.Objects; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenFilter; @@ -44,12 +49,6 @@ import org.junit.After; import org.junit.Before; import org.junit.Test; -import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS; -import static org.apache.lucene.search.suggest.document.TestSuggestField.Entry; -import static org.apache.lucene.search.suggest.document.TestSuggestField.assertSuggestions; -import static org.apache.lucene.search.suggest.document.TestSuggestField.iwcWithSuggestField; -import static org.hamcrest.core.IsEqual.equalTo; - public class TestPrefixCompletionQuery extends LuceneTestCase { private static class NumericRangeBitsProducer extends BitsProducer { @@ -74,9 +73,7 @@ public class TestPrefixCompletionQuery extends LuceneTestCase { return false; } NumericRangeBitsProducer that = (NumericRangeBitsProducer) obj; - return field.equals(that.field) - && min == that.min - && max == that.max; + return field.equals(that.field) && min == that.min && max == that.max; } @Override @@ -118,7 +115,8 @@ public class 
TestPrefixCompletionQuery extends LuceneTestCase { public void testSimple() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); + RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); Document document = new Document(); document.add(new SuggestField("suggest_field", "abc", 3)); @@ -136,7 +134,8 @@ public class TestPrefixCompletionQuery extends LuceneTestCase { DirectoryReader reader = iw.getReader(); SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader); - PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "ab")); + PrefixCompletionQuery query = + new PrefixCompletionQuery(analyzer, new Term("suggest_field", "ab")); TopSuggestDocs lookupDocs = suggestIndexSearcher.suggest(query, 3, false); assertSuggestions(lookupDocs, new Entry("abcdd", 5), new Entry("abd", 4), new Entry("abc", 3)); @@ -147,7 +146,8 @@ public class TestPrefixCompletionQuery extends LuceneTestCase { @Test public void testEmptyPrefixQuery() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); + RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); Document document = new Document(); document.add(new SuggestField("suggest_field", "suggestion1", 1)); iw.addDocument(document); @@ -158,7 +158,8 @@ public class TestPrefixCompletionQuery extends LuceneTestCase { DirectoryReader reader = iw.getReader(); SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader); - PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "")); + PrefixCompletionQuery query = + new PrefixCompletionQuery(analyzer, new Term("suggest_field", "")); TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 5, false); assertEquals(0, suggest.scoreDocs.length); @@ -169,7 +170,8 @@ public class TestPrefixCompletionQuery extends LuceneTestCase { public void testMostlyFilteredOutDocuments() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); + RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); int num = Math.min(1000, atLeast(10)); for (int i = 0; i < num; i++) { Document document = new Document(); @@ -185,9 +187,10 @@ public class TestPrefixCompletionQuery extends LuceneTestCase { DirectoryReader reader = iw.getReader(); SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader); - int topScore = num/2; + int topScore = num / 2; BitsProducer filter = new NumericRangeBitsProducer("filter_int_fld", 0, topScore); - PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "abc_"), filter); + PrefixCompletionQuery query = + new PrefixCompletionQuery(analyzer, new Term("suggest_field", "abc_"), filter); // if at most half of the top scoring documents have been filtered out // the search should be admissible for a single segment TopSuggestDocs suggest = indexSearcher.suggest(query, num, false); @@ -215,7 +218,8 @@ public class TestPrefixCompletionQuery extends LuceneTestCase { public void testDocFiltering() throws Exception { Analyzer analyzer = new 
MockAnalyzer(random()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); + RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); Document document = new Document(); document.add(new NumericDocValuesField("filter_int_fld", 9)); @@ -240,9 +244,11 @@ public class TestPrefixCompletionQuery extends LuceneTestCase { SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader); // suggest without filter - PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "app")); + PrefixCompletionQuery query = + new PrefixCompletionQuery(analyzer, new Term("suggest_field", "app")); TopSuggestDocs suggest = indexSearcher.suggest(query, 3, false); - assertSuggestions(suggest, new Entry("apple", 5), new Entry("applle", 4), new Entry("apples", 3)); + assertSuggestions( + suggest, new Entry("apple", 5), new Entry("applle", 4), new Entry("apples", 3)); // suggest with filter BitsProducer filter = new NumericRangeBitsProducer("filter_int_fld", 5, 12); @@ -255,10 +261,12 @@ public class TestPrefixCompletionQuery extends LuceneTestCase { } public void testAnalyzerDefaults() throws Exception { - Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET); + Analyzer analyzer = + new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET); CompletionAnalyzer completionAnalyzer = new CompletionAnalyzer(analyzer); final String field = getTestName(); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(completionAnalyzer, field)); + RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(completionAnalyzer, field)); Document document = new Document(); document.add(new SuggestField(field, "foobar", 7)); document.add(new SuggestField(field, "foo bar", 8)); @@ -272,14 +280,18 @@ public class TestPrefixCompletionQuery extends LuceneTestCase { DirectoryReader reader = iw.getReader(); SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader); CompletionQuery query = new PrefixCompletionQuery(completionAnalyzer, new Term(field, "fo")); - TopSuggestDocs suggest = indexSearcher.suggest(query, 9, false); //matches all with "fo*" - assertSuggestions(suggest, new Entry("foo the bar", 11), new Entry("foo bar", 8), new Entry("foobar", 7)); + TopSuggestDocs suggest = indexSearcher.suggest(query, 9, false); // matches all with "fo*" + assertSuggestions( + suggest, new Entry("foo the bar", 11), new Entry("foo bar", 8), new Entry("foobar", 7)); // with leading stopword - query = new PrefixCompletionQuery(completionAnalyzer, new Term(field, "the fo")); // becomes "_ fo*" + query = + new PrefixCompletionQuery(completionAnalyzer, new Term(field, "the fo")); // becomes "_ fo*" suggest = indexSearcher.suggest(query, 9, false); assertSuggestions(suggest, new Entry("the foo bar", 10), new Entry("the fo", 9)); // with middle stopword - query = new PrefixCompletionQuery(completionAnalyzer, new Term(field, "foo the bar")); // becomes "foo _ bar*" + query = + new PrefixCompletionQuery( + completionAnalyzer, new Term(field, "foo the bar")); // becomes "foo _ bar*" suggest = indexSearcher.suggest(query, 9, false); assertSuggestions(suggest, new Entry("foo the bar", 11)); // no space @@ -287,7 +299,9 @@ public class TestPrefixCompletionQuery extends LuceneTestCase { suggest = indexSearcher.suggest(query, 9, false); assertSuggestions(suggest, 
new Entry("foobar", 7)); // surrounding stopwords - query = new PrefixCompletionQuery(completionAnalyzer, new Term(field, "the baz the")); // becomes "_ baz _" + query = + new PrefixCompletionQuery( + completionAnalyzer, new Term(field, "the baz the")); // becomes "_ baz _" suggest = indexSearcher.suggest(query, 4, false); assertSuggestions(suggest); reader.close(); @@ -295,11 +309,14 @@ public class TestPrefixCompletionQuery extends LuceneTestCase { } public void testAnalyzerWithoutSeparator() throws Exception { - Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET); - //note: when we don't preserve separators, the choice of preservePosInc is irrelevant - CompletionAnalyzer completionAnalyzer = new CompletionAnalyzer(analyzer, false, random().nextBoolean()); + Analyzer analyzer = + new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET); + // note: when we don't preserve separators, the choice of preservePosInc is irrelevant + CompletionAnalyzer completionAnalyzer = + new CompletionAnalyzer(analyzer, false, random().nextBoolean()); final String field = getTestName(); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(completionAnalyzer, field)); + RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(completionAnalyzer, field)); Document document = new Document(); document.add(new SuggestField(field, "foobar", 7)); document.add(new SuggestField(field, "foo bar", 8)); @@ -310,37 +327,67 @@ public class TestPrefixCompletionQuery extends LuceneTestCase { iw.addDocument(document); - // note we use the completionAnalyzer with the queries (instead of input analyzer) because of non-default settings + // note we use the completionAnalyzer with the queries (instead of input analyzer) because of + // non-default settings DirectoryReader reader = iw.getReader(); SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader); CompletionQuery query = new PrefixCompletionQuery(completionAnalyzer, new Term(field, "fo")); - TopSuggestDocs suggest = indexSearcher.suggest(query, 9, false); //matches all with fo - assertSuggestions(suggest, new Entry("foo the bar", 11), new Entry("the foo bar", 10), new Entry("the fo", 9), new Entry("foo bar", 8), new Entry("foobar", 7)); + TopSuggestDocs suggest = indexSearcher.suggest(query, 9, false); // matches all with fo + assertSuggestions( + suggest, + new Entry("foo the bar", 11), + new Entry("the foo bar", 10), + new Entry("the fo", 9), + new Entry("foo bar", 8), + new Entry("foobar", 7)); // with leading stopword - query = new PrefixCompletionQuery(completionAnalyzer, new Term(field, "the fo")); // becomes "fo*" + query = + new PrefixCompletionQuery(completionAnalyzer, new Term(field, "the fo")); // becomes "fo*" suggest = indexSearcher.suggest(query, 9, false); - assertSuggestions(suggest, new Entry("foo the bar", 11), new Entry("the foo bar", 10), new Entry("the fo", 9), new Entry("foo bar", 8), new Entry("foobar", 7)); + assertSuggestions( + suggest, + new Entry("foo the bar", 11), + new Entry("the foo bar", 10), + new Entry("the fo", 9), + new Entry("foo bar", 8), + new Entry("foobar", 7)); // with middle stopword - query = new PrefixCompletionQuery(completionAnalyzer, new Term(field, "foo the bar")); // becomes "foobar*" + query = + new PrefixCompletionQuery( + completionAnalyzer, new Term(field, "foo the bar")); // becomes "foobar*" suggest = indexSearcher.suggest(query, 9, false); - 
assertSuggestions(suggest, new Entry("foo the bar", 11), new Entry("the foo bar", 10), new Entry("foo bar", 8), new Entry("foobar", 7)); + assertSuggestions( + suggest, + new Entry("foo the bar", 11), + new Entry("the foo bar", 10), + new Entry("foo bar", 8), + new Entry("foobar", 7)); // no space query = new PrefixCompletionQuery(completionAnalyzer, new Term(field, "foob")); suggest = indexSearcher.suggest(query, 9, false); // no separators, thus match several - assertSuggestions(suggest, new Entry("foo the bar", 11), new Entry("the foo bar", 10), new Entry("foo bar", 8), new Entry("foobar", 7)); + assertSuggestions( + suggest, + new Entry("foo the bar", 11), + new Entry("the foo bar", 10), + new Entry("foo bar", 8), + new Entry("foobar", 7)); // surrounding stopwords - query = new PrefixCompletionQuery(completionAnalyzer, new Term(field, "the baz the")); // becomes "baz*" - suggest = indexSearcher.suggest(query, 4, false);// stopwords in query get removed so we match + query = + new PrefixCompletionQuery( + completionAnalyzer, new Term(field, "the baz the")); // becomes "baz*" + suggest = indexSearcher.suggest(query, 4, false); // stopwords in query get removed so we match assertSuggestions(suggest, new Entry("baz the", 12)); reader.close(); iw.close(); } public void testAnalyzerNoPreservePosInc() throws Exception { - Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET); + Analyzer analyzer = + new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET); CompletionAnalyzer completionAnalyzer = new CompletionAnalyzer(analyzer, true, false); final String field = getTestName(); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(completionAnalyzer, field)); + RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(completionAnalyzer, field)); Document document = new Document(); document.add(new SuggestField(field, "foobar", 7)); document.add(new SuggestField(field, "foo bar", 8)); @@ -351,27 +398,49 @@ public class TestPrefixCompletionQuery extends LuceneTestCase { iw.addDocument(document); - // note we use the completionAnalyzer with the queries (instead of input analyzer) because of non-default settings + // note we use the completionAnalyzer with the queries (instead of input analyzer) because of + // non-default settings DirectoryReader reader = iw.getReader(); SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader); CompletionQuery query = new PrefixCompletionQuery(completionAnalyzer, new Term(field, "fo")); - TopSuggestDocs suggest = indexSearcher.suggest(query, 9, false); //matches all with fo - assertSuggestions(suggest, new Entry("foo the bar", 11), new Entry("the foo bar", 10), new Entry("the fo", 9), new Entry("foo bar", 8), new Entry("foobar", 7)); + TopSuggestDocs suggest = indexSearcher.suggest(query, 9, false); // matches all with fo + assertSuggestions( + suggest, + new Entry("foo the bar", 11), + new Entry("the foo bar", 10), + new Entry("the fo", 9), + new Entry("foo bar", 8), + new Entry("foobar", 7)); // with leading stopword - query = new PrefixCompletionQuery(completionAnalyzer, new Term(field, "the fo")); // becomes "fo*" + query = + new PrefixCompletionQuery(completionAnalyzer, new Term(field, "the fo")); // becomes "fo*" suggest = indexSearcher.suggest(query, 9, false); - assertSuggestions(suggest, new Entry("foo the bar", 11), new Entry("the foo bar", 10), new Entry("the fo", 9), new Entry("foo bar", 8), 
new Entry("foobar", 7)); + assertSuggestions( + suggest, + new Entry("foo the bar", 11), + new Entry("the foo bar", 10), + new Entry("the fo", 9), + new Entry("foo bar", 8), + new Entry("foobar", 7)); // with middle stopword - query = new PrefixCompletionQuery(completionAnalyzer, new Term(field, "foo the bar")); // becomes "foo bar*" + query = + new PrefixCompletionQuery( + completionAnalyzer, new Term(field, "foo the bar")); // becomes "foo bar*" suggest = indexSearcher.suggest(query, 9, false); - assertSuggestions(suggest, new Entry("foo the bar", 11), new Entry("the foo bar", 10), new Entry("foo bar", 8)); // no foobar + assertSuggestions( + suggest, + new Entry("foo the bar", 11), + new Entry("the foo bar", 10), + new Entry("foo bar", 8)); // no foobar // no space query = new PrefixCompletionQuery(completionAnalyzer, new Term(field, "foob")); suggest = indexSearcher.suggest(query, 4, false); // separators, thus only match "foobar" assertSuggestions(suggest, new Entry("foobar", 7)); // surrounding stopwords - query = new PrefixCompletionQuery(completionAnalyzer, new Term(field, "the baz the")); // becomes "baz*" - suggest = indexSearcher.suggest(query, 4, false);// stopwords in query get removed so we match + query = + new PrefixCompletionQuery( + completionAnalyzer, new Term(field, "the baz the")); // becomes "baz*" + suggest = indexSearcher.suggest(query, 4, false); // stopwords in query get removed so we match assertSuggestions(suggest, new Entry("baz the", 12)); reader.close(); iw.close(); @@ -379,7 +448,10 @@ public class TestPrefixCompletionQuery extends LuceneTestCase { public void testGhostField() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); - IndexWriter iw = new IndexWriter(dir, iwcWithSuggestField(analyzer, "suggest_field", "suggest_field2", "suggest_field3")); + IndexWriter iw = + new IndexWriter( + dir, + iwcWithSuggestField(analyzer, "suggest_field", "suggest_field2", "suggest_field3")); Document document = new Document(); document.add(new StringField("id", "0", Field.Store.NO)); @@ -398,8 +470,9 @@ public class TestPrefixCompletionQuery extends LuceneTestCase { iw.deleteDocuments(new Term("id", "0")); // first force merge is OK iw.forceMerge(1); - - // second force merge causes MultiFields to include "suggest_field" in its iteration, yet a null Terms is returned (no documents have + + // second force merge causes MultiFields to include "suggest_field" in its iteration, yet a null + // Terms is returned (no documents have // this field anymore) iw.addDocument(new Document()); iw.forceMerge(1); @@ -407,7 +480,8 @@ public class TestPrefixCompletionQuery extends LuceneTestCase { DirectoryReader reader = DirectoryReader.open(iw); SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader); - PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "app")); + PrefixCompletionQuery query = + new PrefixCompletionQuery(analyzer, new Term("suggest_field", "app")); assertEquals(0, indexSearcher.suggest(query, 3, false).totalHits.value); query = new PrefixCompletionQuery(analyzer, new Term("suggest_field2", "app")); @@ -419,7 +493,8 @@ public class TestPrefixCompletionQuery extends LuceneTestCase { public void testEmptyPrefixContextQuery() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); + RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, 
"suggest_field")); Document document = new Document(); document.add(new ContextSuggestField("suggest_field", "suggestion", 1, "type")); iw.addDocument(document); @@ -430,12 +505,13 @@ public class TestPrefixCompletionQuery extends LuceneTestCase { DirectoryReader reader = iw.getReader(); SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader); - ContextQuery query = new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", ""))); + ContextQuery query = + new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", ""))); query.addContext("type", 1); // Ensure that context queries optimize an empty prefix to a fully empty automaton. - CompletionWeight weight = (CompletionWeight) query.createWeight( - suggestIndexSearcher, ScoreMode.COMPLETE, 1.0F); + CompletionWeight weight = + (CompletionWeight) query.createWeight(suggestIndexSearcher, ScoreMode.COMPLETE, 1.0F); assertEquals(0, weight.getAutomaton().getNumStates()); // Check that there are no suggestions. diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestRegexCompletionQuery.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestRegexCompletionQuery.java index 65b944b9e32..738ce252ff7 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestRegexCompletionQuery.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestRegexCompletionQuery.java @@ -16,6 +16,10 @@ */ package org.apache.lucene.search.suggest.document; +import static org.apache.lucene.search.suggest.document.TestSuggestField.Entry; +import static org.apache.lucene.search.suggest.document.TestSuggestField.assertSuggestions; +import static org.apache.lucene.search.suggest.document.TestSuggestField.iwcWithSuggestField; + import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; @@ -29,10 +33,6 @@ import org.junit.After; import org.junit.Before; import org.junit.Test; -import static org.apache.lucene.search.suggest.document.TestSuggestField.Entry; -import static org.apache.lucene.search.suggest.document.TestSuggestField.assertSuggestions; -import static org.apache.lucene.search.suggest.document.TestSuggestField.iwcWithSuggestField; - public class TestRegexCompletionQuery extends LuceneTestCase { public Directory dir; @@ -49,7 +49,8 @@ public class TestRegexCompletionQuery extends LuceneTestCase { @Test public void testRegexQuery() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); + RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); Document document = new Document(); document.add(new SuggestField("suggest_field", "suggestion", 1)); @@ -67,10 +68,15 @@ public class TestRegexCompletionQuery extends LuceneTestCase { DirectoryReader reader = iw.getReader(); SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader); - RegexCompletionQuery query = new RegexCompletionQuery(new Term("suggest_field", "[a|w|s]s?ugg")); + RegexCompletionQuery query = + new RegexCompletionQuery(new Term("suggest_field", "[a|w|s]s?ugg")); TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 4, false); - assertSuggestions(suggest, new Entry("wsuggestion", 4), new Entry("ssuggestion", 3), - new Entry("asuggestion", 2), new Entry("suggestion", 1)); + 
assertSuggestions( + suggest, + new Entry("wsuggestion", 4), + new Entry("ssuggestion", 3), + new Entry("asuggestion", 2), + new Entry("suggestion", 1)); reader.close(); iw.close(); @@ -79,7 +85,8 @@ public class TestRegexCompletionQuery extends LuceneTestCase { @Test public void testEmptyRegexQuery() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); + RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); Document document = new Document(); document.add(new SuggestField("suggest_field", "suggestion1", 1)); iw.addDocument(document); @@ -102,7 +109,8 @@ public class TestRegexCompletionQuery extends LuceneTestCase { @Test public void testSimpleRegexContextQuery() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); + RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); Document document = new Document(); document.add(new ContextSuggestField("suggest_field", "sduggestion", 5, "type1")); @@ -121,9 +129,11 @@ public class TestRegexCompletionQuery extends LuceneTestCase { DirectoryReader reader = iw.getReader(); SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader); - CompletionQuery query = new RegexCompletionQuery(new Term("suggest_field", "[a|s][d|u|s][u|d|g]")); + CompletionQuery query = + new RegexCompletionQuery(new Term("suggest_field", "[a|s][d|u|s][u|d|g]")); TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 5, false); - assertSuggestions(suggest, + assertSuggestions( + suggest, new Entry("sduggestion", "type1", 5), new Entry("sudggestion", "type2", 4), new Entry("sugdgestion", "type3", 3), @@ -137,7 +147,8 @@ public class TestRegexCompletionQuery extends LuceneTestCase { @Test public void testRegexContextQueryWithBoost() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); + RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); Document document = new Document(); document.add(new ContextSuggestField("suggest_field", "sduggestion", 5, "type1")); @@ -156,13 +167,15 @@ public class TestRegexCompletionQuery extends LuceneTestCase { DirectoryReader reader = iw.getReader(); SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader); - CompletionQuery query = new RegexCompletionQuery(new Term("suggest_field", "[a|s][d|u|s][u|g]")); + CompletionQuery query = + new RegexCompletionQuery(new Term("suggest_field", "[a|s][d|u|s][u|g]")); ContextQuery contextQuery = new ContextQuery(query); contextQuery.addContext("type1", 6); contextQuery.addContext("type3", 7); contextQuery.addAllContexts(); TopSuggestDocs suggest = suggestIndexSearcher.suggest(contextQuery, 5, false); - assertSuggestions(suggest, + assertSuggestions( + suggest, new Entry("sduggestion", "type1", 5 * 6), new Entry("sugdgestion", "type3", 3 * 7), new Entry("suggdestion", "type4", 2), @@ -175,7 +188,8 @@ public class TestRegexCompletionQuery extends LuceneTestCase { @Test public void testEmptyRegexContextQuery() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, 
iwcWithSuggestField(analyzer, "suggest_field")); + RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); Document document = new Document(); document.add(new ContextSuggestField("suggest_field", "suggestion", 1, "type")); iw.addDocument(document); @@ -190,8 +204,8 @@ public class TestRegexCompletionQuery extends LuceneTestCase { query.addContext("type", 1); // Ensure that context queries optimize an empty regex to a fully empty automaton. - CompletionWeight weight = (CompletionWeight) query.createWeight( - suggestIndexSearcher, ScoreMode.COMPLETE, 1.0F); + CompletionWeight weight = + (CompletionWeight) query.createWeight(suggestIndexSearcher, ScoreMode.COMPLETE, 1.0F); assertEquals(0, weight.getAutomaton().getNumStates()); // Check that there are no suggestions. diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestSuggestField.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestSuggestField.java index 5fa5072626c..7ebac3f6cec 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestSuggestField.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestSuggestField.java @@ -16,6 +16,10 @@ */ package org.apache.lucene.search.suggest.document; +import static org.apache.lucene.analysis.BaseTokenStreamTestCase.assertTokenStreamContents; +import static org.hamcrest.core.IsEqual.equalTo; + +import com.carrotsearch.randomizedtesting.generators.RandomPicks; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.util.ArrayList; @@ -28,8 +32,6 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CyclicBarrier; - -import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.TokenFilter; @@ -67,9 +69,6 @@ import org.junit.After; import org.junit.Before; import org.junit.Test; -import static org.apache.lucene.analysis.BaseTokenStreamTestCase.assertTokenStreamContents; -import static org.hamcrest.core.IsEqual.equalTo; - public class TestSuggestField extends LuceneTestCase { public Directory dir; @@ -86,17 +85,23 @@ public class TestSuggestField extends LuceneTestCase { @Test public void testEmptySuggestion() throws Exception { - IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> { - new SuggestField("suggest_field", "", 3); - }); + IllegalArgumentException expected = + expectThrows( + IllegalArgumentException.class, + () -> { + new SuggestField("suggest_field", "", 3); + }); assertTrue(expected.getMessage().contains("value")); } @Test public void testNegativeWeight() throws Exception { - IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> { - new SuggestField("suggest_field", "sugg", -1); - }); + IllegalArgumentException expected = + expectThrows( + IllegalArgumentException.class, + () -> { + new SuggestField("suggest_field", "sugg", -1); + }); assertTrue(expected.getMessage().contains("weight")); } @@ -105,31 +110,42 @@ public class TestSuggestField extends LuceneTestCase { CharsRefBuilder charsRefBuilder = new CharsRefBuilder(); charsRefBuilder.append("sugg"); charsRefBuilder.setCharAt(2, (char) ConcatenateGraphFilter.SEP_LABEL); - IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> { - new SuggestField("name", 
charsRefBuilder.toString(), 1); - }); + IllegalArgumentException expected = + expectThrows( + IllegalArgumentException.class, + () -> { + new SuggestField("name", charsRefBuilder.toString(), 1); + }); assertTrue(expected.getMessage().contains("[0x1f]")); charsRefBuilder.setCharAt(2, (char) CompletionAnalyzer.HOLE_CHARACTER); - expected = expectThrows(IllegalArgumentException.class, () -> { - new SuggestField("name", charsRefBuilder.toString(), 1); - }); + expected = + expectThrows( + IllegalArgumentException.class, + () -> { + new SuggestField("name", charsRefBuilder.toString(), 1); + }); assertTrue(expected.getMessage().contains("[0x1e]")); charsRefBuilder.setCharAt(2, (char) NRTSuggesterBuilder.END_BYTE); - expected = expectThrows(IllegalArgumentException.class, () -> { - new SuggestField("name", charsRefBuilder.toString(), 1); - }); + expected = + expectThrows( + IllegalArgumentException.class, + () -> { + new SuggestField("name", charsRefBuilder.toString(), 1); + }); assertTrue(expected.getMessage().contains("[0x0]")); } @Test public void testEmpty() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); + RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); DirectoryReader reader = iw.getReader(); SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader); - PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "ab")); + PrefixCompletionQuery query = + new PrefixCompletionQuery(analyzer, new Term("suggest_field", "ab")); TopSuggestDocs lookupDocs = suggestIndexSearcher.suggest(query, 3, false); assertThat(lookupDocs.totalHits.value, equalTo(0L)); reader.close(); @@ -150,20 +166,38 @@ public class TestSuggestField extends LuceneTestCase { } BytesRef payload = new BytesRef(byteArrayOutputStream.toByteArray()); TokenStream stream = new PayloadAttrToTypeAttrFilter(suggestField.tokenStream(analyzer, null)); - assertTokenStreamContents(stream, new String[] {"input"}, null, null, new String[]{payload.utf8ToString()}, new int[]{1}, null, null); + assertTokenStreamContents( + stream, + new String[] {"input"}, + null, + null, + new String[] {payload.utf8ToString()}, + new int[] {1}, + null, + null); CompletionAnalyzer completionAnalyzer = new CompletionAnalyzer(analyzer); stream = new PayloadAttrToTypeAttrFilter(suggestField.tokenStream(completionAnalyzer, null)); - assertTokenStreamContents(stream, new String[] {"input"}, null, null, new String[]{payload.utf8ToString()}, new int[]{1}, null, null); + assertTokenStreamContents( + stream, + new String[] {"input"}, + null, + null, + new String[] {payload.utf8ToString()}, + new int[] {1}, + null, + null); } - @Test @Slow + @Test + @Slow public void testDupSuggestFieldValues() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); + RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); final int num = Math.min(1000, atLeast(100)); int[] weights = new int[num]; - for(int i = 0; i < num; i++) { + for (int i = 0; i < num; i++) { Document document = new Document(); weights[i] = random().nextInt(Integer.MAX_VALUE); document.add(new SuggestField("suggest_field", "abc", weights[i])); @@ -182,7 +216,8 @@ public class TestSuggestField extends 
LuceneTestCase { } SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader); - PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "abc")); + PrefixCompletionQuery query = + new PrefixCompletionQuery(analyzer, new Term("suggest_field", "abc")); TopSuggestDocs lookupDocs = suggestIndexSearcher.suggest(query, num, false); assertSuggestions(lookupDocs, expectedEntries); @@ -192,12 +227,13 @@ public class TestSuggestField extends LuceneTestCase { public void testDeduplication() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); + RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); final int num = TestUtil.nextInt(random(), 2, 20); int[] weights = new int[num]; int bestABCWeight = Integer.MIN_VALUE; int bestABDWeight = Integer.MIN_VALUE; - for(int i = 0; i < num; i++) { + for (int i = 0; i < num; i++) { Document document = new Document(); weights[i] = random().nextInt(Integer.MAX_VALUE); String suggestValue; @@ -235,7 +271,8 @@ public class TestSuggestField extends LuceneTestCase { } SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader); - PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "a")); + PrefixCompletionQuery query = + new PrefixCompletionQuery(analyzer, new Term("suggest_field", "a")); TopSuggestDocsCollector collector = new TopSuggestDocsCollector(2, true); suggestIndexSearcher.suggest(query, collector); TopSuggestDocs lookupDocs = collector.get(); @@ -248,10 +285,11 @@ public class TestSuggestField extends LuceneTestCase { @Slow public void testExtremeDeduplication() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); + RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); final int num = atLeast(500); int bestWeight = Integer.MIN_VALUE; - for(int i = 0; i < num; i++) { + for (int i = 0; i < num; i++) { Document document = new Document(); int weight = TestUtil.nextInt(random(), 10, 100); bestWeight = Math.max(weight, bestWeight); @@ -275,7 +313,8 @@ public class TestSuggestField extends LuceneTestCase { expectedEntries[1] = new Entry("abd", 7); SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader); - PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "a")); + PrefixCompletionQuery query = + new PrefixCompletionQuery(analyzer, new Term("suggest_field", "a")); TopSuggestDocsCollector collector = new TopSuggestDocsCollector(2, true); suggestIndexSearcher.suggest(query, collector); TopSuggestDocs lookupDocs = collector.get(); @@ -284,11 +323,11 @@ public class TestSuggestField extends LuceneTestCase { reader.close(); iw.close(); } - + private static String randomSimpleString(int numDigits, int maxLen) { final int len = TestUtil.nextInt(random(), 1, maxLen); final char[] chars = new char[len]; - for(int j=0;j= topN) { System.out.println(" leftover: " + i + ": " + expected.get(i)); } else { @@ -390,22 +430,23 @@ public class TestSuggestField extends LuceneTestCase { } } expected = expected.subList(0, Math.min(topN, expected.size())); - - PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", prefix)); + 
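Aside: the deduplicating collector that testRandom wires up just below is the same mechanism the two deduplication tests above rely on. A minimal sketch, assuming an analyzer and a SuggestIndexSearcher already set up as in those tests; skipDuplicates = true keeps only the best-scoring hit per surface form.

    PrefixCompletionQuery query =
        new PrefixCompletionQuery(analyzer, new Term("suggest_field", "a"));
    TopSuggestDocsCollector collector =
        new TopSuggestDocsCollector(/* num */ 2, /* skipDuplicates */ true);
    suggestIndexSearcher.suggest(query, collector);
    TopSuggestDocs hits = collector.get();
    // The convenience overload used elsewhere in this file does the same
    // wiring internally:
    // TopSuggestDocs hits = suggestIndexSearcher.suggest(query, 2, true);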
+ PrefixCompletionQuery query = + new PrefixCompletionQuery(analyzer, new Term("suggest_field", prefix)); TopSuggestDocsCollector collector = new TopSuggestDocsCollector(topN, dedup); searcher.suggest(query, collector); TopSuggestDocs actual = collector.get(); if (VERBOSE) { System.out.println(" actual:"); SuggestScoreDoc[] suggestScoreDocs = (SuggestScoreDoc[]) actual.scoreDocs; - for(int i=0;i entries = new ArrayList<>(); @@ -629,19 +682,21 @@ public class TestSuggestField extends LuceneTestCase { } DirectoryReader reader = iw.getReader(); SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader); - PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "abc_")); - TopSuggestDocs suggest = indexSearcher.suggest(query, (entries.size() == 0) ? 1 : entries.size(), false); + PrefixCompletionQuery query = + new PrefixCompletionQuery(analyzer, new Term("suggest_field", "abc_")); + TopSuggestDocs suggest = + indexSearcher.suggest(query, (entries.size() == 0) ? 1 : entries.size(), false); assertSuggestions(suggest, entries.toArray(new Entry[entries.size()])); reader.close(); iw.close(); } - @Test public void testReturnedDocID() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); + RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); int num = Math.min(1000, atLeast(10)); for (int i = 0; i < num; i++) { @@ -657,7 +712,8 @@ public class TestSuggestField extends LuceneTestCase { DirectoryReader reader = iw.getReader(); SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader); - PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "abc_")); + PrefixCompletionQuery query = + new PrefixCompletionQuery(analyzer, new Term("suggest_field", "abc_")); TopSuggestDocs suggest = indexSearcher.suggest(query, num, false); assertEquals(num, suggest.totalHits.value); for (SuggestScoreDoc suggestScoreDoc : suggest.scoreLookupDocs()) { @@ -676,14 +732,16 @@ public class TestSuggestField extends LuceneTestCase { @Test public void testScoring() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); + RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); int num = Math.min(1000, atLeast(50)); String[] prefixes = {"abc", "bac", "cab"}; Map mappings = new HashMap<>(); for (int i = 0; i < num; i++) { Document document = new Document(); - String suggest = prefixes[i % 3] + TestUtil.randomSimpleString(random(), 10) + "_" +String.valueOf(i); + String suggest = + prefixes[i % 3] + TestUtil.randomSimpleString(random(), 10) + "_" + String.valueOf(i); int weight = random().nextInt(Integer.MAX_VALUE); document.add(new SuggestField("suggest_field", suggest, weight)); mappings.put(suggest, weight); @@ -697,7 +755,8 @@ public class TestSuggestField extends LuceneTestCase { DirectoryReader reader = iw.getReader(); SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader); for (String prefix : prefixes) { - PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", prefix)); + PrefixCompletionQuery query = + new PrefixCompletionQuery(analyzer, new Term("suggest_field", prefix)); TopSuggestDocs suggest = indexSearcher.suggest(query, 
num, false); assertTrue(suggest.totalHits.value > 0); float topScore = -1; @@ -719,7 +778,8 @@ public class TestSuggestField extends LuceneTestCase { @Test public void testRealisticKeys() throws Exception { Analyzer analyzer = new MockAnalyzer(random()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); + RandomIndexWriter iw = + new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field")); LineFileDocs lineFileDocs = new LineFileDocs(random()); int num = Math.min(1000, atLeast(50)); Map mappings = new HashMap<>(); @@ -748,7 +808,8 @@ public class TestSuggestField extends LuceneTestCase { for (Map.Entry entry : mappings.entrySet()) { String title = entry.getKey(); - PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", title)); + PrefixCompletionQuery query = + new PrefixCompletionQuery(analyzer, new Term("suggest_field", title)); TopSuggestDocs suggest = indexSearcher.suggest(query, mappings.size(), false); assertTrue(suggest.totalHits.value > 0); boolean matched = false; @@ -768,7 +829,11 @@ public class TestSuggestField extends LuceneTestCase { @Test public void testThreads() throws Exception { final Analyzer analyzer = new MockAnalyzer(random()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field_1", "suggest_field_2", "suggest_field_3")); + RandomIndexWriter iw = + new RandomIndexWriter( + random(), + dir, + iwcWithSuggestField(analyzer, "suggest_field_1", "suggest_field_2", "suggest_field_3")); int num = Math.min(1000, atLeast(100)); final String prefix1 = "abc1_"; final String prefix2 = "abc2_"; @@ -797,29 +862,31 @@ public class TestSuggestField extends LuceneTestCase { DirectoryReader reader = iw.getReader(); int numThreads = TestUtil.nextInt(random(), 2, 7); Thread threads[] = new Thread[numThreads]; - final CyclicBarrier startingGun = new CyclicBarrier(numThreads+1); + final CyclicBarrier startingGun = new CyclicBarrier(numThreads + 1); final CopyOnWriteArrayList errors = new CopyOnWriteArrayList<>(); final SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader); for (int i = 0; i < threads.length; i++) { - threads[i] = new Thread() { - @Override - public void run() { - try { - startingGun.await(); - PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field_1", prefix1)); - TopSuggestDocs suggest = indexSearcher.suggest(query, num, false); - assertSuggestions(suggest, entries1); - query = new PrefixCompletionQuery(analyzer, new Term("suggest_field_2", prefix2)); - suggest = indexSearcher.suggest(query, num, false); - assertSuggestions(suggest, entries2); - query = new PrefixCompletionQuery(analyzer, new Term("suggest_field_3", prefix3)); - suggest = indexSearcher.suggest(query, num, false); - assertSuggestions(suggest, entries3); - } catch (Throwable e) { - errors.add(e); - } - } - }; + threads[i] = + new Thread() { + @Override + public void run() { + try { + startingGun.await(); + PrefixCompletionQuery query = + new PrefixCompletionQuery(analyzer, new Term("suggest_field_1", prefix1)); + TopSuggestDocs suggest = indexSearcher.suggest(query, num, false); + assertSuggestions(suggest, entries1); + query = new PrefixCompletionQuery(analyzer, new Term("suggest_field_2", prefix2)); + suggest = indexSearcher.suggest(query, num, false); + assertSuggestions(suggest, entries2); + query = new PrefixCompletionQuery(analyzer, new Term("suggest_field_3", prefix3)); + 
suggest = indexSearcher.suggest(query, num, false); + assertSuggestions(suggest, entries3); + } catch (Throwable e) { + errors.add(e); + } + } + }; threads[i].start(); } @@ -864,7 +931,13 @@ public class TestSuggestField extends LuceneTestCase { SuggestScoreDoc[] suggestScoreDocs = (SuggestScoreDoc[]) actual.scoreDocs; for (int i = 0; i < Math.min(expected.length, suggestScoreDocs.length); i++) { SuggestScoreDoc lookupDoc = suggestScoreDocs[i]; - String msg = "Hit " + i + ": expected: " + toString(expected[i]) + " but actual: " + toString(lookupDoc); + String msg = + "Hit " + + i + + ": expected: " + + toString(expected[i]) + + " but actual: " + + toString(lookupDoc); assertThat(msg, lookupDoc.key.toString(), equalTo(expected[i].output)); assertThat(msg, lookupDoc.score, equalTo(expected[i].value)); assertThat(msg, lookupDoc.context, equalTo(expected[i].context)); @@ -873,11 +946,11 @@ public class TestSuggestField extends LuceneTestCase { } private static String toString(Entry expected) { - return "key:"+ expected.output+" score:"+expected.value+" context:"+expected.context; + return "key:" + expected.output + " score:" + expected.value + " context:" + expected.context; } private static String toString(SuggestScoreDoc actual) { - return "key:"+ actual.key.toString()+" score:"+actual.score+" context:"+actual.context; + return "key:" + actual.key.toString() + " score:" + actual.score + " context:" + actual.context; } static IndexWriterConfig iwcWithSuggestField(Analyzer analyzer, String... suggestFields) { @@ -887,24 +960,25 @@ public class TestSuggestField extends LuceneTestCase { static IndexWriterConfig iwcWithSuggestField(Analyzer analyzer, final Set suggestFields) { IndexWriterConfig iwc = newIndexWriterConfig(random(), analyzer); iwc.setMergePolicy(newLogMergePolicy()); - Codec filterCodec = new Lucene90Codec() { - CompletionPostingsFormat.FSTLoadMode fstLoadMode = - RandomPicks.randomFrom(random(), CompletionPostingsFormat.FSTLoadMode.values()); - PostingsFormat postingsFormat = new Completion84PostingsFormat(fstLoadMode); + Codec filterCodec = + new Lucene90Codec() { + CompletionPostingsFormat.FSTLoadMode fstLoadMode = + RandomPicks.randomFrom(random(), CompletionPostingsFormat.FSTLoadMode.values()); + PostingsFormat postingsFormat = new Completion84PostingsFormat(fstLoadMode); - @Override - public PostingsFormat getPostingsFormatForField(String field) { - if (suggestFields.contains(field)) { - return postingsFormat; - } - return super.getPostingsFormatForField(field); - } - }; + @Override + public PostingsFormat getPostingsFormatForField(String field) { + if (suggestFields.contains(field)) { + return postingsFormat; + } + return super.getPostingsFormatForField(field); + } + }; iwc.setCodec(filterCodec); return iwc; } - public final static class PayloadAttrToTypeAttrFilter extends TokenFilter { + public static final class PayloadAttrToTypeAttrFilter extends TokenFilter { private PayloadAttribute payload = addAttribute(PayloadAttribute.class); private TypeAttribute type = addAttribute(TypeAttribute.class); diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/BytesRefSortersTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/BytesRefSortersTest.java index b26a2ad99ee..c1bde036b3b 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/BytesRefSortersTest.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/BytesRefSortersTest.java @@ -17,7 +17,6 @@ package org.apache.lucene.search.suggest.fst; import 
java.util.Comparator; - import org.apache.lucene.search.suggest.InMemorySorter; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; @@ -43,7 +42,7 @@ public class BytesRefSortersTest extends LuceneTestCase { private void check(BytesRefSorter sorter) throws Exception { for (int i = 0; i < 100; i++) { - byte [] current = new byte [random().nextInt(256)]; + byte[] current = new byte[random().nextInt(256)]; random().nextBytes(current); sorter.add(new BytesRef(current)); } @@ -51,11 +50,13 @@ public class BytesRefSortersTest extends LuceneTestCase { // Create two iterators and check that they're aligned with each other. BytesRefIterator i1 = sorter.iterator(); BytesRefIterator i2 = sorter.iterator(); - + // Verify sorter contract. - expectThrows(IllegalStateException.class, () -> { - sorter.add(new BytesRef(new byte [1])); - }); + expectThrows( + IllegalStateException.class, + () -> { + sorter.add(new BytesRef(new byte[1])); + }); while (true) { BytesRef spare1 = i1.next(); @@ -65,5 +66,5 @@ public class BytesRefSortersTest extends LuceneTestCase { break; } } - } + } } diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/FSTCompletionTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/FSTCompletionTest.java index e0ca96255b8..73e6f8dd7ee 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/FSTCompletionTest.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/FSTCompletionTest.java @@ -18,16 +18,13 @@ package org.apache.lucene.search.suggest.fst; import java.nio.charset.StandardCharsets; import java.util.*; - import org.apache.lucene.search.suggest.*; import org.apache.lucene.search.suggest.Lookup.LookupResult; import org.apache.lucene.search.suggest.fst.FSTCompletion.Completion; import org.apache.lucene.store.Directory; import org.apache.lucene.util.*; -/** - * Unit tests for {@link FSTCompletion}. - */ +/** Unit tests for {@link FSTCompletion}. 
*/ public class FSTCompletionTest extends LuceneTestCase { public static Input tf(String t, int v) { @@ -50,82 +47,76 @@ public class FSTCompletionTest extends LuceneTestCase { } private Input[] evalKeys() { - final Input[] keys = new Input[] { - tf("one", 0), - tf("oneness", 1), - tf("onerous", 1), - tf("onesimus", 1), - tf("two", 1), - tf("twofold", 1), - tf("twonk", 1), - tf("thrive", 1), - tf("through", 1), - tf("threat", 1), - tf("three", 1), - tf("foundation", 1), - tf("fourblah", 1), - tf("fourteen", 1), - tf("four", 0), - tf("fourier", 0), - tf("fourty", 0), - tf("xo", 1), - }; + final Input[] keys = + new Input[] { + tf("one", 0), + tf("oneness", 1), + tf("onerous", 1), + tf("onesimus", 1), + tf("two", 1), + tf("twofold", 1), + tf("twonk", 1), + tf("thrive", 1), + tf("through", 1), + tf("threat", 1), + tf("three", 1), + tf("foundation", 1), + tf("fourblah", 1), + tf("fourteen", 1), + tf("four", 0), + tf("fourier", 0), + tf("fourty", 0), + tf("xo", 1), + }; return keys; } public void testExactMatchHighPriority() throws Exception { - assertMatchEquals(completion.lookup(stringToCharSequence("two"), 1), - "two/1.0"); + assertMatchEquals(completion.lookup(stringToCharSequence("two"), 1), "two/1.0"); } public void testExactMatchLowPriority() throws Exception { - assertMatchEquals(completion.lookup(stringToCharSequence("one"), 2), - "one/0.0", - "oneness/1.0"); + assertMatchEquals(completion.lookup(stringToCharSequence("one"), 2), "one/0.0", "oneness/1.0"); } - + public void testExactMatchReordering() throws Exception { - // Check reordering of exact matches. - assertMatchEquals(completion.lookup(stringToCharSequence("four"), 4), + // Check reordering of exact matches. + assertMatchEquals( + completion.lookup(stringToCharSequence("four"), 4), "four/0.0", "fourblah/1.0", "fourteen/1.0", - "fourier/0.0"); + "fourier/0.0"); } public void testRequestedCount() throws Exception { // 'one' is promoted after collecting two higher ranking results. - assertMatchEquals(completion.lookup(stringToCharSequence("one"), 2), - "one/0.0", - "oneness/1.0"); + assertMatchEquals(completion.lookup(stringToCharSequence("one"), 2), "one/0.0", "oneness/1.0"); - // 'four' is collected in a bucket and then again as an exact match. - assertMatchEquals(completion.lookup(stringToCharSequence("four"), 2), - "four/0.0", - "fourblah/1.0"); + // 'four' is collected in a bucket and then again as an exact match. + assertMatchEquals( + completion.lookup(stringToCharSequence("four"), 2), "four/0.0", "fourblah/1.0"); - // Check reordering of exact matches. - assertMatchEquals(completion.lookup(stringToCharSequence("four"), 4), + // Check reordering of exact matches. + assertMatchEquals( + completion.lookup(stringToCharSequence("four"), 4), "four/0.0", "fourblah/1.0", "fourteen/1.0", "fourier/0.0"); // 'one' is at the top after collecting all alphabetical results. - assertMatchEquals(completionAlphabetical.lookup(stringToCharSequence("one"), 2), - "one/0.0", - "oneness/1.0"); - + assertMatchEquals( + completionAlphabetical.lookup(stringToCharSequence("one"), 2), "one/0.0", "oneness/1.0"); + // 'one' is not promoted after collecting two higher ranking results. FSTCompletion noPromotion = new FSTCompletion(completion.getFST(), true, false); - assertMatchEquals(noPromotion.lookup(stringToCharSequence("one"), 2), - "oneness/1.0", - "onerous/1.0"); + assertMatchEquals( + noPromotion.lookup(stringToCharSequence("one"), 2), "oneness/1.0", "onerous/1.0"); - // 'one' is at the top after collecting all alphabetical results. 
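For reference, every assertion in this hunk follows one pattern: build an FST over bucketed weights, then check that lookup() orders results by bucket with exact matches promoted to the top. A minimal sketch of that pattern using the same classes the test exercises; the keys and bucket values here are illustrative, not the test's data:

    import java.io.IOException;
    import java.util.List;
    import org.apache.lucene.search.suggest.fst.FSTCompletion;
    import org.apache.lucene.search.suggest.fst.FSTCompletion.Completion;
    import org.apache.lucene.search.suggest.fst.FSTCompletionBuilder;
    import org.apache.lucene.util.BytesRef;

    class FstCompletionSketch {
      static List<Completion> demo() throws IOException {
        FSTCompletionBuilder builder = new FSTCompletionBuilder();
        builder.add(new BytesRef("four"), 0);     // bucket 0: lowest rank
        builder.add(new BytesRef("fourblah"), 1); // bucket 1: ranks higher
        FSTCompletion completion = builder.build();
        // "four" is an exact match, so it is promoted ahead of the
        // higher-bucket "fourblah", mirroring testExactMatchReordering.
        return completion.lookup("four", 2);
      }
    }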
- assertMatchEquals(completionAlphabetical.lookup(stringToCharSequence("one"), 2), - "one/0.0", - "oneness/1.0"); + // 'one' is at the top after collecting all alphabetical results. + assertMatchEquals( + completionAlphabetical.lookup(stringToCharSequence("one"), 2), "one/0.0", "oneness/1.0"); } public void testMiss() throws Exception { @@ -137,16 +128,19 @@ public class FSTCompletionTest extends LuceneTestCase { } public void testFullMatchList() throws Exception { - assertMatchEquals(completion.lookup(stringToCharSequence("one"), Integer.MAX_VALUE), - "oneness/1.0", + assertMatchEquals( + completion.lookup(stringToCharSequence("one"), Integer.MAX_VALUE), + "oneness/1.0", "onerous/1.0", - "onesimus/1.0", + "onesimus/1.0", "one/0.0"); } public void testThreeByte() throws Exception { - String key = new String(new byte[] { - (byte) 0xF0, (byte) 0xA4, (byte) 0xAD, (byte) 0xA2}, StandardCharsets.UTF_8); + String key = + new String( + new byte[] {(byte) 0xF0, (byte) 0xA4, (byte) 0xAD, (byte) 0xA2}, + StandardCharsets.UTF_8); FSTCompletionBuilder builder = new FSTCompletionBuilder(); builder.add(new BytesRef(key), 0); @@ -158,7 +152,7 @@ public class FSTCompletionTest extends LuceneTestCase { public void testLargeInputConstantWeights() throws Exception { Directory tempDir = getDirectory(); FSTCompletionLookup lookup = new FSTCompletionLookup(tempDir, "fst", 10, true); - + Random r = random(); List keys = new ArrayList<>(); for (int i = 0; i < 5000; i++) { @@ -169,16 +163,17 @@ public class FSTCompletionTest extends LuceneTestCase { // All the weights were constant, so all returned buckets must be constant, whatever they // are. - Long previous = null; + Long previous = null; for (Input tf : keys) { - Long current = ((Number)lookup.get(TestUtil.bytesToCharSequence(tf.term, random()))).longValue(); + Long current = + ((Number) lookup.get(TestUtil.bytesToCharSequence(tf.term, random()))).longValue(); if (previous != null) { assertEquals(previous, current); } previous = current; } tempDir.close(); - } + } @Slow public void testMultilingualInput() throws Exception { @@ -189,13 +184,21 @@ public class FSTCompletionTest extends LuceneTestCase { lookup.build(new InputArrayIterator(input)); assertEquals(input.size(), lookup.getCount()); for (Input tf : input) { - assertNotNull("Not found: " + tf.term.toString(), lookup.get(TestUtil.bytesToCharSequence(tf.term, random()))); - assertEquals(tf.term.utf8ToString(), lookup.lookup(TestUtil.bytesToCharSequence(tf.term, random()), true, 1).get(0).key.toString()); + assertNotNull( + "Not found: " + tf.term.toString(), + lookup.get(TestUtil.bytesToCharSequence(tf.term, random()))); + assertEquals( + tf.term.utf8ToString(), + lookup + .lookup(TestUtil.bytesToCharSequence(tf.term, random()), true, 1) + .get(0) + .key + .toString()); } List result = lookup.lookup(stringToCharSequence("wit"), true, 5); assertEquals(5, result.size()); - assertTrue(result.get(0).key.toString().equals("wit")); // exact match. + assertTrue(result.get(0).key.toString().equals("wit")); // exact match. assertTrue(result.get(1).key.toString().equals("with")); // highest count. 
tempDir.close(); } @@ -209,7 +212,7 @@ public class FSTCompletionTest extends LuceneTestCase { List freqs = new ArrayList<>(); Random rnd = random(); for (int i = 0; i < 2500 + rnd.nextInt(2500); i++) { - int weight = rnd.nextInt(100); + int weight = rnd.nextInt(100); freqs.add(new Input("" + rnd.nextLong(), weight)); } @@ -234,21 +237,24 @@ public class FSTCompletionTest extends LuceneTestCase { } private void assertMatchEquals(List res, String... expected) { - String [] result = new String [res.size()]; + String[] result = new String[res.size()]; for (int i = 0; i < res.size(); i++) { result[i] = res.get(i).toString(); } if (!Arrays.equals(stripScore(expected), stripScore(result))) { int colLen = Math.max(maxLen(expected), maxLen(result)); - + StringBuilder b = new StringBuilder(); - String format = "%" + colLen + "s " + "%" + colLen + "s\n"; + String format = "%" + colLen + "s " + "%" + colLen + "s\n"; b.append(String.format(Locale.ROOT, format, "Expected", "Result")); for (int i = 0; i < Math.max(result.length, expected.length); i++) { - b.append(String.format(Locale.ROOT, format, - i < expected.length ? expected[i] : "--", - i < result.length ? result[i] : "--")); + b.append( + String.format( + Locale.ROOT, + format, + i < expected.length ? expected[i] : "--", + i < result.length ? result[i] : "--")); } System.err.println(b.toString()); @@ -257,7 +263,7 @@ public class FSTCompletionTest extends LuceneTestCase { } private String[] stripScore(String[] expected) { - String [] result = new String [expected.length]; + String[] result = new String[expected.length]; for (int i = 0; i < result.length; i++) { result[i] = expected[i].replaceAll("\\/[0-9\\.]+", ""); } @@ -266,12 +272,13 @@ public class FSTCompletionTest extends LuceneTestCase { private int maxLen(String[] result) { int len = 0; - for (String s : result) + for (String s : result) { len = Math.max(len, s.length()); + } return len; } - private Directory getDirectory() { + private Directory getDirectory() { return newDirectory(); } } diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/WFSTCompletionTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/WFSTCompletionTest.java index f225ef99c38..68a704b07e3 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/WFSTCompletionTest.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/WFSTCompletionTest.java @@ -17,7 +17,6 @@ package org.apache.lucene.search.suggest.fst; import java.util.*; - import org.apache.lucene.search.suggest.Input; import org.apache.lucene.search.suggest.InputArrayIterator; import org.apache.lucene.search.suggest.Lookup.LookupResult; @@ -27,22 +26,24 @@ import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; public class WFSTCompletionTest extends LuceneTestCase { - + public void testBasic() throws Exception { - Input keys[] = new Input[] { - new Input("foo", 50), - new Input("bar", 10), - new Input("barbar", 12), - new Input("barbara", 6) - }; - + Input keys[] = + new Input[] { + new Input("foo", 50), + new Input("bar", 10), + new Input("barbar", 12), + new Input("barbara", 6) + }; + Random random = new Random(random().nextLong()); Directory tempDir = getDirectory(); WFSTCompletionLookup suggester = new WFSTCompletionLookup(tempDir, "wfst"); suggester.build(new InputArrayIterator(keys)); - + // top N of 2, but only foo is available - List results = suggester.lookup(TestUtil.stringToCharSequence("f", random), false, 2); + List results = + 
suggester.lookup(TestUtil.stringToCharSequence("f", random), false, 2); assertEquals(1, results.size()); assertEquals("foo", results.get(0).key.toString()); assertEquals(50, results.get(0).value, 0.01F); @@ -58,7 +59,7 @@ public class WFSTCompletionTest extends LuceneTestCase { assertEquals(1, results.size()); assertEquals("bar", results.get(0).key.toString()); assertEquals(10, results.get(0).value, 0.01F); - + // top N Of 2 for 'b' results = suggester.lookup(TestUtil.stringToCharSequence("b", random), false, 2); assertEquals(2, results.size()); @@ -66,7 +67,7 @@ public class WFSTCompletionTest extends LuceneTestCase { assertEquals(12, results.get(0).value, 0.01F); assertEquals("bar", results.get(1).key.toString()); assertEquals(10, results.get(1).value, 0.01F); - + // top N of 3 for 'ba' results = suggester.lookup(TestUtil.stringToCharSequence("ba", random), false, 3); assertEquals(3, results.size()); @@ -84,12 +85,13 @@ public class WFSTCompletionTest extends LuceneTestCase { Directory tempDir = getDirectory(); WFSTCompletionLookup suggester = new WFSTCompletionLookup(tempDir, "wfst", true); - suggester.build(new InputArrayIterator(new Input[] { - new Input("x y", 20), - new Input("x", 2), - })); + suggester.build( + new InputArrayIterator( + new Input[] { + new Input("x y", 20), new Input("x", 2), + })); - for(int topN=1;topN<4;topN++) { + for (int topN = 1; topN < 4; topN++) { List results = suggester.lookup("x", false, topN); assertEquals(Math.min(topN, 2), results.size()); @@ -110,12 +112,13 @@ public class WFSTCompletionTest extends LuceneTestCase { Directory tempDir = getDirectory(); WFSTCompletionLookup suggester = new WFSTCompletionLookup(tempDir, "wfst", false); - suggester.build(new InputArrayIterator(new Input[] { - new Input("x y", 20), - new Input("x", 2), - })); + suggester.build( + new InputArrayIterator( + new Input[] { + new Input("x y", 20), new Input("x", 2), + })); - for(int topN=1;topN<4;topN++) { + for (int topN = 1; topN < 4; topN++) { List results = suggester.lookup("x", false, topN); assertEquals(Math.min(topN, 2), results.size()); @@ -130,15 +133,15 @@ public class WFSTCompletionTest extends LuceneTestCase { } tempDir.close(); } - + public void testRandom() throws Exception { int numWords = atLeast(1000); - - final TreeMap slowCompletor = new TreeMap<>(); + + final TreeMap slowCompletor = new TreeMap<>(); final TreeSet allPrefixes = new TreeSet<>(); - + Input[] keys = new Input[numWords]; - + for (int i = 0; i < numWords; i++) { String s; while (true) { @@ -149,13 +152,13 @@ public class WFSTCompletionTest extends LuceneTestCase { break; } } - + for (int j = 1; j < s.length(); j++) { allPrefixes.add(s.substring(0, j)); } // we can probably do Integer.MAX_VALUE here, but why worry. - int weight = random().nextInt(1<<24); - slowCompletor.put(s, (long)weight); + int weight = random().nextInt(1 << 24); + slowCompletor.put(s, (long) weight); keys[i] = new Input(s, weight); } @@ -167,38 +170,41 @@ public class WFSTCompletionTest extends LuceneTestCase { Random random = new Random(random().nextLong()); for (String prefix : allPrefixes) { final int topN = TestUtil.nextInt(random, 1, 10); - List r = suggester.lookup(TestUtil.stringToCharSequence(prefix, random), false, topN); + List r = + suggester.lookup(TestUtil.stringToCharSequence(prefix, random), false, topN); // 2. go thru whole treemap (slowCompletor) and check it's actually the best suggestion final List matches = new ArrayList<>(); // TODO: could be faster... 
but it's slowCompletor for a reason - for (Map.Entry e : slowCompletor.entrySet()) { + for (Map.Entry e : slowCompletor.entrySet()) { if (e.getKey().startsWith(prefix)) { matches.add(new LookupResult(e.getKey(), e.getValue().longValue())); } } assertTrue(matches.size() > 0); - Collections.sort(matches, new Comparator() { - @Override - public int compare(LookupResult left, LookupResult right) { - int cmp = Float.compare(right.value, left.value); - if (cmp == 0) { - return left.compareTo(right); - } else { - return cmp; - } - } - }); + Collections.sort( + matches, + new Comparator() { + @Override + public int compare(LookupResult left, LookupResult right) { + int cmp = Float.compare(right.value, left.value); + if (cmp == 0) { + return left.compareTo(right); + } else { + return cmp; + } + } + }); if (matches.size() > topN) { matches.subList(topN, matches.size()).clear(); } assertEquals(matches.size(), r.size()); - for(int hit=0;hit - * Example usage: +/** + * Base class for testing tokenstream factories. + * + *
<p>
Example usage: + *
<pre class="prettyprint">
  *   Reader reader = new StringReader("Some Text to Analyze");
  *   reader = charFilterFactory("htmlstrip").create(reader);
@@ -40,14 +39,19 @@ import org.apache.lucene.util.Version;
  * </pre>
    */ public abstract class BaseTokenStreamFactoryTestCase extends BaseTokenStreamTestCase { - - private AbstractAnalysisFactory analysisFactory(Class clazz, Version matchVersion, ResourceLoader loader, String... keysAndValues) throws Exception { + + private AbstractAnalysisFactory analysisFactory( + Class clazz, + Version matchVersion, + ResourceLoader loader, + String... keysAndValues) + throws Exception { if (keysAndValues.length % 2 == 1) { throw new IllegalArgumentException("invalid keysAndValues map"); } - Map args = new HashMap<>(); + Map args = new HashMap<>(); for (int i = 0; i < keysAndValues.length; i += 2) { - String previous = args.put(keysAndValues[i], keysAndValues[i+1]); + String previous = args.put(keysAndValues[i], keysAndValues[i + 1]); assertNull("duplicate values for key: " + keysAndValues[i], previous); } if (matchVersion != null) { @@ -71,72 +75,88 @@ public abstract class BaseTokenStreamFactoryTestCase extends BaseTokenStreamTest return factory; } - /** + /** * Returns a fully initialized TokenizerFactory with the specified name and key-value arguments. - * {@link ClasspathResourceLoader} is used for loading resources, so any required ones should - * be on the test classpath. + * {@link ClasspathResourceLoader} is used for loading resources, so any required ones should be + * on the test classpath. */ - protected TokenizerFactory tokenizerFactory(String name, String... keysAndValues) throws Exception { + protected TokenizerFactory tokenizerFactory(String name, String... keysAndValues) + throws Exception { return tokenizerFactory(name, Version.LATEST, keysAndValues); } - /** + /** * Returns a fully initialized TokenizerFactory with the specified name and key-value arguments. - * {@link ClasspathResourceLoader} is used for loading resources, so any required ones should - * be on the test classpath. + * {@link ClasspathResourceLoader} is used for loading resources, so any required ones should be + * on the test classpath. */ - protected TokenizerFactory tokenizerFactory(String name, Version version, String... keysAndValues) throws Exception { + protected TokenizerFactory tokenizerFactory(String name, Version version, String... keysAndValues) + throws Exception { return tokenizerFactory(name, version, new ClasspathResourceLoader(getClass()), keysAndValues); } - - /** - * Returns a fully initialized TokenizerFactory with the specified name, version, resource loader, + + /** + * Returns a fully initialized TokenizerFactory with the specified name, version, resource loader, * and key-value arguments. */ - protected TokenizerFactory tokenizerFactory(String name, Version matchVersion, ResourceLoader loader, String... keysAndValues) throws Exception { - return (TokenizerFactory) analysisFactory(TokenizerFactory.lookupClass(name), matchVersion, loader, keysAndValues); + protected TokenizerFactory tokenizerFactory( + String name, Version matchVersion, ResourceLoader loader, String... keysAndValues) + throws Exception { + return (TokenizerFactory) + analysisFactory(TokenizerFactory.lookupClass(name), matchVersion, loader, keysAndValues); } - /** + /** * Returns a fully initialized TokenFilterFactory with the specified name and key-value arguments. - * {@link ClasspathResourceLoader} is used for loading resources, so any required ones should - * be on the test classpath. + * {@link ClasspathResourceLoader} is used for loading resources, so any required ones should be + * on the test classpath. 
*/ - protected TokenFilterFactory tokenFilterFactory(String name, Version version, String... keysAndValues) throws Exception { - return tokenFilterFactory(name, version, new ClasspathResourceLoader(getClass()), keysAndValues); + protected TokenFilterFactory tokenFilterFactory( + String name, Version version, String... keysAndValues) throws Exception { + return tokenFilterFactory( + name, version, new ClasspathResourceLoader(getClass()), keysAndValues); } - /** + /** * Returns a fully initialized TokenFilterFactory with the specified name and key-value arguments. - * {@link ClasspathResourceLoader} is used for loading resources, so any required ones should - * be on the test classpath. + * {@link ClasspathResourceLoader} is used for loading resources, so any required ones should be + * on the test classpath. */ - protected TokenFilterFactory tokenFilterFactory(String name, String... keysAndValues) throws Exception { + protected TokenFilterFactory tokenFilterFactory(String name, String... keysAndValues) + throws Exception { return tokenFilterFactory(name, Version.LATEST, keysAndValues); } - - /** - * Returns a fully initialized TokenFilterFactory with the specified name, version, resource loader, - * and key-value arguments. + + /** + * Returns a fully initialized TokenFilterFactory with the specified name, version, resource + * loader, and key-value arguments. */ - protected TokenFilterFactory tokenFilterFactory(String name, Version matchVersion, ResourceLoader loader, String... keysAndValues) throws Exception { - return (TokenFilterFactory) analysisFactory(TokenFilterFactory.lookupClass(name), matchVersion, loader, keysAndValues); + protected TokenFilterFactory tokenFilterFactory( + String name, Version matchVersion, ResourceLoader loader, String... keysAndValues) + throws Exception { + return (TokenFilterFactory) + analysisFactory(TokenFilterFactory.lookupClass(name), matchVersion, loader, keysAndValues); } - - /** + + /** * Returns a fully initialized CharFilterFactory with the specified name and key-value arguments. - * {@link ClasspathResourceLoader} is used for loading resources, so any required ones should - * be on the test classpath. + * {@link ClasspathResourceLoader} is used for loading resources, so any required ones should be + * on the test classpath. */ - protected CharFilterFactory charFilterFactory(String name, String... keysAndValues) throws Exception { - return charFilterFactory(name, Version.LATEST, new ClasspathResourceLoader(getClass()), keysAndValues); + protected CharFilterFactory charFilterFactory(String name, String... keysAndValues) + throws Exception { + return charFilterFactory( + name, Version.LATEST, new ClasspathResourceLoader(getClass()), keysAndValues); } - - /** - * Returns a fully initialized CharFilterFactory with the specified name, version, resource loader, - * and key-value arguments. + + /** + * Returns a fully initialized CharFilterFactory with the specified name, version, resource + * loader, and key-value arguments. */ - protected CharFilterFactory charFilterFactory(String name, Version matchVersion, ResourceLoader loader, String... keysAndValues) throws Exception { - return (CharFilterFactory) analysisFactory(CharFilterFactory.lookupClass(name), matchVersion, loader, keysAndValues); + protected CharFilterFactory charFilterFactory( + String name, Version matchVersion, ResourceLoader loader, String... 
keysAndValues) + throws Exception { + return (CharFilterFactory) + analysisFactory(CharFilterFactory.lookupClass(name), matchVersion, loader, keysAndValues); } } diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java index fb9004a2340..0485bd39cb8 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java @@ -27,7 +27,6 @@ import java.nio.file.Files; import java.nio.file.Paths; import java.util.*; import java.util.concurrent.CountDownLatch; - import org.apache.lucene.analysis.tokenattributes.*; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; @@ -54,11 +53,10 @@ import org.apache.lucene.util.fst.Util; /** * Base class for all Lucene unit tests that use TokenStreams. - *
<p>
- * When writing unit tests for analysis components, it's highly recommended - * to use the helper methods here (especially in conjunction with {@link MockAnalyzer} or - * {@link MockTokenizer}), as they contain many assertions and checks to - * catch bugs. + * + *
<p>
    When writing unit tests for analysis components, it's highly recommended to use the helper + * methods here (especially in conjunction with {@link MockAnalyzer} or {@link MockTokenizer}), as + * they contain many assertions and checks to catch bugs. * * @see MockAnalyzer * @see MockTokenizer @@ -67,18 +65,19 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase { // some helpers to test Analyzers and TokenStreams: /** - * Attribute that records if it was cleared or not. This is used - * for testing that clearAttributes() was called correctly. + * Attribute that records if it was cleared or not. This is used for testing that + * clearAttributes() was called correctly. */ public static interface CheckClearAttributesAttribute extends Attribute { boolean getAndResetClearCalled(); } /** - * Attribute that records if it was cleared or not. This is used - * for testing that clearAttributes() was called correctly. + * Attribute that records if it was cleared or not. This is used for testing that + * clearAttributes() was called correctly. */ - public static final class CheckClearAttributesAttributeImpl extends AttributeImpl implements CheckClearAttributesAttribute { + public static final class CheckClearAttributesAttributeImpl extends AttributeImpl + implements CheckClearAttributesAttribute { private boolean clearCalled = false; @Override @@ -97,10 +96,8 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase { @Override public boolean equals(Object other) { - return ( - other instanceof CheckClearAttributesAttributeImpl && - ((CheckClearAttributesAttributeImpl) other).clearCalled == this.clearCalled - ); + return (other instanceof CheckClearAttributesAttributeImpl + && ((CheckClearAttributesAttributeImpl) other).clearCalled == this.clearCalled); } @Override @@ -123,11 +120,24 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase { // - graph offsets are correct (all tokens leaving from // pos X have the same startOffset; all tokens // arriving to pos Y have the same endOffset) - public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[], String types[], int posIncrements[], - int posLengths[], Integer finalOffset, Integer finalPosInc, boolean[] keywordAtts, - boolean graphOffsetsAreCorrect, byte[][] payloads, int[] flags) throws IOException { + public static void assertTokenStreamContents( + TokenStream ts, + String[] output, + int startOffsets[], + int endOffsets[], + String types[], + int posIncrements[], + int posLengths[], + Integer finalOffset, + Integer finalPosInc, + boolean[] keywordAtts, + boolean graphOffsetsAreCorrect, + byte[][] payloads, + int[] flags) + throws IOException { assertNotNull(output); - CheckClearAttributesAttribute checkClearAtt = ts.addAttribute(CheckClearAttributesAttribute.class); + CheckClearAttributesAttribute checkClearAtt = + ts.addAttribute(CheckClearAttributesAttribute.class); CharTermAttribute termAtt = null; if (output.length > 0) { @@ -149,7 +159,8 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase { PositionIncrementAttribute posIncrAtt = null; if (posIncrements != null || finalPosInc != null) { - assertTrue("has no PositionIncrementAttribute", ts.hasAttribute(PositionIncrementAttribute.class)); + assertTrue( + "has no PositionIncrementAttribute", ts.hasAttribute(PositionIncrementAttribute.class)); posIncrAtt = ts.getAttribute(PositionIncrementAttribute.class); } @@ -178,10 +189,11 @@ public abstract class 
BaseTokenStreamTestCase extends LuceneTestCase { } // Maps position to the start/end offset: - final Map posToStartOffset = new HashMap<>(); - final Map posToEndOffset = new HashMap<>(); + final Map posToStartOffset = new HashMap<>(); + final Map posToEndOffset = new HashMap<>(); - // TODO: would be nice to be able to assert silly duplicated tokens are not created, but a number of cases do this "legitimately": LUCENE-7622 + // TODO: would be nice to be able to assert silly duplicated tokens are not created, but a + // number of cases do this "legitimately": LUCENE-7622 ts.reset(); int pos = -1; @@ -190,21 +202,25 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase { // extra safety to enforce, that the state is not preserved and also assign bogus values ts.clearAttributes(); termAtt.setEmpty().append("bogusTerm"); - if (offsetAtt != null) offsetAtt.setOffset(14584724,24683243); + if (offsetAtt != null) offsetAtt.setOffset(14584724, 24683243); if (typeAtt != null) typeAtt.setType("bogusType"); if (posIncrAtt != null) posIncrAtt.setPositionIncrement(45987657); if (posLengthAtt != null) posLengthAtt.setPositionLength(45987653); - if (keywordAtt != null) keywordAtt.setKeyword((i&1) == 0); - if (payloadAtt != null) payloadAtt.setPayload(new BytesRef(new byte[] { 0x00, -0x21, 0x12, -0x43, 0x24 })); + if (keywordAtt != null) keywordAtt.setKeyword((i & 1) == 0); + if (payloadAtt != null) + payloadAtt.setPayload(new BytesRef(new byte[] {0x00, -0x21, 0x12, -0x43, 0x24})); if (flagsAtt != null) flagsAtt.setFlags(~0); // all 1's checkClearAtt.getAndResetClearCalled(); // reset it, because we called clearAttribute() before - assertTrue("token "+i+" does not exist", ts.incrementToken()); - assertTrue("clearAttributes() was not called correctly in TokenStream chain at token " + i, checkClearAtt.getAndResetClearCalled()); + assertTrue("token " + i + " does not exist", ts.incrementToken()); + assertTrue( + "clearAttributes() was not called correctly in TokenStream chain at token " + i, + checkClearAtt.getAndResetClearCalled()); - assertEquals("term "+i, output[i], termAtt.toString()); + assertEquals("term " + i, output[i], termAtt.toString()); if (startOffsets != null) { - assertEquals("startOffset " + i + " term=" + termAtt, startOffsets[i], offsetAtt.startOffset()); + assertEquals( + "startOffset " + i + " term=" + termAtt, startOffsets[i], offsetAtt.startOffset()); } if (endOffsets != null) { assertEquals("endOffset " + i + " term=" + termAtt, endOffsets[i], offsetAtt.endOffset()); @@ -213,13 +229,18 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase { assertEquals("type " + i + " term=" + termAtt, types[i], typeAtt.type()); } if (posIncrements != null) { - assertEquals("posIncrement " + i + " term=" + termAtt, posIncrements[i], posIncrAtt.getPositionIncrement()); + assertEquals( + "posIncrement " + i + " term=" + termAtt, + posIncrements[i], + posIncrAtt.getPositionIncrement()); } if (posLengths != null) { - assertEquals("posLength " + i + " term=" + termAtt, posLengths[i], posLengthAtt.getPositionLength()); + assertEquals( + "posLength " + i + " term=" + termAtt, posLengths[i], posLengthAtt.getPositionLength()); } if (keywordAtts != null) { - assertEquals("keywordAtt " + i + " term=" + termAtt, keywordAtts[i], keywordAtt.isKeyword()); + assertEquals( + "keywordAtt " + i + " term=" + termAtt, keywordAtts[i], keywordAtt.isKeyword()); } if (flagsAtt != null) { assertEquals("flagsAtt " + i + " term=" + termAtt, flags[i], flagsAtt.getFlags()); @@ -239,19 +260,41 @@ 
public abstract class BaseTokenStreamTestCase extends LuceneTestCase { } } if (posLengthAtt != null) { - assertTrue("posLength must be >= 1; got: " + posLengthAtt.getPositionLength(), posLengthAtt.getPositionLength() >= 1); + assertTrue( + "posLength must be >= 1; got: " + posLengthAtt.getPositionLength(), + posLengthAtt.getPositionLength() >= 1); } // we can enforce some basic things about a few attributes even if the caller doesn't check: if (offsetAtt != null) { final int startOffset = offsetAtt.startOffset(); final int endOffset = offsetAtt.endOffset(); if (finalOffset != null) { - assertTrue("startOffset (= " + startOffset + ") must be <= finalOffset (= " + finalOffset + ") term=" + termAtt, startOffset <= finalOffset.intValue()); - assertTrue("endOffset must be <= finalOffset: got endOffset=" + endOffset + " vs finalOffset=" + finalOffset.intValue() + " term=" + termAtt, - endOffset <= finalOffset.intValue()); + assertTrue( + "startOffset (= " + + startOffset + + ") must be <= finalOffset (= " + + finalOffset + + ") term=" + + termAtt, + startOffset <= finalOffset.intValue()); + assertTrue( + "endOffset must be <= finalOffset: got endOffset=" + + endOffset + + " vs finalOffset=" + + finalOffset.intValue() + + " term=" + + termAtt, + endOffset <= finalOffset.intValue()); } - assertTrue("offsets must not go backwards startOffset=" + startOffset + " is < lastStartOffset=" + lastStartOffset + " term=" + termAtt, offsetAtt.startOffset() >= lastStartOffset); + assertTrue( + "offsets must not go backwards startOffset=" + + startOffset + + " is < lastStartOffset=" + + lastStartOffset + + " term=" + + termAtt, + offsetAtt.startOffset() >= lastStartOffset); lastStartOffset = offsetAtt.startOffset(); if (graphOffsetsAreCorrect && posLengthAtt != null && posIncrAtt != null) { @@ -267,12 +310,21 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase { if (!posToStartOffset.containsKey(pos)) { // First time we've seen a token leaving from this position: posToStartOffset.put(pos, startOffset); - //System.out.println(" + s " + pos + " -> " + startOffset); + // System.out.println(" + s " + pos + " -> " + startOffset); } else { // We've seen a token leaving from this position // before; verify the startOffset is the same: - //System.out.println(" + vs " + pos + " -> " + startOffset); - assertEquals(i + " inconsistent startOffset: pos=" + pos + " posLen=" + posLength + " token=" + termAtt, posToStartOffset.get(pos).intValue(), startOffset); + // System.out.println(" + vs " + pos + " -> " + startOffset); + assertEquals( + i + + " inconsistent startOffset: pos=" + + pos + + " posLen=" + + posLength + + " token=" + + termAtt, + posToStartOffset.get(pos).intValue(), + startOffset); } final int endPos = pos + posLength; @@ -280,36 +332,53 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase { if (!posToEndOffset.containsKey(endPos)) { // First time we've seen a token arriving to this position: posToEndOffset.put(endPos, endOffset); - //System.out.println(" + e " + endPos + " -> " + endOffset); + // System.out.println(" + e " + endPos + " -> " + endOffset); } else { // We've seen a token arriving to this position // before; verify the endOffset is the same: - //System.out.println(" + ve " + endPos + " -> " + endOffset); - assertEquals("inconsistent endOffset " + i + " pos=" + pos + " posLen=" + posLength + " token=" + termAtt, posToEndOffset.get(endPos).intValue(), endOffset); + // System.out.println(" + ve " + endPos + " -> " + endOffset); + assertEquals( + "inconsistent 
endOffset " + + i + + " pos=" + + pos + + " posLen=" + + posLength + + " token=" + + termAtt, + posToEndOffset.get(endPos).intValue(), + endOffset); } } } } if (ts.incrementToken()) { - fail("TokenStream has more tokens than expected (expected count=" + output.length + "); extra token=" + ts.getAttribute(CharTermAttribute.class)); + fail( + "TokenStream has more tokens than expected (expected count=" + + output.length + + "); extra token=" + + ts.getAttribute(CharTermAttribute.class)); } // repeat our extra safety checks for end() ts.clearAttributes(); if (termAtt != null) termAtt.setEmpty().append("bogusTerm"); - if (offsetAtt != null) offsetAtt.setOffset(14584724,24683243); + if (offsetAtt != null) offsetAtt.setOffset(14584724, 24683243); if (typeAtt != null) typeAtt.setType("bogusType"); if (posIncrAtt != null) posIncrAtt.setPositionIncrement(45987657); if (posLengthAtt != null) posLengthAtt.setPositionLength(45987653); if (keywordAtt != null) keywordAtt.setKeyword(true); - if (payloadAtt != null) payloadAtt.setPayload(new BytesRef(new byte[] { 0x00, -0x21, 0x12, -0x43, 0x24 })); + if (payloadAtt != null) + payloadAtt.setPayload(new BytesRef(new byte[] {0x00, -0x21, 0x12, -0x43, 0x24})); if (flagsAtt != null) flagsAtt.setFlags(~0); // all 1's checkClearAtt.getAndResetClearCalled(); // reset it, because we called clearAttribute() before ts.end(); - assertTrue("super.end()/clearAttributes() was not called correctly in end()", checkClearAtt.getAndResetClearCalled()); + assertTrue( + "super.end()/clearAttributes() was not called correctly in end()", + checkClearAtt.getAndResetClearCalled()); if (finalOffset != null) { assertEquals("finalOffset", finalOffset.intValue(), offsetAtt.endOffset()); @@ -324,118 +393,342 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase { ts.close(); } - public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[], String types[], int posIncrements[], - int posLengths[], Integer finalOffset, boolean[] keywordAtts, - boolean graphOffsetsAreCorrect) throws IOException { - assertTokenStreamContents(ts, output, startOffsets, endOffsets, types, posIncrements, posLengths, finalOffset, null, keywordAtts, graphOffsetsAreCorrect, null, null); - } - public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[], String types[], int posIncrements[], - int posLengths[], Integer finalOffset, Integer finalPosInc, boolean[] keywordAtts, - boolean graphOffsetsAreCorrect, byte[][] payloads) throws IOException { - assertTokenStreamContents(ts, output, startOffsets, endOffsets, types, posIncrements, posLengths, finalOffset, finalPosInc, keywordAtts, graphOffsetsAreCorrect, payloads, null); + public static void assertTokenStreamContents( + TokenStream ts, + String[] output, + int startOffsets[], + int endOffsets[], + String types[], + int posIncrements[], + int posLengths[], + Integer finalOffset, + boolean[] keywordAtts, + boolean graphOffsetsAreCorrect) + throws IOException { + assertTokenStreamContents( + ts, + output, + startOffsets, + endOffsets, + types, + posIncrements, + posLengths, + finalOffset, + null, + keywordAtts, + graphOffsetsAreCorrect, + null, + null); } - public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[], String types[], int posIncrements[], int posLengths[], Integer finalOffset, boolean graphOffsetsAreCorrect) throws IOException { - assertTokenStreamContents(ts, output, 
startOffsets, endOffsets, types, posIncrements, posLengths, finalOffset, null, graphOffsetsAreCorrect); + public static void assertTokenStreamContents( + TokenStream ts, + String[] output, + int startOffsets[], + int endOffsets[], + String types[], + int posIncrements[], + int posLengths[], + Integer finalOffset, + Integer finalPosInc, + boolean[] keywordAtts, + boolean graphOffsetsAreCorrect, + byte[][] payloads) + throws IOException { + assertTokenStreamContents( + ts, + output, + startOffsets, + endOffsets, + types, + posIncrements, + posLengths, + finalOffset, + finalPosInc, + keywordAtts, + graphOffsetsAreCorrect, + payloads, + null); } - public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[], String types[], int posIncrements[], int posLengths[], Integer finalOffset) throws IOException { - assertTokenStreamContents(ts, output, startOffsets, endOffsets, types, posIncrements, posLengths, finalOffset, true); + public static void assertTokenStreamContents( + TokenStream ts, + String[] output, + int startOffsets[], + int endOffsets[], + String types[], + int posIncrements[], + int posLengths[], + Integer finalOffset, + boolean graphOffsetsAreCorrect) + throws IOException { + assertTokenStreamContents( + ts, + output, + startOffsets, + endOffsets, + types, + posIncrements, + posLengths, + finalOffset, + null, + graphOffsetsAreCorrect); } - public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[], String types[], int posIncrements[], Integer finalOffset) throws IOException { - assertTokenStreamContents(ts, output, startOffsets, endOffsets, types, posIncrements, null, finalOffset); + public static void assertTokenStreamContents( + TokenStream ts, + String[] output, + int startOffsets[], + int endOffsets[], + String types[], + int posIncrements[], + int posLengths[], + Integer finalOffset) + throws IOException { + assertTokenStreamContents( + ts, output, startOffsets, endOffsets, types, posIncrements, posLengths, finalOffset, true); } - public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[], String types[], int posIncrements[]) throws IOException { - assertTokenStreamContents(ts, output, startOffsets, endOffsets, types, posIncrements, null, null); + public static void assertTokenStreamContents( + TokenStream ts, + String[] output, + int startOffsets[], + int endOffsets[], + String types[], + int posIncrements[], + Integer finalOffset) + throws IOException { + assertTokenStreamContents( + ts, output, startOffsets, endOffsets, types, posIncrements, null, finalOffset); } - public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[], String types[], int posIncrements[], int[] posLengths) throws IOException { - assertTokenStreamContents(ts, output, startOffsets, endOffsets, types, posIncrements, posLengths, null); + public static void assertTokenStreamContents( + TokenStream ts, + String[] output, + int startOffsets[], + int endOffsets[], + String types[], + int posIncrements[]) + throws IOException { + assertTokenStreamContents( + ts, output, startOffsets, endOffsets, types, posIncrements, null, null); + } + + public static void assertTokenStreamContents( + TokenStream ts, + String[] output, + int startOffsets[], + int endOffsets[], + String types[], + int posIncrements[], + int[] posLengths) + throws IOException { + assertTokenStreamContents( + ts, output, 
startOffsets, endOffsets, types, posIncrements, posLengths, null); } public static void assertTokenStreamContents(TokenStream ts, String[] output) throws IOException { assertTokenStreamContents(ts, output, null, null, null, null, null, null); } - public static void assertTokenStreamContents(TokenStream ts, String[] output, String[] types) throws IOException { + public static void assertTokenStreamContents(TokenStream ts, String[] output, String[] types) + throws IOException { assertTokenStreamContents(ts, output, null, null, types, null, null, null); } - public static void assertTokenStreamContents(TokenStream ts, String[] output, int[] posIncrements) throws IOException { + public static void assertTokenStreamContents(TokenStream ts, String[] output, int[] posIncrements) + throws IOException { assertTokenStreamContents(ts, output, null, null, null, posIncrements, null, null); } - public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[]) throws IOException { + public static void assertTokenStreamContents( + TokenStream ts, String[] output, int startOffsets[], int endOffsets[]) throws IOException { assertTokenStreamContents(ts, output, startOffsets, endOffsets, null, null, null, null); } - public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[], Integer finalOffset) throws IOException { + public static void assertTokenStreamContents( + TokenStream ts, String[] output, int startOffsets[], int endOffsets[], Integer finalOffset) + throws IOException { assertTokenStreamContents(ts, output, startOffsets, endOffsets, null, null, null, finalOffset); } - public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[], int[] posIncrements) throws IOException { - assertTokenStreamContents(ts, output, startOffsets, endOffsets, null, posIncrements, null, null); + public static void assertTokenStreamContents( + TokenStream ts, String[] output, int startOffsets[], int endOffsets[], int[] posIncrements) + throws IOException { + assertTokenStreamContents( + ts, output, startOffsets, endOffsets, null, posIncrements, null, null); } - public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[], int[] posIncrements, Integer finalOffset) throws IOException { - assertTokenStreamContents(ts, output, startOffsets, endOffsets, null, posIncrements, null, finalOffset); + public static void assertTokenStreamContents( + TokenStream ts, + String[] output, + int startOffsets[], + int endOffsets[], + int[] posIncrements, + Integer finalOffset) + throws IOException { + assertTokenStreamContents( + ts, output, startOffsets, endOffsets, null, posIncrements, null, finalOffset); } - public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[], int[] posIncrements, int[] posLengths, Integer finalOffset) throws IOException { - assertTokenStreamContents(ts, output, startOffsets, endOffsets, null, posIncrements, posLengths, finalOffset); + public static void assertTokenStreamContents( + TokenStream ts, + String[] output, + int startOffsets[], + int endOffsets[], + int[] posIncrements, + int[] posLengths, + Integer finalOffset) + throws IOException { + assertTokenStreamContents( + ts, output, startOffsets, endOffsets, null, posIncrements, posLengths, finalOffset); } - public static void assertAnalyzesTo(Analyzer a, String input, String[] output, int 
startOffsets[], int endOffsets[], String types[], int posIncrements[]) throws IOException { - assertTokenStreamContents(a.tokenStream("dummy", input), output, startOffsets, endOffsets, types, posIncrements, null, input.length()); + public static void assertAnalyzesTo( + Analyzer a, + String input, + String[] output, + int startOffsets[], + int endOffsets[], + String types[], + int posIncrements[]) + throws IOException { + assertTokenStreamContents( + a.tokenStream("dummy", input), + output, + startOffsets, + endOffsets, + types, + posIncrements, + null, + input.length()); checkResetException(a, input); checkAnalysisConsistency(random(), a, true, input); } - public static void assertAnalyzesTo(Analyzer a, String input, String[] output, int startOffsets[], int endOffsets[], String types[], int posIncrements[], int posLengths[]) throws IOException { - assertTokenStreamContents(a.tokenStream("dummy", input), output, startOffsets, endOffsets, types, posIncrements, posLengths, input.length()); + public static void assertAnalyzesTo( + Analyzer a, + String input, + String[] output, + int startOffsets[], + int endOffsets[], + String types[], + int posIncrements[], + int posLengths[]) + throws IOException { + assertTokenStreamContents( + a.tokenStream("dummy", input), + output, + startOffsets, + endOffsets, + types, + posIncrements, + posLengths, + input.length()); checkResetException(a, input); checkAnalysisConsistency(random(), a, true, input); } - public static void assertAnalyzesTo(Analyzer a, String input, String[] output, int startOffsets[], int endOffsets[], String types[], int posIncrements[], int posLengths[], boolean graphOffsetsAreCorrect) throws IOException { - assertTokenStreamContents(a.tokenStream("dummy", input), output, startOffsets, endOffsets, types, posIncrements, posLengths, input.length(), graphOffsetsAreCorrect); + public static void assertAnalyzesTo( + Analyzer a, + String input, + String[] output, + int startOffsets[], + int endOffsets[], + String types[], + int posIncrements[], + int posLengths[], + boolean graphOffsetsAreCorrect) + throws IOException { + assertTokenStreamContents( + a.tokenStream("dummy", input), + output, + startOffsets, + endOffsets, + types, + posIncrements, + posLengths, + input.length(), + graphOffsetsAreCorrect); checkResetException(a, input); checkAnalysisConsistency(random(), a, true, input, graphOffsetsAreCorrect); } - public static void assertAnalyzesTo(Analyzer a, String input, String[] output, int startOffsets[], int endOffsets[], String types[], int posIncrements[], int posLengths[], boolean graphOffsetsAreCorrect, byte[][] payloads) throws IOException { - assertTokenStreamContents(a.tokenStream("dummy", input), output, startOffsets, endOffsets, types, posIncrements, posLengths, input.length(), null, null, graphOffsetsAreCorrect, payloads); + public static void assertAnalyzesTo( + Analyzer a, + String input, + String[] output, + int startOffsets[], + int endOffsets[], + String types[], + int posIncrements[], + int posLengths[], + boolean graphOffsetsAreCorrect, + byte[][] payloads) + throws IOException { + assertTokenStreamContents( + a.tokenStream("dummy", input), + output, + startOffsets, + endOffsets, + types, + posIncrements, + posLengths, + input.length(), + null, + null, + graphOffsetsAreCorrect, + payloads); checkResetException(a, input); checkAnalysisConsistency(random(), a, true, input, graphOffsetsAreCorrect); } - public static void assertAnalyzesTo(Analyzer a, String input, String[] output) throws IOException { + public static void 
assertAnalyzesTo(Analyzer a, String input, String[] output) + throws IOException { assertAnalyzesTo(a, input, output, null, null, null, null, null); } - public static void assertAnalyzesTo(Analyzer a, String input, String[] output, String[] types) throws IOException { + public static void assertAnalyzesTo(Analyzer a, String input, String[] output, String[] types) + throws IOException { assertAnalyzesTo(a, input, output, null, null, types, null, null); } - public static void assertAnalyzesTo(Analyzer a, String input, String[] output, int[] posIncrements) throws IOException { + public static void assertAnalyzesTo( + Analyzer a, String input, String[] output, int[] posIncrements) throws IOException { assertAnalyzesTo(a, input, output, null, null, null, posIncrements, null); } - public static void assertAnalyzesToPositions(Analyzer a, String input, String[] output, int[] posIncrements, int[] posLengths) throws IOException { + public static void assertAnalyzesToPositions( + Analyzer a, String input, String[] output, int[] posIncrements, int[] posLengths) + throws IOException { assertAnalyzesTo(a, input, output, null, null, null, posIncrements, posLengths); } - public static void assertAnalyzesToPositions(Analyzer a, String input, String[] output, String[] types, int[] posIncrements, int[] posLengths) throws IOException { + public static void assertAnalyzesToPositions( + Analyzer a, + String input, + String[] output, + String[] types, + int[] posIncrements, + int[] posLengths) + throws IOException { assertAnalyzesTo(a, input, output, null, null, types, posIncrements, posLengths); } - public static void assertAnalyzesTo(Analyzer a, String input, String[] output, int startOffsets[], int endOffsets[]) throws IOException { + public static void assertAnalyzesTo( + Analyzer a, String input, String[] output, int startOffsets[], int endOffsets[]) + throws IOException { assertAnalyzesTo(a, input, output, startOffsets, endOffsets, null, null, null); } - public static void assertAnalyzesTo(Analyzer a, String input, String[] output, int startOffsets[], int endOffsets[], int[] posIncrements) throws IOException { + public static void assertAnalyzesTo( + Analyzer a, + String input, + String[] output, + int startOffsets[], + int endOffsets[], + int[] posIncrements) + throws IOException { assertAnalyzesTo(a, input, output, startOffsets, endOffsets, null, posIncrements, null); } @@ -443,7 +736,7 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase { TokenStream ts = a.tokenStream("bogus", input); try { if (ts.incrementToken()) { - //System.out.println(ts.reflectAsString(false)); + // System.out.println(ts.reflectAsString(false)); fail("didn't get expected exception when reset() not called"); } } catch (IllegalStateException expected) { @@ -454,7 +747,7 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase { } finally { // consume correctly ts.reset(); - while (ts.incrementToken()) { } + while (ts.incrementToken()) {} ts.end(); ts.close(); } @@ -476,30 +769,39 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase { // simple utility method for testing stemmers - public static void checkOneTerm(Analyzer a, final String input, final String expected) throws IOException { - assertAnalyzesTo(a, input, new String[]{expected}); + public static void checkOneTerm(Analyzer a, final String input, final String expected) + throws IOException { + assertAnalyzesTo(a, input, new String[] {expected}); } - /** utility method for blasting tokenstreams with data to make sure they 
don't do anything crazy */ + /** + * utility method for blasting tokenstreams with data to make sure they don't do anything crazy + */ public static void checkRandomData(Random random, Analyzer a, int iterations) throws IOException { checkRandomData(random, a, iterations, 20, false, true); } - /** utility method for blasting tokenstreams with data to make sure they don't do anything crazy */ - public static void checkRandomData(Random random, Analyzer a, int iterations, int maxWordLength) throws IOException { + /** + * utility method for blasting tokenstreams with data to make sure they don't do anything crazy + */ + public static void checkRandomData(Random random, Analyzer a, int iterations, int maxWordLength) + throws IOException { checkRandomData(random, a, iterations, maxWordLength, false, true); } /** * utility method for blasting tokenstreams with data to make sure they don't do anything crazy + * * @param simple true if only ascii strings will be used (try to avoid) */ - public static void checkRandomData(Random random, Analyzer a, int iterations, boolean simple) throws IOException { + public static void checkRandomData(Random random, Analyzer a, int iterations, boolean simple) + throws IOException { checkRandomData(random, a, iterations, 20, simple, true); } /** Asserts that the given stream has expected number of tokens. */ - public static void assertStreamHasNumberOfTokens(TokenStream ts, int expectedCount) throws IOException { + public static void assertStreamHasNumberOfTokens(TokenStream ts, int expectedCount) + throws IOException { ts.reset(); int count = 0; while (ts.incrementToken()) { @@ -525,7 +827,16 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase { // interact)... so this is just "best effort": public boolean failed; - AnalysisThread(long seed, CountDownLatch latch, Analyzer a, int iterations, int maxWordLength, boolean useCharFilter, boolean simple, boolean graphOffsetsAreCorrect, RandomIndexWriter iw) { + AnalysisThread( + long seed, + CountDownLatch latch, + Analyzer a, + int iterations, + int maxWordLength, + boolean useCharFilter, + boolean simple, + boolean graphOffsetsAreCorrect, + RandomIndexWriter iw) { this.seed = seed; this.a = a; this.iterations = iterations; @@ -544,7 +855,15 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase { latch.await(); // see the part in checkRandomData where it replays the same text again // to verify reproducability/reuse: hopefully this would catch thread hazards. 
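One detail worth spelling out from the AnalysisThread hunk here: every thread replays the identical randomized sequence from one shared seed, so a divergent result can only come from cross-thread interference, never from differing inputs. A generic sketch of that seed-replay idea in plain JDK Java; exercise() is a hypothetical stand-in for the analyzer-consuming step, not a Lucene API:

    import java.util.Random;

    class SeedReplayDemo {
      // Hypothetical stand-in for the analysis work each thread performs.
      static void exercise(int token) { /* analyze token */ }

      public static void main(String[] args) throws InterruptedException {
        long seed = new Random().nextLong();
        Runnable worker =
            () -> {
              Random r = new Random(seed); // identical script in every thread
              for (int i = 0; i < 1000; i++) {
                exercise(r.nextInt());
              }
            };
        Thread t1 = new Thread(worker);
        Thread t2 = new Thread(worker);
        t1.start();
        t2.start();
        t1.join();
        t2.join();
      }
    }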
- checkRandomData(new Random(seed), a, iterations, maxWordLength, useCharFilter, simple, graphOffsetsAreCorrect, iw); + checkRandomData( + new Random(seed), + a, + iterations, + maxWordLength, + useCharFilter, + simple, + graphOffsetsAreCorrect, + iw); success = true; } catch (Exception e) { Rethrow.rethrow(e); @@ -552,19 +871,28 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase { failed = !success; } } - }; + } - public static void checkRandomData(Random random, Analyzer a, int iterations, int maxWordLength, boolean simple) throws IOException { + public static void checkRandomData( + Random random, Analyzer a, int iterations, int maxWordLength, boolean simple) + throws IOException { checkRandomData(random, a, iterations, maxWordLength, simple, true); } - public static void checkRandomData(Random random, Analyzer a, int iterations, int maxWordLength, boolean simple, boolean graphOffsetsAreCorrect) throws IOException { + public static void checkRandomData( + Random random, + Analyzer a, + int iterations, + int maxWordLength, + boolean simple, + boolean graphOffsetsAreCorrect) + throws IOException { checkResetException(a, "best effort"); long seed = random.nextLong(); boolean useCharFilter = random.nextBoolean(); Directory dir = null; RandomIndexWriter iw = null; - final String postingsFormat = TestUtil.getPostingsFormat("dummy"); + final String postingsFormat = TestUtil.getPostingsFormat("dummy"); boolean codecOk = iterations * maxWordLength < 100000 && !(postingsFormat.equals("SimpleText")); if (rarely(random) && codecOk) { dir = newFSDirectory(createTempDir("bttc")); @@ -572,14 +900,33 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase { } boolean success = false; try { - checkRandomData(new Random(seed), a, iterations, maxWordLength, useCharFilter, simple, graphOffsetsAreCorrect, iw); - // now test with multiple threads: note we do the EXACT same thing we did before in each thread, + checkRandomData( + new Random(seed), + a, + iterations, + maxWordLength, + useCharFilter, + simple, + graphOffsetsAreCorrect, + iw); + // now test with multiple threads: note we do the EXACT same thing we did before in each + // thread, // so this should only really fail from another thread if it's an actual thread problem int numThreads = TestUtil.nextInt(random, 2, 4); final CountDownLatch startingGun = new CountDownLatch(1); AnalysisThread threads[] = new AnalysisThread[numThreads]; for (int i = 0; i < threads.length; i++) { - threads[i] = new AnalysisThread(seed, startingGun, a, iterations, maxWordLength, useCharFilter, simple, graphOffsetsAreCorrect, iw); + threads[i] = + new AnalysisThread( + seed, + startingGun, + a, + iterations, + maxWordLength, + useCharFilter, + simple, + graphOffsetsAreCorrect, + iw); } for (int i = 0; i < threads.length; i++) { threads[i].start(); @@ -610,7 +957,16 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase { } } - private static void checkRandomData(Random random, Analyzer a, int iterations, int maxWordLength, boolean useCharFilter, boolean simple, boolean graphOffsetsAreCorrect, RandomIndexWriter iw) throws IOException { + private static void checkRandomData( + Random random, + Analyzer a, + int iterations, + int maxWordLength, + boolean useCharFilter, + boolean simple, + boolean graphOffsetsAreCorrect, + RandomIndexWriter iw) + throws IOException { Document doc = null; Field field = null, currentField = null; @@ -629,10 +985,16 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase { if 
(random.nextBoolean()) { ft.setOmitNorms(true); } - switch(random.nextInt(4)) { - case 0: ft.setIndexOptions(IndexOptions.DOCS); break; - case 1: ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS); break; - case 2: ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS); break; + switch (random.nextInt(4)) { + case 0: + ft.setIndexOptions(IndexOptions.DOCS); + break; + case 1: + ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS); + break; + case 2: + ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS); + break; default: ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS); } @@ -644,7 +1006,8 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase { String text = TestUtil.randomAnalysisString(random, maxWordLength, simple); try { - checkAnalysisConsistency(random, a, useCharFilter, text, graphOffsetsAreCorrect, currentField); + checkAnalysisConsistency( + random, a, useCharFilter, text, graphOffsetsAreCorrect, currentField); if (iw != null) { if (random.nextInt(7) == 0) { // pile up a multivalued field @@ -664,7 +1027,8 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase { } catch (Throwable t) { // TODO: really we should pass a random seed to // checkAnalysisConsistency then print it here too: - System.err.println("TEST FAIL: useCharFilter=" + useCharFilter + " text='" + escape(text) + "'"); + System.err.println( + "TEST FAIL: useCharFilter=" + useCharFilter + " text='" + escape(text) + "'"); Rethrow.rethrow(t); } } @@ -699,23 +1063,37 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase { return sb.toString(); } - public static void checkAnalysisConsistency(Random random, Analyzer a, boolean useCharFilter, String text) throws IOException { + public static void checkAnalysisConsistency( + Random random, Analyzer a, boolean useCharFilter, String text) throws IOException { checkAnalysisConsistency(random, a, useCharFilter, text, true); } - public static void checkAnalysisConsistency(Random random, Analyzer a, boolean useCharFilter, String text, boolean graphOffsetsAreCorrect) throws IOException { + public static void checkAnalysisConsistency( + Random random, Analyzer a, boolean useCharFilter, String text, boolean graphOffsetsAreCorrect) + throws IOException { checkAnalysisConsistency(random, a, useCharFilter, text, graphOffsetsAreCorrect, null); } - private static void checkAnalysisConsistency(Random random, Analyzer a, boolean useCharFilter, String text, boolean graphOffsetsAreCorrect, Field field) throws IOException { + private static void checkAnalysisConsistency( + Random random, + Analyzer a, + boolean useCharFilter, + String text, + boolean graphOffsetsAreCorrect, + Field field) + throws IOException { if (VERBOSE) { - System.out.println(Thread.currentThread().getName() + ": NOTE: BaseTokenStreamTestCase: get first token stream now text=" + text); + System.out.println( + Thread.currentThread().getName() + + ": NOTE: BaseTokenStreamTestCase: get first token stream now text=" + + text); } int remainder = random.nextInt(10); Reader reader = new StringReader(text); - TokenStream ts = a.tokenStream("dummy", useCharFilter ? new MockCharFilter(reader, remainder) : reader); + TokenStream ts = + a.tokenStream("dummy", useCharFilter ? 
new MockCharFilter(reader, remainder) : reader); CharTermAttribute termAtt = ts.getAttribute(CharTermAttribute.class); OffsetAttribute offsetAtt = ts.getAttribute(OffsetAttribute.class); PositionIncrementAttribute posIncAtt = ts.getAttribute(PositionIncrementAttribute.class); @@ -755,12 +1133,14 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase { final int evilness = random.nextInt(50); if (evilness == 17) { if (VERBOSE) { - System.out.println(Thread.currentThread().getName() + ": NOTE: BaseTokenStreamTestCase: re-run analysis w/ exception"); + System.out.println( + Thread.currentThread().getName() + + ": NOTE: BaseTokenStreamTestCase: re-run analysis w/ exception"); } // Throw an errant exception from the Reader: MockReaderWrapper evilReader = new MockReaderWrapper(random, new StringReader(text)); - evilReader.throwExcAfterChar(random.nextInt(text.length()+1)); + evilReader.throwExcAfterChar(random.nextInt(text.length() + 1)); reader = evilReader; try { @@ -773,9 +1153,11 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase { // currently allow it, so, we must call // a.tokenStream inside the try since we may // hit the exc on init: - ts = a.tokenStream("dummy", useCharFilter ? new MockCharFilter(reader, remainder) : reader); + ts = + a.tokenStream( + "dummy", useCharFilter ? new MockCharFilter(reader, remainder) : reader); ts.reset(); - while (ts.incrementToken()); + while (ts.incrementToken()) {} fail("did not hit exception"); } catch (RuntimeException re) { assertTrue(MockReaderWrapper.isMyEvilException(re)); @@ -796,13 +1178,21 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase { // Only consume a subset of the tokens: final int numTokensToRead = random.nextInt(tokens.size()); if (VERBOSE) { - System.out.println(Thread.currentThread().getName() + ": NOTE: BaseTokenStreamTestCase: re-run analysis, only consuming " + numTokensToRead + " of " + tokens.size() + " tokens"); + System.out.println( + Thread.currentThread().getName() + + ": NOTE: BaseTokenStreamTestCase: re-run analysis, only consuming " + + numTokensToRead + + " of " + + tokens.size() + + " tokens"); } reader = new StringReader(text); - ts = a.tokenStream("dummy", useCharFilter ? new MockCharFilter(reader, remainder) : reader); + ts = + a.tokenStream( + "dummy", useCharFilter ? new MockCharFilter(reader, remainder) : reader); ts.reset(); - for(int tokenCount=0;tokenCount stringsList = new ArrayList<>(strings); Collections.sort(stringsList); StringBuilder b = new StringBuilder(); - for(String s : stringsList) { + for (String s : stringsList) { b.append(" "); b.append(s); b.append('\n'); @@ -995,15 +1397,17 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase { } /** - * Enumerates all accepted strings in the token graph created by the analyzer on the provided text, and then - * asserts that it's equal to the expected strings. - * Uses {@link TokenStreamToAutomaton} to create an automaton. Asserts the finite strings of the automaton are all - * and only the given valid strings. + * Enumerates all accepted strings in the token graph created by the analyzer on the provided + * text, and then asserts that it's equal to the expected strings. Uses {@link + * TokenStreamToAutomaton} to create an automaton. Asserts the finite strings of the automaton are + * all and only the given valid strings. + * * @param analyzer analyzer containing the SynonymFilter under test. * @param text text to be analyzed. * @param expectedStrings all expected finite strings. 
*/ - public static void assertGraphStrings(Analyzer analyzer, String text, String... expectedStrings) throws IOException { + public static void assertGraphStrings(Analyzer analyzer, String text, String... expectedStrings) + throws IOException { checkAnalysisConsistency(random(), analyzer, true, text, true); try (TokenStream tokenStream = analyzer.tokenStream("dummy", text)) { assertGraphStrings(tokenStream, expectedStrings); @@ -1011,9 +1415,11 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase { } /** - * Enumerates all accepted strings in the token graph created by the already initialized {@link TokenStream}. + * Enumerates all accepted strings in the token graph created by the already initialized {@link + * TokenStream}. */ - public static void assertGraphStrings(TokenStream tokenStream, String... expectedStrings) throws IOException { + public static void assertGraphStrings(TokenStream tokenStream, String... expectedStrings) + throws IOException { Automaton automaton = new TokenStreamToAutomaton().toAutomaton(tokenStream); Set actualStringPaths = AutomatonTestUtil.getFiniteStringsRecursive(automaton, -1); @@ -1021,40 +1427,66 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase { BytesRefBuilder scratchBytesRefBuilder = new BytesRefBuilder(); Set actualStrings = new HashSet<>(); - for (IntsRef ir: actualStringPaths) { - actualStrings.add(Util.toBytesRef(ir, scratchBytesRefBuilder).utf8ToString().replace((char) TokenStreamToAutomaton.POS_SEP, ' ')); + for (IntsRef ir : actualStringPaths) { + actualStrings.add( + Util.toBytesRef(ir, scratchBytesRefBuilder) + .utf8ToString() + .replace((char) TokenStreamToAutomaton.POS_SEP, ' ')); } for (String s : actualStrings) { - assertTrue("Analyzer created unexpected string path: " + s + "\nexpected:\n" + toString(expectedStringsSet) + "\nactual:\n" + toString(actualStrings), expectedStringsSet.contains(s)); + assertTrue( + "Analyzer created unexpected string path: " + + s + + "\nexpected:\n" + + toString(expectedStringsSet) + + "\nactual:\n" + + toString(actualStrings), + expectedStringsSet.contains(s)); } for (String s : expectedStrings) { - assertTrue("Analyzer created unexpected string path: " + s + "\nexpected:\n" + toString(expectedStringsSet) + "\nactual:\n" + toString(actualStrings), actualStrings.contains(s)); + assertTrue( + "Analyzer created unexpected string path: " + + s + + "\nexpected:\n" + + toString(expectedStringsSet) + + "\nactual:\n" + + toString(actualStrings), + actualStrings.contains(s)); } } - /** Returns all paths accepted by the token stream graph produced by analyzing text with the provided analyzer. The tokens {@link - * CharTermAttribute} values are concatenated, and separated with space. */ + /** + * Returns all paths accepted by the token stream graph produced by analyzing text with the + * provided analyzer. The tokens {@link CharTermAttribute} values are concatenated, and separated + * with space. + */ public static Set getGraphStrings(Analyzer analyzer, String text) throws IOException { - try(TokenStream tokenStream = analyzer.tokenStream("dummy", text)) { + try (TokenStream tokenStream = analyzer.tokenStream("dummy", text)) { return getGraphStrings(tokenStream); } } - /** Returns all paths accepted by the token stream graph produced by the already initialized {@link TokenStream}. */ + /** + * Returns all paths accepted by the token stream graph produced by the already initialized {@link + * TokenStream}. 
+ */ public static Set getGraphStrings(TokenStream tokenStream) throws IOException { Automaton automaton = new TokenStreamToAutomaton().toAutomaton(tokenStream); Set actualStringPaths = AutomatonTestUtil.getFiniteStringsRecursive(automaton, -1); BytesRefBuilder scratchBytesRefBuilder = new BytesRefBuilder(); Set paths = new HashSet<>(); - for (IntsRef ir: actualStringPaths) { - paths.add(Util.toBytesRef(ir, scratchBytesRefBuilder).utf8ToString().replace((char) TokenStreamToAutomaton.POS_SEP, ' ')); + for (IntsRef ir : actualStringPaths) { + paths.add( + Util.toBytesRef(ir, scratchBytesRefBuilder) + .utf8ToString() + .replace((char) TokenStreamToAutomaton.POS_SEP, ' ')); } return paths; } /** Returns a {@code String} summary of the tokens this analyzer produces on this text */ public static String toString(Analyzer analyzer, String text) throws IOException { - try(TokenStream ts = analyzer.tokenStream("field", text)) { + try (TokenStream ts = analyzer.tokenStream("field", text)) { StringBuilder b = new StringBuilder(); CharTermAttribute termAtt = ts.getAttribute(CharTermAttribute.class); PositionIncrementAttribute posIncAtt = ts.getAttribute(PositionIncrementAttribute.class); diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/CannedBinaryTokenStream.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/CannedBinaryTokenStream.java index 7eab0f4a050..8db3ac5cddf 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/analysis/CannedBinaryTokenStream.java +++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/CannedBinaryTokenStream.java @@ -22,14 +22,11 @@ import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute; import org.apache.lucene.util.BytesRef; -/** - * TokenStream from a canned list of binary (BytesRef-based) - * tokens. - */ +/** TokenStream from a canned list of binary (BytesRef-based) tokens. */ public final class CannedBinaryTokenStream extends TokenStream { /** Represents a binary token. */ - public final static class BinaryToken { + public static final class BinaryToken { BytesRef term; int posInc; int posLen; @@ -52,7 +49,8 @@ public final class CannedBinaryTokenStream extends TokenStream { private final BinaryToken[] tokens; private int upto = 0; private final BytesTermAttribute termAtt = addAttribute(BytesTermAttribute.class); - private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class); + private final PositionIncrementAttribute posIncrAtt = + addAttribute(PositionIncrementAttribute.class); private final PositionLengthAttribute posLengthAtt = addAttribute(PositionLengthAttribute.class); private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class); @@ -60,14 +58,14 @@ public final class CannedBinaryTokenStream extends TokenStream { super(Token.TOKEN_ATTRIBUTE_FACTORY); this.tokens = tokens; } - + @Override public boolean incrementToken() { if (upto < tokens.length) { - final BinaryToken token = tokens[upto++]; + final BinaryToken token = tokens[upto++]; // TODO: can we just capture/restoreState so // we get all attrs...? 
- clearAttributes(); + clearAttributes(); termAtt.setBytesRef(token.term); posIncrAtt.setPositionIncrement(token.posInc); posLengthAtt.setPositionLength(token.posLen); diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/CannedTokenStream.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/CannedTokenStream.java index 5c882e6868e..3a208078d43 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/analysis/CannedTokenStream.java +++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/CannedTokenStream.java @@ -17,18 +17,16 @@ package org.apache.lucene.analysis; import java.io.IOException; - import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; -/** - * TokenStream from a canned list of Tokens. - */ +/** TokenStream from a canned list of Tokens. */ public final class CannedTokenStream extends TokenStream { private final Token[] tokens; private int upto = 0; private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class); - private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class); + private final PositionIncrementAttribute posIncrAtt = + addAttribute(PositionIncrementAttribute.class); private final int finalOffset; private final int finalPosInc; @@ -36,8 +34,7 @@ public final class CannedTokenStream extends TokenStream { this(0, 0, tokens); } - /** If you want trailing holes, pass a non-zero - * finalPosInc. */ + /** If you want trailing holes, pass a non-zero finalPosInc. */ public CannedTokenStream(int finalPosInc, int finalOffset, Token... tokens) { super(Token.TOKEN_ATTRIBUTE_FACTORY); this.tokens = tokens; @@ -62,7 +59,8 @@ public final class CannedTokenStream extends TokenStream { public boolean incrementToken() { if (upto < tokens.length) { clearAttributes(); - // NOTE: this looks weird, casting offsetAtt to Token, but because we are using the Token class's AttributeFactory, all attributes are + // NOTE: this looks weird, casting offsetAtt to Token, but because we are using the Token + // class's AttributeFactory, all attributes are // in fact backed by the Token class, so we just copy the current token into our Token: tokens[upto++].copyTo((Token) offsetAtt); return true; diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java index 4efc2a9cd3f..3f5d7468904 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java @@ -19,7 +19,6 @@ package org.apache.lucene.analysis; import java.io.IOException; import java.util.HashMap; import java.util.Map; - import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; @@ -44,20 +43,22 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; -/** - * Base test class for testing Unicode collation. - */ +/** Base test class for testing Unicode collation. 
*/ public abstract class CollationTestBase extends LuceneTestCase { protected String firstRangeBeginningOriginal = "\u062F"; protected String firstRangeEndOriginal = "\u0698"; - + protected String secondRangeBeginningOriginal = "\u0633"; protected String secondRangeEndOriginal = "\u0638"; - - public void testFarsiRangeFilterCollating(Analyzer analyzer, BytesRef firstBeg, - BytesRef firstEnd, BytesRef secondBeg, - BytesRef secondEnd) throws Exception { + + public void testFarsiRangeFilterCollating( + Analyzer analyzer, + BytesRef firstBeg, + BytesRef firstEnd, + BytesRef secondBeg, + BytesRef secondEnd) + throws Exception { Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(analyzer)); Document doc = new Document(); @@ -67,7 +68,7 @@ public abstract class CollationTestBase extends LuceneTestCase { writer.close(); IndexReader reader = DirectoryReader.open(dir); IndexSearcher searcher = new IndexSearcher(reader); - Query query = new TermQuery(new Term("body","body")); + Query query = new TermQuery(new Term("body", "body")); // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi // orders the U+0698 character before the U+0633 character, so the single @@ -89,10 +90,14 @@ public abstract class CollationTestBase extends LuceneTestCase { reader.close(); dir.close(); } - - public void testFarsiRangeQueryCollating(Analyzer analyzer, BytesRef firstBeg, - BytesRef firstEnd, BytesRef secondBeg, - BytesRef secondEnd) throws Exception { + + public void testFarsiRangeQueryCollating( + Analyzer analyzer, + BytesRef firstBeg, + BytesRef firstEnd, + BytesRef secondBeg, + BytesRef secondEnd) + throws Exception { Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(analyzer)); Document doc = new Document(); @@ -118,8 +123,13 @@ public abstract class CollationTestBase extends LuceneTestCase { dir.close(); } - public void testFarsiTermRangeQuery(Analyzer analyzer, BytesRef firstBeg, - BytesRef firstEnd, BytesRef secondBeg, BytesRef secondEnd) throws Exception { + public void testFarsiTermRangeQuery( + Analyzer analyzer, + BytesRef firstBeg, + BytesRef firstEnd, + BytesRef secondBeg, + BytesRef secondEnd) + throws Exception { Directory farsiIndex = newDirectory(); IndexWriter writer = new IndexWriter(farsiIndex, new IndexWriterConfig(analyzer)); @@ -131,36 +141,34 @@ public abstract class CollationTestBase extends LuceneTestCase { IndexReader reader = DirectoryReader.open(farsiIndex); IndexSearcher search = newSearcher(reader); - + // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi // orders the U+0698 character before the U+0633 character, so the single // index Term below should NOT be returned by a TermRangeQuery - // with a Farsi Collator (or an Arabic one for the case when Farsi is + // with a Farsi Collator (or an Arabic one for the case when Farsi is // not supported). 
- Query csrq - = new TermRangeQuery("content", firstBeg, firstEnd, true, true); + Query csrq = new TermRangeQuery("content", firstBeg, firstEnd, true, true); ScoreDoc[] result = search.search(csrq, 1000).scoreDocs; assertEquals("The index Term should not be included.", 0, result.length); - csrq = new TermRangeQuery - ("content", secondBeg, secondEnd, true, true); + csrq = new TermRangeQuery("content", secondBeg, secondEnd, true, true); result = search.search(csrq, 1000).scoreDocs; assertEquals("The index Term should be included.", 1, result.length); reader.close(); farsiIndex.close(); } - + // Make sure the documents returned by the search match the expected list // Copied from TestSort.java - private void assertMatches(IndexSearcher searcher, Query query, Sort sort, - String expectedResult) throws IOException { + private void assertMatches(IndexSearcher searcher, Query query, Sort sort, String expectedResult) + throws IOException { ScoreDoc[] result = searcher.search(query, 1000, sort).scoreDocs; StringBuilder buff = new StringBuilder(10); int n = result.length; - for (int i = 0 ; i < n ; ++i) { + for (int i = 0; i < n; ++i) { Document doc = searcher.doc(result[i].doc); IndexableField[] v = doc.getFields("tracer"); - for (int j = 0 ; j < v.length ; ++j) { + for (int j = 0; j < v.length; ++j) { buff.append(v[j].stringValue()); } } @@ -170,8 +178,8 @@ public abstract class CollationTestBase extends LuceneTestCase { public void assertThreadSafe(final Analyzer analyzer) throws Exception { int numTestPoints = 100; int numThreads = TestUtil.nextInt(random(), 3, 5); - final HashMap map = new HashMap<>(); - + final HashMap map = new HashMap<>(); + // create a map up front. // then with multiple threads, generate sort keys for all the keys in the map // and ensure they are the same as the ones we produced in serial fashion. 
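The serial pass that builds the map sits unchanged between these two hunks, so the patch does not show it; the pattern it follows is roughly the sketch below (illustrative, not the file's verbatim code; analyzer and numTestPoints are in scope in assertThreadSafe, and Map/HashMap come from java.util). The deep copy matters: TermToBytesRefAttribute reuses its BytesRef across tokens, so caching the bare reference would alias every map entry to the last term.

Map<String, BytesRef> map = new HashMap<>();
for (int i = 0; i < numTestPoints; i++) {
  String term = TestUtil.randomSimpleString(random());
  try (TokenStream ts = analyzer.tokenStream("fake", term)) {
    TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class);
    ts.reset();
    assertTrue(ts.incrementToken());
    // Deep-copy the reused BytesRef before caching it.
    map.put(term, BytesRef.deepCopyOf(termAtt.getBytesRef()));
    assertFalse(ts.incrementToken());
    ts.end();
  }
}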
@@ -188,30 +196,32 @@ public abstract class CollationTestBase extends LuceneTestCase { ts.end(); } } - + Thread threads[] = new Thread[numThreads]; for (int i = 0; i < numThreads; i++) { - threads[i] = new Thread() { - @Override - public void run() { - try { - for (Map.Entry mapping : map.entrySet()) { - String term = mapping.getKey(); - BytesRef expected = mapping.getValue(); - try (TokenStream ts = analyzer.tokenStream("fake", term)) { - TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class); - ts.reset(); - assertTrue(ts.incrementToken()); - assertEquals(expected, termAtt.getBytesRef()); - assertFalse(ts.incrementToken()); - ts.end(); + threads[i] = + new Thread() { + @Override + public void run() { + try { + for (Map.Entry mapping : map.entrySet()) { + String term = mapping.getKey(); + BytesRef expected = mapping.getValue(); + try (TokenStream ts = analyzer.tokenStream("fake", term)) { + TermToBytesRefAttribute termAtt = + ts.addAttribute(TermToBytesRefAttribute.class); + ts.reset(); + assertTrue(ts.incrementToken()); + assertEquals(expected, termAtt.getBytesRef()); + assertFalse(ts.incrementToken()); + ts.end(); + } + } + } catch (IOException e) { + throw new RuntimeException(e); } } - } catch (IOException e) { - throw new RuntimeException(e); - } - } - }; + }; } for (int i = 0; i < numThreads; i++) { threads[i].start(); diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/CrankyTokenFilter.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/CrankyTokenFilter.java index 9c29d3fc7b1..d0a82761b53 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/analysis/CrankyTokenFilter.java +++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/CrankyTokenFilter.java @@ -19,16 +19,16 @@ package org.apache.lucene.analysis; import java.io.IOException; import java.util.Random; -/** +/** * Throws IOException from random Tokenstream methods. - *

<p>
- * This can be used to simulate a buggy analyzer in IndexWriter, - * where we must delete the document but not abort everything in the buffer. + * + * <p>
    This can be used to simulate a buggy analyzer in IndexWriter, where we must delete the + * document but not abort everything in the buffer. */ public final class CrankyTokenFilter extends TokenFilter { final Random random; int thingToDo; - + /** Creates a new CrankyTokenFilter */ public CrankyTokenFilter(TokenStream input, Random random) { super(input); @@ -42,7 +42,7 @@ public final class CrankyTokenFilter extends TokenFilter { } return input.incrementToken(); } - + @Override public void end() throws IOException { super.end(); @@ -50,7 +50,7 @@ public final class CrankyTokenFilter extends TokenFilter { throw new IOException("Fake IOException from TokenStream.end()"); } } - + @Override public void reset() throws IOException { super.reset(); diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/LookaheadTokenFilter.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/LookaheadTokenFilter.java index b0ab6edb316..99e774149f4 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/analysis/LookaheadTokenFilter.java +++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/LookaheadTokenFilter.java @@ -19,7 +19,6 @@ package org.apache.lucene.analysis; import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute; @@ -29,16 +28,18 @@ import org.apache.lucene.util.RollingBuffer; // TODO: cut SynFilter over to this // TODO: somehow add "nuke this input token" capability... -/** An abstract TokenFilter to make it easier to build graph - * token filters requiring some lookahead. This class handles - * the details of buffering up tokens, recording them by - * position, restoring them, providing access to them, etc. */ +/** + * An abstract TokenFilter to make it easier to build graph token filters requiring some lookahead. + * This class handles the details of buffering up tokens, recording them by position, restoring + * them, providing access to them, etc. + */ +public abstract class LookaheadTokenFilter + extends TokenFilter { -public abstract class LookaheadTokenFilter extends TokenFilter { + private static final boolean DEBUG = false; - private final static boolean DEBUG = false; - - protected final PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class); + protected final PositionIncrementAttribute posIncAtt = + addAttribute(PositionIncrementAttribute.class); protected final PositionLengthAttribute posLenAtt = addAttribute(PositionLengthAttribute.class); protected final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class); @@ -47,15 +48,16 @@ public abstract class LookaheadTokenFilter inputTokens = new ArrayList<>(); @@ -91,9 +93,10 @@ public abstract class LookaheadTokenFilter positions = new RollingBuffer() { - @Override - protected T newInstance() { - return newPosition(); - } - }; + protected final RollingBuffer positions = + new RollingBuffer() { + @Override + protected T newInstance() { + return newPosition(); + } + }; /** Returns true if there is a new token. 
*/ protected boolean peekToken() throws IOException { if (DEBUG) { - System.out.println("LTF.peekToken inputPos=" + inputPos + " outputPos=" + outputPos + " tokenPending=" + tokenPending); + System.out.println( + "LTF.peekToken inputPos=" + + inputPos + + " outputPos=" + + outputPos + + " tokenPending=" + + tokenPending); } assert !end; assert inputPos == -1 || outputPos <= inputPos; @@ -141,7 +150,7 @@ public abstract class LookaheadTokenFilter= 10) { - throw new IllegalArgumentException("invalid remainder parameter (must be 0..10): " + remainder); + throw new IllegalArgumentException( + "invalid remainder parameter (must be 0..10): " + remainder); } } - + // for testing only, uses a remainder of 0 public MockCharFilter(Reader in) { this(in, 0); } - + int currentOffset = -1; int delta = 0; int bufferedCh = -1; - + @Override public int read() throws IOException { // we have a buffered character, add an offset correction and return it @@ -53,22 +56,23 @@ public class MockCharFilter extends CharFilter { int ch = bufferedCh; bufferedCh = -1; currentOffset++; - - addOffCorrectMap(currentOffset, delta-1); + + addOffCorrectMap(currentOffset, delta - 1); delta--; return ch; } - - // otherwise actually read one + + // otherwise actually read one int ch = input.read(); - if (ch < 0) - return ch; - + if (ch < 0) return ch; + currentOffset++; - if ((ch % 10) != remainder || Character.isHighSurrogate((char)ch) || Character.isLowSurrogate((char)ch)) { + if ((ch % 10) != remainder + || Character.isHighSurrogate((char) ch) + || Character.isLowSurrogate((char) ch)) { return ch; } - + // we will double this character, so buffer it. bufferedCh = ch; return ch; @@ -88,15 +92,15 @@ public class MockCharFilter extends CharFilter { @Override public int correct(int currentOff) { - Map.Entry lastEntry = corrections.lowerEntry(currentOff+1); + Map.Entry lastEntry = corrections.lowerEntry(currentOff + 1); int ret = lastEntry == null ? currentOff : currentOff + lastEntry.getValue(); - assert ret >= 0 : "currentOff=" + currentOff + ",diff=" + (ret-currentOff); + assert ret >= 0 : "currentOff=" + currentOff + ",diff=" + (ret - currentOff); return ret; } - + protected void addOffCorrectMap(int off, int cumulativeDiff) { corrections.put(off, cumulativeDiff); } - - TreeMap corrections = new TreeMap<>(); + + TreeMap corrections = new TreeMap<>(); } diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/MockFixedLengthPayloadFilter.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/MockFixedLengthPayloadFilter.java index 4a92a9ea763..d248034a549 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/analysis/MockFixedLengthPayloadFilter.java +++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/MockFixedLengthPayloadFilter.java @@ -18,13 +18,10 @@ package org.apache.lucene.analysis; import java.io.IOException; import java.util.Random; - import org.apache.lucene.analysis.tokenattributes.PayloadAttribute; import org.apache.lucene.util.BytesRef; -/** - * TokenFilter that adds random fixed-length payloads. - */ +/** TokenFilter that adds random fixed-length payloads. 
*/ public final class MockFixedLengthPayloadFilter extends TokenFilter { private final PayloadAttribute payloadAtt = addAttribute(PayloadAttribute.class); private final Random random; diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/MockGraphTokenFilter.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/MockGraphTokenFilter.java index 2dca7832657..77021d6f001 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/analysis/MockGraphTokenFilter.java +++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/MockGraphTokenFilter.java @@ -18,17 +18,17 @@ package org.apache.lucene.analysis; import java.io.IOException; import java.util.Random; - import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.util.TestUtil; // TODO: sometimes remove tokens too...? -/** Randomly inserts overlapped (posInc=0) tokens with - * posLength sometimes > 1. The chain must have - * an OffsetAttribute. */ - -public final class MockGraphTokenFilter extends LookaheadTokenFilter { +/** + * Randomly inserts overlapped (posInc=0) tokens with posLength sometimes > 1. The chain must + * have an OffsetAttribute. + */ +public final class MockGraphTokenFilter + extends LookaheadTokenFilter { private static boolean DEBUG = false; @@ -64,7 +64,7 @@ public final class MockGraphTokenFilter extends LookaheadTokenFilter - *

<ul>
<li>Union a list of singletons to act like a stopfilter. - *
<li>Use the complement to act like a keepwordfilter - *
<li>Use a regex like <code>.{12,}</code> to act like a lengthfilter + * <ul>
<li>Union a list of singletons to act like a stopfilter. + *
<li>Use the complement to act like a keepwordfilter + *
  6. Use a regex like .{12,} to act like a lengthfilter * */ public final class MockTokenFilter extends TokenFilter { /** Empty set of stopwords */ - public static final CharacterRunAutomaton EMPTY_STOPSET = - new CharacterRunAutomaton(makeEmpty()); - + public static final CharacterRunAutomaton EMPTY_STOPSET = new CharacterRunAutomaton(makeEmpty()); + /** Set of common english stopwords */ - public static final CharacterRunAutomaton ENGLISH_STOPSET = - new CharacterRunAutomaton(Operations.union(Arrays.asList( - makeString("a"), makeString("an"), makeString("and"), makeString("are"), - makeString("as"), makeString("at"), makeString("be"), makeString("but"), - makeString("by"), makeString("for"), makeString("if"), makeString("in"), - makeString("into"), makeString("is"), makeString("it"), makeString("no"), - makeString("not"), makeString("of"), makeString("on"), makeString("or"), - makeString("such"), makeString("that"), makeString("the"), makeString("their"), - makeString("then"), makeString("there"), makeString("these"), makeString("they"), - makeString("this"), makeString("to"), makeString("was"), makeString("will"), - makeString("with")))); - + public static final CharacterRunAutomaton ENGLISH_STOPSET = + new CharacterRunAutomaton( + Operations.union( + Arrays.asList( + makeString("a"), + makeString("an"), + makeString("and"), + makeString("are"), + makeString("as"), + makeString("at"), + makeString("be"), + makeString("but"), + makeString("by"), + makeString("for"), + makeString("if"), + makeString("in"), + makeString("into"), + makeString("is"), + makeString("it"), + makeString("no"), + makeString("not"), + makeString("of"), + makeString("on"), + makeString("or"), + makeString("such"), + makeString("that"), + makeString("the"), + makeString("their"), + makeString("then"), + makeString("there"), + makeString("these"), + makeString("they"), + makeString("this"), + makeString("to"), + makeString("was"), + makeString("will"), + makeString("with")))); + private final CharacterRunAutomaton filter; private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); - private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class); + private final PositionIncrementAttribute posIncrAtt = + addAttribute(PositionIncrementAttribute.class); private int skippedPositions; /** * Create a new MockTokenFilter. - * + * * @param input TokenStream to filter * @param filter DFA representing the terms that should be removed. 
*/ @@ -69,12 +95,12 @@ public final class MockTokenFilter extends TokenFilter { super(input); this.filter = filter; } - + @Override public boolean incrementToken() throws IOException { // TODO: fix me when posInc=false, to work like FilteringTokenFilter in that case and not return // initial token with posInc=0 ever - + // return the first non-stop word found skippedPositions = 0; while (input.incrementToken()) { diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/MockTokenizer.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/MockTokenizer.java index 20287042493..880e0c2a396 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/analysis/MockTokenizer.java +++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/MockTokenizer.java @@ -16,43 +16,42 @@ */ package org.apache.lucene.analysis; +import com.carrotsearch.randomizedtesting.RandomizedContext; import java.io.IOException; import java.nio.CharBuffer; import java.util.Random; - import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.util.AttributeFactory; import org.apache.lucene.util.automaton.CharacterRunAutomaton; import org.apache.lucene.util.automaton.RegExp; -import com.carrotsearch.randomizedtesting.RandomizedContext; - /** * Tokenizer for testing. - *

<p>
- * This tokenizer is a replacement for {@link #WHITESPACE}, {@link #SIMPLE}, and {@link #KEYWORD} - * tokenizers. If you are writing a component such as a TokenFilter, it's a great idea to test - * it wrapping this tokenizer instead for extra checks. This tokenizer has the following behavior: + * + * <p>This tokenizer is a replacement for {@link #WHITESPACE}, {@link #SIMPLE}, and {@link #KEYWORD} + * tokenizers. If you are writing a component such as a TokenFilter, it's a great idea to test it + * wrapping this tokenizer instead for extra checks. This tokenizer has the following behavior: + * * <ul>
- *   <li>An internal state-machine is used for checking consumer consistency. These checks can - * be disabled with {@link #setEnableChecks(boolean)}. + *   <li>An internal state-machine is used for checking consumer consistency. These checks can be + * disabled with {@link #setEnableChecks(boolean)}. *   <li>For convenience, optionally lowercases terms that it outputs. * </ul>
    */ public class MockTokenizer extends Tokenizer { /** Acts Similar to WhitespaceTokenizer */ - public static final CharacterRunAutomaton WHITESPACE = - new CharacterRunAutomaton(new RegExp("[^ \t\r\n]+").toAutomaton()); - /** Acts Similar to KeywordTokenizer. - * TODO: Keyword returns an "empty" token for an empty reader... + public static final CharacterRunAutomaton WHITESPACE = + new CharacterRunAutomaton(new RegExp("[^ \t\r\n]+").toAutomaton()); + /** + * Acts Similar to KeywordTokenizer. TODO: Keyword returns an "empty" token for an empty reader... */ public static final CharacterRunAutomaton KEYWORD = - new CharacterRunAutomaton(new RegExp(".*").toAutomaton()); + new CharacterRunAutomaton(new RegExp(".*").toAutomaton()); /** Acts like LetterTokenizer. */ // the ugly regex below is incomplete Unicode 5.2 [:Letter:] public static final CharacterRunAutomaton SIMPLE = - new CharacterRunAutomaton(new RegExp("[A-Za-zªµºÀ-ÖØ-öø-ˁ一-鿌]+").toAutomaton()); + new CharacterRunAutomaton(new RegExp("[A-Za-zªµºÀ-ÖØ-öø-ˁ一-鿌]+").toAutomaton()); private final CharacterRunAutomaton runAutomaton; private final boolean lowerCase; @@ -63,7 +62,7 @@ public class MockTokenizer extends Tokenizer { private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class); int off = 0; - + // buffered state (previous codepoint and offset). we replay this once we // hit a reject state in case it's permissible as the start of a new term. int bufferedCodePoint = -1; // -1 indicates empty buffer @@ -72,23 +71,27 @@ public class MockTokenizer extends Tokenizer { // TODO: "register" with LuceneTestCase to ensure all streams are closed() ? // currently, we can only check that the lifecycle is correct if someone is reusing, // but not for "one-offs". 
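The lifecycle the State enum below encodes is the standard TokenStream consumer contract; a minimal sketch of a conforming consumer (analyzer, field name, and text are placeholders, and the state transitions are noted in comments):

try (TokenStream ts = analyzer.tokenStream("field", "some text")) {
  CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
  ts.reset();                      // SETREADER -> RESET
  while (ts.incrementToken()) {    // -> INCREMENT while tokens remain
    System.out.println(termAtt.toString());
  }                                // final false return -> INCREMENT_FALSE
  ts.end();                        // -> END, performs end-of-stream operations
}                                  // try-with-resources close() -> CLOSE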
- private static enum State { - SETREADER, // consumer set a reader input either via ctor or via reset(Reader) - RESET, // consumer has called reset() - INCREMENT, // consumer is consuming, has called incrementToken() == true + private static enum State { + SETREADER, // consumer set a reader input either via ctor or via reset(Reader) + RESET, // consumer has called reset() + INCREMENT, // consumer is consuming, has called incrementToken() == true INCREMENT_FALSE, // consumer has called incrementToken() which returned false - END, // consumer has called end() to perform end of stream operations - CLOSE // consumer has called close() to release any resources + END, // consumer has called end() to perform end of stream operations + CLOSE // consumer has called close() to release any resources }; - + private State streamState = State.CLOSE; private int lastOffset = 0; // only for checks private boolean enableChecks = true; - + // evil: but we don't change the behavior with this random, we only switch up how we read private final Random random = new Random(RandomizedContext.current().getRandom().nextLong()); - - public MockTokenizer(AttributeFactory factory, CharacterRunAutomaton runAutomaton, boolean lowerCase, int maxTokenLength) { + + public MockTokenizer( + AttributeFactory factory, + CharacterRunAutomaton runAutomaton, + boolean lowerCase, + int maxTokenLength) { super(factory); this.runAutomaton = runAutomaton; this.lowerCase = lowerCase; @@ -104,21 +107,27 @@ public class MockTokenizer extends Tokenizer { this(runAutomaton, lowerCase, DEFAULT_MAX_TOKEN_LENGTH); } - /** Calls {@link #MockTokenizer(CharacterRunAutomaton, boolean) MockTokenizer(Reader, WHITESPACE, true)} */ + /** + * Calls {@link #MockTokenizer(CharacterRunAutomaton, boolean) MockTokenizer(Reader, WHITESPACE, + * true)} + */ public MockTokenizer() { this(WHITESPACE, true); } - public MockTokenizer(AttributeFactory factory, CharacterRunAutomaton runAutomaton, boolean lowerCase) { + public MockTokenizer( + AttributeFactory factory, CharacterRunAutomaton runAutomaton, boolean lowerCase) { this(factory, runAutomaton, lowerCase, DEFAULT_MAX_TOKEN_LENGTH); } - /** Calls {@link #MockTokenizer(AttributeFactory,CharacterRunAutomaton,boolean) - * MockTokenizer(AttributeFactory, Reader, WHITESPACE, true)} */ + /** + * Calls {@link #MockTokenizer(AttributeFactory,CharacterRunAutomaton,boolean) + * MockTokenizer(AttributeFactory, Reader, WHITESPACE, true)} + */ public MockTokenizer(AttributeFactory factory) { this(factory, WHITESPACE, true); } - + // we allow some checks (e.g. state machine) to be turned off. 
// turning off checks just means we suppress exceptions from them private void fail(String message) { @@ -126,7 +135,7 @@ public class MockTokenizer extends Tokenizer { throw new IllegalStateException(message); } } - + private void failAlways(String message) { throw new IllegalStateException(message); } @@ -138,7 +147,7 @@ public class MockTokenizer extends Tokenizer { } clearAttributes(); - for (;;) { + for (; ; ) { int startOffset; int cp; if (bufferedCodePoint >= 0) { @@ -165,7 +174,7 @@ public class MockTokenizer extends Tokenizer { } cp = readCodePoint(); } while (cp >= 0 && isTokenChar(cp)); - + if (termAtt.length() < maxTokenLength) { // buffer up, in case the "rejected" char can start a new word of its own bufferedCodePoint = cp; @@ -177,17 +186,32 @@ public class MockTokenizer extends Tokenizer { int correctedStartOffset = correctOffset(startOffset); int correctedEndOffset = correctOffset(endOffset); if (correctedStartOffset < 0) { - failAlways("invalid start offset: " + correctedStartOffset + ", before correction: " + startOffset); + failAlways( + "invalid start offset: " + + correctedStartOffset + + ", before correction: " + + startOffset); } if (correctedEndOffset < 0) { - failAlways("invalid end offset: " + correctedEndOffset + ", before correction: " + endOffset); + failAlways( + "invalid end offset: " + correctedEndOffset + ", before correction: " + endOffset); } if (correctedStartOffset < lastOffset) { - failAlways("start offset went backwards: " + correctedStartOffset + ", before correction: " + startOffset + ", lastOffset: " + lastOffset); + failAlways( + "start offset went backwards: " + + correctedStartOffset + + ", before correction: " + + startOffset + + ", lastOffset: " + + lastOffset); } lastOffset = correctedStartOffset; if (correctedEndOffset < correctedStartOffset) { - failAlways("end offset: " + correctedEndOffset + " is before start offset: " + correctedStartOffset); + failAlways( + "end offset: " + + correctedEndOffset + + " is before start offset: " + + correctedStartOffset); } offsetAtt.setOffset(correctedStartOffset, correctedEndOffset); if (state == -1 || runAutomaton.isAccept(state)) { @@ -215,7 +239,11 @@ public class MockTokenizer extends Tokenizer { if (ch2 >= 0) { off++; if (!Character.isLowSurrogate((char) ch2)) { - failAlways("unpaired high surrogate: " + Integer.toHexString(ch) + ", followed by: " + Integer.toHexString(ch2)); + failAlways( + "unpaired high surrogate: " + + Integer.toHexString(ch) + + ", followed by: " + + Integer.toHexString(ch2)); } return Character.toCodePoint((char) ch, (char) ch2); } else { @@ -225,29 +253,32 @@ public class MockTokenizer extends Tokenizer { return ch; } } - + protected int readChar() throws IOException { - switch(random.nextInt(10)) { - case 0: { - // read(char[]) - char c[] = new char[1]; - int ret = input.read(c); - return ret < 0 ? ret : c[0]; - } - case 1: { - // read(char[], int, int) - char c[] = new char[2]; - int ret = input.read(c, 1, 1); - return ret < 0 ? ret : c[1]; - } - case 2: { - // read(CharBuffer) - char c[] = new char[1]; - CharBuffer cb = CharBuffer.wrap(c); - int ret = input.read(cb); - return ret < 0 ? ret : c[0]; - } - default: + switch (random.nextInt(10)) { + case 0: + { + // read(char[]) + char c[] = new char[1]; + int ret = input.read(c); + return ret < 0 ? ret : c[0]; + } + case 1: + { + // read(char[], int, int) + char c[] = new char[2]; + int ret = input.read(c, 1, 1); + return ret < 0 ? 
ret : c[1]; + } + case 2: + { + // read(CharBuffer) + char c[] = new char[1]; + CharBuffer cb = CharBuffer.wrap(c); + int ret = input.read(cb); + return ret < 0 ? ret : c[0]; + } + default: // read() return input.read(); } @@ -264,7 +295,7 @@ public class MockTokenizer extends Tokenizer { return true; } } - + protected int normalize(int c) { return lowerCase ? Character.toLowerCase(c) : c; } @@ -283,7 +314,7 @@ public class MockTokenizer extends Tokenizer { streamState = State.RESET; } } - + @Override public void close() throws IOException { try { @@ -316,7 +347,8 @@ public class MockTokenizer extends Tokenizer { super.end(); int finalOffset = correctOffset(off); offsetAtt.setOffset(finalOffset, finalOffset); - // some tokenizers, such as limiting tokenizers, call end() before incrementToken() returns false. + // some tokenizers, such as limiting tokenizers, call end() before incrementToken() returns + // false. // these tests should disable this check (in general you should consume the entire stream) if (streamState != State.INCREMENT_FALSE) { fail("end() called in wrong state=" + streamState + "!"); @@ -326,9 +358,9 @@ public class MockTokenizer extends Tokenizer { } } - /** - * Toggle consumer workflow checking: if your test consumes tokenstreams normally you - * should leave this enabled. + /** + * Toggle consumer workflow checking: if your test consumes tokenstreams normally you should leave + * this enabled. */ public void setEnableChecks(boolean enableChecks) { this.enableChecks = enableChecks; diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/MockUTF16TermAttributeImpl.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/MockUTF16TermAttributeImpl.java index e65f15185e5..fe8d652b20e 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/analysis/MockUTF16TermAttributeImpl.java +++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/MockUTF16TermAttributeImpl.java @@ -17,21 +17,21 @@ package org.apache.lucene.analysis; import java.nio.charset.StandardCharsets; - import org.apache.lucene.analysis.tokenattributes.CharTermAttributeImpl; import org.apache.lucene.util.AttributeFactory; import org.apache.lucene.util.BytesRef; /** - * Extension of {@link CharTermAttributeImpl} that encodes the term - * text as UTF-16 bytes instead of as UTF-8 bytes. + * Extension of {@link CharTermAttributeImpl} that encodes the term text as UTF-16 bytes instead of + * as UTF-8 bytes. 
*/ public class MockUTF16TermAttributeImpl extends CharTermAttributeImpl { - + /** Factory that returns an instance of this class for CharTermAttribute */ public static final AttributeFactory UTF16_TERM_ATTRIBUTE_FACTORY = - AttributeFactory.getStaticImplementation(AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY, MockUTF16TermAttributeImpl.class); - + AttributeFactory.getStaticImplementation( + AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY, MockUTF16TermAttributeImpl.class); + @Override public BytesRef getBytesRef() { final BytesRef ref = this.builder.get(); diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/MockVariableLengthPayloadFilter.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/MockVariableLengthPayloadFilter.java index a70cef3e108..11c1c0fa4d1 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/analysis/MockVariableLengthPayloadFilter.java +++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/MockVariableLengthPayloadFilter.java @@ -18,13 +18,10 @@ package org.apache.lucene.analysis; import java.io.IOException; import java.util.Random; - import org.apache.lucene.analysis.tokenattributes.PayloadAttribute; import org.apache.lucene.util.BytesRef; -/** - * TokenFilter that adds random variable-length payloads. - */ +/** TokenFilter that adds random variable-length payloads. */ public final class MockVariableLengthPayloadFilter extends TokenFilter { private static final int MAXLENGTH = 129; diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/SimplePayloadFilter.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/SimplePayloadFilter.java index 954fd16a2a4..7324d52809e 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/analysis/SimplePayloadFilter.java +++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/SimplePayloadFilter.java @@ -18,7 +18,6 @@ package org.apache.lucene.analysis; import java.io.IOException; import java.nio.charset.StandardCharsets; - import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.PayloadAttribute; import org.apache.lucene.util.BytesRef; diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/Token.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/Token.java index 18b6b8b8593..4940e0b0637 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/analysis/Token.java +++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/Token.java @@ -16,7 +16,6 @@ */ package org.apache.lucene.analysis; - import org.apache.lucene.analysis.tokenattributes.FlagsAttribute; import org.apache.lucene.analysis.tokenattributes.PackedTokenAttributeImpl; import org.apache.lucene.analysis.tokenattributes.PayloadAttribute; @@ -25,50 +24,51 @@ import org.apache.lucene.util.AttributeImpl; import org.apache.lucene.util.AttributeReflector; import org.apache.lucene.util.BytesRef; -/** - A Token is an occurrence of a term from the text of a field. It consists of - a term's text, the start and end offset of the term in the text of the field, - and a type string. -

<p>
- The start and end offsets permit applications to re-associate a token with - its source text, e.g., to display highlighted query terms in a document - browser, or to show matching text fragments in a KWIC - display, etc. - <p>
- The type is a string, assigned by a lexical analyzer - (a.k.a. tokenizer), naming the lexical or syntactic class that the token - belongs to. For example an end of sentence marker token might be implemented - with type "eos". The default token type is "word". - <p>
- A Token can optionally have metadata (a.k.a. payload) in the form of a variable - length byte array. Use {@link org.apache.lucene.index.PostingsEnum#getPayload()} to retrieve the - payloads from the index. - - A few things to note: - <ul>
- <li>clear() initializes all of the fields to default values. This was changed in contrast to Lucene 2.4, but should affect no one.</li>
- <li>Because TokenStreams can be chained, one cannot assume that the Token's current type is correct.</li>
- <li>The startOffset and endOffset represent the start and offset in the source text, so be careful in adjusting them.</li>
- <li>When caching a reusable token, clone it. When injecting a cached token into a stream that can be reset, clone it again.</li>
- </ul> -*/ +/** + * A Token is an occurrence of a term from the text of a field. It consists of a term's text, the + * start and end offset of the term in the text of the field, and a type string. + * + * <p>The start and end offsets permit applications to re-associate a token with its source text, + * e.g., to display highlighted query terms in a document browser, or to show matching text + * fragments in a KWIC display, etc. + * + * <p>The type is a string, assigned by a lexical analyzer (a.k.a. tokenizer), naming the lexical or + * syntactic class that the token belongs to. For example an end of sentence marker token might be + * implemented with type "eos". The default token type is "word". + * + * <p>A Token can optionally have metadata (a.k.a. payload) in the form of a variable length byte + * array. Use {@link org.apache.lucene.index.PostingsEnum#getPayload()} to retrieve the payloads + * from the index. + * + * <p>A few things to note: + * + * <ul>
+ *   <li>clear() initializes all of the fields to default values. This was changed in contrast to + * Lucene 2.4, but should affect no one.
+ *   <li>Because TokenStreams can be chained, one cannot assume that the Token's + * current type is correct.
+ *   <li>The startOffset and endOffset represent the start and offset in the source text, so be + * careful in adjusting them.
+ *   <li>When caching a reusable token, clone it. When injecting a cached token into a stream that + * can be reset, clone it again. + * </ul>
    + */ public class Token extends PackedTokenAttributeImpl implements FlagsAttribute, PayloadAttribute { private int flags; private BytesRef payload; /** Constructs a Token will null text. */ - public Token() { - } + public Token() {} - /** Constructs a Token with the given term text, start - * and end offsets. The type defaults to "word." - * NOTE: for better indexing speed you should - * instead use the char[] termBuffer methods to set the - * term text. - * @param text term text - * @param start start offset in the source text - * @param end end offset in the source text + /** + * Constructs a Token with the given term text, start and end offsets. The type defaults to + * "word." NOTE: for better indexing speed you should instead use the char[] termBuffer + * methods to set the term text. + * + * @param text term text + * @param start start offset in the source text + * @param end end offset in the source text */ public Token(CharSequence text, int start, int end) { append(text); @@ -91,6 +91,7 @@ public class Token extends PackedTokenAttributeImpl implements FlagsAttribute, P /** * {@inheritDoc} + * * @see FlagsAttribute */ @Override @@ -100,6 +101,7 @@ public class Token extends PackedTokenAttributeImpl implements FlagsAttribute, P /** * {@inheritDoc} + * * @see FlagsAttribute */ @Override @@ -109,6 +111,7 @@ public class Token extends PackedTokenAttributeImpl implements FlagsAttribute, P /** * {@inheritDoc} + * * @see PayloadAttribute */ @Override @@ -118,15 +121,17 @@ public class Token extends PackedTokenAttributeImpl implements FlagsAttribute, P /** * {@inheritDoc} + * * @see PayloadAttribute */ @Override public void setPayload(BytesRef payload) { this.payload = payload; } - - /** Resets the term text, payload, flags, positionIncrement, positionLength, - * startOffset, endOffset and token type to default. + + /** + * Resets the term text, payload, flags, positionIncrement, positionLength, startOffset, endOffset + * and token type to default. */ @Override public void clear() { @@ -137,18 +142,14 @@ public class Token extends PackedTokenAttributeImpl implements FlagsAttribute, P @Override public boolean equals(Object obj) { - if (obj == this) - return true; + if (obj == this) return true; if (obj instanceof Token) { final Token other = (Token) obj; - return ( - flags == other.flags && - (payload == null ? other.payload == null : payload.equals(other.payload)) && - super.equals(obj) - ); - } else - return false; + return (flags == other.flags + && (payload == null ? other.payload == null : payload.equals(other.payload)) + && super.equals(obj)); + } else return false; } @Override @@ -172,6 +173,7 @@ public class Token extends PackedTokenAttributeImpl implements FlagsAttribute, P /** * Copy the prototype token's fields into this one. Note: Payloads are shared. + * * @param prototype source Token to copy fields from */ public void reinit(Token prototype) { @@ -199,11 +201,13 @@ public class Token extends PackedTokenAttributeImpl implements FlagsAttribute, P reflector.reflect(PayloadAttribute.class, "payload", payload); } - /** Convenience factory that returns Token as implementation for the basic - * attributes and return the default impl (with "Impl" appended) for all other - * attributes. + /** + * Convenience factory that returns Token as implementation for the basic attributes + * and return the default impl (with "Impl" appended) for all other attributes. 
+ * * @since 3.0 */ public static final AttributeFactory TOKEN_ATTRIBUTE_FACTORY = - AttributeFactory.getStaticImplementation(AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY, Token.class); + AttributeFactory.getStaticImplementation( + AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY, Token.class); } diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/TokenStreamToDot.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/TokenStreamToDot.java index 64923db08db..cef3b88de8a 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/analysis/TokenStreamToDot.java +++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/TokenStreamToDot.java @@ -16,9 +16,8 @@ */ package org.apache.lucene.analysis; -import java.io.PrintWriter; import java.io.IOException; - +import java.io.PrintWriter; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; @@ -35,9 +34,10 @@ public class TokenStreamToDot { private final String inputText; protected final PrintWriter out; - /** If inputText is non-null, and the TokenStream has - * offsets, we include the surface form in each arc's - * label. */ + /** + * If inputText is non-null, and the TokenStream has offsets, we include the surface form in each + * arc's label. + */ public TokenStreamToDot(String inputText, TokenStream in, PrintWriter out) { this.in = in; this.out = out; @@ -91,7 +91,8 @@ public class TokenStreamToDot { if (offsetAtt != null) { final int startOffset = offsetAtt.startOffset(); final int endOffset = offsetAtt.endOffset(); - //System.out.println("start=" + startOffset + " end=" + endOffset + " len=" + inputText.length()); + // System.out.println("start=" + startOffset + " end=" + endOffset + " len=" + + // inputText.length()); if (inputText != null) { String fragment = inputText.substring(startOffset, endOffset); if (fragment.equals(termAtt.toString()) == false) { @@ -139,18 +140,22 @@ public class TokenStreamToDot { out.println(); } - private final static String FONT_NAME = "Helvetica"; + private static final String FONT_NAME = "Helvetica"; /** Override to customize. 
*/ protected void writeHeader() { out.println("digraph tokens {"); - out.println(" graph [ fontsize=30 labelloc=\"t\" label=\"\" splines=true overlap=false rankdir = \"LR\" ];"); + out.println( + " graph [ fontsize=30 labelloc=\"t\" label=\"\" splines=true overlap=false rankdir = \"LR\" ];"); out.println(" // A2 paper size"); out.println(" size = \"34.4,16.5\";"); - //out.println(" // try to fill paper"); - //out.println(" ratio = fill;"); + // out.println(" // try to fill paper"); + // out.println(" ratio = fill;"); out.println(" edge [ fontname=\"" + FONT_NAME + "\" fontcolor=\"red\" color=\"#606060\" ]"); - out.println(" node [ style=\"filled\" fillcolor=\"#e8e8f0\" shape=\"Mrecord\" fontname=\"" + FONT_NAME + "\" ]"); + out.println( + " node [ style=\"filled\" fillcolor=\"#e8e8f0\" shape=\"Mrecord\" fontname=\"" + + FONT_NAME + + "\" ]"); out.println(); } diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/ValidatingTokenFilter.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/ValidatingTokenFilter.java index 5d5859ea023..0f37ac66470 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/analysis/ValidatingTokenFilter.java +++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/ValidatingTokenFilter.java @@ -23,7 +23,6 @@ import java.util.LinkedList; import java.util.List; import java.util.Locale; import java.util.Map; - import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; @@ -38,8 +37,9 @@ import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute; // TODO: BTSTC should just append this to the chain // instead of checking itself: -/** A TokenFilter that checks consistency of the tokens (eg - * offsets are consistent with one another). */ +/** + * A TokenFilter that checks consistency of the tokens (eg offsets are consistent with one another). + */ public final class ValidatingTokenFilter extends TokenFilter { private static final int MAX_DEBUG_TOKENS = 20; @@ -48,10 +48,11 @@ public final class ValidatingTokenFilter extends TokenFilter { private int lastStartOffset; // Maps position to the start/end offset: - private final Map posToStartOffset = new HashMap<>(); - private final Map posToEndOffset = new HashMap<>(); + private final Map posToStartOffset = new HashMap<>(); + private final Map posToEndOffset = new HashMap<>(); - private final PositionIncrementAttribute posIncAtt = getAttribute(PositionIncrementAttribute.class); + private final PositionIncrementAttribute posIncAtt = + getAttribute(PositionIncrementAttribute.class); private final PositionLengthAttribute posLenAtt = getAttribute(PositionLengthAttribute.class); private final OffsetAttribute offsetAtt = getAttribute(OffsetAttribute.class); private final CharTermAttribute termAtt = getAttribute(CharTermAttribute.class); @@ -61,9 +62,10 @@ public final class ValidatingTokenFilter extends TokenFilter { private final String name; - /** The name arg is used to identify this stage when - * throwing exceptions (useful if you have more than one - * instance in your chain). */ + /** + * The name arg is used to identify this stage when throwing exceptions (useful if you have more + * than one instance in your chain). 
+ */ public ValidatingTokenFilter(TokenStream in, String name) { super(in); this.name = name; @@ -96,7 +98,7 @@ public final class ValidatingTokenFilter extends TokenFilter { addToken(startOffset, endOffset, posInc); // System.out.println(name + ": " + this); - + if (posIncAtt != null) { pos += posInc; if (pos == -1) { @@ -104,15 +106,20 @@ public final class ValidatingTokenFilter extends TokenFilter { throw new IllegalStateException(name + ": first posInc must be > 0"); } } - + if (offsetAtt != null) { if (startOffset < lastStartOffset) { dumpValidatingTokenFilters(this, System.err); - throw new IllegalStateException(name + ": offsets must not go backwards startOffset=" + startOffset + " is < lastStartOffset=" + lastStartOffset); + throw new IllegalStateException( + name + + ": offsets must not go backwards startOffset=" + + startOffset + + " is < lastStartOffset=" + + lastStartOffset); } lastStartOffset = offsetAtt.startOffset(); } - + if (offsetAtt != null && posIncAtt != null) { if (!posToStartOffset.containsKey(pos)) { @@ -126,7 +133,16 @@ public final class ValidatingTokenFilter extends TokenFilter { final int oldStartOffset = posToStartOffset.get(pos); if (oldStartOffset != startOffset) { dumpValidatingTokenFilters(this, System.err); - throw new IllegalStateException(name + ": inconsistent startOffset at pos=" + pos + ": " + oldStartOffset + " vs " + startOffset + "; token=" + termAtt); + throw new IllegalStateException( + name + + ": inconsistent startOffset at pos=" + + pos + + ": " + + oldStartOffset + + " vs " + + startOffset + + "; token=" + + termAtt); } } @@ -135,15 +151,24 @@ public final class ValidatingTokenFilter extends TokenFilter { if (!posToEndOffset.containsKey(endPos)) { // First time we've seen a token arriving to this position: posToEndOffset.put(endPos, endOffset); - //System.out.println(name + " + e " + endPos + " -> " + endOffset); + // System.out.println(name + " + e " + endPos + " -> " + endOffset); } else { // We've seen a token arriving to this position // before; verify the endOffset is the same: - //System.out.println(name + " + ve " + endPos + " -> " + endOffset); + // System.out.println(name + " + ve " + endPos + " -> " + endOffset); final int oldEndOffset = posToEndOffset.get(endPos); if (oldEndOffset != endOffset) { dumpValidatingTokenFilters(this, System.err); - throw new IllegalStateException(name + ": inconsistent endOffset at pos=" + endPos + ": " + oldEndOffset + " vs " + endOffset + "; token=" + termAtt); + throw new IllegalStateException( + name + + ": inconsistent endOffset at pos=" + + endPos + + ": " + + oldEndOffset + + " vs " + + endOffset + + "; token=" + + termAtt); } } } @@ -171,7 +196,6 @@ public final class ValidatingTokenFilter extends TokenFilter { tokens.clear(); } - private void addToken(int startOffset, int endOffset, int posInc) { if (tokens.size() == MAX_DEBUG_TOKENS) { tokens.remove(0); @@ -181,6 +205,7 @@ public final class ValidatingTokenFilter extends TokenFilter { /** * Prints details about consumed tokens stored in any ValidatingTokenFilters in the input chain + * * @param in the input token stream * @param out the output print stream */ @@ -197,10 +222,15 @@ public final class ValidatingTokenFilter extends TokenFilter { StringBuilder buf = new StringBuilder(); buf.append(name).append(": "); for (Token token : tokens) { - buf.append(String.format(Locale.ROOT, "%s<[%d-%d] +%d> ", - token, token.startOffset(), token.endOffset(), token.getPositionIncrement())); + buf.append( + String.format( + Locale.ROOT, + "%s<[%d-%d] +%d> 
", + token, + token.startOffset(), + token.endOffset(), + token.getPositionIncrement())); } return buf.toString(); } - } diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/VocabularyAssert.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/VocabularyAssert.java index 4275c667b7f..70087b232a5 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/analysis/VocabularyAssert.java +++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/VocabularyAssert.java @@ -23,7 +23,6 @@ import java.io.InputStreamReader; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; - import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; import org.junit.Assert; @@ -32,11 +31,11 @@ import org.junit.Assert; public class VocabularyAssert { /** Run a vocabulary test against two data files. */ public static void assertVocabulary(Analyzer a, InputStream voc, InputStream out) - throws IOException { - BufferedReader vocReader = new BufferedReader( - new InputStreamReader(voc, StandardCharsets.UTF_8)); - BufferedReader outputReader = new BufferedReader( - new InputStreamReader(out, StandardCharsets.UTF_8)); + throws IOException { + BufferedReader vocReader = + new BufferedReader(new InputStreamReader(voc, StandardCharsets.UTF_8)); + BufferedReader outputReader = + new BufferedReader(new InputStreamReader(out, StandardCharsets.UTF_8)); String inputWord = null; while ((inputWord = vocReader.readLine()) != null) { String expectedWord = outputReader.readLine(); @@ -44,33 +43,32 @@ public class VocabularyAssert { BaseTokenStreamTestCase.checkOneTerm(a, inputWord, expectedWord); } } - + /** Run a vocabulary test against one file: tab separated. */ - public static void assertVocabulary(Analyzer a, InputStream vocOut) - throws IOException { - BufferedReader vocReader = new BufferedReader( - new InputStreamReader(vocOut, StandardCharsets.UTF_8)); + public static void assertVocabulary(Analyzer a, InputStream vocOut) throws IOException { + BufferedReader vocReader = + new BufferedReader(new InputStreamReader(vocOut, StandardCharsets.UTF_8)); String inputLine = null; while ((inputLine = vocReader.readLine()) != null) { - if (inputLine.startsWith("#") || inputLine.trim().length() == 0) - continue; /* comment */ + if (inputLine.startsWith("#") || inputLine.trim().length() == 0) continue; /* comment */ String words[] = inputLine.split("\t"); BaseTokenStreamTestCase.checkOneTerm(a, words[0], words[1]); } } - + /** Run a vocabulary test against two data files inside a zip file */ - public static void assertVocabulary(Analyzer a, Path zipFile, String voc, String out) throws IOException { + public static void assertVocabulary(Analyzer a, Path zipFile, String voc, String out) + throws IOException { Path tmp = LuceneTestCase.createTempDir(); try (InputStream in = Files.newInputStream(zipFile)) { TestUtil.unzip(in, tmp); } - try (InputStream v = Files.newInputStream(tmp.resolve(voc)); - InputStream o = Files.newInputStream(tmp.resolve(out))) { + try (InputStream v = Files.newInputStream(tmp.resolve(voc)); + InputStream o = Files.newInputStream(tmp.resolve(out))) { assertVocabulary(a, v, o); } } - + /** Run a vocabulary test against a tab-separated data file inside a zip file */ public static void assertVocabulary(Analyzer a, Path zipFile, String vocOut) throws IOException { Path tmp = LuceneTestCase.createTempDir(); diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/MissingOrdRemapper.java 
b/lucene/test-framework/src/java/org/apache/lucene/codecs/MissingOrdRemapper.java index cb2d7dcfcf7..b3a0805c392 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/MissingOrdRemapper.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/MissingOrdRemapper.java @@ -17,15 +17,14 @@ package org.apache.lucene.codecs; import java.util.Iterator; - import org.apache.lucene.util.BytesRef; -/** - * a utility class to write missing values for SORTED as if they were the empty string - * (to simulate pre-Lucene4.5 dv behavior for testing old codecs) +/** + * a utility class to write missing values for SORTED as if they were the empty string (to simulate + * pre-Lucene4.5 dv behavior for testing old codecs) */ public class MissingOrdRemapper { - + /** insert an empty byte[] to the front of this iterable */ public static Iterable insertEmptyValue(final Iterable iterable) { return new Iterable() { @@ -34,7 +33,7 @@ public class MissingOrdRemapper { return new Iterator() { boolean seenEmpty = false; Iterator in = iterable.iterator(); - + @Override public boolean hasNext() { return !seenEmpty || in.hasNext(); @@ -58,7 +57,7 @@ public class MissingOrdRemapper { } }; } - + /** remaps ord -1 to ord 0 on this iterable. */ public static Iterable mapMissingToOrd0(final Iterable iterable) { return new Iterable() { @@ -66,7 +65,7 @@ public class MissingOrdRemapper { public Iterator iterator() { return new Iterator() { Iterator in = iterable.iterator(); - + @Override public boolean hasNext() { return in.hasNext(); @@ -90,7 +89,7 @@ public class MissingOrdRemapper { } }; } - + /** remaps every ord+1 on this iterable */ public static Iterable mapAllOrds(final Iterable iterable) { return new Iterable() { @@ -98,7 +97,7 @@ public class MissingOrdRemapper { public Iterator iterator() { return new Iterator() { Iterator in = iterable.iterator(); - + @Override public boolean hasNext() { return in.hasNext(); @@ -107,7 +106,7 @@ public class MissingOrdRemapper { @Override public Number next() { Number n = in.next(); - return n.longValue()+1; + return n.longValue() + 1; } @Override diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingCodec.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingCodec.java index 58791181891..91260d03605 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingCodec.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingCodec.java @@ -28,33 +28,38 @@ import org.apache.lucene.codecs.perfield.PerFieldDocValuesFormat; import org.apache.lucene.codecs.perfield.PerFieldPostingsFormat; import org.apache.lucene.util.TestUtil; -/** - * Acts like the default codec but with additional asserts. - */ +/** Acts like the default codec but with additional asserts. */ public class AssertingCodec extends FilterCodec { static void assertThread(String object, Thread creationThread) { if (creationThread != Thread.currentThread()) { - throw new AssertionError(object + " are only supposed to be consumed in " - + "the thread in which they have been acquired. But was acquired in " - + creationThread + " and consumed in " + Thread.currentThread() + "."); + throw new AssertionError( + object + + " are only supposed to be consumed in " + + "the thread in which they have been acquired. 
But was acquired in " + + creationThread + + " and consumed in " + + Thread.currentThread() + + "."); } } - private final PostingsFormat postings = new PerFieldPostingsFormat() { - @Override - public PostingsFormat getPostingsFormatForField(String field) { - return AssertingCodec.this.getPostingsFormatForField(field); - } - }; - - private final DocValuesFormat docValues = new PerFieldDocValuesFormat() { - @Override - public DocValuesFormat getDocValuesFormatForField(String field) { - return AssertingCodec.this.getDocValuesFormatForField(field); - } - }; - + private final PostingsFormat postings = + new PerFieldPostingsFormat() { + @Override + public PostingsFormat getPostingsFormatForField(String field) { + return AssertingCodec.this.getPostingsFormatForField(field); + } + }; + + private final DocValuesFormat docValues = + new PerFieldDocValuesFormat() { + @Override + public DocValuesFormat getDocValuesFormatForField(String field) { + return AssertingCodec.this.getDocValuesFormatForField(field); + } + }; + private final TermVectorsFormat vectors = new AssertingTermVectorsFormat(); private final StoredFieldsFormat storedFields = new AssertingStoredFieldsFormat(); private final NormsFormat norms = new AssertingNormsFormat(); @@ -106,20 +111,21 @@ public class AssertingCodec extends FilterCodec { public String toString() { return "Asserting(" + delegate + ")"; } - - /** Returns the postings format that should be used for writing - * new segments of field. - * - * The default implementation always returns "Asserting" + + /** + * Returns the postings format that should be used for writing new segments of field. + * + *
<p>The default implementation always returns "Asserting" */ public PostingsFormat getPostingsFormatForField(String field) { return defaultFormat; } - - /** Returns the docvalues format that should be used for writing - * new segments of field. - * - * The default implementation always returns "Asserting" + + /** + * Returns the docvalues format that should be used for writing new segments of <code>field + * </code>. + * + * <p>
    The default implementation always returns "Asserting" */ public DocValuesFormat getDocValuesFormatForField(String field) { return defaultDVFormat; diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingDocValuesFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingDocValuesFormat.java index f246acfda7b..2b85a9a095c 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingDocValuesFormat.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingDocValuesFormat.java @@ -16,9 +16,10 @@ */ package org.apache.lucene.codecs.asserting; +import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS; + import java.io.IOException; import java.util.Collection; - import org.apache.lucene.codecs.DocValuesConsumer; import org.apache.lucene.codecs.DocValuesFormat; import org.apache.lucene.codecs.DocValuesProducer; @@ -38,14 +39,10 @@ import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.LongBitSet; import org.apache.lucene.util.TestUtil; -import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS; - -/** - * Just like the default but with additional asserts. - */ +/** Just like the default but with additional asserts. */ public class AssertingDocValuesFormat extends DocValuesFormat { private final DocValuesFormat in = TestUtil.getDefaultDocValuesFormat(); - + public AssertingDocValuesFormat() { super("Asserting"); } @@ -64,18 +61,19 @@ public class AssertingDocValuesFormat extends DocValuesFormat { assert producer != null; return new AssertingDocValuesProducer(producer, state.segmentInfo.maxDoc(), false); } - + static class AssertingDocValuesConsumer extends DocValuesConsumer { private final DocValuesConsumer in; private final int maxDoc; - + AssertingDocValuesConsumer(DocValuesConsumer in, int maxDoc) { this.in = in; this.maxDoc = maxDoc; } @Override - public void addNumericField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { + public void addNumericField(FieldInfo field, DocValuesProducer valuesProducer) + throws IOException { NumericDocValues values = valuesProducer.getNumeric(field); int docID; @@ -86,14 +84,15 @@ public class AssertingDocValuesFormat extends DocValuesFormat { lastDocID = docID; long value = values.longValue(); } - + in.addNumericField(field, valuesProducer); } - + @Override - public void addBinaryField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { + public void addBinaryField(FieldInfo field, DocValuesProducer valuesProducer) + throws IOException { BinaryDocValues values = valuesProducer.getBinary(field); - + int docID; int lastDocID = -1; while ((docID = values.nextDoc()) != NO_MORE_DOCS) { @@ -106,15 +105,16 @@ public class AssertingDocValuesFormat extends DocValuesFormat { in.addBinaryField(field, valuesProducer); } - + @Override - public void addSortedField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { + public void addSortedField(FieldInfo field, DocValuesProducer valuesProducer) + throws IOException { SortedDocValues values = valuesProducer.getSorted(field); int valueCount = values.getValueCount(); assert valueCount <= maxDoc; BytesRef lastValue = null; - for (int ord=0;ord= 0 && ord < valueCount; seenOrds.set(ord); } - + assert seenOrds.cardinality() == valueCount; in.addSortedField(field, valuesProducer); } - + @Override - public void addSortedNumericField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { + public 
void addSortedNumericField(FieldInfo field, DocValuesProducer valuesProducer) + throws IOException { SortedNumericDocValues values = valuesProducer.getSortedNumeric(field); long valueCount = 0; @@ -166,14 +167,15 @@ public class AssertingDocValuesFormat extends DocValuesFormat { } in.addSortedNumericField(field, valuesProducer); } - + @Override - public void addSortedSetField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { + public void addSortedSetField(FieldInfo field, DocValuesProducer valuesProducer) + throws IOException { SortedSetDocValues values = valuesProducer.getSortedSet(field); long valueCount = values.getValueCount(); BytesRef lastValue = null; - for (long i=0;i= 0 && ord < valueCount: "ord=" + ord + " is not in bounds 0 .." + (valueCount-1); + assert ord >= 0 && ord < valueCount + : "ord=" + ord + " is not in bounds 0 .." + (valueCount - 1); assert ord > lastOrd : "ord=" + ord + ",lastOrd=" + lastOrd; seenOrds.set(ord); lastOrd = ord; } } - + assert seenOrds.cardinality() == valueCount; in.addSortedSetField(field, valuesProducer); } - + @Override public void close() throws IOException { in.close(); in.close(); // close again } } - + static class AssertingDocValuesProducer extends DocValuesProducer { private final DocValuesProducer in; private final int maxDoc; private final boolean merging; private final Thread creationThread; - + AssertingDocValuesProducer(DocValuesProducer in, int maxDoc, boolean merging) { this.in = in; this.maxDoc = maxDoc; @@ -265,7 +268,7 @@ public class AssertingDocValuesFormat extends DocValuesFormat { assert values != null; return new AssertingLeafReader.AssertingSortedDocValues(values, maxDoc); } - + @Override public SortedNumericDocValues getSortedNumeric(FieldInfo field) throws IOException { if (merging) { @@ -276,7 +279,7 @@ public class AssertingDocValuesFormat extends DocValuesFormat { assert values != null; return AssertingLeafReader.AssertingSortedNumericDocValues.create(values, maxDoc); } - + @Override public SortedSetDocValues getSortedSet(FieldInfo field) throws IOException { if (merging) { @@ -287,7 +290,7 @@ public class AssertingDocValuesFormat extends DocValuesFormat { assert values != null; return AssertingLeafReader.AssertingSortedSetDocValues.create(values, maxDoc); } - + @Override public void close() throws IOException { in.close(); @@ -312,7 +315,7 @@ public class AssertingDocValuesFormat extends DocValuesFormat { public void checkIntegrity() throws IOException { in.checkIntegrity(); } - + @Override public DocValuesProducer getMergeInstance() { return new AssertingDocValuesProducer(in.getMergeInstance(), maxDoc, true); diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingLiveDocsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingLiveDocsFormat.java index 6cf511a486f..54f7ba1fe8b 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingLiveDocsFormat.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingLiveDocsFormat.java @@ -18,7 +18,6 @@ package org.apache.lucene.codecs.asserting; import java.io.IOException; import java.util.Collection; - import org.apache.lucene.codecs.LiveDocsFormat; import org.apache.lucene.index.SegmentCommitInfo; import org.apache.lucene.store.Directory; @@ -26,14 +25,13 @@ import org.apache.lucene.store.IOContext; import org.apache.lucene.util.Bits; import org.apache.lucene.util.TestUtil; -/** - * Just like the default live docs format but with 
additional asserts. - */ +/** Just like the default live docs format but with additional asserts. */ public class AssertingLiveDocsFormat extends LiveDocsFormat { private final LiveDocsFormat in = TestUtil.getDefaultCodec().liveDocsFormat(); @Override - public Bits readLiveDocs(Directory dir, SegmentCommitInfo info, IOContext context) throws IOException { + public Bits readLiveDocs(Directory dir, SegmentCommitInfo info, IOContext context) + throws IOException { Bits raw = in.readLiveDocs(dir, info, context); assert raw != null; check(raw, info.info.maxDoc(), info.getDelCount()); @@ -41,7 +39,9 @@ public class AssertingLiveDocsFormat extends LiveDocsFormat { } @Override - public void writeLiveDocs(Bits bits, Directory dir, SegmentCommitInfo info, int newDelCount, IOContext context) throws IOException { + public void writeLiveDocs( + Bits bits, Directory dir, SegmentCommitInfo info, int newDelCount, IOContext context) + throws IOException { check(bits, info.info.maxDoc(), info.getDelCount() + newDelCount); in.writeLiveDocs(bits, dir, info, newDelCount, context); } @@ -54,7 +54,8 @@ public class AssertingLiveDocsFormat extends LiveDocsFormat { deletedCount++; } } - assert deletedCount == expectedDeleteCount : "deleted: " + deletedCount + " != expected: " + expectedDeleteCount; + assert deletedCount == expectedDeleteCount + : "deleted: " + deletedCount + " != expected: " + expectedDeleteCount; } @Override diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingNormsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingNormsFormat.java index 937b8f6a7c2..b6683d72b1c 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingNormsFormat.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingNormsFormat.java @@ -16,9 +16,10 @@ */ package org.apache.lucene.codecs.asserting; +import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS; + import java.io.IOException; import java.util.Collection; - import org.apache.lucene.codecs.NormsConsumer; import org.apache.lucene.codecs.NormsFormat; import org.apache.lucene.codecs.NormsProducer; @@ -30,14 +31,10 @@ import org.apache.lucene.index.SegmentWriteState; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.TestUtil; -import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS; - -/** - * Just like the default but with additional asserts. - */ +/** Just like the default but with additional asserts. 
*/ public class AssertingNormsFormat extends NormsFormat { private final NormsFormat in = TestUtil.getDefaultCodec().normsFormat(); - + @Override public NormsConsumer normsConsumer(SegmentWriteState state) throws IOException { NormsConsumer consumer = in.normsConsumer(state); @@ -52,11 +49,11 @@ public class AssertingNormsFormat extends NormsFormat { assert producer != null; return new AssertingNormsProducer(producer, state.segmentInfo.maxDoc(), false); } - + static class AssertingNormsConsumer extends NormsConsumer { private final NormsConsumer in; private final int maxDoc; - + AssertingNormsConsumer(NormsConsumer in, int maxDoc) { this.in = in; this.maxDoc = maxDoc; @@ -77,20 +74,20 @@ public class AssertingNormsFormat extends NormsFormat { in.addNormsField(field, valuesProducer); } - + @Override public void close() throws IOException { in.close(); in.close(); // close again } } - + static class AssertingNormsProducer extends NormsProducer { private final NormsProducer in; private final int maxDoc; private final boolean merging; private final Thread creationThread; - + AssertingNormsProducer(NormsProducer in, int maxDoc, boolean merging) { this.in = in; this.maxDoc = maxDoc; @@ -125,7 +122,7 @@ public class AssertingNormsFormat extends NormsFormat { assert v >= 0; return v; } - + @Override public Collection getChildResources() { Collection res = in.getChildResources(); @@ -137,12 +134,12 @@ public class AssertingNormsFormat extends NormsFormat { public void checkIntegrity() throws IOException { in.checkIntegrity(); } - + @Override public NormsProducer getMergeInstance() { return new AssertingNormsProducer(in.getMergeInstance(), maxDoc, true); } - + @Override public String toString() { return getClass().getSimpleName() + "(" + in.toString() + ")"; diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPointsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPointsFormat.java index 71709cc9b90..3c9494a7fc9 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPointsFormat.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPointsFormat.java @@ -18,7 +18,6 @@ package org.apache.lucene.codecs.asserting; import java.io.IOException; import java.util.Collection; - import org.apache.lucene.codecs.PointsFormat; import org.apache.lucene.codecs.PointsReader; import org.apache.lucene.codecs.PointsWriter; @@ -31,10 +30,7 @@ import org.apache.lucene.index.SegmentWriteState; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.TestUtil; -/** - * Just like the default point format but with additional asserts. - */ - +/** Just like the default point format but with additional asserts. */ public final class AssertingPointsFormat extends PointsFormat { private final PointsFormat in; @@ -44,15 +40,15 @@ public final class AssertingPointsFormat extends PointsFormat { } /** - * Expert: Create an AssertingPointsFormat. - * This is only intended to pass special parameters for testing. + * Expert: Create an AssertingPointsFormat. This is only intended to pass special parameters for + * testing. */ // TODO: can we randomize this a cleaner way? e.g. stored fields and vectors do // this with a separate codec... 
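// [Editor's sketch, not from the patch; setup values are illustrative] The
// asserting formats only run once the wrapping codec is installed on the
// writer config, e.g.:
Directory dir = new ByteBuffersDirectory();
IndexWriterConfig iwc =
    new IndexWriterConfig(new StandardAnalyzer()).setCodec(new AssertingCodec());
try (IndexWriter writer = new IndexWriter(dir, iwc)) {
  Document doc = new Document();
  doc.add(new IntPoint("point", 42)); // flushed through AssertingPointsFormat's writer
  writer.addDocument(doc);
}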
public AssertingPointsFormat(PointsFormat in) { this.in = in; } - + @Override public PointsWriter fieldsWriter(SegmentWriteState state) throws IOException { return new AssertingPointsWriter(state, in.fieldsWriter(state)); @@ -63,13 +59,12 @@ public final class AssertingPointsFormat extends PointsFormat { return new AssertingPointsReader(state.segmentInfo.maxDoc(), in.fieldsReader(state), false); } - static class AssertingPointsReader extends PointsReader { private final PointsReader in; private final int maxDoc; private final boolean merging; private final Thread creationThread; - + AssertingPointsReader(int maxDoc, PointsReader in, boolean merging) { this.in = in; this.maxDoc = maxDoc; @@ -80,7 +75,7 @@ public final class AssertingPointsFormat extends PointsFormat { assert ramBytesUsed() >= 0; assert getChildResources() != null; } - + @Override public void close() throws IOException { in.close(); @@ -105,7 +100,7 @@ public final class AssertingPointsFormat extends PointsFormat { assert v >= 0; return v; } - + @Override public Collection getChildResources() { Collection res = in.getChildResources(); @@ -117,7 +112,7 @@ public final class AssertingPointsFormat extends PointsFormat { public void checkIntegrity() throws IOException { in.checkIntegrity(); } - + @Override public PointsReader getMergeInstance() { return new AssertingPointsReader(maxDoc, in.getMergeInstance(), true); @@ -135,11 +130,12 @@ public final class AssertingPointsFormat extends PointsFormat { AssertingPointsWriter(SegmentWriteState writeState, PointsWriter in) { this.in = in; } - + @Override public void writeField(FieldInfo fieldInfo, PointsReader values) throws IOException { if (fieldInfo.getPointDimensionCount() == 0) { - throw new IllegalArgumentException("writing field=\"" + fieldInfo.name + "\" but pointDimensionalCount is 0"); + throw new IllegalArgumentException( + "writing field=\"" + fieldInfo.name + "\" but pointDimensionalCount is 0"); } in.writeField(fieldInfo, values); } diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPostingsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPostingsFormat.java index 8446972aac3..e5af5c4ad8a 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPostingsFormat.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPostingsFormat.java @@ -19,7 +19,6 @@ package org.apache.lucene.codecs.asserting; import java.io.IOException; import java.util.Collection; import java.util.Iterator; - import org.apache.lucene.codecs.FieldsConsumer; import org.apache.lucene.codecs.FieldsProducer; import org.apache.lucene.codecs.NormsProducer; @@ -39,16 +38,14 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.TestUtil; -/** - * Just like the default postings format but with additional asserts. - */ +/** Just like the default postings format but with additional asserts. 
*/ public final class AssertingPostingsFormat extends PostingsFormat { private final PostingsFormat in = TestUtil.getDefaultPostingsFormat(); - + public AssertingPostingsFormat() { super("Asserting"); } - + @Override public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException { return new AssertingFieldsConsumer(state, in.fieldsConsumer(state)); @@ -58,10 +55,10 @@ public final class AssertingPostingsFormat extends PostingsFormat { public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException { return new AssertingFieldsProducer(in.fieldsProducer(state)); } - + static class AssertingFieldsProducer extends FieldsProducer { private final FieldsProducer in; - + AssertingFieldsProducer(FieldsProducer in) { this.in = in; // do a few simple checks on init @@ -69,7 +66,7 @@ public final class AssertingPostingsFormat extends PostingsFormat { assert ramBytesUsed() >= 0; assert getChildResources() != null; } - + @Override public void close() throws IOException { in.close(); @@ -100,7 +97,7 @@ public final class AssertingPostingsFormat extends PostingsFormat { assert v >= 0; return v; } - + @Override public Collection getChildResources() { Collection res = in.getChildResources(); @@ -112,7 +109,7 @@ public final class AssertingPostingsFormat extends PostingsFormat { public void checkIntegrity() throws IOException { in.checkIntegrity(); } - + @Override public FieldsProducer getMergeInstance() { return new AssertingFieldsProducer(in.getMergeInstance()); @@ -132,7 +129,7 @@ public final class AssertingPostingsFormat extends PostingsFormat { this.writeState = writeState; this.in = in; } - + @Override public void write(Fields fields, NormsProducer norms) throws IOException { in.write(fields, norms); @@ -141,10 +138,10 @@ public final class AssertingPostingsFormat extends PostingsFormat { // "limited" CheckIndex here??? Or ... can we improve // AssertingFieldsProducer and us it also to wrap the // incoming Fields here? 
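// [Editor's note, sketch only] The checks that follow enforce the Fields
// contract rather than codec-specific behavior: field names iterate in
// ascending String order, terms in ascending BytesRef order. For example:
//   assert "author".compareTo("title") < 0;                        // fields sorted
//   assert new BytesRef("bar").compareTo(new BytesRef("foo")) < 0; // terms sorted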
- + String lastField = null; - for(String field : fields) { + for (String field : fields) { FieldInfo fieldInfo = writeState.fieldInfos.fieldInfo(field); assert fieldInfo != null; @@ -162,14 +159,19 @@ public final class AssertingPostingsFormat extends PostingsFormat { PostingsEnum postingsEnum = null; boolean hasFreqs = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) >= 0; - boolean hasPositions = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0; - boolean hasOffsets = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0; + boolean hasPositions = + fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0; + boolean hasOffsets = + fieldInfo + .getIndexOptions() + .compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) + >= 0; boolean hasPayloads = terms.hasPayloads(); assert hasPositions == terms.hasPositions(); assert hasOffsets == terms.hasOffsets(); - while(true) { + while (true) { BytesRef term = termsEnum.next(); if (term == null) { break; @@ -203,7 +205,7 @@ public final class AssertingPostingsFormat extends PostingsFormat { int lastDocID = -1; - while(true) { + while (true) { int docID = postingsEnum.nextDoc(); if (docID == PostingsEnum.NO_MORE_DOCS) { break; @@ -217,10 +219,12 @@ public final class AssertingPostingsFormat extends PostingsFormat { if (hasPositions) { int lastPos = -1; int lastStartOffset = -1; - for(int i=0;i= lastPos: "pos=" + pos + " vs lastPos=" + lastPos + " i=" + i + " freq=" + freq; - assert pos <= IndexWriter.MAX_POSITION: "pos=" + pos + " is > IndexWriter.MAX_POSITION=" + IndexWriter.MAX_POSITION; + assert pos >= lastPos + : "pos=" + pos + " vs lastPos=" + lastPos + " i=" + i + " freq=" + freq; + assert pos <= IndexWriter.MAX_POSITION + : "pos=" + pos + " is > IndexWriter.MAX_POSITION=" + IndexWriter.MAX_POSITION; lastPos = pos; if (hasOffsets) { diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingStoredFieldsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingStoredFieldsFormat.java index 4a552281154..0dc101fcb2c 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingStoredFieldsFormat.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingStoredFieldsFormat.java @@ -18,7 +18,6 @@ package org.apache.lucene.codecs.asserting; import java.io.IOException; import java.util.Collection; - import org.apache.lucene.codecs.StoredFieldsFormat; import org.apache.lucene.codecs.StoredFieldsReader; import org.apache.lucene.codecs.StoredFieldsWriter; @@ -32,28 +31,29 @@ import org.apache.lucene.store.IOContext; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.TestUtil; -/** - * Just like the default stored fields format but with additional asserts. - */ +/** Just like the default stored fields format but with additional asserts. 
*/ public class AssertingStoredFieldsFormat extends StoredFieldsFormat { private final StoredFieldsFormat in = TestUtil.getDefaultCodec().storedFieldsFormat(); @Override - public StoredFieldsReader fieldsReader(Directory directory, SegmentInfo si, FieldInfos fn, IOContext context) throws IOException { - return new AssertingStoredFieldsReader(in.fieldsReader(directory, si, fn, context), si.maxDoc(), false); + public StoredFieldsReader fieldsReader( + Directory directory, SegmentInfo si, FieldInfos fn, IOContext context) throws IOException { + return new AssertingStoredFieldsReader( + in.fieldsReader(directory, si, fn, context), si.maxDoc(), false); } @Override - public StoredFieldsWriter fieldsWriter(Directory directory, SegmentInfo si, IOContext context) throws IOException { + public StoredFieldsWriter fieldsWriter(Directory directory, SegmentInfo si, IOContext context) + throws IOException { return new AssertingStoredFieldsWriter(in.fieldsWriter(directory, si, context)); } - + static class AssertingStoredFieldsReader extends StoredFieldsReader { private final StoredFieldsReader in; private final int maxDoc; private final boolean merging; private final Thread creationThread; - + AssertingStoredFieldsReader(StoredFieldsReader in, int maxDoc, boolean merging) { this.in = in; this.maxDoc = maxDoc; @@ -64,7 +64,7 @@ public class AssertingStoredFieldsFormat extends StoredFieldsFormat { assert ramBytesUsed() >= 0; assert getChildResources() != null; } - + @Override public void close() throws IOException { in.close(); @@ -115,14 +115,16 @@ public class AssertingStoredFieldsFormat extends StoredFieldsFormat { } enum Status { - UNDEFINED, STARTED, FINISHED; + UNDEFINED, + STARTED, + FINISHED; } static class AssertingStoredFieldsWriter extends StoredFieldsWriter { private final StoredFieldsWriter in; private int numWritten; private Status docStatus; - + AssertingStoredFieldsWriter(StoredFieldsWriter in) { this.in = in; this.docStatus = Status.UNDEFINED; diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingTermVectorsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingTermVectorsFormat.java index b8ea51084ad..64b3d54c03d 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingTermVectorsFormat.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingTermVectorsFormat.java @@ -19,7 +19,6 @@ package org.apache.lucene.codecs.asserting; import java.io.IOException; import java.util.Collection; import java.util.Collections; - import org.apache.lucene.codecs.TermVectorsFormat; import org.apache.lucene.codecs.TermVectorsReader; import org.apache.lucene.codecs.TermVectorsWriter; @@ -34,19 +33,21 @@ import org.apache.lucene.util.Accountable; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.TestUtil; -/** - * Just like the default vectors format but with additional asserts. - */ +/** Just like the default vectors format but with additional asserts. 
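// [Editor's sketch; the call sequence is illustrative and `writer`/`fieldInfo`
// are assumed to be in scope] The asserting writer below enforces this
// startDocument -> startField -> startTerm -> addPosition -> finish* lifecycle
// on every term-vector consumer:
writer.startDocument(1);                            // document with one vectorized field
writer.startField(fieldInfo, 1, true, true, false); // one term, positions + offsets, no payloads
writer.startTerm(new BytesRef("lucene"), 1);        // freq = 1
writer.addPosition(0, 0, 6, null);                  // pos 0, offsets [0, 6), no payload
writer.finishTerm();
writer.finishField();
writer.finishDocument();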
*/ public class AssertingTermVectorsFormat extends TermVectorsFormat { private final TermVectorsFormat in = TestUtil.getDefaultCodec().termVectorsFormat(); @Override - public TermVectorsReader vectorsReader(Directory directory, SegmentInfo segmentInfo, FieldInfos fieldInfos, IOContext context) throws IOException { - return new AssertingTermVectorsReader(in.vectorsReader(directory, segmentInfo, fieldInfos, context)); + public TermVectorsReader vectorsReader( + Directory directory, SegmentInfo segmentInfo, FieldInfos fieldInfos, IOContext context) + throws IOException { + return new AssertingTermVectorsReader( + in.vectorsReader(directory, segmentInfo, fieldInfos, context)); } @Override - public TermVectorsWriter vectorsWriter(Directory directory, SegmentInfo segmentInfo, IOContext context) throws IOException { + public TermVectorsWriter vectorsWriter( + Directory directory, SegmentInfo segmentInfo, IOContext context) throws IOException { return new AssertingTermVectorsWriter(in.vectorsWriter(directory, segmentInfo, context)); } @@ -84,7 +85,7 @@ public class AssertingTermVectorsFormat extends TermVectorsFormat { assert v >= 0; return v; } - + @Override public Collection getChildResources() { Collection res = in.getChildResources(); @@ -96,7 +97,7 @@ public class AssertingTermVectorsFormat extends TermVectorsFormat { public void checkIntegrity() throws IOException { in.checkIntegrity(); } - + @Override public TermVectorsReader getMergeInstance() { return new AssertingTermVectorsReader(in.getMergeInstance()); @@ -109,7 +110,9 @@ public class AssertingTermVectorsFormat extends TermVectorsFormat { } enum Status { - UNDEFINED, STARTED, FINISHED; + UNDEFINED, + STARTED, + FINISHED; } static class AssertingTermVectorsWriter extends TermVectorsWriter { @@ -145,8 +148,9 @@ public class AssertingTermVectorsFormat extends TermVectorsFormat { } @Override - public void startField(FieldInfo info, int numTerms, boolean positions, - boolean offsets, boolean payloads) throws IOException { + public void startField( + FieldInfo info, int numTerms, boolean positions, boolean offsets, boolean payloads) + throws IOException { assert termCount == 0; assert docStatus == Status.STARTED; assert fieldStatus != Status.STARTED; @@ -187,8 +191,8 @@ public class AssertingTermVectorsFormat extends TermVectorsFormat { } @Override - public void addPosition(int position, int startOffset, int endOffset, - BytesRef payload) throws IOException { + public void addPosition(int position, int startOffset, int endOffset, BytesRef payload) + throws IOException { assert docStatus == Status.STARTED; assert fieldStatus == Status.STARTED; assert termStatus == Status.STARTED; diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/package-info.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/package-info.java index bccdd5d4bf6..164584ed4a6 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/package-info.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/package-info.java @@ -15,7 +15,5 @@ * limitations under the License. */ -/** - * Codec for testing that asserts various contracts of the codec apis. - */ +/** Codec for testing that asserts various contracts of the codec apis. 
*/ package org.apache.lucene.codecs.asserting; diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/blockterms/LuceneFixedGap.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/blockterms/LuceneFixedGap.java index 7caeb187941..fadfaf2f28c 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/blockterms/LuceneFixedGap.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/blockterms/LuceneFixedGap.java @@ -17,7 +17,6 @@ package org.apache.lucene.codecs.blockterms; import java.io.IOException; - import org.apache.lucene.codecs.FieldsConsumer; import org.apache.lucene.codecs.FieldsProducer; import org.apache.lucene.codecs.PostingsFormat; @@ -33,16 +32,15 @@ import org.apache.lucene.index.SegmentWriteState; // any PostingsFormat and make it ord-able... /** - * Customized version of {@link Lucene84PostingsFormat} that uses - * {@link FixedGapTermsIndexWriter}. + * Customized version of {@link Lucene84PostingsFormat} that uses {@link FixedGapTermsIndexWriter}. */ public final class LuceneFixedGap extends PostingsFormat { final int termIndexInterval; - + public LuceneFixedGap() { this(FixedGapTermsIndexWriter.DEFAULT_TERM_INDEX_INTERVAL); } - + public LuceneFixedGap(int termIndexInterval) { super("LuceneFixedGap"); this.termIndexInterval = termIndexInterval; diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/blockterms/LuceneVarGapDocFreqInterval.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/blockterms/LuceneVarGapDocFreqInterval.java index 94421c0f1f0..ce17f386d93 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/blockterms/LuceneVarGapDocFreqInterval.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/blockterms/LuceneVarGapDocFreqInterval.java @@ -17,7 +17,6 @@ package org.apache.lucene.codecs.blockterms; import java.io.IOException; - import org.apache.lucene.codecs.FieldsConsumer; import org.apache.lucene.codecs.FieldsProducer; import org.apache.lucene.codecs.PostingsFormat; @@ -33,18 +32,18 @@ import org.apache.lucene.index.SegmentWriteState; // any PostingsFormat and make it ord-able... /** - * Customized version of {@link Lucene84PostingsFormat} that uses - * {@link VariableGapTermsIndexWriter} with a fixed interval, but - * forcing high docfreq terms to be indexed terms. + * Customized version of {@link Lucene84PostingsFormat} that uses {@link + * VariableGapTermsIndexWriter} with a fixed interval, but forcing high docfreq terms to be indexed + * terms. 
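// [Editor's sketch, not from the patch; the thresholds are illustrative]
// Wiring the format below into a test run: TestUtil.alwaysPostingsFormat wraps
// a single PostingsFormat as a Codec.
Codec codec = TestUtil.alwaysPostingsFormat(new LuceneVarGapDocFreqInterval(1_000_000, 128));
IndexWriterConfig iwc = new IndexWriterConfig(new StandardAnalyzer()).setCodec(codec);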
*/ public final class LuceneVarGapDocFreqInterval extends PostingsFormat { final int termIndexInterval; final int docFreqThreshold; - + public LuceneVarGapDocFreqInterval() { this(1000000, FixedGapTermsIndexWriter.DEFAULT_TERM_INDEX_INTERVAL); } - + public LuceneVarGapDocFreqInterval(int docFreqThreshold, int termIndexInterval) { super("LuceneVarGapDocFreqInterval"); this.termIndexInterval = termIndexInterval; @@ -62,7 +61,11 @@ public final class LuceneVarGapDocFreqInterval extends PostingsFormat { TermsIndexWriterBase indexWriter; boolean success = false; try { - indexWriter = new VariableGapTermsIndexWriter(state, new VariableGapTermsIndexWriter.EveryNOrDocFreqTermSelector(docFreqThreshold, termIndexInterval)); + indexWriter = + new VariableGapTermsIndexWriter( + state, + new VariableGapTermsIndexWriter.EveryNOrDocFreqTermSelector( + docFreqThreshold, termIndexInterval)); success = true; } finally { if (!success) { diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/blockterms/LuceneVarGapFixedInterval.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/blockterms/LuceneVarGapFixedInterval.java index 1049fea56df..44c050f059a 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/blockterms/LuceneVarGapFixedInterval.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/blockterms/LuceneVarGapFixedInterval.java @@ -17,7 +17,6 @@ package org.apache.lucene.codecs.blockterms; import java.io.IOException; - import org.apache.lucene.codecs.FieldsConsumer; import org.apache.lucene.codecs.FieldsProducer; import org.apache.lucene.codecs.PostingsFormat; @@ -33,16 +32,16 @@ import org.apache.lucene.index.SegmentWriteState; // any PostingsFormat and make it ord-able... /** - * Customized version of {@link Lucene84PostingsFormat} that uses - * {@link VariableGapTermsIndexWriter} with a fixed interval. + * Customized version of {@link Lucene84PostingsFormat} that uses {@link + * VariableGapTermsIndexWriter} with a fixed interval. 
*/ public final class LuceneVarGapFixedInterval extends PostingsFormat { final int termIndexInterval; - + public LuceneVarGapFixedInterval() { this(FixedGapTermsIndexWriter.DEFAULT_TERM_INDEX_INTERVAL); } - + public LuceneVarGapFixedInterval(int termIndexInterval) { super("LuceneVarGapFixedInterval"); this.termIndexInterval = termIndexInterval; @@ -59,7 +58,9 @@ public final class LuceneVarGapFixedInterval extends PostingsFormat { TermsIndexWriterBase indexWriter; boolean success = false; try { - indexWriter = new VariableGapTermsIndexWriter(state, new VariableGapTermsIndexWriter.EveryNTermSelector(termIndexInterval)); + indexWriter = + new VariableGapTermsIndexWriter( + state, new VariableGapTermsIndexWriter.EveryNTermSelector(termIndexInterval)); success = true; } finally { if (!success) { diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/bloom/TestBloomFilteredLucenePostings.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/bloom/TestBloomFilteredLucenePostings.java index 7c38d4cf330..29a14f8f01c 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/bloom/TestBloomFilteredLucenePostings.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/bloom/TestBloomFilteredLucenePostings.java @@ -17,7 +17,6 @@ package org.apache.lucene.codecs.bloom; import java.io.IOException; - import org.apache.lucene.codecs.FieldsConsumer; import org.apache.lucene.codecs.FieldsProducer; import org.apache.lucene.codecs.PostingsFormat; @@ -27,24 +26,23 @@ import org.apache.lucene.index.SegmentWriteState; import org.apache.lucene.util.TestUtil; /** - * A class used for testing {@link BloomFilteringPostingsFormat} with a concrete - * delegate (Lucene41). Creates a Bloom filter on ALL fields and with tiny - * amounts of memory reserved for the filter. DO NOT USE IN A PRODUCTION - * APPLICATION This is not a realistic application of Bloom Filters as they - * ordinarily are larger and operate on only primary key type fields. + * A class used for testing {@link BloomFilteringPostingsFormat} with a concrete delegate + * (Lucene41). Creates a Bloom filter on ALL fields and with tiny amounts of memory reserved for the + * filter. DO NOT USE IN A PRODUCTION APPLICATION This is not a realistic application of Bloom + * Filters as they ordinarily are larger and operate on only primary key type fields. */ public final class TestBloomFilteredLucenePostings extends PostingsFormat { - + private BloomFilteringPostingsFormat delegate; - + // Special class used to avoid OOM exceptions where Junit tests create many // fields. 
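// [Editor's sketch; assumes FuzzySet.createSetBasedOnQuality(int, float), so
// verify against your Lucene version] Contrast with LowMemoryBloomFactory
// below, which caps every field's filter at 1 KiB: a production-leaning
// factory would size the filter from the expected number of unique keys.
static class MaxDocBloomFactory extends BloomFilterFactory {
  @Override
  public FuzzySet getSetForField(SegmentWriteState state, FieldInfo info) {
    // target ~10% saturation for up to maxDoc unique keys (illustrative)
    return FuzzySet.createSetBasedOnQuality(state.segmentInfo.maxDoc(), 0.10f);
  }
}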
static class LowMemoryBloomFactory extends BloomFilterFactory { @Override - public FuzzySet getSetForField(SegmentWriteState state,FieldInfo info) { + public FuzzySet getSetForField(SegmentWriteState state, FieldInfo info) { return FuzzySet.createSetBasedOnMaxMemory(1024); } - + @Override public boolean isSaturated(FuzzySet bloomFilter, FieldInfo fieldInfo) { // For test purposes always maintain the BloomFilter - even past the point @@ -52,22 +50,21 @@ public final class TestBloomFilteredLucenePostings extends PostingsFormat { return false; } } - + public TestBloomFilteredLucenePostings() { super("TestBloomFilteredLucenePostings"); - delegate = new BloomFilteringPostingsFormat(TestUtil.getDefaultPostingsFormat(), - new LowMemoryBloomFactory()); + delegate = + new BloomFilteringPostingsFormat( + TestUtil.getDefaultPostingsFormat(), new LowMemoryBloomFactory()); } - + @Override - public FieldsConsumer fieldsConsumer(SegmentWriteState state) - throws IOException { + public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException { return delegate.fieldsConsumer(state); } - + @Override - public FieldsProducer fieldsProducer(SegmentReadState state) - throws IOException { + public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException { return delegate.fieldsProducer(state); } diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/cheapbastard/CheapBastardCodec.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/cheapbastard/CheapBastardCodec.java index 065b0a0c3d2..1da4301ea16 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/cheapbastard/CheapBastardCodec.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/cheapbastard/CheapBastardCodec.java @@ -21,16 +21,16 @@ import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.util.TestUtil; /** Codec that tries to use as little ram as possible because he spent all his money on beer */ -// TODO: better name :) +// TODO: better name :) // but if we named it "LowMemory" in codecs/ package, it would be irresistible like optimize()! public class CheapBastardCodec extends FilterCodec { - + private final PostingsFormat postings = TestUtil.getDefaultPostingsFormat(100, 200); public CheapBastardCodec() { super("CheapBastard", TestUtil.getDefaultCodec()); } - + @Override public PostingsFormat postingsFormat() { return postings; diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/cheapbastard/package-info.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/cheapbastard/package-info.java index f9d245ef58b..4896800d61b 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/cheapbastard/package-info.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/cheapbastard/package-info.java @@ -15,8 +15,8 @@ * limitations under the License. */ -/** - * Codec that unreasonably tries to use as little RAM as possible. - * For testing, benchmarking, API purposes only! +/** + * Codec that unreasonably tries to use as little RAM as possible. For testing, benchmarking, API + * purposes only! 
*/ package org.apache.lucene.codecs.cheapbastard; diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/CompressingCodec.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/CompressingCodec.java index a59b2c89687..7539511a250 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/CompressingCodec.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/CompressingCodec.java @@ -16,8 +16,8 @@ */ package org.apache.lucene.codecs.compressing; +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import java.util.Random; - import org.apache.lucene.codecs.FilterCodec; import org.apache.lucene.codecs.StoredFieldsFormat; import org.apache.lucene.codecs.TermVectorsFormat; @@ -25,89 +25,109 @@ import org.apache.lucene.codecs.compressing.dummy.DummyCompressingCodec; import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.packed.DirectMonotonicWriter; -import com.carrotsearch.randomizedtesting.generators.RandomNumbers; - /** - * A codec that uses {@link CompressingStoredFieldsFormat} for its stored - * fields and delegates to the default codec for everything else. + * A codec that uses {@link CompressingStoredFieldsFormat} for its stored fields and delegates to + * the default codec for everything else. */ public abstract class CompressingCodec extends FilterCodec { - /** - * Create a random instance. - */ - public static CompressingCodec randomInstance(Random random, int chunkSize, int maxDocsPerChunk, boolean withSegmentSuffix, int blockShift) { + /** Create a random instance. */ + public static CompressingCodec randomInstance( + Random random, + int chunkSize, + int maxDocsPerChunk, + boolean withSegmentSuffix, + int blockShift) { switch (random.nextInt(6)) { - case 0: - return new FastCompressingCodec(chunkSize, maxDocsPerChunk, withSegmentSuffix, blockShift); - case 1: - return new FastDecompressionCompressingCodec(chunkSize, maxDocsPerChunk, withSegmentSuffix, blockShift); - case 2: - return new HighCompressionCompressingCodec(chunkSize, maxDocsPerChunk, withSegmentSuffix, blockShift); - case 3: - return new DummyCompressingCodec(chunkSize, maxDocsPerChunk, withSegmentSuffix, blockShift); - case 4: - return new DeflateWithPresetCompressingCodec(chunkSize, maxDocsPerChunk, withSegmentSuffix, blockShift); - case 5: - return new LZ4WithPresetCompressingCodec(chunkSize, maxDocsPerChunk, withSegmentSuffix, blockShift); - default: - throw new AssertionError(); + case 0: + return new FastCompressingCodec(chunkSize, maxDocsPerChunk, withSegmentSuffix, blockShift); + case 1: + return new FastDecompressionCompressingCodec( + chunkSize, maxDocsPerChunk, withSegmentSuffix, blockShift); + case 2: + return new HighCompressionCompressingCodec( + chunkSize, maxDocsPerChunk, withSegmentSuffix, blockShift); + case 3: + return new DummyCompressingCodec(chunkSize, maxDocsPerChunk, withSegmentSuffix, blockShift); + case 4: + return new DeflateWithPresetCompressingCodec( + chunkSize, maxDocsPerChunk, withSegmentSuffix, blockShift); + case 5: + return new LZ4WithPresetCompressingCodec( + chunkSize, maxDocsPerChunk, withSegmentSuffix, blockShift); + default: + throw new AssertionError(); } } - /** - * Creates a random {@link CompressingCodec} that is using an empty segment - * suffix - */ + /** Creates a random {@link CompressingCodec} that is using an empty segment suffix */ public static CompressingCodec randomInstance(Random random) { - final int chunkSize = random.nextBoolean() ? 
RandomNumbers.randomIntBetween(random, 10, 100) : RandomNumbers.randomIntBetween(random, 10, 1 << 15); - final int chunkDocs = random.nextBoolean() ? RandomNumbers.randomIntBetween(random, 1, 10) : RandomNumbers.randomIntBetween(random, 64, 1024); - final int blockSize = random.nextBoolean() - ? RandomNumbers.randomIntBetween(random, DirectMonotonicWriter.MIN_BLOCK_SHIFT, 10) - : RandomNumbers.randomIntBetween(random, DirectMonotonicWriter.MIN_BLOCK_SHIFT, DirectMonotonicWriter.MAX_BLOCK_SHIFT); + final int chunkSize = + random.nextBoolean() + ? RandomNumbers.randomIntBetween(random, 10, 100) + : RandomNumbers.randomIntBetween(random, 10, 1 << 15); + final int chunkDocs = + random.nextBoolean() + ? RandomNumbers.randomIntBetween(random, 1, 10) + : RandomNumbers.randomIntBetween(random, 64, 1024); + final int blockSize = + random.nextBoolean() + ? RandomNumbers.randomIntBetween(random, DirectMonotonicWriter.MIN_BLOCK_SHIFT, 10) + : RandomNumbers.randomIntBetween( + random, + DirectMonotonicWriter.MIN_BLOCK_SHIFT, + DirectMonotonicWriter.MAX_BLOCK_SHIFT); return randomInstance(random, chunkSize, chunkDocs, false, blockSize); } - /** - * Creates a random {@link CompressingCodec} with more reasonable parameters for big tests. - */ + /** Creates a random {@link CompressingCodec} with more reasonable parameters for big tests. */ public static CompressingCodec reasonableInstance(Random random) { // e.g. defaults use 2^14 for FAST and ~ 2^16 for HIGH - final int chunkSize = TestUtil.nextInt(random, 1<<13, 1<<17); + final int chunkSize = TestUtil.nextInt(random, 1 << 13, 1 << 17); // e.g. defaults use 128 for FAST and 512 for HIGH - final int chunkDocs = TestUtil.nextInt(random, 1<<6, 1<<10); + final int chunkDocs = TestUtil.nextInt(random, 1 << 6, 1 << 10); // e.g. 
defaults use 1024 for both cases final int blockShift = TestUtil.nextInt(random, 8, 12); return randomInstance(random, chunkSize, chunkDocs, false, blockShift); } - - /** - * Creates a random {@link CompressingCodec} that is using a segment suffix - */ + + /** Creates a random {@link CompressingCodec} that is using a segment suffix */ public static CompressingCodec randomInstance(Random random, boolean withSegmentSuffix) { - return randomInstance(random, - RandomNumbers.randomIntBetween(random, 1, 1 << 15), - RandomNumbers.randomIntBetween(random, 64, 1024), - withSegmentSuffix, - RandomNumbers.randomIntBetween(random, 1, 1024)); + return randomInstance( + random, + RandomNumbers.randomIntBetween(random, 1, 1 << 15), + RandomNumbers.randomIntBetween(random, 64, 1024), + withSegmentSuffix, + RandomNumbers.randomIntBetween(random, 1, 1024)); } private final CompressingStoredFieldsFormat storedFieldsFormat; private final CompressingTermVectorsFormat termVectorsFormat; - /** - * Creates a compressing codec with a given segment suffix - */ - public CompressingCodec(String name, String segmentSuffix, CompressionMode compressionMode, int chunkSize, int maxDocsPerChunk, int blockShift) { + /** Creates a compressing codec with a given segment suffix */ + public CompressingCodec( + String name, + String segmentSuffix, + CompressionMode compressionMode, + int chunkSize, + int maxDocsPerChunk, + int blockShift) { super(name, TestUtil.getDefaultCodec()); - this.storedFieldsFormat = new CompressingStoredFieldsFormat(name, segmentSuffix, compressionMode, chunkSize, maxDocsPerChunk, blockShift); - this.termVectorsFormat = new CompressingTermVectorsFormat(name, segmentSuffix, compressionMode, chunkSize, blockShift); + this.storedFieldsFormat = + new CompressingStoredFieldsFormat( + name, segmentSuffix, compressionMode, chunkSize, maxDocsPerChunk, blockShift); + this.termVectorsFormat = + new CompressingTermVectorsFormat( + name, segmentSuffix, compressionMode, chunkSize, blockShift); } - - /** - * Creates a compressing codec with an empty segment suffix - */ - public CompressingCodec(String name, CompressionMode compressionMode, int chunkSize, int maxDocsPerChunk, int blockSize) { + + /** Creates a compressing codec with an empty segment suffix */ + public CompressingCodec( + String name, + CompressionMode compressionMode, + int chunkSize, + int maxDocsPerChunk, + int blockSize) { this(name, "", compressionMode, chunkSize, maxDocsPerChunk, blockSize); } @@ -123,6 +143,11 @@ public abstract class CompressingCodec extends FilterCodec { @Override public String toString() { - return getName() + "(storedFieldsFormat=" + storedFieldsFormat + ", termVectorsFormat=" + termVectorsFormat + ")"; + return getName() + + "(storedFieldsFormat=" + + storedFieldsFormat + + ", termVectorsFormat=" + + termVectorsFormat + + ")"; } } diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/DeflateWithPresetCompressingCodec.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/DeflateWithPresetCompressingCodec.java index cf20279be1c..dbc21e3748f 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/DeflateWithPresetCompressingCodec.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/DeflateWithPresetCompressingCodec.java @@ -22,15 +22,19 @@ import org.apache.lucene.codecs.lucene87.DeflateWithPresetDictCompressionMode; public class DeflateWithPresetCompressingCodec extends CompressingCodec { /** Constructor that allows to configure 
the chunk size. */ - public DeflateWithPresetCompressingCodec(int chunkSize, int maxDocsPerChunk, boolean withSegmentSuffix, int blockSize) { - super("DeflateWithPresetCompressingStoredFieldsData", - withSegmentSuffix ? "DeflateWithPresetCompressingStoredFields" : "", - new DeflateWithPresetDictCompressionMode(), chunkSize, maxDocsPerChunk, blockSize); + public DeflateWithPresetCompressingCodec( + int chunkSize, int maxDocsPerChunk, boolean withSegmentSuffix, int blockSize) { + super( + "DeflateWithPresetCompressingStoredFieldsData", + withSegmentSuffix ? "DeflateWithPresetCompressingStoredFields" : "", + new DeflateWithPresetDictCompressionMode(), + chunkSize, + maxDocsPerChunk, + blockSize); } /** No-arg constructor. */ public DeflateWithPresetCompressingCodec() { - this(1<<18, 512, false, 10); + this(1 << 18, 512, false, 10); } - } diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/FastCompressingCodec.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/FastCompressingCodec.java index 337eaeac9dd..073f9aaa0eb 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/FastCompressingCodec.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/FastCompressingCodec.java @@ -20,10 +20,15 @@ package org.apache.lucene.codecs.compressing; public class FastCompressingCodec extends CompressingCodec { /** Constructor that allows to configure the chunk size. */ - public FastCompressingCodec(int chunkSize, int maxDocsPerChunk, boolean withSegmentSuffix, int blockSize) { - super("FastCompressingStoredFieldsData", - withSegmentSuffix ? "FastCompressingStoredFields" : "", - CompressionMode.FAST, chunkSize, maxDocsPerChunk, blockSize); + public FastCompressingCodec( + int chunkSize, int maxDocsPerChunk, boolean withSegmentSuffix, int blockSize) { + super( + "FastCompressingStoredFieldsData", + withSegmentSuffix ? "FastCompressingStoredFields" : "", + CompressionMode.FAST, + chunkSize, + maxDocsPerChunk, + blockSize); } /** Default constructor. */ diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/FastDecompressionCompressingCodec.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/FastDecompressionCompressingCodec.java index 64454f48682..6c5f6a78351 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/FastDecompressionCompressingCodec.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/FastDecompressionCompressingCodec.java @@ -20,10 +20,15 @@ package org.apache.lucene.codecs.compressing; public class FastDecompressionCompressingCodec extends CompressingCodec { /** Constructor that allows to configure the chunk size. */ - public FastDecompressionCompressingCodec(int chunkSize, int maxDocsPerChunk, boolean withSegmentSuffix, int blockSize) { - super("FastDecompressionCompressingStoredFieldsData", - withSegmentSuffix ? "FastDecompressionCompressingStoredFields" : "", - CompressionMode.FAST_DECOMPRESSION, chunkSize, maxDocsPerChunk, blockSize); + public FastDecompressionCompressingCodec( + int chunkSize, int maxDocsPerChunk, boolean withSegmentSuffix, int blockSize) { + super( + "FastDecompressionCompressingStoredFieldsData", + withSegmentSuffix ? "FastDecompressionCompressingStoredFields" : "", + CompressionMode.FAST_DECOMPRESSION, + chunkSize, + maxDocsPerChunk, + blockSize); } /** Default constructor. 
*/ diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/HighCompressionCompressingCodec.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/HighCompressionCompressingCodec.java index 7453ff30467..25fdaad3ba9 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/HighCompressionCompressingCodec.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/HighCompressionCompressingCodec.java @@ -20,10 +20,15 @@ package org.apache.lucene.codecs.compressing; public class HighCompressionCompressingCodec extends CompressingCodec { /** Constructor that allows to configure the chunk size. */ - public HighCompressionCompressingCodec(int chunkSize, int maxDocsPerChunk, boolean withSegmentSuffix, int blockShift) { - super("HighCompressionCompressingStoredFieldsData", - withSegmentSuffix ? "HighCompressionCompressingStoredFields" : "", - CompressionMode.HIGH_COMPRESSION, chunkSize, maxDocsPerChunk, blockShift); + public HighCompressionCompressingCodec( + int chunkSize, int maxDocsPerChunk, boolean withSegmentSuffix, int blockShift) { + super( + "HighCompressionCompressingStoredFieldsData", + withSegmentSuffix ? "HighCompressionCompressingStoredFields" : "", + CompressionMode.HIGH_COMPRESSION, + chunkSize, + maxDocsPerChunk, + blockShift); } /** Default constructor. */ diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/LZ4WithPresetCompressingCodec.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/LZ4WithPresetCompressingCodec.java index 690d26c80e9..a51493b45e1 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/LZ4WithPresetCompressingCodec.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/LZ4WithPresetCompressingCodec.java @@ -22,15 +22,19 @@ import org.apache.lucene.codecs.lucene87.LZ4WithPresetDictCompressionMode; public class LZ4WithPresetCompressingCodec extends CompressingCodec { /** Constructor that allows to configure the chunk size. */ - public LZ4WithPresetCompressingCodec(int chunkSize, int maxDocsPerChunk, boolean withSegmentSuffix, int blockSize) { - super("LZ4WithPresetCompressingStoredFieldsData", - withSegmentSuffix ? "DeflateWithPresetCompressingStoredFields" : "", - new LZ4WithPresetDictCompressionMode(), chunkSize, maxDocsPerChunk, blockSize); + public LZ4WithPresetCompressingCodec( + int chunkSize, int maxDocsPerChunk, boolean withSegmentSuffix, int blockSize) { + super( + "LZ4WithPresetCompressingStoredFieldsData", + withSegmentSuffix ? "DeflateWithPresetCompressingStoredFields" : "", + new LZ4WithPresetDictCompressionMode(), + chunkSize, + maxDocsPerChunk, + blockSize); } /** No-arg constructor. 
*/ public LZ4WithPresetCompressingCodec() { - this(1<<18, 512, false, 10); + this(1 << 18, 512, false, 10); } - } diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/dummy/DummyCompressingCodec.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/dummy/DummyCompressingCodec.java index 4b414dac6ca..1d838ee2d01 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/dummy/DummyCompressingCodec.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/dummy/DummyCompressingCodec.java @@ -17,7 +17,6 @@ package org.apache.lucene.codecs.compressing.dummy; import java.io.IOException; - import org.apache.lucene.codecs.compressing.CompressingCodec; import org.apache.lucene.codecs.compressing.CompressionMode; import org.apache.lucene.codecs.compressing.Compressor; @@ -32,68 +31,73 @@ import org.apache.lucene.util.BytesRef; // visible enough to let people write their own CompressionMode public class DummyCompressingCodec extends CompressingCodec { - public static final CompressionMode DUMMY = new CompressionMode() { + public static final CompressionMode DUMMY = + new CompressionMode() { - @Override - public Compressor newCompressor() { - return DUMMY_COMPRESSOR; - } + @Override + public Compressor newCompressor() { + return DUMMY_COMPRESSOR; + } - @Override - public Decompressor newDecompressor() { - return DUMMY_DECOMPRESSOR; - } + @Override + public Decompressor newDecompressor() { + return DUMMY_DECOMPRESSOR; + } - @Override - public String toString() { - return "DUMMY"; - } + @Override + public String toString() { + return "DUMMY"; + } + }; - }; + private static final Decompressor DUMMY_DECOMPRESSOR = + new Decompressor() { - private static final Decompressor DUMMY_DECOMPRESSOR = new Decompressor() { + @Override + public void decompress( + DataInput in, int originalLength, int offset, int length, BytesRef bytes) + throws IOException { + assert offset + length <= originalLength; + if (bytes.bytes.length < originalLength) { + bytes.bytes = new byte[ArrayUtil.oversize(originalLength, 1)]; + } + in.readBytes(bytes.bytes, 0, offset + length); + bytes.offset = offset; + bytes.length = length; + } - @Override - public void decompress(DataInput in, int originalLength, - int offset, int length, BytesRef bytes) throws IOException { - assert offset + length <= originalLength; - if (bytes.bytes.length < originalLength) { - bytes.bytes = new byte[ArrayUtil.oversize(originalLength, 1)]; - } - in.readBytes(bytes.bytes, 0, offset + length); - bytes.offset = offset; - bytes.length = length; - } + @Override + public Decompressor clone() { + return this; + } + }; - @Override - public Decompressor clone() { - return this; - } + private static final Compressor DUMMY_COMPRESSOR = + new Compressor() { - }; + @Override + public void compress(byte[] bytes, int off, int len, DataOutput out) throws IOException { + out.writeBytes(bytes, off, len); + } - private static final Compressor DUMMY_COMPRESSOR = new Compressor() { - - @Override - public void compress(byte[] bytes, int off, int len, DataOutput out) throws IOException { - out.writeBytes(bytes, off, len); - } - - @Override - public void close() throws IOException {}; - - }; + @Override + public void close() throws IOException {} + }; /** Constructor that allows to configure the chunk size. */ - public DummyCompressingCodec(int chunkSize, int maxDocsPerChunk, boolean withSegmentSuffix, int blockSize) { - super("DummyCompressingStoredFieldsData", - withSegmentSuffix ? 
"DummyCompressingStoredFields" : "", - DUMMY, chunkSize, maxDocsPerChunk, blockSize); + public DummyCompressingCodec( + int chunkSize, int maxDocsPerChunk, boolean withSegmentSuffix, int blockSize) { + super( + "DummyCompressingStoredFieldsData", + withSegmentSuffix ? "DummyCompressingStoredFields" : "", + DUMMY, + chunkSize, + maxDocsPerChunk, + blockSize); } /** Default constructor. */ public DummyCompressingCodec() { this(1 << 14, 128, false, 10); } - } diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/dummy/package-info.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/dummy/package-info.java index f8fcbdfd55e..087536d545f 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/dummy/package-info.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/dummy/package-info.java @@ -15,7 +15,5 @@ * limitations under the License. */ -/** - * Dummy CompressingCodec implementation used for testing. - */ +/** Dummy CompressingCodec implementation used for testing. */ package org.apache.lucene.codecs.compressing.dummy; diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyCodec.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyCodec.java index 086a574ba83..2b49306f7f2 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyCodec.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyCodec.java @@ -17,7 +17,6 @@ package org.apache.lucene.codecs.cranky; import java.util.Random; - import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.CompoundFormat; import org.apache.lucene.codecs.DocValuesFormat; @@ -34,11 +33,8 @@ import org.apache.lucene.codecs.TermVectorsFormat; /** Codec for testing that throws random IOExceptions */ public class CrankyCodec extends FilterCodec { final Random random; - - /** - * Wrap the provided codec with crankiness. - * Try passing Asserting for the most fun. - */ + + /** Wrap the provided codec with crankiness. Try passing Asserting for the most fun. 
*/ public CrankyCodec(Codec delegate, Random random) { // we impersonate the passed-in codec, so we don't need to be in SPI, // and so we dont change file formats diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyCompoundFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyCompoundFormat.java index 2114135b261..3ff3941371e 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyCompoundFormat.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyCompoundFormat.java @@ -18,7 +18,6 @@ package org.apache.lucene.codecs.cranky; import java.io.IOException; import java.util.Random; - import org.apache.lucene.codecs.CompoundDirectory; import org.apache.lucene.codecs.CompoundFormat; import org.apache.lucene.index.SegmentInfo; @@ -28,17 +27,18 @@ import org.apache.lucene.store.IOContext; class CrankyCompoundFormat extends CompoundFormat { CompoundFormat delegate; Random random; - + CrankyCompoundFormat(CompoundFormat delegate, Random random) { this.delegate = delegate; this.random = random; } - + @Override - public CompoundDirectory getCompoundReader(Directory dir, SegmentInfo si, IOContext context) throws IOException { + public CompoundDirectory getCompoundReader(Directory dir, SegmentInfo si, IOContext context) + throws IOException { return delegate.getCompoundReader(dir, si, context); } - + @Override public void write(Directory dir, SegmentInfo si, IOContext context) throws IOException { if (random.nextInt(100) == 0) { diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyDocValuesFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyDocValuesFormat.java index 10ae5362c07..6e74b0d725f 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyDocValuesFormat.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyDocValuesFormat.java @@ -18,7 +18,6 @@ package org.apache.lucene.codecs.cranky; import java.io.IOException; import java.util.Random; - import org.apache.lucene.codecs.DocValuesConsumer; import org.apache.lucene.codecs.DocValuesFormat; import org.apache.lucene.codecs.DocValuesProducer; @@ -29,7 +28,7 @@ import org.apache.lucene.index.SegmentWriteState; class CrankyDocValuesFormat extends DocValuesFormat { final DocValuesFormat delegate; final Random random; - + CrankyDocValuesFormat(DocValuesFormat delegate, Random random) { // we impersonate the passed-in codec, so we don't need to be in SPI, // and so we dont change file formats @@ -50,16 +49,16 @@ class CrankyDocValuesFormat extends DocValuesFormat { public DocValuesProducer fieldsProducer(SegmentReadState state) throws IOException { return delegate.fieldsProducer(state); } - + static class CrankyDocValuesConsumer extends DocValuesConsumer { final DocValuesConsumer delegate; final Random random; - + CrankyDocValuesConsumer(DocValuesConsumer delegate, Random random) { this.delegate = delegate; this.random = random; } - + @Override public void close() throws IOException { delegate.close(); @@ -69,7 +68,8 @@ class CrankyDocValuesFormat extends DocValuesFormat { } @Override - public void addNumericField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { + public void addNumericField(FieldInfo field, DocValuesProducer valuesProducer) + throws IOException { if (random.nextInt(100) == 0) { throw new IOException("Fake IOException from DocValuesConsumer.addNumericField()"); } @@ -77,7 +77,8 @@ class 
CrankyDocValuesFormat extends DocValuesFormat { } @Override - public void addBinaryField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { + public void addBinaryField(FieldInfo field, DocValuesProducer valuesProducer) + throws IOException { if (random.nextInt(100) == 0) { throw new IOException("Fake IOException from DocValuesConsumer.addBinaryField()"); } @@ -85,15 +86,17 @@ class CrankyDocValuesFormat extends DocValuesFormat { } @Override - public void addSortedField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { + public void addSortedField(FieldInfo field, DocValuesProducer valuesProducer) + throws IOException { if (random.nextInt(100) == 0) { throw new IOException("Fake IOException from DocValuesConsumer.addSortedField()"); } delegate.addSortedField(field, valuesProducer); } - + @Override - public void addSortedNumericField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { + public void addSortedNumericField(FieldInfo field, DocValuesProducer valuesProducer) + throws IOException { if (random.nextInt(100) == 0) { throw new IOException("Fake IOException from DocValuesConsumer.addSortedNumericField()"); } @@ -101,7 +104,8 @@ class CrankyDocValuesFormat extends DocValuesFormat { } @Override - public void addSortedSetField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { + public void addSortedSetField(FieldInfo field, DocValuesProducer valuesProducer) + throws IOException { if (random.nextInt(100) == 0) { throw new IOException("Fake IOException from DocValuesConsumer.addSortedSetField()"); } diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyFieldInfosFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyFieldInfosFormat.java index 4c5ab598056..d442fb0cb44 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyFieldInfosFormat.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyFieldInfosFormat.java @@ -18,7 +18,6 @@ package org.apache.lucene.codecs.cranky; import java.io.IOException; import java.util.Random; - import org.apache.lucene.codecs.FieldInfosFormat; import org.apache.lucene.index.FieldInfos; import org.apache.lucene.index.SegmentInfo; @@ -28,19 +27,27 @@ import org.apache.lucene.store.IOContext; class CrankyFieldInfosFormat extends FieldInfosFormat { final FieldInfosFormat delegate; final Random random; - + CrankyFieldInfosFormat(FieldInfosFormat delegate, Random random) { this.delegate = delegate; this.random = random; } @Override - public FieldInfos read(Directory directory, SegmentInfo segmentInfo, String segmentSuffix, IOContext iocontext) throws IOException { + public FieldInfos read( + Directory directory, SegmentInfo segmentInfo, String segmentSuffix, IOContext iocontext) + throws IOException { return delegate.read(directory, segmentInfo, segmentSuffix, iocontext); } @Override - public void write(Directory directory, SegmentInfo segmentInfo, String segmentSuffix, FieldInfos infos, IOContext context) throws IOException { + public void write( + Directory directory, + SegmentInfo segmentInfo, + String segmentSuffix, + FieldInfos infos, + IOContext context) + throws IOException { if (random.nextInt(100) == 0) { throw new IOException("Fake IOException from FieldInfosFormat.getFieldInfosWriter()"); } diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyLiveDocsFormat.java 
b/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyLiveDocsFormat.java index 027050e635f..44b8b578263 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyLiveDocsFormat.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyLiveDocsFormat.java @@ -19,7 +19,6 @@ package org.apache.lucene.codecs.cranky; import java.io.IOException; import java.util.Collection; import java.util.Random; - import org.apache.lucene.codecs.LiveDocsFormat; import org.apache.lucene.index.SegmentCommitInfo; import org.apache.lucene.store.Directory; @@ -29,19 +28,22 @@ import org.apache.lucene.util.Bits; class CrankyLiveDocsFormat extends LiveDocsFormat { final LiveDocsFormat delegate; final Random random; - + CrankyLiveDocsFormat(LiveDocsFormat delegate, Random random) { this.delegate = delegate; this.random = random; } @Override - public Bits readLiveDocs(Directory dir, SegmentCommitInfo info, IOContext context) throws IOException { + public Bits readLiveDocs(Directory dir, SegmentCommitInfo info, IOContext context) + throws IOException { return delegate.readLiveDocs(dir, info, context); } @Override - public void writeLiveDocs(Bits bits, Directory dir, SegmentCommitInfo info, int newDelCount, IOContext context) throws IOException { + public void writeLiveDocs( + Bits bits, Directory dir, SegmentCommitInfo info, int newDelCount, IOContext context) + throws IOException { if (random.nextInt(100) == 0) { throw new IOException("Fake IOException from LiveDocsFormat.writeLiveDocs()"); } diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyNormsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyNormsFormat.java index 9168ce0c39b..44998b10119 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyNormsFormat.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyNormsFormat.java @@ -18,7 +18,6 @@ package org.apache.lucene.codecs.cranky; import java.io.IOException; import java.util.Random; - import org.apache.lucene.codecs.NormsConsumer; import org.apache.lucene.codecs.NormsFormat; import org.apache.lucene.codecs.NormsProducer; @@ -29,7 +28,7 @@ import org.apache.lucene.index.SegmentWriteState; class CrankyNormsFormat extends NormsFormat { final NormsFormat delegate; final Random random; - + CrankyNormsFormat(NormsFormat delegate, Random random) { this.delegate = delegate; this.random = random; @@ -47,16 +46,16 @@ class CrankyNormsFormat extends NormsFormat { public NormsProducer normsProducer(SegmentReadState state) throws IOException { return delegate.normsProducer(state); } - + static class CrankyNormsConsumer extends NormsConsumer { final NormsConsumer delegate; final Random random; - + CrankyNormsConsumer(NormsConsumer delegate, Random random) { this.delegate = delegate; this.random = random; } - + @Override public void close() throws IOException { delegate.close(); diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyPointsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyPointsFormat.java index 9fc2c26787a..89a18b272cf 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyPointsFormat.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyPointsFormat.java @@ -18,7 +18,6 @@ package org.apache.lucene.codecs.cranky; import java.io.IOException; import java.util.Random; - import org.apache.lucene.codecs.PointsFormat; import 
org.apache.lucene.codecs.PointsReader; import org.apache.lucene.codecs.PointsWriter; @@ -31,7 +30,7 @@ import org.apache.lucene.index.SegmentWriteState; class CrankyPointsFormat extends PointsFormat { PointsFormat delegate; Random random; - + CrankyPointsFormat(PointsFormat delegate, Random random) { this.delegate = delegate; this.random = random; @@ -60,7 +59,7 @@ class CrankyPointsFormat extends PointsFormat { public void writeField(FieldInfo fieldInfo, PointsReader values) throws IOException { if (random.nextInt(100) == 0) { throw new IOException("Fake IOException"); - } + } delegate.writeField(fieldInfo, values); } @@ -68,22 +67,22 @@ class CrankyPointsFormat extends PointsFormat { public void finish() throws IOException { if (random.nextInt(100) == 0) { throw new IOException("Fake IOException"); - } + } delegate.finish(); if (random.nextInt(100) == 0) { throw new IOException("Fake IOException"); - } + } } @Override public void merge(MergeState mergeState) throws IOException { if (random.nextInt(100) == 0) { throw new IOException("Fake IOException"); - } + } delegate.merge(mergeState); if (random.nextInt(100) == 0) { throw new IOException("Fake IOException"); - } + } } @Override @@ -91,13 +90,14 @@ class CrankyPointsFormat extends PointsFormat { delegate.close(); if (random.nextInt(100) == 0) { throw new IOException("Fake IOException"); - } + } } } static class CrankyPointsReader extends PointsReader { final PointsReader delegate; final Random random; + public CrankyPointsReader(PointsReader delegate, Random random) { this.delegate = delegate; this.random = random; @@ -111,7 +111,7 @@ class CrankyPointsFormat extends PointsFormat { delegate.checkIntegrity(); if (random.nextInt(100) == 0) { throw new IOException("Fake IOException"); - } + } } @Override @@ -130,7 +130,7 @@ class CrankyPointsFormat extends PointsFormat { delegate.intersect(visitor); if (random.nextInt(100) == 0) { throw new IOException("Fake IOException"); - } + } } @Override @@ -187,7 +187,6 @@ class CrankyPointsFormat extends PointsFormat { public int getDocCount() { return delegate.getDocCount(); } - }; } @@ -196,7 +195,7 @@ class CrankyPointsFormat extends PointsFormat { delegate.close(); if (random.nextInt(100) == 0) { throw new IOException("Fake IOException"); - } + } } @Override diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyPostingsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyPostingsFormat.java index 00e168b4b27..715bd2163a5 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyPostingsFormat.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyPostingsFormat.java @@ -18,7 +18,6 @@ package org.apache.lucene.codecs.cranky; import java.io.IOException; import java.util.Random; - import org.apache.lucene.codecs.FieldsConsumer; import org.apache.lucene.codecs.FieldsProducer; import org.apache.lucene.codecs.NormsProducer; @@ -30,7 +29,7 @@ import org.apache.lucene.index.SegmentWriteState; class CrankyPostingsFormat extends PostingsFormat { final PostingsFormat delegate; final Random random; - + CrankyPostingsFormat(PostingsFormat delegate, Random random) { // we impersonate the passed-in codec, so we don't need to be in SPI, // and so we dont change file formats @@ -38,12 +37,12 @@ class CrankyPostingsFormat extends PostingsFormat { this.delegate = delegate; this.random = random; } - + @Override public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException { if 
(random.nextInt(100) == 0) { throw new IOException("Fake IOException from PostingsFormat.fieldsConsumer()"); - } + } return new CrankyFieldsConsumer(delegate.fieldsConsumer(state), random); } @@ -51,21 +50,21 @@ class CrankyPostingsFormat extends PostingsFormat { public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException { return delegate.fieldsProducer(state); } - + static class CrankyFieldsConsumer extends FieldsConsumer { final FieldsConsumer delegate; final Random random; - + CrankyFieldsConsumer(FieldsConsumer delegate, Random random) { this.delegate = delegate; this.random = random; } - + @Override public void write(Fields fields, NormsProducer norms) throws IOException { if (random.nextInt(100) == 0) { throw new IOException("Fake IOException from FieldsConsumer.write()"); - } + } delegate.write(fields, norms); } @@ -74,7 +73,7 @@ class CrankyPostingsFormat extends PostingsFormat { delegate.close(); if (random.nextInt(100) == 0) { throw new IOException("Fake IOException from FieldsConsumer.close()"); - } + } } } } diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankySegmentInfoFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankySegmentInfoFormat.java index 971ae02ce9b..664a69a3691 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankySegmentInfoFormat.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankySegmentInfoFormat.java @@ -18,7 +18,6 @@ package org.apache.lucene.codecs.cranky; import java.io.IOException; import java.util.Random; - import org.apache.lucene.codecs.SegmentInfoFormat; import org.apache.lucene.index.SegmentInfo; import org.apache.lucene.store.Directory; @@ -27,14 +26,16 @@ import org.apache.lucene.store.IOContext; class CrankySegmentInfoFormat extends SegmentInfoFormat { final SegmentInfoFormat delegate; final Random random; - + CrankySegmentInfoFormat(SegmentInfoFormat delegate, Random random) { this.delegate = delegate; this.random = random; } - + @Override - public SegmentInfo read(Directory directory, String segmentName, byte[] segmentID, IOContext context) throws IOException { + public SegmentInfo read( + Directory directory, String segmentName, byte[] segmentID, IOContext context) + throws IOException { return delegate.read(directory, segmentName, segmentID, context); } diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyStoredFieldsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyStoredFieldsFormat.java index edd59839a1c..6984cd2f2d0 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyStoredFieldsFormat.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyStoredFieldsFormat.java @@ -19,7 +19,6 @@ package org.apache.lucene.codecs.cranky; import java.io.IOException; import java.util.Collection; import java.util.Random; - import org.apache.lucene.codecs.StoredFieldsFormat; import org.apache.lucene.codecs.StoredFieldsReader; import org.apache.lucene.codecs.StoredFieldsWriter; @@ -35,30 +34,32 @@ import org.apache.lucene.util.Accountable; class CrankyStoredFieldsFormat extends StoredFieldsFormat { final StoredFieldsFormat delegate; final Random random; - + CrankyStoredFieldsFormat(StoredFieldsFormat delegate, Random random) { this.delegate = delegate; this.random = random; } @Override - public StoredFieldsReader fieldsReader(Directory directory, SegmentInfo si, FieldInfos fn, IOContext context) throws 
IOException { + public StoredFieldsReader fieldsReader( + Directory directory, SegmentInfo si, FieldInfos fn, IOContext context) throws IOException { return delegate.fieldsReader(directory, si, fn, context); } @Override - public StoredFieldsWriter fieldsWriter(Directory directory, SegmentInfo si, IOContext context) throws IOException { + public StoredFieldsWriter fieldsWriter(Directory directory, SegmentInfo si, IOContext context) + throws IOException { if (random.nextInt(100) == 0) { throw new IOException("Fake IOException from StoredFieldsFormat.fieldsWriter()"); } return new CrankyStoredFieldsWriter(delegate.fieldsWriter(directory, si, context), random); } - + static class CrankyStoredFieldsWriter extends StoredFieldsWriter { - + final StoredFieldsWriter delegate; final Random random; - + CrankyStoredFieldsWriter(StoredFieldsWriter delegate, Random random) { this.delegate = delegate; this.random = random; @@ -79,7 +80,7 @@ class CrankyStoredFieldsFormat extends StoredFieldsFormat { } return super.merge(mergeState); } - + @Override public void close() throws IOException { delegate.close(); @@ -87,7 +88,7 @@ class CrankyStoredFieldsFormat extends StoredFieldsFormat { throw new IOException("Fake IOException from StoredFieldsWriter.close()"); } } - + // per doc/field methods: lower probability since they are invoked so many times. @Override @@ -97,7 +98,7 @@ class CrankyStoredFieldsFormat extends StoredFieldsFormat { } delegate.startDocument(); } - + @Override public void finishDocument() throws IOException { if (random.nextInt(10000) == 0) { diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyTermVectorsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyTermVectorsFormat.java index 375c0424bab..2701254be44 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyTermVectorsFormat.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyTermVectorsFormat.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.Collection; import java.util.Collections; import java.util.Random; - import org.apache.lucene.codecs.TermVectorsFormat; import org.apache.lucene.codecs.TermVectorsReader; import org.apache.lucene.codecs.TermVectorsWriter; @@ -37,34 +36,38 @@ import org.apache.lucene.util.BytesRef; class CrankyTermVectorsFormat extends TermVectorsFormat { final TermVectorsFormat delegate; final Random random; - + CrankyTermVectorsFormat(TermVectorsFormat delegate, Random random) { this.delegate = delegate; this.random = random; } @Override - public TermVectorsReader vectorsReader(Directory directory, SegmentInfo segmentInfo, FieldInfos fieldInfos, IOContext context) throws IOException { + public TermVectorsReader vectorsReader( + Directory directory, SegmentInfo segmentInfo, FieldInfos fieldInfos, IOContext context) + throws IOException { return delegate.vectorsReader(directory, segmentInfo, fieldInfos, context); } @Override - public TermVectorsWriter vectorsWriter(Directory directory, SegmentInfo segmentInfo, IOContext context) throws IOException { + public TermVectorsWriter vectorsWriter( + Directory directory, SegmentInfo segmentInfo, IOContext context) throws IOException { if (random.nextInt(100) == 0) { throw new IOException("Fake IOException from TermVectorsFormat.vectorsWriter()"); } - return new CrankyTermVectorsWriter(delegate.vectorsWriter(directory, segmentInfo, context), random); + return new CrankyTermVectorsWriter( + delegate.vectorsWriter(directory, segmentInfo, 
context), random); } - + static class CrankyTermVectorsWriter extends TermVectorsWriter { final TermVectorsWriter delegate; final Random random; - + CrankyTermVectorsWriter(TermVectorsWriter delegate, Random random) { this.delegate = delegate; this.random = random; } - + @Override public int merge(MergeState mergeState) throws IOException { if (random.nextInt(100) == 0) { @@ -98,7 +101,7 @@ class CrankyTermVectorsFormat extends TermVectorsFormat { } delegate.startDocument(numVectorFields); } - + @Override public void finishDocument() throws IOException { if (random.nextInt(10000) == 0) { @@ -106,9 +109,11 @@ class CrankyTermVectorsFormat extends TermVectorsFormat { } delegate.finishDocument(); } - + @Override - public void startField(FieldInfo info, int numTerms, boolean positions, boolean offsets, boolean payloads) throws IOException { + public void startField( + FieldInfo info, int numTerms, boolean positions, boolean offsets, boolean payloads) + throws IOException { if (random.nextInt(10000) == 0) { throw new IOException("Fake IOException from TermVectorsWriter.startField()"); } @@ -122,7 +127,7 @@ class CrankyTermVectorsFormat extends TermVectorsFormat { } delegate.finishField(); } - + @Override public void startTerm(BytesRef term, int freq) throws IOException { if (random.nextInt(10000) == 0) { @@ -138,9 +143,10 @@ class CrankyTermVectorsFormat extends TermVectorsFormat { } delegate.finishTerm(); } - + @Override - public void addPosition(int position, int startOffset, int endOffset, BytesRef payload) throws IOException { + public void addPosition(int position, int startOffset, int endOffset, BytesRef payload) + throws IOException { if (random.nextInt(10000) == 0) { throw new IOException("Fake IOException from TermVectorsWriter.addPosition()"); } diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/package-info.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/package-info.java index 676beacc4d3..a599238ef58 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/package-info.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/package-info.java @@ -15,7 +15,5 @@ * limitations under the License. */ -/** - * Codec for testing that throws random IOExceptions - */ +/** Codec for testing that throws random IOExceptions */ package org.apache.lucene.codecs.cranky; diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java index 939e8e7c1d1..b26beba6fd2 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java @@ -18,7 +18,6 @@ package org.apache.lucene.codecs.mockrandom; import java.io.IOException; import java.util.Random; - import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.codecs.FieldsConsumer; import org.apache.lucene.codecs.FieldsProducer; @@ -52,28 +51,26 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; -/** - * Randomly combines terms index impl w/ postings impls. - */ - +/** Randomly combines terms index impl w/ postings impls. 
*/ public final class MockRandomPostingsFormat extends PostingsFormat { private final Random seedRandom; private static final String SEED_EXT = "sd"; - + public MockRandomPostingsFormat() { // This ctor should *only* be used at read-time: get NPE if you use it! this(null); } - + public MockRandomPostingsFormat(Random random) { super("MockRandom"); if (random == null) { - this.seedRandom = new Random(0L) { - @Override - protected int next(int arg0) { - throw new IllegalStateException("Please use MockRandomPostingsFormat(Random)"); - } - }; + this.seedRandom = + new Random(0L) { + @Override + protected int next(int arg0) { + throw new IllegalStateException("Please use MockRandomPostingsFormat(Random)"); + } + }; } else { this.seedRandom = new Random(random.nextLong()); } @@ -94,26 +91,34 @@ public final class MockRandomPostingsFormat extends PostingsFormat { // NOTE: Currently not passed to postings writer. // before, it was being passed in wrongly as acceptableOverhead! int skipInterval = TestUtil.nextInt(seedRandom, minSkipInterval, 10); - + if (LuceneTestCase.VERBOSE) { System.out.println("MockRandomCodec: skipInterval=" + skipInterval); } - + final long seed = seedRandom.nextLong(); if (LuceneTestCase.VERBOSE) { - System.out.println("MockRandomCodec: writing to seg=" + state.segmentInfo.name + " formatID=" + state.segmentSuffix + " seed=" + seed); + System.out.println( + "MockRandomCodec: writing to seg=" + + state.segmentInfo.name + + " formatID=" + + state.segmentSuffix + + " seed=" + + seed); } - final String seedFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, SEED_EXT); - try(IndexOutput out = state.directory.createOutput(seedFileName, state.context)) { - CodecUtil.writeIndexHeader(out, "MockRandomSeed", 0, state.segmentInfo.getId(), state.segmentSuffix); + final String seedFileName = + IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, SEED_EXT); + try (IndexOutput out = state.directory.createOutput(seedFileName, state.context)) { + CodecUtil.writeIndexHeader( + out, "MockRandomSeed", 0, state.segmentInfo.getId(), state.segmentSuffix); out.writeLong(seed); CodecUtil.writeFooter(out); } final Random random = new Random(seed); - + random.nextInt(); // consume a random for buffersize PostingsWriterBase postingsWriter = new Lucene84PostingsWriter(state); @@ -141,7 +146,7 @@ public final class MockRandomPostingsFormat extends PostingsFormat { // TODO: would be nice to allow 1 but this is very // slow to write final int minTermsInBlock = TestUtil.nextInt(random, 2, 100); - final int maxTermsInBlock = Math.max(2, (minTermsInBlock-1)*2 + random.nextInt(100)); + final int maxTermsInBlock = Math.max(2, (minTermsInBlock - 1) * 2 + random.nextInt(100)); boolean success = false; try { @@ -165,7 +170,8 @@ public final class MockRandomPostingsFormat extends PostingsFormat { if (random.nextBoolean()) { int termIndexInterval = TestUtil.nextInt(random, 1, 100); if (LuceneTestCase.VERBOSE) { - System.out.println("MockRandomCodec: fixed-gap terms index (tii=" + termIndexInterval + ")"); + System.out.println( + "MockRandomCodec: fixed-gap terms index (tii=" + termIndexInterval + ")"); } indexWriter = new FixedGapTermsIndexWriter(state, termIndexInterval); } else { @@ -174,31 +180,32 @@ public final class MockRandomPostingsFormat extends PostingsFormat { if (n2 == 0) { final int tii = TestUtil.nextInt(random, 1, 100); selector = new VariableGapTermsIndexWriter.EveryNTermSelector(tii); - if (LuceneTestCase.VERBOSE) { + if 
(LuceneTestCase.VERBOSE) { System.out.println("MockRandomCodec: variable-gap terms index (tii=" + tii + ")"); } } else if (n2 == 1) { final int docFreqThresh = TestUtil.nextInt(random, 2, 100); final int tii = TestUtil.nextInt(random, 1, 100); - selector = new VariableGapTermsIndexWriter.EveryNOrDocFreqTermSelector(docFreqThresh, tii); + selector = + new VariableGapTermsIndexWriter.EveryNOrDocFreqTermSelector(docFreqThresh, tii); } else { final long seed2 = random.nextLong(); final int gap = TestUtil.nextInt(random, 2, 40); if (LuceneTestCase.VERBOSE) { - System.out.println("MockRandomCodec: random-gap terms index (max gap=" + gap + ")"); + System.out.println("MockRandomCodec: random-gap terms index (max gap=" + gap + ")"); } - selector = new VariableGapTermsIndexWriter.IndexTermSelector() { - final Random rand = new Random(seed2); + selector = + new VariableGapTermsIndexWriter.IndexTermSelector() { + final Random rand = new Random(seed2); - @Override - public boolean isIndexTerm(BytesRef term, TermStats stats) { - return rand.nextInt(gap) == gap/2; - } + @Override + public boolean isIndexTerm(BytesRef term, TermStats stats) { + return rand.nextInt(gap) == gap / 2; + } - @Override - public void newField(FieldInfo fieldInfo) { - } - }; + @Override + public void newField(FieldInfo fieldInfo) {} + }; } indexWriter = new VariableGapTermsIndexWriter(state, selector); } @@ -231,18 +238,19 @@ public final class MockRandomPostingsFormat extends PostingsFormat { // TODO: would be nice to allow 1 but this is very // slow to write final int minTermsInBlock = TestUtil.nextInt(random, 2, 100); - final int maxTermsInBlock = Math.max(2, (minTermsInBlock-1)*2 + random.nextInt(100)); + final int maxTermsInBlock = Math.max(2, (minTermsInBlock - 1) * 2 + random.nextInt(100)); boolean success = false; try { - fields = new OrdsBlockTreeTermsWriter(state, postingsWriter, minTermsInBlock, maxTermsInBlock); + fields = + new OrdsBlockTreeTermsWriter(state, postingsWriter, minTermsInBlock, maxTermsInBlock); success = true; } finally { if (!success) { postingsWriter.close(); } } - + } else { // BUG! 
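// Unreachable by construction: one of the random branches above must already have picked a
// terms-dictionary writer, so landing in this else means a branch value was added without a
// matching case; fail fast here rather than silently writing a broken segment.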
throw new AssertionError(); @@ -254,18 +262,26 @@ public final class MockRandomPostingsFormat extends PostingsFormat { @Override public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException { - final String seedFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, SEED_EXT); + final String seedFileName = + IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, SEED_EXT); final ChecksumIndexInput in = state.directory.openChecksumInput(seedFileName, state.context); - CodecUtil.checkIndexHeader(in, "MockRandomSeed", 0, 0, state.segmentInfo.getId(), state.segmentSuffix); + CodecUtil.checkIndexHeader( + in, "MockRandomSeed", 0, 0, state.segmentInfo.getId(), state.segmentSuffix); final long seed = in.readLong(); CodecUtil.checkFooter(in); if (LuceneTestCase.VERBOSE) { - System.out.println("MockRandomCodec: reading from seg=" + state.segmentInfo.name + " formatID=" + state.segmentSuffix + " seed=" + seed); + System.out.println( + "MockRandomCodec: reading from seg=" + + state.segmentInfo.name + + " formatID=" + + state.segmentSuffix + + " seed=" + + seed); } in.close(); final Random random = new Random(seed); - + int readBufferSize = TestUtil.nextInt(random, 1, 4096); if (LuceneTestCase.VERBOSE) { System.out.println("MockRandomCodec: readBufferSize=" + readBufferSize); @@ -328,7 +344,6 @@ public final class MockRandomPostingsFormat extends PostingsFormat { System.out.println("MockRandomCodec: variable-gap terms index"); } indexReader = new VariableGapTermsIndexReader(state); - } success = true; diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/package-info.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/package-info.java index 8a755e46273..5dcfc89fbd4 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/package-info.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/package-info.java @@ -15,7 +15,5 @@ * limitations under the License. */ -/** - * Frankenstein codec for testing that pieces together random components. - */ +/** Frankenstein codec for testing that pieces together random components. 
*/ package org.apache.lucene.codecs.mockrandom; diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java index 0e46b3b1ea2..0dc92585836 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java @@ -27,7 +27,6 @@ import java.util.Map; import java.util.SortedMap; import java.util.TreeMap; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.codecs.FieldsConsumer; import org.apache.lucene.codecs.FieldsProducer; @@ -35,12 +34,12 @@ import org.apache.lucene.codecs.NormsProducer; import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.codecs.TermStats; import org.apache.lucene.index.BaseTermsEnum; -import org.apache.lucene.index.PostingsEnum; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.Fields; import org.apache.lucene.index.ImpactsEnum; import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.PostingsEnum; import org.apache.lucene.index.SegmentReadState; import org.apache.lucene.index.SegmentWriteState; import org.apache.lucene.index.SlowImpactsEnum; @@ -55,21 +54,21 @@ import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.RamUsageEstimator; -/** Stores all postings data in RAM, but writes a small - * token (header + single int) to identify which "slot" the - * index is using in RAM HashMap. +/** + * Stores all postings data in RAM, but writes a small token (header + single int) to identify which + * "slot" the index is using in RAM HashMap. * - * NOTE: this codec sorts terms by reverse-unicode-order! */ - + *
<p>NOTE: this codec sorts terms by reverse-unicode-order! + */ public final class RAMOnlyPostingsFormat extends PostingsFormat { public RAMOnlyPostingsFormat() { super("RAMOnly"); } - + // Postings state: static class RAMPostings extends FieldsProducer { - final Map<String,RAMField> fieldToTerms = new TreeMap<>(); + final Map<String, RAMField> fieldToTerms = new TreeMap<>(); @Override public Terms terms(String field) { @@ -87,18 +86,17 @@ public final class RAMOnlyPostingsFormat extends PostingsFormat { } @Override - public void close() { - } + public void close() {} @Override public long ramBytesUsed() { long sizeInBytes = 0; - for(RAMField field : fieldToTerms.values()) { + for (RAMField field : fieldToTerms.values()) { sizeInBytes += field.ramBytesUsed(); } return sizeInBytes; } - + @Override public Collection<Accountable> getChildResources() { return Accountables.namedAccountables("field", fieldToTerms); @@ -106,11 +104,11 @@ public final class RAMOnlyPostingsFormat extends PostingsFormat { @Override public void checkIntegrity() throws IOException {} - } + } static class RAMField extends Terms implements Accountable { final String field; - final SortedMap<String,RAMTerm> termToDocs = new TreeMap<>(); + final SortedMap<String, RAMTerm> termToDocs = new TreeMap<>(); long sumTotalTermFreq; long sumDocFreq; int docCount; @@ -124,7 +122,7 @@ public final class RAMOnlyPostingsFormat extends PostingsFormat { @Override public long ramBytesUsed() { long sizeInBytes = 0; - for(RAMTerm term : termToDocs.values()) { + for (RAMTerm term : termToDocs.values()) { sizeInBytes += term.ramBytesUsed(); } return sizeInBytes; @@ -144,7 +142,7 @@ public final class RAMOnlyPostingsFormat extends PostingsFormat { public long getSumDocFreq() throws IOException { return sumDocFreq; } - + @Override public int getDocCount() throws IOException { return docCount; @@ -162,14 +160,15 @@ public final class RAMOnlyPostingsFormat extends PostingsFormat { @Override public boolean hasOffsets() { - return info.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0; + return info.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) + >= 0; } @Override public boolean hasPositions() { return info.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0; } - + @Override public boolean hasPayloads() { return info.hasPayloads(); @@ -180,6 +179,7 @@ public final class RAMOnlyPostingsFormat extends PostingsFormat { final String term; long totalTermFreq; final List<RAMDoc> docs = new ArrayList<>(); + public RAMTerm(String term) { this.term = term; } @@ -187,7 +187,7 @@ public final class RAMOnlyPostingsFormat extends PostingsFormat { @Override public long ramBytesUsed() { long sizeInBytes = 0; - for(RAMDoc rDoc : docs) { + for (RAMDoc rDoc : docs) { sizeInBytes += rDoc.ramBytesUsed(); } return sizeInBytes; @@ -207,11 +207,11 @@ public final class RAMOnlyPostingsFormat extends PostingsFormat { @Override public long ramBytesUsed() { long sizeInBytes = 0; - sizeInBytes += (positions!=null) ? RamUsageEstimator.sizeOf(positions) : 0; - + sizeInBytes += (positions != null) ? RamUsageEstimator.sizeOf(positions) : 0; + if (payloads != null) { - for(byte[] payload: payloads) { - sizeInBytes += (payload!=null) ?
RamUsageEstimator.sizeOf(payload) : 0; } } return sizeInBytes; @@ -232,7 +232,7 @@ public final class RAMOnlyPostingsFormat extends PostingsFormat { @Override public void write(Fields fields, NormsProducer norms) throws IOException { - for(String field : fields) { + for (String field : fields) { Terms terms = fields.terms(field); if (terms == null) { @@ -242,7 +242,10 @@ public final class RAMOnlyPostingsFormat extends PostingsFormat { TermsEnum termsEnum = terms.iterator(); FieldInfo fieldInfo = state.fieldInfos.fieldInfo(field); - if (fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0) { + if (fieldInfo + .getIndexOptions() + .compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) + >= 0) { throw new UnsupportedOperationException("this codec cannot index offsets"); } @@ -258,8 +261,10 @@ public final class RAMOnlyPostingsFormat extends PostingsFormat { IndexOptions indexOptions = fieldInfo.getIndexOptions(); boolean writeFreqs = indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS) >= 0; - boolean writePositions = indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0; - boolean writeOffsets = indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0; + boolean writePositions = + indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0; + boolean writeOffsets = + indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0; boolean writePayloads = fieldInfo.hasPayloads(); if (writeFreqs == false) { @@ -308,7 +313,7 @@ public final class RAMOnlyPostingsFormat extends PostingsFormat { postingsWriter.startDoc(docID, freq); if (writePositions) { - for (int i=0;i<freq;i++) { … - private final Map<Integer,RAMPostings> state = new HashMap<>(); + private final Map<Integer, RAMPostings> state = new HashMap<>(); private final AtomicInteger nextID = new AtomicInteger(); private final String RAM_ONLY_NAME = "RAMOnly"; - private final static int VERSION_START = 0; - private final static int VERSION_LATEST = VERSION_START; + private static final int VERSION_START = 0; + private static final int VERSION_LATEST = VERSION_START; private static final String ID_EXTENSION = "id"; @@ -569,7 +573,9 @@ public final class RAMOnlyPostingsFormat extends PostingsFormat { // TODO -- ok to do this up front instead of // on close....? should be ok?
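// The "slot" mechanism in one place: fieldsConsumer draws a fresh id from nextID, persists only
// a small token (header + single int, per the class javadoc) in a per-segment file with the "id"
// extension, and registers the in-memory RAMPostings under that id in this format instance's
// state map; fieldsProducer later reads the id back and returns the live object via state.get(id).
// Both map accesses happen inside synchronized (state) blocks.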
// Write our ID: - final String idFileName = IndexFileNames.segmentFileName(writeState.segmentInfo.name, writeState.segmentSuffix, ID_EXTENSION); + final String idFileName = + IndexFileNames.segmentFileName( + writeState.segmentInfo.name, writeState.segmentSuffix, ID_EXTENSION); IndexOutput out = writeState.directory.createOutput(idFileName, writeState.context); boolean success = false; try { @@ -583,22 +589,23 @@ public final class RAMOnlyPostingsFormat extends PostingsFormat { IOUtils.close(out); } } - + final RAMPostings postings = new RAMPostings(); final RAMFieldsConsumer consumer = new RAMFieldsConsumer(writeState, postings); - synchronized(state) { + synchronized (state) { state.put(id, postings); } return consumer; } @Override - public FieldsProducer fieldsProducer(SegmentReadState readState) - throws IOException { + public FieldsProducer fieldsProducer(SegmentReadState readState) throws IOException { // Load our ID: - final String idFileName = IndexFileNames.segmentFileName(readState.segmentInfo.name, readState.segmentSuffix, ID_EXTENSION); + final String idFileName = + IndexFileNames.segmentFileName( + readState.segmentInfo.name, readState.segmentSuffix, ID_EXTENSION); IndexInput in = readState.directory.openInput(idFileName, readState.context); boolean success = false; final int id; @@ -613,8 +620,8 @@ public final class RAMOnlyPostingsFormat extends PostingsFormat { IOUtils.close(in); } } - - synchronized(state) { + + synchronized (state) { return state.get(id); } } diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/package-info.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/package-info.java index 8ea282fa3a6..828c04e008d 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/package-info.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/package-info.java @@ -15,7 +15,5 @@ * limitations under the License. */ -/** - * Codec for testing that never writes to disk. - */ +/** Codec for testing that never writes to disk. */ package org.apache.lucene.codecs.ramonly; diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/uniformsplit/Rot13CypherTestUtil.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/uniformsplit/Rot13CypherTestUtil.java index 8483d310de4..d57185cf894 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/uniformsplit/Rot13CypherTestUtil.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/uniformsplit/Rot13CypherTestUtil.java @@ -18,14 +18,11 @@ package org.apache.lucene.codecs.uniformsplit; import java.io.IOException; - import org.apache.lucene.store.DataInput; import org.apache.lucene.store.DataOutput; import org.apache.lucene.util.BytesRef; -/** - * Test utility for simple ROT13 cipher (https://en.wikipedia.org/wiki/ROT13). - */ +/** Test utility for simple ROT13 cipher (https://en.wikipedia.org/wiki/ROT13). 
*/ public class Rot13CypherTestUtil { private static final int ENCODING_OFFSET = 7; @@ -34,7 +31,7 @@ public class Rot13CypherTestUtil { public static byte[] encode(DataInput bytesInput, int length) throws IOException { byte[] encodedBytes = new byte[length + ENCODING_OFFSET]; for (int i = 0; i < length; i++) { - encodedBytes[i + ENCODING_OFFSET] = (byte)(bytesInput.readByte() + ENCODING_ROTATION); + encodedBytes[i + ENCODING_OFFSET] = (byte) (bytesInput.readByte() + ENCODING_ROTATION); } return encodedBytes; } @@ -44,7 +41,7 @@ public class Rot13CypherTestUtil { bytesInput.skipBytes(ENCODING_OFFSET); byte[] decodedBytes = new byte[Math.toIntExact(length)]; for (int i = 0; i < length; i++) { - decodedBytes[i] = (byte)(bytesInput.readByte() - ENCODING_ROTATION); + decodedBytes[i] = (byte) (bytesInput.readByte() - ENCODING_ROTATION); } return decodedBytes; } @@ -69,4 +66,4 @@ public class Rot13CypherTestUtil { public static BlockDecoder getBlockDecoder() { return (blockBytes, length) -> new BytesRef(Rot13CypherTestUtil.decode(blockBytes, length)); } -} \ No newline at end of file +} diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/uniformsplit/UniformSplitRot13PostingsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/uniformsplit/UniformSplitRot13PostingsFormat.java index 26d14adb290..54ab0fa91e6 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/uniformsplit/UniformSplitRot13PostingsFormat.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/uniformsplit/UniformSplitRot13PostingsFormat.java @@ -18,7 +18,6 @@ package org.apache.lucene.codecs.uniformsplit; import java.io.IOException; - import org.apache.lucene.codecs.FieldsConsumer; import org.apache.lucene.codecs.FieldsProducer; import org.apache.lucene.codecs.PostingsFormat; @@ -33,9 +32,7 @@ import org.apache.lucene.store.DataOutput; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IOUtils; -/** - * {@link UniformSplitPostingsFormat} with block encoding using ROT13 cypher. - */ +/** {@link UniformSplitPostingsFormat} with block encoding using ROT13 cypher. 
*/ public class UniformSplitRot13PostingsFormat extends PostingsFormat { public static volatile boolean encoderCalled; @@ -77,20 +74,24 @@ public class UniformSplitRot13PostingsFormat extends PostingsFormat { } } - protected FieldsConsumer createFieldsConsumer(SegmentWriteState segmentWriteState, PostingsWriterBase postingsWriter) throws IOException { - return new UniformSplitTermsWriter(postingsWriter, segmentWriteState, + protected FieldsConsumer createFieldsConsumer( + SegmentWriteState segmentWriteState, PostingsWriterBase postingsWriter) throws IOException { + return new UniformSplitTermsWriter( + postingsWriter, + segmentWriteState, UniformSplitTermsWriter.DEFAULT_TARGET_NUM_BLOCK_LINES, UniformSplitTermsWriter.DEFAULT_DELTA_NUM_LINES, - getBlockEncoder() - ) { + getBlockEncoder()) { @Override protected void writeDictionary(IndexDictionary.Builder dictionaryBuilder) throws IOException { recordBlockEncodingCall(); super.writeDictionary(dictionaryBuilder); recordDictionaryEncodingCall(); } + @Override - protected void writeEncodedFieldsMetadata(ByteBuffersDataOutput fieldsOutput) throws IOException { + protected void writeEncodedFieldsMetadata(ByteBuffersDataOutput fieldsOutput) + throws IOException { super.writeEncodedFieldsMetadata(fieldsOutput); recordFieldsMetadataEncodingCall(); } @@ -151,8 +152,10 @@ public class UniformSplitRot13PostingsFormat extends PostingsFormat { } } - protected FieldsProducer createFieldsProducer(SegmentReadState segmentReadState, PostingsReaderBase postingsReader) throws IOException { - return new UniformSplitTermsReader(postingsReader, segmentReadState, getBlockDecoder(), dictionaryOnHeap); + protected FieldsProducer createFieldsProducer( + SegmentReadState segmentReadState, PostingsReaderBase postingsReader) throws IOException { + return new UniformSplitTermsReader( + postingsReader, segmentReadState, getBlockDecoder(), dictionaryOnHeap); } protected BlockDecoder getBlockDecoder() { diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/uniformsplit/sharedterms/STUniformSplitRot13PostingsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/uniformsplit/sharedterms/STUniformSplitRot13PostingsFormat.java index 04f3964e337..d9962165b79 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/uniformsplit/sharedterms/STUniformSplitRot13PostingsFormat.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/uniformsplit/sharedterms/STUniformSplitRot13PostingsFormat.java @@ -18,7 +18,6 @@ package org.apache.lucene.codecs.uniformsplit.sharedterms; import java.io.IOException; - import org.apache.lucene.codecs.FieldsConsumer; import org.apache.lucene.codecs.FieldsProducer; import org.apache.lucene.codecs.PostingsReaderBase; @@ -30,29 +29,31 @@ import org.apache.lucene.index.SegmentReadState; import org.apache.lucene.index.SegmentWriteState; import org.apache.lucene.store.ByteBuffersDataOutput; -/** - * {@link STUniformSplitPostingsFormat} with block encoding using ROT13 cypher. - */ +/** {@link STUniformSplitPostingsFormat} with block encoding using ROT13 cypher. 
*/ public class STUniformSplitRot13PostingsFormat extends UniformSplitRot13PostingsFormat { public STUniformSplitRot13PostingsFormat() { super("STUniformSplitRot13", false); } - protected FieldsConsumer createFieldsConsumer(SegmentWriteState segmentWriteState, PostingsWriterBase postingsWriter) throws IOException { - return new STUniformSplitTermsWriter(postingsWriter, segmentWriteState, + protected FieldsConsumer createFieldsConsumer( + SegmentWriteState segmentWriteState, PostingsWriterBase postingsWriter) throws IOException { + return new STUniformSplitTermsWriter( + postingsWriter, + segmentWriteState, UniformSplitTermsWriter.DEFAULT_TARGET_NUM_BLOCK_LINES, UniformSplitTermsWriter.DEFAULT_DELTA_NUM_LINES, - getBlockEncoder() - ) { + getBlockEncoder()) { @Override protected void writeDictionary(IndexDictionary.Builder dictionaryBuilder) throws IOException { recordBlockEncodingCall(); super.writeDictionary(dictionaryBuilder); recordDictionaryEncodingCall(); } + @Override - protected void writeEncodedFieldsMetadata(ByteBuffersDataOutput fieldsOutput) throws IOException { + protected void writeEncodedFieldsMetadata(ByteBuffersDataOutput fieldsOutput) + throws IOException { recordBlockEncodingCall(); super.writeEncodedFieldsMetadata(fieldsOutput); recordFieldsMetadataEncodingCall(); @@ -60,7 +61,9 @@ public class STUniformSplitRot13PostingsFormat extends UniformSplitRot13Postings }; } - protected FieldsProducer createFieldsProducer(SegmentReadState segmentReadState, PostingsReaderBase postingsReader) throws IOException { - return new STUniformSplitTermsReader(postingsReader, segmentReadState, getBlockDecoder(), dictionaryOnHeap); + protected FieldsProducer createFieldsProducer( + SegmentReadState segmentReadState, PostingsReaderBase postingsReader) throws IOException { + return new STUniformSplitTermsReader( + postingsReader, segmentReadState, getBlockDecoder(), dictionaryOnHeap); } } diff --git a/lucene/test-framework/src/java/org/apache/lucene/geo/BaseGeoPointTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/geo/BaseGeoPointTestCase.java index a4b47f0cc05..bd96ce875d2 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/geo/BaseGeoPointTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/geo/BaseGeoPointTestCase.java @@ -25,7 +25,6 @@ import java.util.HashSet; import java.util.Locale; import java.util.Set; import java.util.function.Consumer; - import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.FilterCodec; @@ -70,30 +69,27 @@ import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.bkd.BKDWriter; /** - * Abstract class to do basic tests for a geospatial impl (high level - * fields and queries) - * NOTE: This test focuses on geospatial (distance queries, polygon - * queries, etc) indexing and search, not any underlying storage - * format or encoding: it merely supplies two hooks for the encoding - * so that tests can be exact. The [stretch] goal is for this test to be - * so thorough in testing a new geo impl that if this - * test passes, then all Lucene/Solr tests should also pass. Ie, - * if there is some bug in a given geo impl that this - * test fails to catch then this test needs to be improved! 
*/ + * Abstract class to do basic tests for a geospatial impl (high level fields and queries) NOTE: This + * test focuses on geospatial (distance queries, polygon queries, etc) indexing and search, not any + * underlying storage format or encoding: it merely supplies two hooks for the encoding so that + * tests can be exact. The [stretch] goal is for this test to be so thorough in testing a new geo + * impl that if this test passes, then all Lucene/Solr tests should also pass. Ie, if there is some + * bug in a given geo impl that this test fails to catch then this test needs to be improved! + */ public abstract class BaseGeoPointTestCase extends LuceneTestCase { protected static final String FIELD_NAME = "point"; - + // TODO: remove these hooks once all subclasses can pass with new random! protected double nextLongitude() { return org.apache.lucene.geo.GeoTestUtil.nextLongitude(); } - + protected double nextLatitude() { return org.apache.lucene.geo.GeoTestUtil.nextLatitude(); } - + protected Rectangle nextBox() { return org.apache.lucene.geo.GeoTestUtil.nextBox(); } @@ -101,7 +97,7 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { protected Circle nextCircle() { return org.apache.lucene.geo.GeoTestUtil.nextCircle(); } - + protected Polygon nextPolygon() { return org.apache.lucene.geo.GeoTestUtil.nextPolygon(); } @@ -135,75 +131,105 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { addPointToDoc("foo", document, -90.0, 180.0); addPointToDoc("foo", document, -90.0, -180.0); } - + /** Invalid values */ public void testIndexOutOfRangeValues() { Document document = new Document(); IllegalArgumentException expected; - expected = expectThrows(IllegalArgumentException.class, () -> { - addPointToDoc("foo", document, Math.nextUp(90.0), 50.0); - }); + expected = + expectThrows( + IllegalArgumentException.class, + () -> { + addPointToDoc("foo", document, Math.nextUp(90.0), 50.0); + }); assertTrue(expected.getMessage().contains("invalid latitude")); - - expected = expectThrows(IllegalArgumentException.class, () -> { - addPointToDoc("foo", document, Math.nextDown(-90.0), 50.0); - }); + + expected = + expectThrows( + IllegalArgumentException.class, + () -> { + addPointToDoc("foo", document, Math.nextDown(-90.0), 50.0); + }); assertTrue(expected.getMessage().contains("invalid latitude")); - - expected = expectThrows(IllegalArgumentException.class, () -> { - addPointToDoc("foo", document, 90.0, Math.nextUp(180.0)); - }); + + expected = + expectThrows( + IllegalArgumentException.class, + () -> { + addPointToDoc("foo", document, 90.0, Math.nextUp(180.0)); + }); assertTrue(expected.getMessage().contains("invalid longitude")); - - expected = expectThrows(IllegalArgumentException.class, () -> { - addPointToDoc("foo", document, 90.0, Math.nextDown(-180.0)); - }); + + expected = + expectThrows( + IllegalArgumentException.class, + () -> { + addPointToDoc("foo", document, 90.0, Math.nextDown(-180.0)); + }); assertTrue(expected.getMessage().contains("invalid longitude")); } - + /** NaN: illegal */ public void testIndexNaNValues() { Document document = new Document(); IllegalArgumentException expected; - expected = expectThrows(IllegalArgumentException.class, () -> { - addPointToDoc("foo", document, Double.NaN, 50.0); - }); + expected = + expectThrows( + IllegalArgumentException.class, + () -> { + addPointToDoc("foo", document, Double.NaN, 50.0); + }); assertTrue(expected.getMessage().contains("invalid latitude")); - - expected = expectThrows(IllegalArgumentException.class, 
() -> { - addPointToDoc("foo", document, 50.0, Double.NaN); - }); + + expected = + expectThrows( + IllegalArgumentException.class, + () -> { + addPointToDoc("foo", document, 50.0, Double.NaN); + }); assertTrue(expected.getMessage().contains("invalid longitude")); } - + /** Inf: illegal */ public void testIndexInfValues() { Document document = new Document(); IllegalArgumentException expected; - expected = expectThrows(IllegalArgumentException.class, () -> { - addPointToDoc("foo", document, Double.POSITIVE_INFINITY, 50.0); - }); + expected = + expectThrows( + IllegalArgumentException.class, + () -> { + addPointToDoc("foo", document, Double.POSITIVE_INFINITY, 50.0); + }); assertTrue(expected.getMessage().contains("invalid latitude")); - - expected = expectThrows(IllegalArgumentException.class, () -> { - addPointToDoc("foo", document, Double.NEGATIVE_INFINITY, 50.0); - }); + + expected = + expectThrows( + IllegalArgumentException.class, + () -> { + addPointToDoc("foo", document, Double.NEGATIVE_INFINITY, 50.0); + }); assertTrue(expected.getMessage().contains("invalid latitude")); - - expected = expectThrows(IllegalArgumentException.class, () -> { - addPointToDoc("foo", document, 50.0, Double.POSITIVE_INFINITY); - }); + + expected = + expectThrows( + IllegalArgumentException.class, + () -> { + addPointToDoc("foo", document, 50.0, Double.POSITIVE_INFINITY); + }); assertTrue(expected.getMessage().contains("invalid longitude")); - - expected = expectThrows(IllegalArgumentException.class, () -> { - addPointToDoc("foo", document, 50.0, Double.NEGATIVE_INFINITY); - }); + + expected = + expectThrows( + IllegalArgumentException.class, + () -> { + addPointToDoc("foo", document, 50.0, Double.NEGATIVE_INFINITY); + }); assertTrue(expected.getMessage().contains("invalid longitude")); } - + /** Add a single point and search for it in a box */ // NOTE: we don't currently supply an exact search, only ranges, because of the lossiness... 
public void testBoxBasics() throws Exception { @@ -214,7 +240,7 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { Document document = new Document(); addPointToDoc("field", document, 18.313694, -65.227444); writer.addDocument(document); - + // search and verify we found our doc IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); @@ -227,17 +253,22 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { /** null field name not allowed */ public void testBoxNull() { - IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> { - newRectQuery(null, 18, 19, -66, -65); - }); + IllegalArgumentException expected = + expectThrows( + IllegalArgumentException.class, + () -> { + newRectQuery(null, 18, 19, -66, -65); + }); assertTrue(expected.getMessage().contains("field must not be null")); } // box should not accept invalid lat/lon public void testBoxInvalidCoordinates() throws Exception { - expectThrows(Exception.class, () -> { - newRectQuery("field", -92.0, -91.0, 179.0, 181.0); - }); + expectThrows( + Exception.class, + () -> { + newRectQuery("field", -92.0, -91.0, 179.0, 181.0); + }); } /** test we can search for a point */ @@ -249,7 +280,7 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { Document document = new Document(); addPointToDoc("field", document, 18.313694, -65.227444); writer.addDocument(document); - + // search within 50km and verify we found our doc IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); @@ -259,57 +290,74 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { writer.close(); dir.close(); } - + /** null field name not allowed */ public void testDistanceNull() { - IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> { - newDistanceQuery(null, 18, -65, 50_000); - }); + IllegalArgumentException expected = + expectThrows( + IllegalArgumentException.class, + () -> { + newDistanceQuery(null, 18, -65, 50_000); + }); assertTrue(expected.getMessage().contains("field must not be null")); } - + /** distance query should not accept invalid lat/lon as origin */ public void testDistanceIllegal() throws Exception { - expectThrows(Exception.class, () -> { - newDistanceQuery("field", 92.0, 181.0, 120000); - }); + expectThrows( + Exception.class, + () -> { + newDistanceQuery("field", 92.0, 181.0, 120000); + }); } /** negative distance queries are not allowed */ public void testDistanceNegative() { - IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> { - newDistanceQuery("field", 18, 19, -1); - }); + IllegalArgumentException expected = + expectThrows( + IllegalArgumentException.class, + () -> { + newDistanceQuery("field", 18, 19, -1); + }); assertTrue(expected.getMessage().contains("radiusMeters")); assertTrue(expected.getMessage().contains("invalid")); } - + /** NaN distance queries are not allowed */ public void testDistanceNaN() { - IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> { - newDistanceQuery("field", 18, 19, Double.NaN); - }); + IllegalArgumentException expected = + expectThrows( + IllegalArgumentException.class, + () -> { + newDistanceQuery("field", 18, 19, Double.NaN); + }); assertTrue(expected.getMessage().contains("radiusMeters")); assertTrue(expected.getMessage().contains("invalid")); } - + /** Inf distance queries are not allowed */ public void testDistanceInf() { IllegalArgumentException 
expected; - - expected = expectThrows(IllegalArgumentException.class, () -> { - newDistanceQuery("field", 18, 19, Double.POSITIVE_INFINITY); - }); + + expected = + expectThrows( + IllegalArgumentException.class, + () -> { + newDistanceQuery("field", 18, 19, Double.POSITIVE_INFINITY); + }); assertTrue(expected.getMessage().contains("radiusMeters")); assertTrue(expected.getMessage().contains("invalid")); - - expected = expectThrows(IllegalArgumentException.class, () -> { - newDistanceQuery("field", 18, 19, Double.NEGATIVE_INFINITY); - }); + + expected = + expectThrows( + IllegalArgumentException.class, + () -> { + newDistanceQuery("field", 18, 19, Double.NEGATIVE_INFINITY); + }); assertTrue(expected.getMessage(), expected.getMessage().contains("radiusMeters")); assertTrue(expected.getMessage().contains("invalid")); } - + /** test we can search for a polygon */ public void testPolygonBasics() throws Exception { Directory dir = newDirectory(); @@ -319,19 +367,23 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { Document document = new Document(); addPointToDoc("field", document, 18.313694, -65.227444); writer.addDocument(document); - + // search and verify we found our doc IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); - assertEquals(1, searcher.count(newPolygonQuery("field", new Polygon( - new double[] { 18, 18, 19, 19, 18 }, - new double[] { -66, -65, -65, -66, -66 })))); + assertEquals( + 1, + searcher.count( + newPolygonQuery( + "field", + new Polygon( + new double[] {18, 18, 19, 19, 18}, new double[] {-66, -65, -65, -66, -66})))); reader.close(); writer.close(); dir.close(); } - + /** test we can search for a polygon with a hole (but still includes the doc) */ public void testPolygonHole() throws Exception { Directory dir = newDirectory(); @@ -341,21 +393,24 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { Document document = new Document(); addPointToDoc("field", document, 18.313694, -65.227444); writer.addDocument(document); - + // search and verify we found our doc IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); - Polygon inner = new Polygon(new double[] { 18.5, 18.5, 18.7, 18.7, 18.5 }, - new double[] { -65.7, -65.4, -65.4, -65.7, -65.7 }); - Polygon outer = new Polygon(new double[] { 18, 18, 19, 19, 18 }, - new double[] { -66, -65, -65, -66, -66 }, inner); + Polygon inner = + new Polygon( + new double[] {18.5, 18.5, 18.7, 18.7, 18.5}, + new double[] {-65.7, -65.4, -65.4, -65.7, -65.7}); + Polygon outer = + new Polygon( + new double[] {18, 18, 19, 19, 18}, new double[] {-66, -65, -65, -66, -66}, inner); assertEquals(1, searcher.count(newPolygonQuery("field", outer))); reader.close(); writer.close(); dir.close(); } - + /** test we can search for a polygon with a hole (that excludes the doc) */ public void testPolygonHoleExcludes() throws Exception { Directory dir = newDirectory(); @@ -365,21 +420,24 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { Document document = new Document(); addPointToDoc("field", document, 18.313694, -65.227444); writer.addDocument(document); - + // search and verify we found our doc IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); - Polygon inner = new Polygon(new double[] { 18.2, 18.2, 18.4, 18.4, 18.2 }, - new double[] { -65.3, -65.2, -65.2, -65.3, -65.3 }); - Polygon outer = new Polygon(new double[] { 18, 18, 19, 19, 18 }, - new double[] { -66, -65, -65, -66, -66 }, inner); + 
Polygon inner = + new Polygon( + new double[] {18.2, 18.2, 18.4, 18.4, 18.2}, + new double[] {-65.3, -65.2, -65.2, -65.3, -65.3}); + Polygon outer = + new Polygon( + new double[] {18, 18, 19, 19, 18}, new double[] {-66, -65, -65, -66, -66}, inner); assertEquals(0, searcher.count(newPolygonQuery("field", outer))); reader.close(); writer.close(); dir.close(); } - + /** test we can search for a multi-polygon */ public void testMultiPolygonBasics() throws Exception { Directory dir = newDirectory(); @@ -389,28 +447,32 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { Document document = new Document(); addPointToDoc("field", document, 18.313694, -65.227444); writer.addDocument(document); - + // search and verify we found our doc IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); - Polygon a = new Polygon(new double[] { 28, 28, 29, 29, 28 }, - new double[] { -56, -55, -55, -56, -56 }); - Polygon b = new Polygon(new double[] { 18, 18, 19, 19, 18 }, - new double[] { -66, -65, -65, -66, -66 }); + Polygon a = + new Polygon(new double[] {28, 28, 29, 29, 28}, new double[] {-56, -55, -55, -56, -56}); + Polygon b = + new Polygon(new double[] {18, 18, 19, 19, 18}, new double[] {-66, -65, -65, -66, -66}); assertEquals(1, searcher.count(newPolygonQuery("field", a, b))); reader.close(); writer.close(); dir.close(); } - + /** null field name not allowed */ public void testPolygonNullField() { - IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> { - newPolygonQuery(null, new Polygon( - new double[] { 18, 18, 19, 19, 18 }, - new double[] { -66, -65, -65, -66, -66 })); - }); + IllegalArgumentException expected = + expectThrows( + IllegalArgumentException.class, + () -> { + newPolygonQuery( + null, + new Polygon( + new double[] {18, 18, 19, 19, 18}, new double[] {-66, -65, -65, -66, -66})); + }); assertTrue(expected.getMessage().contains("field must not be null")); } @@ -436,9 +498,9 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { int numPoints = atLeast(1000); int cardinality = TestUtil.nextInt(random(), 2, 20); - double[] diffLons = new double[cardinality]; + double[] diffLons = new double[cardinality]; double[] diffLats = new double[cardinality]; - for (int i = 0; i< cardinality; i++) { + for (int i = 0; i < cardinality; i++) { diffLats[i] = nextLatitude(); diffLons[i] = nextLongitude(); } @@ -462,7 +524,7 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { boolean haveRealDoc = false; - for(int docID=0;docID rect.maxLat) { return false; } @@ -782,8 +898,8 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { iwc.setMergeScheduler(new SerialMergeScheduler()); // Else we can get O(N^2) merging: int mbd = iwc.getMaxBufferedDocs(); - if (mbd != -1 && mbd < lats.length/100) { - iwc.setMaxBufferedDocs(lats.length/100); + if (mbd != -1 && mbd < lats.length / 100) { + iwc.setMaxBufferedDocs(lats.length / 100); } Directory dir; if (lats.length > 100000) { @@ -796,7 +912,7 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { // RandomIndexWriter is too slow here: IndexWriter w = new IndexWriter(dir, iwc); indexPoints(lats, lons, deleted, w); - + final IndexReader r = DirectoryReader.open(w); w.close(); @@ -807,12 +923,12 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { Bits liveDocs = MultiBits.getLiveDocs(s.getIndexReader()); int maxDoc = s.getIndexReader().maxDoc(); - for (int iter=0;iter b.append(" rect=").append(rect)); + 
buildError( + docID, + expected, + id, + lats, + lons, + query, + liveDocs, + (b) -> b.append(" rect=").append(rect)); fail = true; } } @@ -857,8 +981,8 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { iwc.setMergeScheduler(new SerialMergeScheduler()); // Else we can get O(N^2) merging: int mbd = iwc.getMaxBufferedDocs(); - if (mbd != -1 && mbd < lats.length/100) { - iwc.setMaxBufferedDocs(lats.length/100); + if (mbd != -1 && mbd < lats.length / 100) { + iwc.setMaxBufferedDocs(lats.length / 100); } Directory dir; if (lats.length > 100000) { @@ -871,7 +995,7 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { // RandomIndexWriter is too slow here: IndexWriter w = new IndexWriter(dir, iwc); indexPoints(lats, lons, deleted, w); - + final IndexReader r = DirectoryReader.open(w); w.close(); @@ -882,7 +1006,7 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { Bits liveDocs = MultiBits.getLiveDocs(s.getIndexReader()); int maxDoc = s.getIndexReader().maxDoc(); - for (int iter=0;iter explain = (b) -> { - if (Double.isNaN(lats[id]) == false) { - double distanceMeters = SloppyMath.haversinMeters(centerLat, centerLon, lats[id], lons[id]); - b.append(" centerLat=").append(centerLat).append(" centerLon=").append(centerLon).append(" distanceMeters=").append(distanceMeters).append(" vs radiusMeters=").append(radiusMeters); - } - }; + Consumer explain = + (b) -> { + if (Double.isNaN(lats[id]) == false) { + double distanceMeters = + SloppyMath.haversinMeters(centerLat, centerLon, lats[id], lons[id]); + b.append(" centerLat=") + .append(centerLat) + .append(" centerLon=") + .append(centerLon) + .append(" distanceMeters=") + .append(distanceMeters) + .append(" vs radiusMeters=") + .append(radiusMeters); + } + }; buildError(docID, expected, id, lats, lons, query, liveDocs, explain); fail = true; } @@ -948,8 +1084,8 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { iwc.setMergeScheduler(new SerialMergeScheduler()); // Else we can get O(N^2) merging: int mbd = iwc.getMaxBufferedDocs(); - if (mbd != -1 && mbd < lats.length/100) { - iwc.setMaxBufferedDocs(lats.length/100); + if (mbd != -1 && mbd < lats.length / 100) { + iwc.setMaxBufferedDocs(lats.length / 100); } Directory dir; if (lats.length > 100000) { @@ -962,7 +1098,7 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { // RandomIndexWriter is too slow here: IndexWriter w = new IndexWriter(dir, iwc); indexPoints(lats, lons, deleted, w); - + final IndexReader r = DirectoryReader.open(w); w.close(); @@ -974,7 +1110,7 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { Bits liveDocs = MultiBits.getLiveDocs(s.getIndexReader()); int maxDoc = s.getIndexReader().maxDoc(); - for (int iter=0;iter b.append(" polygon=").append(polygon)); + buildError( + docID, + expected, + id, + lats, + lons, + query, + liveDocs, + (b) -> b.append(" polygon=").append(polygon)); fail = true; } } @@ -1017,15 +1161,15 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { IOUtils.close(r, dir); } - + protected void verifyRandomGeometries(double[] lats, double[] lons) throws Exception { IndexWriterConfig iwc = newIndexWriterConfig(); // Else seeds may not reproduce: iwc.setMergeScheduler(new SerialMergeScheduler()); // Else we can get O(N^2) merging: int mbd = iwc.getMaxBufferedDocs(); - if (mbd != -1 && mbd < lats.length/100) { - iwc.setMaxBufferedDocs(lats.length/100); + if (mbd != -1 && mbd < lats.length / 100) { + iwc.setMaxBufferedDocs(lats.length / 
100); } Directory dir; if (lats.length > 100000) { @@ -1035,11 +1179,11 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { } Set deleted = new HashSet<>(); - + // RandomIndexWriter is too slow here: IndexWriter w = new IndexWriter(dir, iwc); indexPoints(lats, lons, deleted, w); - + final IndexReader r = DirectoryReader.open(w); w.close(); @@ -1051,7 +1195,7 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { Bits liveDocs = MultiBits.getLiveDocs(s.getIndexReader()); int maxDoc = s.getIndexReader().maxDoc(); - for (int iter=0;iter b.append(" geometry=").append(Arrays.toString(geometries))); + buildError( + docID, + expected, + id, + lats, + lons, + query, + liveDocs, + (b) -> b.append(" geometry=").append(Arrays.toString(geometries))); fail = true; } } @@ -1097,10 +1249,11 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { IOUtils.close(r, dir); } - private void indexPoints(double[] lats, double[] lons, Set deleted, IndexWriter w) throws IOException { - for(int id=0;id deleted, IndexWriter w) + throws IOException { + for (int id = 0; id < lats.length; id++) { Document doc = new Document(); - doc.add(newStringField("id", ""+id, Field.Store.NO)); + doc.add(newStringField("id", "" + id, Field.Store.NO)); doc.add(new NumericDocValuesField("id", id)); if (Double.isNaN(lats[id]) == false) { addPointToDoc(FIELD_NAME, doc, lats[id], lons[id]); @@ -1108,7 +1261,7 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { w.addDocument(doc); if (id > 0 && random().nextInt(100) == 42) { int idToDelete = random().nextInt(id); - w.deleteDocuments(new Term("id", ""+idToDelete)); + w.deleteDocuments(new Term("id", "" + idToDelete)); deleted.add(idToDelete); if (VERBOSE) { System.out.println(" delete id=" + idToDelete); @@ -1123,30 +1276,39 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { private FixedBitSet searchIndex(IndexSearcher s, Query query, int maxDoc) throws IOException { final FixedBitSet hits = new FixedBitSet(maxDoc); - s.search(query, new SimpleCollector() { + s.search( + query, + new SimpleCollector() { - private int docBase; + private int docBase; - @Override - public ScoreMode scoreMode() { - return ScoreMode.COMPLETE_NO_SCORES; - } + @Override + public ScoreMode scoreMode() { + return ScoreMode.COMPLETE_NO_SCORES; + } - @Override - protected void doSetNextReader(LeafReaderContext context) { - docBase = context.docBase; - } + @Override + protected void doSetNextReader(LeafReaderContext context) { + docBase = context.docBase; + } - @Override - public void collect(int doc) { - hits.set(docBase+doc); - } - }); + @Override + public void collect(int doc) { + hits.set(docBase + doc); + } + }); return hits; } - private void buildError(int docID, boolean expected, int id, double[] lats, double[] lons, Query query, - Bits liveDocs, Consumer explain) { + private void buildError( + int docID, + boolean expected, + int id, + double[] lats, + double[] lons, + Query query, + Bits liveDocs, + Consumer explain) { StringBuilder b = new StringBuilder(); if (expected) { b.append("FAIL: id=").append(id).append(" should match but did not\n"); @@ -1173,23 +1335,29 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { break; } } - // this test works in quantized space: for testing inclusiveness of exact edges it must be aware of index-time quantization! 
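  // "Quantized space" here means the point encoding: each coordinate is rounded to one of
  // 2^32 integer steps at index time, so an edge-inclusiveness test must push its expected
  // values through the same encode/decode round trip. The quantizeLat/quantizeLon hooks are
  // typically implemented like this sketch (using the real
  // org.apache.lucene.geo.GeoEncodingUtils; subclasses with other encodings plug in their own):

  protected double quantizeLat(double latRaw) {
    return GeoEncodingUtils.decodeLatitude(GeoEncodingUtils.encodeLatitude(latRaw));
  }

  protected double quantizeLon(double lonRaw) {
    return GeoEncodingUtils.decodeLongitude(GeoEncodingUtils.encodeLongitude(lonRaw));
  }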
- rect = new Rectangle(quantizeLat(rect.minLat), quantizeLat(rect.maxLat), quantizeLon(rect.minLon), quantizeLon(rect.maxLon)); + // this test works in quantized space: for testing inclusiveness of exact edges it must be aware + // of index-time quantization! + rect = + new Rectangle( + quantizeLat(rect.minLat), + quantizeLat(rect.maxLat), + quantizeLon(rect.minLon), + quantizeLon(rect.maxLon)); Directory dir = newDirectory(); IndexWriterConfig iwc = newIndexWriterConfig(); // Else seeds may not reproduce: iwc.setMergeScheduler(new SerialMergeScheduler()); RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); - for(int x=0;x<3;x++) { + for (int x = 0; x < 3; x++) { double lat; if (x == 0) { lat = rect.minLat; } else if (x == 1) { - lat = quantizeLat((rect.minLat+rect.maxLat)/2.0); + lat = quantizeLat((rect.minLat + rect.maxLat) / 2.0); } else { lat = rect.maxLat; } - for(int y=0;y<3;y++) { + for (int y = 0; y < 3; y++) { double lon; if (y == 0) { lon = rect.minLon; @@ -1197,7 +1365,7 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { if (x == 1) { continue; } - lon = quantizeLon((rect.minLon+rect.maxLon)/2.0); + lon = quantizeLon((rect.minLon + rect.maxLon) / 2.0); } else { lon = rect.maxLon; } @@ -1210,38 +1378,65 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { IndexReader r = w.getReader(); IndexSearcher s = newSearcher(r, false); // exact edge cases - assertEquals(8, s.count(newRectQuery(FIELD_NAME, rect.minLat, rect.maxLat, rect.minLon, rect.maxLon))); - + assertEquals( + 8, s.count(newRectQuery(FIELD_NAME, rect.minLat, rect.maxLat, rect.minLon, rect.maxLon))); + // expand 1 ulp in each direction if possible and test a slightly larger box! if (rect.minLat != -90) { - assertEquals(8, s.count(newRectQuery(FIELD_NAME, Math.nextDown(rect.minLat), rect.maxLat, rect.minLon, rect.maxLon))); + assertEquals( + 8, + s.count( + newRectQuery( + FIELD_NAME, Math.nextDown(rect.minLat), rect.maxLat, rect.minLon, rect.maxLon))); } if (rect.maxLat != 90) { - assertEquals(8, s.count(newRectQuery(FIELD_NAME, rect.minLat, Math.nextUp(rect.maxLat), rect.minLon, rect.maxLon))); + assertEquals( + 8, + s.count( + newRectQuery( + FIELD_NAME, rect.minLat, Math.nextUp(rect.maxLat), rect.minLon, rect.maxLon))); } if (rect.minLon != -180) { - assertEquals(8, s.count(newRectQuery(FIELD_NAME, rect.minLat, rect.maxLat, Math.nextDown(rect.minLon), rect.maxLon))); + assertEquals( + 8, + s.count( + newRectQuery( + FIELD_NAME, rect.minLat, rect.maxLat, Math.nextDown(rect.minLon), rect.maxLon))); } if (rect.maxLon != 180) { - assertEquals(8, s.count(newRectQuery(FIELD_NAME, rect.minLat, rect.maxLat, rect.minLon, Math.nextUp(rect.maxLon)))); + assertEquals( + 8, + s.count( + newRectQuery( + FIELD_NAME, rect.minLat, rect.maxLat, rect.minLon, Math.nextUp(rect.maxLon)))); } - + // now shrink 1 ulp in each direction if possible: it should not include bogus stuff // we can't shrink if values are already at extremes, and // we can't do this if rectangle is actually a line or we will create a cross-dateline query - if (rect.minLat != 90 && rect.maxLat != -90 && rect.minLon != 80 && rect.maxLon != -180 && rect.minLon != rect.maxLon) { - // note we put points on "sides" not just "corners" so we just shrink all 4 at once for now: it should exclude all points! 
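    // One "ulp" is the gap to the adjacent representable double, so Math.nextUp/nextDown
    // grow or shrink the box by the smallest possible amount: a box grown by one ulp must
    // still count all 8 edge points, and one shrunk by one ulp must count none.
    // Self-contained illustration:

    double edge = 19.0;
    assert Math.nextUp(edge) > edge;   // smallest double above 19.0
    assert Math.nextDown(edge) < edge; // largest double below 19.0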
- assertEquals(0, s.count(newRectQuery(FIELD_NAME, Math.nextUp(rect.minLat), - Math.nextDown(rect.maxLat), - Math.nextUp(rect.minLon), - Math.nextDown(rect.maxLon)))); + if (rect.minLat != 90 + && rect.maxLat != -90 + && rect.minLon != 80 + && rect.maxLon != -180 + && rect.minLon != rect.maxLon) { + // note we put points on "sides" not just "corners" so we just shrink all 4 at once for now: + // it should exclude all points! + assertEquals( + 0, + s.count( + newRectQuery( + FIELD_NAME, + Math.nextUp(rect.minLat), + Math.nextDown(rect.maxLat), + Math.nextUp(rect.minLon), + Math.nextDown(rect.maxLon)))); } r.close(); w.close(); dir.close(); } - + /** Run a few iterations with just 10 docs, hopefully easy to debug */ public void testRandomDistance() throws Exception { int numIters = atLeast(1); @@ -1249,7 +1444,7 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { doRandomDistanceTest(10, 100); } } - + /** Runs with thousands of docs */ @Nightly public void testRandomDistanceHuge() throws Exception { @@ -1257,7 +1452,7 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { doRandomDistanceTest(2000, 100); } } - + private void doRandomDistanceTest(int numDocs, int numQueries) throws IOException { Directory dir = newDirectory(); IndexWriterConfig iwc = newIndexWriterConfig(); @@ -1265,28 +1460,31 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { iwc.setMergeScheduler(new SerialMergeScheduler()); int pointsInLeaf = 2 + random().nextInt(4); final Codec in = TestUtil.getDefaultCodec(); - iwc.setCodec(new FilterCodec(in.getName(), in) { - @Override - public PointsFormat pointsFormat() { - return new PointsFormat() { + iwc.setCodec( + new FilterCodec(in.getName(), in) { @Override - public PointsWriter fieldsWriter(SegmentWriteState writeState) throws IOException { - return new Lucene86PointsWriter(writeState, pointsInLeaf, BKDWriter.DEFAULT_MAX_MB_SORT_IN_HEAP); + public PointsFormat pointsFormat() { + return new PointsFormat() { + @Override + public PointsWriter fieldsWriter(SegmentWriteState writeState) throws IOException { + return new Lucene86PointsWriter( + writeState, pointsInLeaf, BKDWriter.DEFAULT_MAX_MB_SORT_IN_HEAP); + } + + @Override + public PointsReader fieldsReader(SegmentReadState readState) throws IOException { + return new Lucene86PointsReader(readState); + } + }; } - - @Override - public PointsReader fieldsReader(SegmentReadState readState) throws IOException { - return new Lucene86PointsReader(readState); - } - }; - } - }); + }); RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc); - + for (int i = 0; i < numDocs; i++) { double latRaw = nextLatitude(); double lonRaw = nextLongitude(); - // pre-normalize up front, so we can just use quantized value for testing and do simple exact comparisons + // pre-normalize up front, so we can just use quantized value for testing and do simple exact + // comparisons double lat = quantizeLat(latRaw); double lon = quantizeLon(lonRaw); Document doc = new Document(); @@ -1297,12 +1495,12 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { } IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); - + for (int i = 0; i < numQueries; i++) { double lat = nextLatitude(); double lon = nextLongitude(); double radius = 50000000D * random().nextDouble(); - + BitSet expected = new BitSet(); for (int doc = 0; doc < reader.maxDoc(); doc++) { double docLatitude = reader.document(doc).getField("lat").numericValue().doubleValue(); @@ -1312,13 
+1510,15 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { expected.set(doc); } } - - TopDocs topDocs = searcher.search(newDistanceQuery("field", lat, lon, radius), reader.maxDoc(), Sort.INDEXORDER); + + TopDocs topDocs = + searcher.search( + newDistanceQuery("field", lat, lon, radius), reader.maxDoc(), Sort.INDEXORDER); BitSet actual = new BitSet(); for (ScoreDoc doc : topDocs.scoreDocs) { actual.set(doc.doc); } - + try { assertEquals(expected, actual); } catch (AssertionError e) { @@ -1327,7 +1527,8 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { double docLatitude = reader.document(doc).getField("lat").numericValue().doubleValue(); double docLongitude = reader.document(doc).getField("lon").numericValue().doubleValue(); double distance = SloppyMath.haversinMeters(lat, lon, docLatitude, docLongitude); - System.out.println("" + doc + ": (" + docLatitude + "," + docLongitude + "), distance=" + distance); + System.out.println( + "" + doc + ": (" + docLatitude + "," + docLongitude + "), distance=" + distance); } throw e; } @@ -1348,7 +1549,8 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { // for "impossible" ranges LatLonPoint.newBoxQuery will return MatchNoDocsQuery // changing the field is unrelated to that. if (q1 instanceof MatchNoDocsQuery == false) { - assertFalse(q1.equals(newRectQuery("field2", rect.minLat, rect.maxLat, rect.minLon, rect.maxLon))); + assertFalse( + q1.equals(newRectQuery("field2", rect.minLat, rect.maxLat, rect.minLon, rect.maxLon))); } double lat = nextLatitude(); @@ -1375,30 +1577,31 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { assertEquals(q1, q2); assertFalse(q1.equals(newPolygonQuery("field2", new Polygon(lats, lons)))); } - + /** return topdocs over a small set of points in field "point" */ private TopDocs searchSmallSet(Query query, int size) throws Exception { // this is a simple systematic test, indexing these points // TODO: fragile: does not understand quantization in any way yet uses extremely high precision! - double[][] pts = new double[][] { - { 32.763420, -96.774 }, - { 32.7559529921407, -96.7759895324707 }, - { 32.77866942010977, -96.77701950073242 }, - { 32.7756745755423, -96.7706036567688 }, - { 27.703618681345585, -139.73458170890808 }, - { 32.94823588839368, -96.4538113027811 }, - { 33.06047141970814, -96.65084838867188 }, - { 32.778650, -96.7772 }, - { -88.56029371730983, -177.23537676036358 }, - { 33.541429799076354, -26.779373834241003 }, - { 26.774024500421728, -77.35379276106497 }, - { -90.0, -14.796283808944777 }, - { 32.94823588839368, -178.8538113027811 }, - { 32.94823588839368, 178.8538113027811 }, - { 40.720611, -73.998776 }, - { -44.5, -179.5 } - }; - + double[][] pts = + new double[][] { + {32.763420, -96.774}, + {32.7559529921407, -96.7759895324707}, + {32.77866942010977, -96.77701950073242}, + {32.7756745755423, -96.7706036567688}, + {27.703618681345585, -139.73458170890808}, + {32.94823588839368, -96.4538113027811}, + {33.06047141970814, -96.65084838867188}, + {32.778650, -96.7772}, + {-88.56029371730983, -177.23537676036358}, + {33.541429799076354, -26.779373834241003}, + {26.774024500421728, -77.35379276106497}, + {-90.0, -14.796283808944777}, + {32.94823588839368, -178.8538113027811}, + {32.94823588839368, 178.8538113027811}, + {40.720611, -73.998776}, + {-44.5, -179.5} + }; + Directory directory = newDirectory(); // TODO: must these simple tests really rely on docid order? 
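    // The small-set index built below also adds multi-valued docs: a point field may be
    // added to the same Document several times, and a query matches the doc if ANY of its
    // points satisfies it. Hedged sketch of the idiom (addPointToDoc is this suite's hook):
    //
    //   Document doc = new Document();
    //   addPointToDoc("point", doc, pts[0][0], pts[0][1]); // first point
    //   addPointToDoc("point", doc, pts[1][0], pts[1][1]); // second point
    //   writer.addDocument(doc); // matches a box containing either point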
@@ -1410,21 +1613,21 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase { RandomIndexWriter writer = new RandomIndexWriter(random(), directory, iwc); for (double p[] : pts) { - Document doc = new Document(); - addPointToDoc("point", doc, p[0], p[1]); - writer.addDocument(doc); + Document doc = new Document(); + addPointToDoc("point", doc, p[0], p[1]); + writer.addDocument(doc); } // add explicit multi-valued docs - for (int i=0; i { - addPointToDoc("foo", document, Float.NaN, 50.0f); - }); + expected = + expectThrows( + IllegalArgumentException.class, + () -> { + addPointToDoc("foo", document, Float.NaN, 50.0f); + }); assertTrue(expected.getMessage().contains("invalid value")); - - expected = expectThrows(IllegalArgumentException.class, () -> { - addPointToDoc("foo", document, 50.0f, Float.NaN); - }); + + expected = + expectThrows( + IllegalArgumentException.class, + () -> { + addPointToDoc("foo", document, 50.0f, Float.NaN); + }); assertTrue(expected.getMessage().contains("invalid value")); } - + /** Inf: illegal */ public void testIndexInfValues() { Document document = new Document(); IllegalArgumentException expected; - expected = expectThrows(IllegalArgumentException.class, () -> { - addPointToDoc("foo", document, Float.POSITIVE_INFINITY, 0.0f); - }); + expected = + expectThrows( + IllegalArgumentException.class, + () -> { + addPointToDoc("foo", document, Float.POSITIVE_INFINITY, 0.0f); + }); assertTrue(expected.getMessage().contains("invalid value")); - expected = expectThrows(IllegalArgumentException.class, () -> { - addPointToDoc("foo", document, Float.NEGATIVE_INFINITY, 0.0f); - }); + expected = + expectThrows( + IllegalArgumentException.class, + () -> { + addPointToDoc("foo", document, Float.NEGATIVE_INFINITY, 0.0f); + }); assertTrue(expected.getMessage().contains("invalid value")); - expected = expectThrows(IllegalArgumentException.class, () -> { - addPointToDoc("foo", document, 0.0f, Float.POSITIVE_INFINITY); - }); + expected = + expectThrows( + IllegalArgumentException.class, + () -> { + addPointToDoc("foo", document, 0.0f, Float.POSITIVE_INFINITY); + }); assertTrue(expected.getMessage().contains("invalid value")); - expected = expectThrows(IllegalArgumentException.class, () -> { - addPointToDoc("foo", document, 0.0f, Float.NEGATIVE_INFINITY); - }); + expected = + expectThrows( + IllegalArgumentException.class, + () -> { + addPointToDoc("foo", document, 0.0f, Float.NEGATIVE_INFINITY); + }); assertTrue(expected.getMessage().contains("invalid value")); } - + /** Add a single point and search for it in a box */ public void testBoxBasics() throws Exception { Directory dir = newDirectory(); @@ -171,7 +186,7 @@ public abstract class BaseXYPointTestCase extends LuceneTestCase { Document document = new Document(); addPointToDoc("field", document, 18.313694f, -65.227444f); writer.addDocument(document); - + // search and verify we found our doc IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); @@ -184,17 +199,22 @@ public abstract class BaseXYPointTestCase extends LuceneTestCase { /** null field name not allowed */ public void testBoxNull() { - IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> { - newRectQuery(null, 18, 19, -66, -65); - }); + IllegalArgumentException expected = + expectThrows( + IllegalArgumentException.class, + () -> { + newRectQuery(null, 18, 19, -66, -65); + }); assertTrue(expected.getMessage().contains("field must not be null")); } // box should not accept invalid x/y public 
void testBoxInvalidCoordinates() throws Exception { - expectThrows(Exception.class, () -> { - newRectQuery("field", Float.NaN, Float.NaN,Float.NaN, Float.NaN); - }); + expectThrows( + Exception.class, + () -> { + newRectQuery("field", Float.NaN, Float.NaN, Float.NaN, Float.NaN); + }); } /** test we can search for a point */ @@ -206,7 +226,7 @@ public abstract class BaseXYPointTestCase extends LuceneTestCase { Document document = new Document(); addPointToDoc("field", document, 18.313694f, -65.227444f); writer.addDocument(document); - + // search within 50km and verify we found our doc IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); @@ -216,56 +236,73 @@ public abstract class BaseXYPointTestCase extends LuceneTestCase { writer.close(); dir.close(); } - + /** null field name not allowed */ public void testDistanceNull() { - IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> { - newDistanceQuery(null, 18, -65, 50_000); - }); + IllegalArgumentException expected = + expectThrows( + IllegalArgumentException.class, + () -> { + newDistanceQuery(null, 18, -65, 50_000); + }); assertTrue(expected.getMessage().contains("field must not be null")); } - + /** distance query should not accept invalid x/y as origin */ public void testDistanceIllegal() throws Exception { - expectThrows(Exception.class, () -> { - newDistanceQuery("field", Float.NaN, Float.NaN, 120000); - }); + expectThrows( + Exception.class, + () -> { + newDistanceQuery("field", Float.NaN, Float.NaN, 120000); + }); } /** negative distance queries are not allowed */ public void testDistanceNegative() { - IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> { - newDistanceQuery("field", 18, 19, -1); - }); + IllegalArgumentException expected = + expectThrows( + IllegalArgumentException.class, + () -> { + newDistanceQuery("field", 18, 19, -1); + }); assertTrue(expected.getMessage().contains("radius")); } - + /** NaN distance queries are not allowed */ public void testDistanceNaN() { - IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> { - newDistanceQuery("field", 18, 19, Float.NaN); - }); + IllegalArgumentException expected = + expectThrows( + IllegalArgumentException.class, + () -> { + newDistanceQuery("field", 18, 19, Float.NaN); + }); assertTrue(expected.getMessage().contains("radius")); assertTrue(expected.getMessage().contains("NaN")); } - + /** Inf distance queries are not allowed */ public void testDistanceInf() { IllegalArgumentException expected; - - expected = expectThrows(IllegalArgumentException.class, () -> { - newDistanceQuery("field", 18, 19, Float.POSITIVE_INFINITY); - }); + + expected = + expectThrows( + IllegalArgumentException.class, + () -> { + newDistanceQuery("field", 18, 19, Float.POSITIVE_INFINITY); + }); assertTrue(expected.getMessage(), expected.getMessage().contains("radius")); assertTrue(expected.getMessage(), expected.getMessage().contains("finite")); - - expected = expectThrows(IllegalArgumentException.class, () -> { - newDistanceQuery("field", 18, 19, Float.NEGATIVE_INFINITY); - }); + + expected = + expectThrows( + IllegalArgumentException.class, + () -> { + newDistanceQuery("field", 18, 19, Float.NEGATIVE_INFINITY); + }); assertTrue(expected.getMessage(), expected.getMessage().contains("radius")); assertTrue(expected.getMessage(), expected.getMessage().contains("bigger than 0")); } - + /** test we can search for a polygon */ public void testPolygonBasics() throws 
Exception { Directory dir = newDirectory(); @@ -275,19 +312,23 @@ public abstract class BaseXYPointTestCase extends LuceneTestCase { Document document = new Document(); addPointToDoc("field", document, 18.313694f, -65.227444f); writer.addDocument(document); - + // search and verify we found our doc IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); - assertEquals(1, searcher.count(newPolygonQuery("field", new XYPolygon( - new float[] { 18, 18, 19, 19, 18 }, - new float[] { -66, -65, -65, -66, -66 })))); + assertEquals( + 1, + searcher.count( + newPolygonQuery( + "field", + new XYPolygon( + new float[] {18, 18, 19, 19, 18}, new float[] {-66, -65, -65, -66, -66})))); reader.close(); writer.close(); dir.close(); } - + /** test we can search for a polygon with a hole (but still includes the doc) */ public void testPolygonHole() throws Exception { Directory dir = newDirectory(); @@ -297,21 +338,24 @@ public abstract class BaseXYPointTestCase extends LuceneTestCase { Document document = new Document(); addPointToDoc("field", document, 18.313694f, -65.227444f); writer.addDocument(document); - + // search and verify we found our doc IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); - XYPolygon inner = new XYPolygon(new float[] { 18.5f, 18.5f, 18.7f, 18.7f, 18.5f }, - new float[] { -65.7f, -65.4f, -65.4f, -65.7f, -65.7f }); - XYPolygon outer = new XYPolygon(new float[] { 18, 18, 19, 19, 18 }, - new float[] { -66, -65, -65, -66, -66 }, inner); + XYPolygon inner = + new XYPolygon( + new float[] {18.5f, 18.5f, 18.7f, 18.7f, 18.5f}, + new float[] {-65.7f, -65.4f, -65.4f, -65.7f, -65.7f}); + XYPolygon outer = + new XYPolygon( + new float[] {18, 18, 19, 19, 18}, new float[] {-66, -65, -65, -66, -66}, inner); assertEquals(1, searcher.count(newPolygonQuery("field", outer))); reader.close(); writer.close(); dir.close(); } - + /** test we can search for a polygon with a hole (that excludes the doc) */ public void testPolygonHoleExcludes() throws Exception { Directory dir = newDirectory(); @@ -321,21 +365,24 @@ public abstract class BaseXYPointTestCase extends LuceneTestCase { Document document = new Document(); addPointToDoc("field", document, 18.313694f, -65.227444f); writer.addDocument(document); - + // search and verify we found our doc IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); - XYPolygon inner = new XYPolygon(new float[] { 18.2f, 18.2f, 18.4f, 18.4f, 18.2f }, - new float[] { -65.3f, -65.2f, -65.2f, -65.3f, -65.3f }); - XYPolygon outer = new XYPolygon(new float[] { 18, 18, 19, 19, 18 }, - new float[] { -66, -65, -65, -66, -66 }, inner); + XYPolygon inner = + new XYPolygon( + new float[] {18.2f, 18.2f, 18.4f, 18.4f, 18.2f}, + new float[] {-65.3f, -65.2f, -65.2f, -65.3f, -65.3f}); + XYPolygon outer = + new XYPolygon( + new float[] {18, 18, 19, 19, 18}, new float[] {-66, -65, -65, -66, -66}, inner); assertEquals(0, searcher.count(newPolygonQuery("field", outer))); reader.close(); writer.close(); dir.close(); } - + /** test we can search for a multi-polygon */ public void testMultiPolygonBasics() throws Exception { Directory dir = newDirectory(); @@ -345,28 +392,32 @@ public abstract class BaseXYPointTestCase extends LuceneTestCase { Document document = new Document(); addPointToDoc("field", document, 18.313694f, -65.227444f); writer.addDocument(document); - + // search and verify we found our doc IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); - 
XYPolygon a = new XYPolygon(new float[] { 28, 28, 29, 29, 28 }, - new float[] { -56, -55, -55, -56, -56 }); - XYPolygon b = new XYPolygon(new float[] { 18, 18, 19, 19, 18 }, - new float[] { -66, -65, -65, -66, -66 }); + XYPolygon a = + new XYPolygon(new float[] {28, 28, 29, 29, 28}, new float[] {-56, -55, -55, -56, -56}); + XYPolygon b = + new XYPolygon(new float[] {18, 18, 19, 19, 18}, new float[] {-66, -65, -65, -66, -66}); assertEquals(1, searcher.count(newPolygonQuery("field", a, b))); reader.close(); writer.close(); dir.close(); } - + /** null field name not allowed */ public void testPolygonNullField() { - IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> { - newPolygonQuery(null, new XYPolygon( - new float[] { 18, 18, 19, 19, 18 }, - new float[] { -66, -65, -65, -66, -66 })); - }); + IllegalArgumentException expected = + expectThrows( + IllegalArgumentException.class, + () -> { + newPolygonQuery( + null, + new XYPolygon( + new float[] {18, 18, 19, 19, 18}, new float[] {-66, -65, -65, -66, -66})); + }); assertTrue(expected.getMessage().contains("field must not be null")); } @@ -392,9 +443,9 @@ public abstract class BaseXYPointTestCase extends LuceneTestCase { int numPoints = atLeast(1000); int cardinality = TestUtil.nextInt(random(), 2, 20); - float[] diffXs = new float[cardinality]; + float[] diffXs = new float[cardinality]; float[] diffYs = new float[cardinality]; - for (int i = 0; i< cardinality; i++) { + for (int i = 0; i < cardinality; i++) { diffXs[i] = nextX(); diffYs[i] = nextY(); } @@ -417,7 +468,7 @@ public abstract class BaseXYPointTestCase extends LuceneTestCase { boolean haveRealDoc = false; - for(int docID=0;docID 100000) { @@ -735,12 +818,12 @@ public abstract class BaseXYPointTestCase extends LuceneTestCase { Bits liveDocs = MultiBits.getLiveDocs(s.getIndexReader()); int maxDoc = s.getIndexReader().maxDoc(); - for (int iter=0;iter b.append(" rect=").append(rect)); + buildError( + docID, + expected, + id, + xs, + ys, + query, + liveDocs, + (b) -> b.append(" rect=").append(rect)); fail = true; } } @@ -785,8 +876,8 @@ public abstract class BaseXYPointTestCase extends LuceneTestCase { iwc.setMergeScheduler(new SerialMergeScheduler()); // Else we can get O(N^2) merging: int mbd = iwc.getMaxBufferedDocs(); - if (mbd != -1 && mbd < xs.length/100) { - iwc.setMaxBufferedDocs(xs.length/100); + if (mbd != -1 && mbd < xs.length / 100) { + iwc.setMaxBufferedDocs(xs.length / 100); } Directory dir; if (xs.length > 100000) { @@ -809,7 +900,7 @@ public abstract class BaseXYPointTestCase extends LuceneTestCase { Bits liveDocs = MultiBits.getLiveDocs(s.getIndexReader()); int maxDoc = s.getIndexReader().maxDoc(); - for (int iter=0;iter explain = (b) -> { - if (Double.isNaN(xs[id]) == false) { - double distance = cartesianDistance(centerX, centerY, xs[id], ys[id]); - b.append(" centerX=").append(centerX).append(" centerY=").append(centerY).append(" distance=") - .append(distance).append(" vs radius=").append(radius); - } - }; + Consumer explain = + (b) -> { + if (Double.isNaN(xs[id]) == false) { + double distance = cartesianDistance(centerX, centerY, xs[id], ys[id]); + b.append(" centerX=") + .append(centerX) + .append(" centerY=") + .append(centerY) + .append(" distance=") + .append(distance) + .append(" vs radius=") + .append(radius); + } + }; buildError(docID, expected, id, xs, ys, query, liveDocs, explain); fail = true; } @@ -876,8 +975,8 @@ public abstract class BaseXYPointTestCase extends LuceneTestCase { iwc.setMergeScheduler(new 
SerialMergeScheduler()); // Else we can get O(N^2) merging: int mbd = iwc.getMaxBufferedDocs(); - if (mbd != -1 && mbd < xs.length/100) { - iwc.setMaxBufferedDocs(xs.length/100); + if (mbd != -1 && mbd < xs.length / 100) { + iwc.setMaxBufferedDocs(xs.length / 100); } Directory dir; if (xs.length > 100000) { @@ -901,7 +1000,7 @@ public abstract class BaseXYPointTestCase extends LuceneTestCase { Bits liveDocs = MultiBits.getLiveDocs(s.getIndexReader()); int maxDoc = s.getIndexReader().maxDoc(); - for (int iter=0;iter b.append(" polygon=").append(polygon)); + buildError( + docID, + expected, + id, + xs, + ys, + query, + liveDocs, + (b) -> b.append(" polygon=").append(polygon)); fail = true; } } @@ -951,8 +1058,8 @@ public abstract class BaseXYPointTestCase extends LuceneTestCase { iwc.setMergeScheduler(new SerialMergeScheduler()); // Else we can get O(N^2) merging: int mbd = iwc.getMaxBufferedDocs(); - if (mbd != -1 && mbd < xs.length/100) { - iwc.setMaxBufferedDocs(xs.length/100); + if (mbd != -1 && mbd < xs.length / 100) { + iwc.setMaxBufferedDocs(xs.length / 100); } Directory dir; if (xs.length > 100000) { @@ -976,7 +1083,7 @@ public abstract class BaseXYPointTestCase extends LuceneTestCase { Bits liveDocs = MultiBits.getLiveDocs(s.getIndexReader()); int maxDoc = s.getIndexReader().maxDoc(); - for (int iter=0;iter b.append(" geometry=").append(Arrays.toString(geometries))); + buildError( + docID, + expected, + id, + xs, + ys, + query, + liveDocs, + (b) -> b.append(" geometry=").append(Arrays.toString(geometries))); fail = true; } } @@ -1021,10 +1136,11 @@ public abstract class BaseXYPointTestCase extends LuceneTestCase { IOUtils.close(r, dir); } - private void indexPoints(float[] xs, float[] ys, Set deleted, IndexWriter w) throws IOException { - for(int id=0;id deleted, IndexWriter w) + throws IOException { + for (int id = 0; id < xs.length; id++) { Document doc = new Document(); - doc.add(newStringField("id", ""+id, Field.Store.NO)); + doc.add(newStringField("id", "" + id, Field.Store.NO)); doc.add(new NumericDocValuesField("id", id)); if (Float.isNaN(xs[id]) == false && Float.isNaN(ys[id]) == false) { addPointToDoc(FIELD_NAME, doc, xs[id], ys[id]); @@ -1032,7 +1148,7 @@ public abstract class BaseXYPointTestCase extends LuceneTestCase { w.addDocument(doc); if (id > 0 && random().nextInt(100) == 42) { int idToDelete = random().nextInt(id); - w.deleteDocuments(new Term("id", ""+idToDelete)); + w.deleteDocuments(new Term("id", "" + idToDelete)); deleted.add(idToDelete); if (VERBOSE) { System.out.println(" delete id=" + idToDelete); @@ -1047,30 +1163,39 @@ public abstract class BaseXYPointTestCase extends LuceneTestCase { private FixedBitSet searchIndex(IndexSearcher s, Query query, int maxDoc) throws IOException { final FixedBitSet hits = new FixedBitSet(maxDoc); - s.search(query, new SimpleCollector() { + s.search( + query, + new SimpleCollector() { - private int docBase; + private int docBase; - @Override - public ScoreMode scoreMode() { - return ScoreMode.COMPLETE_NO_SCORES; - } + @Override + public ScoreMode scoreMode() { + return ScoreMode.COMPLETE_NO_SCORES; + } - @Override - protected void doSetNextReader(LeafReaderContext context) { - docBase = context.docBase; - } + @Override + protected void doSetNextReader(LeafReaderContext context) { + docBase = context.docBase; + } - @Override - public void collect(int doc) { - hits.set(docBase+doc); - } - }); + @Override + public void collect(int doc) { + hits.set(docBase + doc); + } + }); return hits; } - private void buildError(int docID, 
boolean expected, int id, float[] xs, float[] ys, Query query, - Bits liveDocs, Consumer explain) { + private void buildError( + int docID, + boolean expected, + int id, + float[] xs, + float[] ys, + Query query, + Bits liveDocs, + Consumer explain) { StringBuilder b = new StringBuilder(); if (expected) { b.append("FAIL: id=").append(id).append(" should match but did not\n"); @@ -1095,7 +1220,7 @@ public abstract class BaseXYPointTestCase extends LuceneTestCase { // Else seeds may not reproduce: iwc.setMergeScheduler(new SerialMergeScheduler()); RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); - for(int i = 0; i < 3; i++) { + for (int i = 0; i < 3; i++) { float y; if (i == 0) { y = rect.minY; @@ -1104,7 +1229,7 @@ public abstract class BaseXYPointTestCase extends LuceneTestCase { } else { y = rect.maxY; } - for(int j = 0; j < 3; j++) { + for (int j = 0; j < 3; j++) { float x; if (j == 0) { x = rect.minX; @@ -1128,16 +1253,28 @@ public abstract class BaseXYPointTestCase extends LuceneTestCase { assertEquals(8, s.count(newRectQuery(FIELD_NAME, rect.minX, rect.maxX, rect.minY, rect.maxY))); // expand 1 ulp in each direction if possible and test a slightly larger box! if (rect.minX != -Float.MAX_VALUE) { - assertEquals(8, s.count(newRectQuery(FIELD_NAME, Math.nextDown(rect.minX), rect.maxX, rect.minY, rect.maxY))); + assertEquals( + 8, + s.count( + newRectQuery(FIELD_NAME, Math.nextDown(rect.minX), rect.maxX, rect.minY, rect.maxY))); } if (rect.maxX != Float.MAX_VALUE) { - assertEquals(8, s.count(newRectQuery(FIELD_NAME, rect.minX, Math.nextUp(rect.maxX), rect.minY, rect.maxY))); + assertEquals( + 8, + s.count( + newRectQuery(FIELD_NAME, rect.minX, Math.nextUp(rect.maxX), rect.minY, rect.maxY))); } if (rect.minY != -Float.MAX_VALUE) { - assertEquals(8, s.count(newRectQuery(FIELD_NAME, rect.minX, rect.maxX, Math.nextDown(rect.minY), rect.maxY))); + assertEquals( + 8, + s.count( + newRectQuery(FIELD_NAME, rect.minX, rect.maxX, Math.nextDown(rect.minY), rect.maxY))); } if (rect.maxY != Float.MAX_VALUE) { - assertEquals(8, s.count(newRectQuery(FIELD_NAME, rect.minX, rect.maxX, rect.minY, Math.nextUp(rect.maxY)))); + assertEquals( + 8, + s.count( + newRectQuery(FIELD_NAME, rect.minX, rect.maxX, rect.minY, Math.nextUp(rect.maxY)))); } r.close(); @@ -1168,28 +1305,31 @@ public abstract class BaseXYPointTestCase extends LuceneTestCase { iwc.setMergeScheduler(new SerialMergeScheduler()); int pointsInLeaf = 2 + random().nextInt(4); Codec in = TestUtil.getDefaultCodec(); - iwc.setCodec(new FilterCodec(in.getName(), in) { - @Override - public PointsFormat pointsFormat() { - return new PointsFormat() { + iwc.setCodec( + new FilterCodec(in.getName(), in) { @Override - public PointsWriter fieldsWriter(SegmentWriteState writeState) throws IOException { - return new Lucene86PointsWriter(writeState, pointsInLeaf, BKDWriter.DEFAULT_MAX_MB_SORT_IN_HEAP); - } + public PointsFormat pointsFormat() { + return new PointsFormat() { + @Override + public PointsWriter fieldsWriter(SegmentWriteState writeState) throws IOException { + return new Lucene86PointsWriter( + writeState, pointsInLeaf, BKDWriter.DEFAULT_MAX_MB_SORT_IN_HEAP); + } - @Override - public PointsReader fieldsReader(SegmentReadState readState) throws IOException { - return new Lucene86PointsReader(readState); + @Override + public PointsReader fieldsReader(SegmentReadState readState) throws IOException { + return new Lucene86PointsReader(readState); + } + }; } - }; - } - }); + }); RandomIndexWriter writer = new RandomIndexWriter(random(), 
dir, iwc); for (int i = 0; i < numDocs; i++) { float x = nextX(); float y = nextY(); - // pre-normalize up front, so we can just use quantized value for testing and do simple exact comparisons + // pre-normalize up front, so we can just use quantized value for testing and do simple exact + // comparisons Document doc = new Document(); addPointToDoc("field", doc, x, y); @@ -1215,7 +1355,9 @@ public abstract class BaseXYPointTestCase extends LuceneTestCase { } } - TopDocs topDocs = searcher.search(newDistanceQuery("field", x, y, radius), reader.maxDoc(), Sort.INDEXORDER); + TopDocs topDocs = + searcher.search( + newDistanceQuery("field", x, y, radius), reader.maxDoc(), Sort.INDEXORDER); BitSet actual = new BitSet(); for (ScoreDoc doc : topDocs.scoreDocs) { actual.set(doc.doc); @@ -1239,7 +1381,7 @@ public abstract class BaseXYPointTestCase extends LuceneTestCase { dir.close(); } - public void testEquals() { + public void testEquals() { Query q1, q2; XYRectangle rect = nextBox(); @@ -1248,7 +1390,6 @@ public abstract class BaseXYPointTestCase extends LuceneTestCase { q2 = newRectQuery("field", rect.minX, rect.maxX, rect.minY, rect.maxY); assertEquals(q1, q2); - float x = nextX(); float y = nextY(); q1 = newDistanceQuery("field", x, y, 10000.0f); @@ -1277,24 +1418,25 @@ public abstract class BaseXYPointTestCase extends LuceneTestCase { /** return topdocs over a small set of points in field "point" */ private TopDocs searchSmallSet(Query query, int size) throws Exception { // this is a simple systematic test, indexing these points - double[][] pts = new double[][] { - { 32.763420, -96.774 }, - { 32.7559529921407, -96.7759895324707 }, - { 32.77866942010977, -96.77701950073242 }, - { 32.7756745755423, -96.7706036567688 }, - { 27.703618681345585, -139.73458170890808 }, - { 32.94823588839368, -96.4538113027811 }, - { 33.06047141970814, -96.65084838867188 }, - { 32.778650, -96.7772 }, - { -88.56029371730983, -177.23537676036358 }, - { 33.541429799076354, -26.779373834241003 }, - { 26.774024500421728, -77.35379276106497 }, - { -90.0, -14.796283808944777 }, - { 32.94823588839368, -178.8538113027811 }, - { 32.94823588839368, 178.8538113027811 }, - { 40.720611, -73.998776 }, - { -44.5, -179.5 } - }; + double[][] pts = + new double[][] { + {32.763420, -96.774}, + {32.7559529921407, -96.7759895324707}, + {32.77866942010977, -96.77701950073242}, + {32.7756745755423, -96.7706036567688}, + {27.703618681345585, -139.73458170890808}, + {32.94823588839368, -96.4538113027811}, + {33.06047141970814, -96.65084838867188}, + {32.778650, -96.7772}, + {-88.56029371730983, -177.23537676036358}, + {33.541429799076354, -26.779373834241003}, + {26.774024500421728, -77.35379276106497}, + {-90.0, -14.796283808944777}, + {32.94823588839368, -178.8538113027811}, + {32.94823588839368, 178.8538113027811}, + {40.720611, -73.998776}, + {-44.5, -179.5} + }; Directory directory = newDirectory(); @@ -1307,21 +1449,21 @@ public abstract class BaseXYPointTestCase extends LuceneTestCase { RandomIndexWriter writer = new RandomIndexWriter(random(), directory, iwc); for (double p[] : pts) { - Document doc = new Document(); - addPointToDoc("point", doc, (float) p[0], (float) p[1]); - writer.addDocument(doc); + Document doc = new Document(); + addPointToDoc("point", doc, (float) p[0], (float) p[1]); + writer.addDocument(doc); } // add explicit multi-valued docs - for (int i=0; iJust instantiate this class, add the things you want plotted, and call {@link #finish} to get + * the resulting HTML that you should save and load with a browser. 
+ */ public class EarthDebugger { final StringBuilder b = new StringBuilder(); private int nextShape; @@ -46,7 +47,13 @@ public class EarthDebugger { b.append(" \n"); b.append(" \n"); b.append(" \n"); b.append(" WebGL Earth API: Hello World\n"); b.append(" \n"); @@ -201,7 +235,8 @@ public class EarthDebugger { return b.toString(); } - private static void inverseHaversin(StringBuilder b, double centerLat, double centerLon, double radiusMeters) { + private static void inverseHaversin( + StringBuilder b, double centerLat, double centerLon, double radiusMeters) { double angle = 0; int steps = 100; @@ -213,7 +248,7 @@ public class EarthDebugger { double step = 1.0; int last = 0; double lastDistanceMeters = 0.0; - //System.out.println("angle " + angle + " slope=" + slope); + // System.out.println("angle " + angle + " slope=" + slope); while (true) { double lat = wrapLat(centerLat + y * factor); double lon = wrapLon(centerLon + x * factor); @@ -221,72 +256,73 @@ public class EarthDebugger { if (last == 1 && distanceMeters < lastDistanceMeters) { // For large enough circles, some angles are not possible: - //System.out.println(" done: give up on angle " + angle); - angle += 360./steps; + // System.out.println(" done: give up on angle " + angle); + angle += 360. / steps; continue newAngle; } if (last == -1 && distanceMeters > lastDistanceMeters) { // For large enough circles, some angles are not possible: - //System.out.println(" done: give up on angle " + angle); - angle += 360./steps; + // System.out.println(" done: give up on angle " + angle); + angle += 360. / steps; continue newAngle; } lastDistanceMeters = distanceMeters; - //System.out.println(" iter lat=" + lat + " lon=" + lon + " distance=" + distanceMeters + " vs " + radiusMeters); + // System.out.println(" iter lat=" + lat + " lon=" + lon + " distance=" + distanceMeters + + // " vs " + radiusMeters); if (Math.abs(distanceMeters - radiusMeters) < 0.1) { b.append(" [").append(lat).append(", ").append(lon).append("],\n"); break; } if (distanceMeters > radiusMeters) { // too big - //System.out.println(" smaller"); + // System.out.println(" smaller"); factor -= step; if (last == 1) { - //System.out.println(" half-step"); + // System.out.println(" half-step"); step /= 2.0; } last = -1; } else if (distanceMeters < radiusMeters) { // too small - //System.out.println(" bigger"); + // System.out.println(" bigger"); factor += step; if (last == -1) { - //System.out.println(" half-step"); + // System.out.println(" half-step"); step /= 2.0; } last = 1; } } - angle += 360./steps; + angle += 360. 
/ steps; } } // craziness for plotting stuff :) private static double wrapLat(double lat) { - //System.out.println("wrapLat " + lat); + // System.out.println("wrapLat " + lat); if (lat > 90) { - //System.out.println(" " + (180 - lat)); + // System.out.println(" " + (180 - lat)); return 180 - lat; } else if (lat < -90) { - //System.out.println(" " + (-180 - lat)); + // System.out.println(" " + (-180 - lat)); return -180 - lat; } else { - //System.out.println(" " + lat); + // System.out.println(" " + lat); return lat; } } private static double wrapLon(double lon) { - //System.out.println("wrapLon " + lon); + // System.out.println("wrapLon " + lon); if (lon > 180) { - //System.out.println(" " + (lon - 360)); + // System.out.println(" " + (lon - 360)); return lon - 360; } else if (lon < -180) { - //System.out.println(" " + (lon + 360)); + // System.out.println(" " + (lon + 360)); return lon + 360; } else { - //System.out.println(" " + lon); + // System.out.println(" " + lon); return lon; } } diff --git a/lucene/test-framework/src/java/org/apache/lucene/geo/GeoTestUtil.java b/lucene/test-framework/src/java/org/apache/lucene/geo/GeoTestUtil.java index f052ccc40cc..2676a45e3f0 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/geo/GeoTestUtil.java +++ b/lucene/test-framework/src/java/org/apache/lucene/geo/GeoTestUtil.java @@ -16,6 +16,7 @@ */ package org.apache.lucene.geo; +import com.carrotsearch.randomizedtesting.RandomizedContext; import java.io.BufferedReader; import java.io.FileNotFoundException; import java.io.IOException; @@ -27,13 +28,10 @@ import java.util.Arrays; import java.util.List; import java.util.Random; import java.util.zip.GZIPInputStream; - import org.apache.lucene.util.NumericUtils; import org.apache.lucene.util.SloppyMath; import org.apache.lucene.util.TestUtil; -import com.carrotsearch.randomizedtesting.RandomizedContext; - /** static methods for testing geo */ public class GeoTestUtil { @@ -46,14 +44,16 @@ public class GeoTestUtil { public static double nextLongitude() { return nextDoubleInternal(-180, 180); } - + /** * Returns next double within range. - *
<p>
- * Don't pass huge numbers or infinity or anything like that yet. may have bugs!
+ *
+ * <p>
    Don't pass huge numbers or infinity or anything like that yet. may have bugs! */ - // the goal is to adjust random number generation to test edges, create more duplicates, create "one-offs" in floating point space, etc. - // we do this by first picking a good "base value" (explicitly targeting edges, zero if allowed, or "discrete values"). but it also + // the goal is to adjust random number generation to test edges, create more duplicates, create + // "one-offs" in floating point space, etc. + // we do this by first picking a good "base value" (explicitly targeting edges, zero if allowed, + // or "discrete values"). but it also // ensures we pick any double in the range and generally still produces randomish looking numbers. // then we sometimes perturb that by one ulp. private static double nextDoubleInternal(double low, double high) { @@ -62,7 +62,7 @@ public class GeoTestUtil { assert Double.isFinite(low); assert Double.isFinite(high); assert high >= low : "low=" + low + " high=" + high; - + // if they are equal, not much we can do if (low == high) { return low; @@ -184,7 +184,7 @@ public class GeoTestUtil { return nextDoubleInternal(lower, upper); } } - + /** Returns the next point around a line (more or less) */ private static double[] nextPointAroundLine(double lat1, double lon1, double lat2, double lon2) { double x1 = lon1; @@ -196,12 +196,16 @@ public class GeoTestUtil { double minY = Math.min(y1, y2); double maxY = Math.max(y1, y2); if (minX == maxX) { - return new double[] { nextLatitudeBetween(minY, maxY), nextLongitudeNear(minX, 0.01 * (maxY - minY)) }; + return new double[] { + nextLatitudeBetween(minY, maxY), nextLongitudeNear(minX, 0.01 * (maxY - minY)) + }; } else if (minY == maxY) { - return new double[] { nextLatitudeNear(minY, 0.01 * (maxX - minX)), nextLongitudeBetween(minX, maxX) }; + return new double[] { + nextLatitudeNear(minY, 0.01 * (maxX - minX)), nextLongitudeBetween(minX, maxX) + }; } else { double x = nextLongitudeBetween(minX, maxX); - double y = (y1 - y2) / (x1 - x2) * (x-x1) + y1; + double y = (y1 - y2) / (x1 - x2) * (x - x1) + y1; if (Double.isFinite(y) == false) { // this can happen due to underflow when delta between x values is wonderfully tiny! y = Math.copySign(90, x1); @@ -210,18 +214,20 @@ public class GeoTestUtil { // our formula may put the targeted Y out of bounds y = Math.min(90, y); y = Math.max(-90, y); - return new double[] { nextLatitudeNear(y, delta), x }; + return new double[] {nextLatitudeNear(y, delta), x}; } } - + /** Returns next point (lat/lon) for testing near a Box. 
It may cross the dateline */ public static double[] nextPointNear(Rectangle rectangle) { if (rectangle.crossesDateline()) { // pick a "side" of the two boxes we really are if (random().nextBoolean()) { - return nextPointNear(new Rectangle(rectangle.minLat, rectangle.maxLat, -180, rectangle.maxLon)); + return nextPointNear( + new Rectangle(rectangle.minLat, rectangle.maxLat, -180, rectangle.maxLon)); } else { - return nextPointNear(new Rectangle(rectangle.minLat, rectangle.maxLat, rectangle.minLon, 180)); + return nextPointNear( + new Rectangle(rectangle.minLat, rectangle.maxLat, rectangle.minLon, 180)); } } else { return nextPointNear(boxPolygon(rectangle)); @@ -229,7 +235,8 @@ public class GeoTestUtil { } /** Returns next point (lat/lon) for testing near a Polygon */ - // see http://www-ma2.upc.es/geoc/Schirra-pointPolygon.pdf for more info on some of these strategies + // see http://www-ma2.upc.es/geoc/Schirra-pointPolygon.pdf for more info on some of these + // strategies public static double[] nextPointNear(Polygon polygon) { double polyLats[] = polygon.getPolyLats(); double polyLons[] = polygon.getPolyLons(); @@ -243,44 +250,53 @@ public class GeoTestUtil { int surpriseMe = random().nextInt(97); if (surpriseMe == 0) { // purely random - return new double[] { nextLatitude(), nextLongitude() }; + return new double[] {nextLatitude(), nextLongitude()}; } else if (surpriseMe < 5) { // purely random within bounding box - return new double[] { nextLatitudeBetween(polygon.minLat, polygon.maxLat), nextLongitudeBetween(polygon.minLon, polygon.maxLon) }; + return new double[] { + nextLatitudeBetween(polygon.minLat, polygon.maxLat), + nextLongitudeBetween(polygon.minLon, polygon.maxLon) + }; } else if (surpriseMe < 20) { // target a vertex int vertex = random().nextInt(polyLats.length - 1); - return new double[] { nextLatitudeNear(polyLats[vertex], polyLats[vertex+1] - polyLats[vertex]), - nextLongitudeNear(polyLons[vertex], polyLons[vertex+1] - polyLons[vertex]) }; + return new double[] { + nextLatitudeNear(polyLats[vertex], polyLats[vertex + 1] - polyLats[vertex]), + nextLongitudeNear(polyLons[vertex], polyLons[vertex + 1] - polyLons[vertex]) + }; } else if (surpriseMe < 30) { // target points around the bounding box edges - Polygon container = boxPolygon(new Rectangle(polygon.minLat, polygon.maxLat, polygon.minLon, polygon.maxLon)); + Polygon container = + boxPolygon(new Rectangle(polygon.minLat, polygon.maxLat, polygon.minLon, polygon.maxLon)); double containerLats[] = container.getPolyLats(); double containerLons[] = container.getPolyLons(); int startVertex = random().nextInt(containerLats.length - 1); - return nextPointAroundLine(containerLats[startVertex], containerLons[startVertex], - containerLats[startVertex+1], containerLons[startVertex+1]); + return nextPointAroundLine( + containerLats[startVertex], containerLons[startVertex], + containerLats[startVertex + 1], containerLons[startVertex + 1]); } else { // target points around diagonals between vertices int startVertex = random().nextInt(polyLats.length - 1); // but favor edges heavily - int endVertex = random().nextBoolean() ? startVertex + 1 : random().nextInt(polyLats.length - 1); - return nextPointAroundLine(polyLats[startVertex], polyLons[startVertex], - polyLats[endVertex], polyLons[endVertex]); + int endVertex = + random().nextBoolean() ? 
startVertex + 1 : random().nextInt(polyLats.length - 1); + return nextPointAroundLine( + polyLats[startVertex], polyLons[startVertex], + polyLats[endVertex], polyLons[endVertex]); } } - + /** Returns next box for testing near a Polygon */ public static Rectangle nextBoxNear(Polygon polygon) { final double point1[]; final double point2[]; - + // if there are any holes, target them aggressively Polygon holes[] = polygon.getHoles(); if (holes.length > 0 && random().nextInt(3) == 0) { return nextBoxNear(holes[random().nextInt(holes.length)]); } - + int surpriseMe = random().nextInt(97); if (surpriseMe == 0) { // formed from two interesting points @@ -294,13 +310,13 @@ public class GeoTestUtil { double polyLats[] = polygon.getPolyLats(); double polyLons[] = polygon.getPolyLons(); int vertex = random().nextInt(polyLats.length - 1); - double deltaX = polyLons[vertex+1] - polyLons[vertex]; - double deltaY = polyLats[vertex+1] - polyLats[vertex]; + double deltaX = polyLons[vertex + 1] - polyLons[vertex]; + double deltaY = polyLats[vertex + 1] - polyLats[vertex]; double edgeLength = Math.sqrt(deltaX * deltaX + deltaY * deltaY); point2[0] = nextLatitudeNear(point1[0], edgeLength); point2[1] = nextLongitudeNear(point1[1], edgeLength); } - + // form a box from the two points double minLat = Math.min(point1[0], point2[0]); double maxLat = Math.max(point1[0], point2[0]); @@ -316,32 +332,37 @@ public class GeoTestUtil { /** returns next pseudorandom box: does not cross the 180th meridian */ public static Rectangle nextBoxNotCrossingDateline() { - return nextBoxInternal( false); + return nextBoxInternal(false); } - /** Makes an n-gon, centered at the provided lat/lon, and each vertex approximately - * distanceMeters away from the center. + /** + * Makes an n-gon, centered at the provided lat/lon, and each vertex approximately distanceMeters + * away from the center. * - * Do not invoke me across the dateline or a pole!! */ - public static Polygon createRegularPolygon(double centerLat, double centerLon, double radiusMeters, int gons) { + *
<p>
    Do not invoke me across the dateline or a pole!! + */ + public static Polygon createRegularPolygon( + double centerLat, double centerLon, double radiusMeters, int gons) { - // System.out.println("MAKE POLY: centerLat=" + centerLat + " centerLon=" + centerLon + " radiusMeters=" + radiusMeters + " gons=" + gons); + // System.out.println("MAKE POLY: centerLat=" + centerLat + " centerLon=" + centerLon + " + // radiusMeters=" + radiusMeters + " gons=" + gons); double[][] result = new double[2][]; - result[0] = new double[gons+1]; - result[1] = new double[gons+1]; - //System.out.println("make gon=" + gons); - for(int i=0;i radiusMeters) { // too big - //System.out.println(" smaller"); + // System.out.println(" smaller"); factor -= step; if (last == 1) { - //System.out.println(" half-step"); + // System.out.println(" half-step"); step /= 2.0; } last = -1; } else if (distanceMeters < radiusMeters) { // too small - //System.out.println(" bigger"); + // System.out.println(" bigger"); factor += step; if (last == -1) { - //System.out.println(" half-step"); + // System.out.println(" half-step"); step /= 2.0; } last = 1; @@ -385,8 +407,8 @@ public class GeoTestUtil { result[0][gons] = result[0][0]; result[1][gons] = result[1][0]; - //System.out.println(" polyLats=" + Arrays.toString(result[0])); - //System.out.println(" polyLons=" + Arrays.toString(result[1])); + // System.out.println(" polyLats=" + Arrays.toString(result[0])); + // System.out.println(" polyLons=" + Arrays.toString(result[1])); return new Polygon(result[0], result[1]); } @@ -407,11 +429,12 @@ public class GeoTestUtil { } return new Line(lats, lons); } - + public static Circle nextCircle() { double lat = nextLatitude(); double lon = nextLongitude(); - double radiusMeters = random().nextDouble() * GeoUtils.EARTH_MEAN_RADIUS_METERS * Math.PI / 2.0 + 1.0; + double radiusMeters = + random().nextDouble() * GeoUtils.EARTH_MEAN_RADIUS_METERS * Math.PI / 2.0 + 1.0; return new Circle(lat, lon, radiusMeters); } @@ -424,7 +447,8 @@ public class GeoTestUtil { while (true) { int gons = TestUtil.nextInt(random(), 4, 500); // So the poly can cover at most 50% of the earth's surface: - double radiusMeters = random().nextDouble() * GeoUtils.EARTH_MEAN_RADIUS_METERS * Math.PI / 2.0 + 1.0; + double radiusMeters = + random().nextDouble() * GeoUtils.EARTH_MEAN_RADIUS_METERS * Math.PI / 2.0 + 1.0; try { return createRegularPolygon(nextLatitude(), nextLongitude(), radiusMeters, gons); } catch (IllegalArgumentException iae) { @@ -508,7 +532,7 @@ public class GeoTestUtil { // repeat until we get a poly that doesn't cross dateline: newPoly: while (true) { - //System.out.println("\nPOLY ITER"); + // System.out.println("\nPOLY ITER"); double centerLat = nextLatitude(); double centerLon = nextLongitude(); double radius = 0.1 + 20 * random().nextDouble(); @@ -518,24 +542,24 @@ public class GeoTestUtil { ArrayList lons = new ArrayList<>(); double angle = 0.0; while (true) { - angle += random().nextDouble()*40.0; - //System.out.println(" angle " + angle); + angle += random().nextDouble() * 40.0; + // System.out.println(" angle " + angle); if (angle > 360) { break; } double len = radius * (1.0 - radiusDelta + radiusDelta * random().nextDouble()); - //System.out.println(" len=" + len); + // System.out.println(" len=" + len); double lat = centerLat + len * Math.cos(Math.toRadians(angle)); double lon = centerLon + len * Math.sin(Math.toRadians(angle)); - if (lon <= GeoUtils.MIN_LON_INCL || lon >= GeoUtils.MAX_LON_INCL || - lat > 90 || lat < -90) { + if (lon <= 
GeoUtils.MIN_LON_INCL || lon >= GeoUtils.MAX_LON_INCL || lat > 90 || lat < -90) { // cannot cross dateline or pole: try again! continue newPoly; } lats.add(lat); lons.add(lon); - //System.out.println(" lat=" + lats.get(lats.size()-1) + " lon=" + lons.get(lons.size()-1)); + // System.out.println(" lat=" + lats.get(lats.size()-1) + " lon=" + + // lons.get(lons.size()-1)); } // close it @@ -544,7 +568,7 @@ public class GeoTestUtil { double[] latsArray = new double[lats.size()]; double[] lonsArray = new double[lons.size()]; - for(int i=0;i - * You can pass any number of objects: - * Polygon: polygon with optional holes - * Polygon[]: arrays of polygons for convenience - * Rectangle: for a box - * double[2]: as latitude,longitude for a point - *
<p>
- * At least one object must be a polygon. The viewBox is formed around all polygons
- * found in the arguments.
+ /**
+ * Returns svg of polygon for debugging.
+ *
+ * <p>You can pass any number of objects: Polygon: polygon with optional holes Polygon[]: arrays
+ * of polygons for convenience Rectangle: for a box double[2]: as latitude,longitude for a point
+ *
+ * <p>
    At least one object must be a polygon. The viewBox is formed around all polygons found in + * the arguments. */ - public static String toSVG(Object ...objects) { + public static String toSVG(Object... objects) { List flattened = new ArrayList<>(); for (Object o : objects) { if (o instanceof Polygon[]) { @@ -586,18 +607,20 @@ public class GeoTestUtil { for (Object o : flattened) { final Rectangle r; if (o instanceof Polygon) { - r = Rectangle.fromPolygon(new Polygon[] { (Polygon) o }); + r = Rectangle.fromPolygon(new Polygon[] {(Polygon) o}); minLat = Math.min(minLat, r.minLat); maxLat = Math.max(maxLat, r.maxLat); minLon = Math.min(minLon, r.minLon); maxLon = Math.max(maxLon, r.maxLon); } } - if (Double.isFinite(minLat) == false || Double.isFinite(maxLat) == false || - Double.isFinite(minLon) == false || Double.isFinite(maxLon) == false) { + if (Double.isFinite(minLat) == false + || Double.isFinite(maxLat) == false + || Double.isFinite(minLon) == false + || Double.isFinite(maxLon) == false) { throw new IllegalArgumentException("you must pass at least one polygon"); } - + // add some additional padding so we can really see what happens on the edges too double xpadding = (maxLon - minLon) / 64; double ypadding = (maxLat - minLat) / 64; @@ -607,12 +630,12 @@ public class GeoTestUtil { StringBuilder sb = new StringBuilder(); sb.append("\n"); // encode each object @@ -637,10 +660,13 @@ public class GeoTestUtil { opacity = "0.3"; } else if (o instanceof double[]) { double point[] = (double[]) o; - gon = boxPolygon(new Rectangle(Math.max(-90, point[0]-pointY), - Math.min(90, point[0]+pointY), - Math.max(-180, point[1]-pointX), - Math.min(180, point[1]+pointX))); + gon = + boxPolygon( + new Rectangle( + Math.max(-90, point[0] - pointY), + Math.min(90, point[0] + pointY), + Math.max(-180, point[1] - pointX), + Math.min(180, point[1] + pointX))); style = "fill:red;stroke:red;stroke-width:0.1%;"; opacity = "0.7"; } else { @@ -656,9 +682,7 @@ public class GeoTestUtil { if (i > 0) { sb.append(" "); } - sb.append(polyLons[i]) - .append(",") - .append(90 - polyLats[i]); + sb.append(polyLons[i]).append(",").append(90 - polyLats[i]); } sb.append("\" style=\"").append(style).append("\"/>\n"); for (Polygon hole : gon.getHoles()) { @@ -669,9 +693,7 @@ public class GeoTestUtil { if (i > 0) { sb.append(" "); } - sb.append(holeLons[i]) - .append(",") - .append(90 - holeLats[i]); + sb.append(holeLons[i]).append(",").append(90 - holeLats[i]); } sb.append("\" style=\"fill:lightgray\"/>\n"); } @@ -680,34 +702,41 @@ public class GeoTestUtil { return sb.toString(); } - /** - * Simple slow point in polygon check (for testing) - */ + /** Simple slow point in polygon check (for testing) */ // direct port of PNPOLY C code (https://www.ecse.rpi.edu/~wrf/Research/Short_Notes/pnpoly.html) // this allows us to improve the code yet still ensure we have its properties - // it is under the BSD license (https://www.ecse.rpi.edu/~wrf/Research/Short_Notes/pnpoly.html#License%20to%20Use) + // it is under the BSD license + // (https://www.ecse.rpi.edu/~wrf/Research/Short_Notes/pnpoly.html#License%20to%20Use) // // Copyright (c) 1970-2003, Wm. 
Randolph Franklin // - // Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated - // documentation files (the "Software"), to deal in the Software without restriction, including without limitation - // the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and - // to permit persons to whom the Software is furnished to do so, subject to the following conditions: + // Permission is hereby granted, free of charge, to any person obtaining a copy of this software + // and associated + // documentation files (the "Software"), to deal in the Software without restriction, including + // without limitation + // the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + // the Software, and + // to permit persons to whom the Software is furnished to do so, subject to the following + // conditions: // - // 1. Redistributions of source code must retain the above copyright + // 1. Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimers. - // 2. Redistributions in binary form must reproduce the above copyright - // notice in the documentation and/or other materials provided with + // 2. Redistributions in binary form must reproduce the above copyright + // notice in the documentation and/or other materials provided with // the distribution. - // 3. The name of W. Randolph Franklin may not be used to endorse or - // promote products derived from this Software without specific - // prior written permission. + // 3. The name of W. Randolph Franklin may not be used to endorse or + // promote products derived from this Software without specific + // prior written permission. // - // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED - // TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - // THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - // CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - // IN THE SOFTWARE. + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING + // BUT NOT LIMITED + // TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN + // NO EVENT SHALL + // THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + // IN AN ACTION OF + // CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE + // OR OTHER DEALINGS + // IN THE SOFTWARE. 
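Aside (illustration only, not part of this change): containsSlowly() below is a direct port of
the PNPOLY even-odd rule licensed above. As a reading aid, here is a minimal self-contained Java
sketch of just that rule, assuming a simple closed polygon whose first and last vertices coincide;
the helper name pnpoly and its parameters are illustrative, and the Lucene method adds the
bounding-box and on-boundary checks this sketch omits:

  static boolean pnpoly(double[] vertY, double[] vertX, double testY, double testX) {
    boolean inside = false;
    // Cast a horizontal ray rightward from the test point and flip `inside` at every
    // polygon edge (i -> j) that straddles the ray's y and whose crossing point lies
    // to the right of testX.
    for (int i = 0, j = 1; j < vertY.length; i++, j++) {
      if ((vertY[i] > testY) != (vertY[j] > testY)
          && testX < (vertX[j] - vertX[i]) * (testY - vertY[i]) / (vertY[j] - vertY[i]) + vertX[i]) {
        inside = !inside;
      }
    }
    return inside; // an odd number of crossings means the point is inside
  }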
public static boolean containsSlowly(Polygon polygon, double latitude, double longitude) { if (polygon.getHoles().length > 0) { throw new UnsupportedOperationException("this testing method does not support holes"); @@ -715,10 +744,13 @@ public class GeoTestUtil { double polyLats[] = polygon.getPolyLats(); double polyLons[] = polygon.getPolyLons(); // bounding box check required due to rounding errors (we don't solve that problem) - if (latitude < polygon.minLat || latitude > polygon.maxLat || longitude < polygon.minLon || longitude > polygon.maxLon) { + if (latitude < polygon.minLat + || latitude > polygon.maxLat + || longitude < polygon.minLon + || longitude > polygon.maxLon) { return false; } - + boolean c = false; int i, j; int nvert = polyLats.length; @@ -727,15 +759,17 @@ public class GeoTestUtil { double testy = latitude; double testx = longitude; for (i = 0, j = 1; j < nvert; ++i, ++j) { - if (testy == verty[j] && testy == verty[i] || - ((testy <= verty[j] && testy >= verty[i]) != (testy >= verty[j] && testy <= verty[i]))) { - if ((testx == vertx[j] && testx == vertx[i]) || - ((testx <= vertx[j] && testx >= vertx[i]) != (testx >= vertx[j] && testx <= vertx[i]) && - GeoUtils.orient(vertx[i], verty[i], vertx[j], verty[j], testx, testy) == 0)) { + if (testy == verty[j] && testy == verty[i] + || ((testy <= verty[j] && testy >= verty[i]) + != (testy >= verty[j] && testy <= verty[i]))) { + if ((testx == vertx[j] && testx == vertx[i]) + || ((testx <= vertx[j] && testx >= vertx[i]) != (testx >= vertx[j] && testx <= vertx[i]) + && GeoUtils.orient(vertx[i], verty[i], vertx[j], verty[j], testx, testy) == 0)) { // return true if point is on boundary return true; - } else if ( ((verty[i] > testy) != (verty[j] > testy)) && - (testx < (vertx[j]-vertx[i]) * (testy-verty[i]) / (verty[j]-verty[i]) + vertx[i]) ) { + } else if (((verty[i] > testy) != (verty[j] > testy)) + && (testx + < (vertx[j] - vertx[i]) * (testy - verty[i]) / (verty[j] - verty[i]) + vertx[i])) { c = !c; } } diff --git a/lucene/test-framework/src/java/org/apache/lucene/geo/ShapeTestUtil.java b/lucene/test-framework/src/java/org/apache/lucene/geo/ShapeTestUtil.java index 8f447e5c092..e79bb7270e5 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/geo/ShapeTestUtil.java +++ b/lucene/test-framework/src/java/org/apache/lucene/geo/ShapeTestUtil.java @@ -16,11 +16,10 @@ */ package org.apache.lucene.geo; -import java.util.ArrayList; -import java.util.Random; - import com.carrotsearch.randomizedtesting.RandomizedContext; import com.carrotsearch.randomizedtesting.generators.BiasedNumbers; +import java.util.ArrayList; +import java.util.Random; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; @@ -156,18 +155,24 @@ public class ShapeTestUtil { ArrayList yList = new ArrayList<>(); double angle = 0.0; while (true) { - angle += random.nextDouble()*40.0; + angle += random.nextDouble() * 40.0; if (angle > 360) { break; } double len = radius * (1.0 - radiusDelta + radiusDelta * random.nextDouble()); - float maxX = StrictMath.min(StrictMath.abs(Float.MAX_VALUE - centerX), StrictMath.abs(-Float.MAX_VALUE - centerX)); - float maxY = StrictMath.min(StrictMath.abs(Float.MAX_VALUE - centerY), StrictMath.abs(-Float.MAX_VALUE - centerY)); + float maxX = + StrictMath.min( + StrictMath.abs(Float.MAX_VALUE - centerX), + StrictMath.abs(-Float.MAX_VALUE - centerX)); + float maxY = + StrictMath.min( + StrictMath.abs(Float.MAX_VALUE - centerY), + StrictMath.abs(-Float.MAX_VALUE - centerY)); len = StrictMath.min(len, 
StrictMath.min(maxX, maxY)); - float x = (float)(centerX + len * Math.cos(Math.toRadians(angle))); - float y = (float)(centerY + len * Math.sin(Math.toRadians(angle))); + float x = (float) (centerX + len * Math.cos(Math.toRadians(angle))); + float y = (float) (centerY + len * Math.sin(Math.toRadians(angle))); xList.add(x); yList.add(y); @@ -179,7 +184,7 @@ public class ShapeTestUtil { float[] xArray = new float[xList.size()]; float[] yArray = new float[yList.size()]; - for(int i=0;iDo not invoke me across the dateline or a pole!! + */ + public static XYPolygon createRegularPolygon( + double centerX, double centerY, double radius, int gons) { - double maxX = StrictMath.min(StrictMath.abs(Float.MAX_VALUE - centerX), StrictMath.abs(-Float.MAX_VALUE - centerX)); - double maxY = StrictMath.min(StrictMath.abs(Float.MAX_VALUE - centerY), StrictMath.abs(-Float.MAX_VALUE - centerY)); + double maxX = + StrictMath.min( + StrictMath.abs(Float.MAX_VALUE - centerX), StrictMath.abs(-Float.MAX_VALUE - centerX)); + double maxY = + StrictMath.min( + StrictMath.abs(Float.MAX_VALUE - centerY), StrictMath.abs(-Float.MAX_VALUE - centerY)); radius = StrictMath.min(radius, StrictMath.min(maxX, maxY)); float[][] result = new float[2][]; - result[0] = new float[gons+1]; - result[1] = new float[gons+1]; - //System.out.println("make gon=" + gons); - for(int i=0;i 0) { throw new UnsupportedOperationException("this testing method does not support holes"); } double polyXs[] = XYEncodingUtils.floatArrayToDoubleArray(polygon.getPolyX()); - double polyYs[] =XYEncodingUtils.floatArrayToDoubleArray(polygon.getPolyY()); + double polyYs[] = XYEncodingUtils.floatArrayToDoubleArray(polygon.getPolyY()); // bounding box check required due to rounding errors (we don't solve that problem) if (x < polygon.minX || x > polygon.maxX || y < polygon.minY || y > polygon.maxY) { return false; @@ -274,15 +293,17 @@ public class ShapeTestUtil { double testy = y; double testx = x; for (i = 0, j = 1; j < nvert; ++i, ++j) { - if (testy == verty[j] && testy == verty[i] || - ((testy <= verty[j] && testy >= verty[i]) != (testy >= verty[j] && testy <= verty[i]))) { - if ((testx == vertx[j] && testx == vertx[i]) || - ((testx <= vertx[j] && testx >= vertx[i]) != (testx >= vertx[j] && testx <= vertx[i]) && - GeoUtils.orient(vertx[i], verty[i], vertx[j], verty[j], testx, testy) == 0)) { + if (testy == verty[j] && testy == verty[i] + || ((testy <= verty[j] && testy >= verty[i]) + != (testy >= verty[j] && testy <= verty[i]))) { + if ((testx == vertx[j] && testx == vertx[i]) + || ((testx <= vertx[j] && testx >= vertx[i]) != (testx >= vertx[j] && testx <= vertx[i]) + && GeoUtils.orient(vertx[i], verty[i], vertx[j], verty[j], testx, testy) == 0)) { // return true if point is on boundary return true; - } else if ( ((verty[i] > testy) != (verty[j] > testy)) && - (testx < (vertx[j]-vertx[i]) * (testy-verty[i]) / (verty[j]-verty[i]) + vertx[i]) ) { + } else if (((verty[i] > testy) != (verty[j] > testy)) + && (testx + < (vertx[j] - vertx[i]) * (testy - verty[i]) / (verty[j] - verty[i]) + vertx[i])) { c = !c; } } diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/AlcoholicMergePolicy.java b/lucene/test-framework/src/java/org/apache/lucene/index/AlcoholicMergePolicy.java index 10812a40c8a..22ec838b888 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/AlcoholicMergePolicy.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/AlcoholicMergePolicy.java @@ -22,44 +22,39 @@ import java.util.GregorianCalendar; import 
java.util.Locale; import java.util.Random; import java.util.TimeZone; - import org.apache.lucene.util.TestUtil; -/** - *
<p>
- * Merge policy for testing, it is like an alcoholic.
- * It drinks (merges) at night, and randomly decides what to drink.
- * During the daytime it sleeps.
- * </p>
- * <p>
- * if tests pass with this, then they are likely to pass with any
- * bizarro merge policy users might write.
- * </p>
- * <p>
- * It is a fine bottle of champagne (Ordered by Martijn).
- * </p>
+/**
+ * Merge policy for testing, it is like an alcoholic. It drinks (merges) at night, and randomly
+ * decides what to drink. During the daytime it sleeps.
+ *
+ * <p>if tests pass with this, then they are likely to pass with any bizarro merge policy users
+ * might write.
+ *
+ * <p>
    It is a fine bottle of champagne (Ordered by Martijn). */ public class AlcoholicMergePolicy extends LogMergePolicy { - + private final Random random; private final Calendar calendar; - + public AlcoholicMergePolicy(TimeZone tz, Random random) { this.calendar = new GregorianCalendar(tz, Locale.ROOT); calendar.setTimeInMillis(TestUtil.nextLong(random, 0, Long.MAX_VALUE)); this.random = random; maxMergeSize = TestUtil.nextInt(random, 1024 * 1024, Integer.MAX_VALUE); } - + @Override - //@BlackMagic(level=Voodoo); + // @BlackMagic(level=Voodoo); protected long size(SegmentCommitInfo info, MergeContext mergeContext) throws IOException { int hourOfDay = calendar.get(Calendar.HOUR_OF_DAY); - if (hourOfDay < 6 || - hourOfDay > 20 || + if (hourOfDay < 6 + || hourOfDay > 20 + || // it's 5 o'clock somewhere random.nextInt(23) == 5) { - + Drink[] values = Drink.values(); // pick a random drink during the day return values[random.nextInt(values.length)].drunkFactor * info.sizeInBytes(); @@ -67,16 +62,18 @@ public class AlcoholicMergePolicy extends LogMergePolicy { return info.sizeInBytes(); } - + private static enum Drink { - - Beer(15), Wine(17), Champagne(21), WhiteRussian(22), SingleMalt(30); - + Beer(15), + Wine(17), + Champagne(21), + WhiteRussian(22), + SingleMalt(30); + long drunkFactor; - + Drink(long drunkFactor) { this.drunkFactor = drunkFactor; } } - } diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/AllDeletedFilterReader.java b/lucene/test-framework/src/java/org/apache/lucene/index/AllDeletedFilterReader.java index f11d2e32361..26e23d94ca3 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/AllDeletedFilterReader.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/AllDeletedFilterReader.java @@ -18,12 +18,10 @@ package org.apache.lucene.index; import org.apache.lucene.util.Bits; -/** - * Filters the incoming reader and makes all documents appear deleted. - */ +/** Filters the incoming reader and makes all documents appear deleted. 
*/ public class AllDeletedFilterReader extends FilterLeafReader { final Bits liveDocs; - + public AllDeletedFilterReader(LeafReader in) { super(in); liveDocs = new Bits.MatchNoBits(in.maxDoc()); diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/AssertingDirectoryReader.java b/lucene/test-framework/src/java/org/apache/lucene/index/AssertingDirectoryReader.java index 232672699b4..8274cf09795 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/AssertingDirectoryReader.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/AssertingDirectoryReader.java @@ -18,10 +18,7 @@ package org.apache.lucene.index; import java.io.IOException; -/** - * A {@link DirectoryReader} that wraps all its subreaders with - * {@link AssertingLeafReader} - */ +/** A {@link DirectoryReader} that wraps all its subreaders with {@link AssertingLeafReader} */ public class AssertingDirectoryReader extends FilterDirectoryReader { static class AssertingSubReaderWrapper extends SubReaderWrapper { @@ -44,5 +41,4 @@ public class AssertingDirectoryReader extends FilterDirectoryReader { public CacheHelper getReaderCacheHelper() { return in.getReaderCacheHelper(); } - } diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/AssertingLeafReader.java b/lucene/test-framework/src/java/org/apache/lucene/index/AssertingLeafReader.java index d8a1c0c9e93..62792b15d87 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/AssertingLeafReader.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/AssertingLeafReader.java @@ -21,7 +21,6 @@ import java.util.Arrays; import java.util.Iterator; import java.util.List; import java.util.Objects; - import org.apache.lucene.index.PointValues.IntersectVisitor; import org.apache.lucene.index.PointValues.Relation; import org.apache.lucene.search.DocIdSetIterator; @@ -30,17 +29,19 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.VirtualMethod; import org.apache.lucene.util.automaton.CompiledAutomaton; -/** - * A {@link FilterLeafReader} that can be used to apply - * additional checks for tests. - */ +/** A {@link FilterLeafReader} that can be used to apply additional checks for tests. */ public class AssertingLeafReader extends FilterLeafReader { private static void assertThread(String object, Thread creationThread) { if (creationThread != Thread.currentThread()) { - throw new AssertionError(object + " are only supposed to be consumed in " - + "the thread in which they have been acquired. But was acquired in " - + creationThread + " and consumed in " + Thread.currentThread() + "."); + throw new AssertionError( + object + + " are only supposed to be consumed in " + + "the thread in which they have been acquired. 
But was acquired in " + + creationThread + + " and consumed in " + + Thread.currentThread() + + "."); } } @@ -54,20 +55,28 @@ public class AssertingLeafReader extends FilterLeafReader { CacheHelper coreCacheHelper = in.getCoreCacheHelper(); if (coreCacheHelper != null) { - coreCacheHelper.addClosedListener(cacheKey -> { - final Object expectedKey = coreCacheHelper.getKey(); - assert expectedKey == cacheKey - : "Core closed listener called on a different key " + expectedKey + " <> " + cacheKey; - }); + coreCacheHelper.addClosedListener( + cacheKey -> { + final Object expectedKey = coreCacheHelper.getKey(); + assert expectedKey == cacheKey + : "Core closed listener called on a different key " + + expectedKey + + " <> " + + cacheKey; + }); } CacheHelper readerCacheHelper = in.getReaderCacheHelper(); if (readerCacheHelper != null) { - readerCacheHelper.addClosedListener(cacheKey -> { - final Object expectedKey = readerCacheHelper.getKey(); - assert expectedKey == cacheKey - : "Core closed listener called on a different key " + expectedKey + " <> " + cacheKey; - }); + readerCacheHelper.addClosedListener( + cacheKey -> { + final Object expectedKey = readerCacheHelper.getKey(); + assert expectedKey == cacheKey + : "Core closed listener called on a different key " + + expectedKey + + " <> " + + cacheKey; + }); } } @@ -83,9 +92,7 @@ public class AssertingLeafReader extends FilterLeafReader { return fields == null ? null : new AssertingFields(fields); } - /** - * Wraps a Fields but with additional asserts - */ + /** Wraps a Fields but with additional asserts */ public static class AssertingFields extends FilterFields { public AssertingFields(Fields in) { super(in); @@ -104,10 +111,8 @@ public class AssertingLeafReader extends FilterLeafReader { return terms == null ? null : new AssertingTerms(terms); } } - - /** - * Wraps a Terms but with additional asserts - */ + + /** Wraps a Terms but with additional asserts */ public static class AssertingTerms extends FilterTerms { public AssertingTerms(Terms in) { super(in); @@ -171,12 +176,19 @@ public class AssertingLeafReader extends FilterLeafReader { return "AssertingTerms(" + in + ")"; } } - - static final VirtualMethod SEEK_EXACT = new VirtualMethod<>(TermsEnum.class, "seekExact", BytesRef.class); + + static final VirtualMethod SEEK_EXACT = + new VirtualMethod<>(TermsEnum.class, "seekExact", BytesRef.class); static class AssertingTermsEnum extends FilterTermsEnum { private final Thread creationThread = Thread.currentThread(); - private enum State {INITIAL, POSITIONED, UNPOSITIONED}; + + private enum State { + INITIAL, + POSITIONED, + UNPOSITIONED + }; + private State state = State.INITIAL; private final boolean delegateOverridesSeekExact; private final boolean hasFreqs; @@ -190,7 +202,7 @@ public class AssertingLeafReader extends FilterLeafReader { @Override public PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException { assertThread("Terms enums", creationThread); - assert state == State.POSITIONED: "docs(...) called on unpositioned TermsEnum"; + assert state == State.POSITIONED : "docs(...) 
called on unpositioned TermsEnum"; // reuse if the codec reused final PostingsEnum actualReuse; @@ -203,7 +215,7 @@ public class AssertingLeafReader extends FilterLeafReader { assert docs != null; if (docs == actualReuse) { // codec reused, reset asserting state - ((AssertingPostingsEnum)reuse).reset(); + ((AssertingPostingsEnum) reuse).reset(); return reuse; } else { return new AssertingPostingsEnum(docs); @@ -213,7 +225,7 @@ public class AssertingLeafReader extends FilterLeafReader { @Override public ImpactsEnum impacts(int flags) throws IOException { assertThread("Terms enums", creationThread); - assert state == State.POSITIONED: "docs(...) called on unpositioned TermsEnum"; + assert state == State.POSITIONED : "docs(...) called on unpositioned TermsEnum"; assert (flags & PostingsEnum.FREQS) != 0 : "Freqs should be requested on impacts"; return new AssertingImpactsEnum(super.impacts(flags)); @@ -224,7 +236,8 @@ public class AssertingLeafReader extends FilterLeafReader { @Override public BytesRef next() throws IOException { assertThread("Terms enums", creationThread); - assert state == State.INITIAL || state == State.POSITIONED: "next() called on unpositioned TermsEnum"; + assert state == State.INITIAL || state == State.POSITIONED + : "next() called on unpositioned TermsEnum"; BytesRef result = super.next(); if (result == null) { state = State.UNPOSITIONED; @@ -330,13 +343,17 @@ public class AssertingLeafReader extends FilterLeafReader { public String toString() { return "AssertingTermsEnum(" + in + ")"; } - + void reset() { state = State.INITIAL; } } - - static enum DocsEnumState { START, ITERATING, FINISHED }; + + static enum DocsEnumState { + START, + ITERATING, + FINISHED + }; /** Wraps a docsenum with additional checks */ public static class AssertingPostingsEnum extends FilterPostingsEnum { @@ -391,7 +408,8 @@ public class AssertingLeafReader extends FilterLeafReader { @Override public int docID() { assertThread("Docs enums", creationThread); - assert doc == super.docID() : " invalid docID() in " + in.getClass() + " " + super.docID() + " instead of " + doc; + assert doc == super.docID() + : " invalid docID() in " + in.getClass() + " " + super.docID() + " instead of " + doc; return doc; } @@ -438,10 +456,11 @@ public class AssertingLeafReader extends FilterLeafReader { assert state != DocsEnumState.FINISHED : "getPayload() called after NO_MORE_DOCS"; assert positionCount > 0 : "getPayload() called before nextPosition()!"; BytesRef payload = super.getPayload(); - assert payload == null || payload.length > 0 : "getPayload() returned payload with invalid length!"; + assert payload == null || payload.length > 0 + : "getPayload() returned payload with invalid length!"; return payload; } - + void reset() { state = DocsEnumState.START; doc = in.docID(); @@ -464,7 +483,11 @@ public class AssertingLeafReader extends FilterLeafReader { @Override public void advanceShallow(int target) throws IOException { - assert target >= lastShallowTarget : "called on decreasing targets: target = " + target + " < last target = " + lastShallowTarget; + assert target >= lastShallowTarget + : "called on decreasing targets: target = " + + target + + " < last target = " + + lastShallowTarget; assert target >= docID() : "target = " + target + " < docID = " + docID(); lastShallowTarget = target; in.advanceShallow(target); @@ -472,7 +495,8 @@ public class AssertingLeafReader extends FilterLeafReader { @Override public Impacts getImpacts() throws IOException { - assert docID() >= 0 || lastShallowTarget >= 0 : 
"Cannot get impacts until the iterator is positioned or advanceShallow has been called"; + assert docID() >= 0 || lastShallowTarget >= 0 + : "Cannot get impacts until the iterator is positioned or advanceShallow has been called"; Impacts impacts = in.getImpacts(); CheckIndex.checkImpacts(impacts, Math.max(docID(), lastShallowTarget)); return new AssertingImpacts(impacts, this); @@ -510,13 +534,15 @@ public class AssertingLeafReader extends FilterLeafReader { @Override public int nextDoc() throws IOException { - assert docID() + 1 >= lastShallowTarget : "target = " + (docID() + 1) + " < last shallow target = " + lastShallowTarget; + assert docID() + 1 >= lastShallowTarget + : "target = " + (docID() + 1) + " < last shallow target = " + lastShallowTarget; return assertingPostings.nextDoc(); } @Override public int advance(int target) throws IOException { - assert target >= lastShallowTarget : "target = " + target + " < last shallow target = " + lastShallowTarget; + assert target >= lastShallowTarget + : "target = " + target + " < last shallow target = " + lastShallowTarget; return assertingPostings.advance(target); } @@ -540,22 +566,24 @@ public class AssertingLeafReader extends FilterLeafReader { @Override public int numLevels() { - assert validFor == Math.max(impactsEnum.docID(), impactsEnum.lastShallowTarget) : "Cannot reuse impacts after advancing the iterator"; + assert validFor == Math.max(impactsEnum.docID(), impactsEnum.lastShallowTarget) + : "Cannot reuse impacts after advancing the iterator"; return in.numLevels(); } @Override public int getDocIdUpTo(int level) { - assert validFor == Math.max(impactsEnum.docID(), impactsEnum.lastShallowTarget) : "Cannot reuse impacts after advancing the iterator"; + assert validFor == Math.max(impactsEnum.docID(), impactsEnum.lastShallowTarget) + : "Cannot reuse impacts after advancing the iterator"; return in.getDocIdUpTo(level); } @Override public List getImpacts(int level) { - assert validFor == Math.max(impactsEnum.docID(), impactsEnum.lastShallowTarget) : "Cannot reuse impacts after advancing the iterator"; + assert validFor == Math.max(impactsEnum.docID(), impactsEnum.lastShallowTarget) + : "Cannot reuse impacts after advancing the iterator"; return in.getImpacts(level); } - } /** Wraps a NumericDocValues but with additional asserts */ @@ -565,7 +593,7 @@ public class AssertingLeafReader extends FilterLeafReader { private final int maxDoc; private int lastDocID = -1; private boolean exists; - + public AssertingNumericDocValues(NumericDocValues in, int maxDoc) { this.in = in; this.maxDoc = maxDoc; @@ -629,14 +657,14 @@ public class AssertingLeafReader extends FilterLeafReader { assertThread("Numeric doc values", creationThread); assert exists; return in.longValue(); - } + } @Override public String toString() { return "AssertingNumericDocValues(" + in + ")"; } } - + /** Wraps a BinaryDocValues but with additional asserts */ public static class AssertingBinaryDocValues extends BinaryDocValues { private final Thread creationThread = Thread.currentThread(); @@ -644,7 +672,7 @@ public class AssertingLeafReader extends FilterLeafReader { private final int maxDoc; private int lastDocID = -1; private boolean exists; - + public AssertingBinaryDocValues(BinaryDocValues in, int maxDoc) { this.in = in; this.maxDoc = maxDoc; @@ -724,7 +752,7 @@ public class AssertingLeafReader extends FilterLeafReader { private final int valueCount; private int lastDocID = -1; private boolean exists; - + public AssertingSortedDocValues(SortedDocValues in, int maxDoc) { 
      this.in = in;
      this.maxDoc = maxDoc;
@@ -847,7 +875,8 @@ public class AssertingLeafReader extends FilterLeafReader {
       if (singleDocValues == null) {
         return new AssertingSortedNumericDocValues(in, maxDoc);
       } else {
-        NumericDocValues assertingDocValues = new AssertingNumericDocValues(singleDocValues, maxDoc);
+        NumericDocValues assertingDocValues =
+            new AssertingNumericDocValues(singleDocValues, maxDoc);
         return DocValues.singleton(assertingDocValues);
       }
     }
@@ -910,7 +939,8 @@ public class AssertingLeafReader extends FilterLeafReader {
     public long nextValue() throws IOException {
       assertThread("Sorted numeric doc values", creationThread);
       assert exists;
-      assert valueUpto < in.docValueCount(): "valueUpto=" + valueUpto + " in.docValueCount()=" + in.docValueCount();
+      assert valueUpto < in.docValueCount()
+          : "valueUpto=" + valueUpto + " in.docValueCount()=" + in.docValueCount();
       valueUpto++;
       return in.nextValue();
     }
@@ -921,9 +951,9 @@ public class AssertingLeafReader extends FilterLeafReader {
       assert exists;
       assert in.docValueCount() > 0;
       return in.docValueCount();
-    }
+    }
   }
-
+
   /** Wraps a SortedSetDocValues but with additional asserts */
   public static class AssertingSortedSetDocValues extends SortedSetDocValues {
     private final Thread creationThread = Thread.currentThread();
@@ -933,7 +963,7 @@ public class AssertingLeafReader extends FilterLeafReader {
     private int lastDocID = -1;
     private long lastOrd = NO_MORE_ORDS;
     private boolean exists;
-
+
     private AssertingSortedSetDocValues(SortedSetDocValues in, int maxDoc) {
       this.in = in;
       this.maxDoc = maxDoc;
@@ -1005,7 +1035,7 @@ public class AssertingLeafReader extends FilterLeafReader {
       assert cost >= 0;
       return cost;
     }
-
+
     @Override
     public long nextOrd() throws IOException {
       assertThread("Sorted set doc values", creationThread);
@@ -1057,7 +1087,9 @@ public class AssertingLeafReader extends FilterLeafReader {
       assertStats(maxDoc);
     }

-    public PointValues getWrapped() { return in; }
+    public PointValues getWrapped() {
+      return in;
+    }

     private void assertStats(int maxDoc) {
       assert in.size() > 0;
@@ -1069,7 +1101,12 @@ public class AssertingLeafReader extends FilterLeafReader {
     @Override
     public void intersect(IntersectVisitor visitor) throws IOException {
       assertThread("Points", creationThread);
-      in.intersect(new AssertingIntersectVisitor(in.getNumDimensions(), in.getNumIndexDimensions(), in.getBytesPerDimension(), visitor));
+      in.intersect(
+          new AssertingIntersectVisitor(
+              in.getNumDimensions(),
+              in.getNumIndexDimensions(),
+              in.getBytesPerDimension(),
+              visitor));
     }

     @Override
@@ -1121,10 +1158,12 @@ public class AssertingLeafReader extends FilterLeafReader {
       assertThread("Points", creationThread);
       return in.getDocCount();
     }
-
   }

-  /** Validates in the 1D case that all points are visited in order, and point values are in bounds of the last cell checked */
+  /**
+   * Validates in the 1D case that all points are visited in order, and point values are in bounds
+   * of the last cell checked
+   */
   static class AssertingIntersectVisitor implements IntersectVisitor {
     final IntersectVisitor in;
     final int numDataDims;
@@ -1137,13 +1176,14 @@ public class AssertingLeafReader extends FilterLeafReader {
     private int lastDocID = -1;
     private int docBudget;

-    AssertingIntersectVisitor(int numDataDims, int numIndexDims, int bytesPerDim, IntersectVisitor in) {
+    AssertingIntersectVisitor(
+        int numDataDims, int numIndexDims, int bytesPerDim, IntersectVisitor in) {
       this.in = in;
       this.numDataDims = numDataDims;
       this.numIndexDims = numIndexDims;
       this.bytesPerDim = bytesPerDim;
-      lastMaxPackedValue = new byte[numDataDims*bytesPerDim];
-      lastMinPackedValue = new byte[numDataDims*bytesPerDim];
+      lastMaxPackedValue = new byte[numDataDims * bytesPerDim];
+      lastMinPackedValue = new byte[numDataDims * bytesPerDim];
       if (numDataDims == 1) {
         lastDocValue = new byte[bytesPerDim];
       } else {
@@ -1155,7 +1195,8 @@ public class AssertingLeafReader extends FilterLeafReader {
     public void visit(int docID) throws IOException {
       assert --docBudget >= 0 : "called add() more times than the last call to grow() reserved";

-      // This method, not filtering each hit, should only be invoked when the cell is inside the query shape:
+      // This method, not filtering each hit, should only be invoked when the cell is inside the
+      // query shape:
       assert lastCompareResult == Relation.CELL_INSIDE_QUERY;
       in.visit(docID);
     }
@@ -1164,26 +1205,44 @@ public class AssertingLeafReader extends FilterLeafReader {
     public void visit(int docID, byte[] packedValue) throws IOException {
       assert --docBudget >= 0 : "called add() more times than the last call to grow() reserved";

-      // This method, to filter each doc's value, should only be invoked when the cell crosses the query shape:
+      // This method, to filter each doc's value, should only be invoked when the cell crosses the
+      // query shape:
       assert lastCompareResult == PointValues.Relation.CELL_CROSSES_QUERY;

       // This doc's packed value should be contained in the last cell passed to compare:
-      for(int dim=0;dim

-    expectThrows(UnsupportedOperationException.class, () -> {
-      cfs.createOutput("bogus", IOContext.DEFAULT);
-    });
+    expectThrows(
+        UnsupportedOperationException.class,
+        () -> {
+          cfs.createOutput("bogus", IOContext.DEFAULT);
+        });
     cfs.close();
     dir.close();
   }
-
+
   // test that cfs reader is read-only
   public void testDeleteFileDisabled() throws IOException {
     final String testfile = "_123.test";
@@ -249,19 +252,21 @@ public abstract class BaseCompoundFormatTestCase extends BaseIndexFileFormatTest
     IndexOutput out = dir.createOutput(testfile, IOContext.DEFAULT);
     out.writeInt(3);
     out.close();
-
+
     SegmentInfo si = newSegmentInfo(dir, "_123");
     si.setFiles(Collections.emptyList());
     si.getCodec().compoundFormat().write(dir, si, IOContext.DEFAULT);
     Directory cfs = si.getCodec().compoundFormat().getCompoundReader(dir, si, IOContext.DEFAULT);
-    expectThrows(UnsupportedOperationException.class, () -> {
-      cfs.deleteFile(testfile);
-    });
+    expectThrows(
+        UnsupportedOperationException.class,
+        () -> {
+          cfs.deleteFile(testfile);
+        });
     cfs.close();
     dir.close();
   }
-
+
   // test that cfs reader is read-only
   public void testRenameFileDisabled() throws IOException {
     final String testfile = "_123.test";
@@ -270,19 +275,21 @@ public abstract class BaseCompoundFormatTestCase extends BaseIndexFileFormatTest
     IndexOutput out = dir.createOutput(testfile, IOContext.DEFAULT);
     out.writeInt(3);
     out.close();
-
+
     SegmentInfo si = newSegmentInfo(dir, "_123");
     si.setFiles(Collections.emptyList());
     si.getCodec().compoundFormat().write(dir, si, IOContext.DEFAULT);
     Directory cfs = si.getCodec().compoundFormat().getCompoundReader(dir, si, IOContext.DEFAULT);
-    expectThrows(UnsupportedOperationException.class, () -> {
-      cfs.rename(testfile, "bogus");
-    });
+    expectThrows(
+        UnsupportedOperationException.class,
+        () -> {
+          cfs.rename(testfile, "bogus");
+        });
     cfs.close();
     dir.close();
   }
-
+
   // test that cfs reader is read-only
   public void testSyncDisabled() throws IOException {
     final String testfile = "_123.test";
@@ -291,19 +298,21 @@ public abstract class BaseCompoundFormatTestCase extends BaseIndexFileFormatTest
     IndexOutput out = dir.createOutput(testfile, IOContext.DEFAULT);
     out.writeInt(3);
     out.close();
-
+
     SegmentInfo si = newSegmentInfo(dir, "_123");
     si.setFiles(Collections.emptyList());
     si.getCodec().compoundFormat().write(dir, si, IOContext.DEFAULT);
     Directory cfs = si.getCodec().compoundFormat().getCompoundReader(dir, si, IOContext.DEFAULT);
-    expectThrows(UnsupportedOperationException.class, () -> {
-      cfs.sync(Collections.singleton(testfile));
-    });
+    expectThrows(
+        UnsupportedOperationException.class,
+        () -> {
+          cfs.sync(Collections.singleton(testfile));
+        });
     cfs.close();
     dir.close();
   }
-
+
   // test that cfs reader is read-only
   public void testMakeLockDisabled() throws IOException {
     final String testfile = "_123.test";
@@ -312,25 +321,26 @@ public abstract class BaseCompoundFormatTestCase extends BaseIndexFileFormatTest
     IndexOutput out = dir.createOutput(testfile, IOContext.DEFAULT);
     out.writeInt(3);
     out.close();
-
+
     SegmentInfo si = newSegmentInfo(dir, "_123");
     si.setFiles(Collections.emptyList());
     si.getCodec().compoundFormat().write(dir, si, IOContext.DEFAULT);
     Directory cfs = si.getCodec().compoundFormat().getCompoundReader(dir, si, IOContext.DEFAULT);
-    expectThrows(UnsupportedOperationException.class, () -> {
-      cfs.obtainLock("foobar");
-    });
+    expectThrows(
+        UnsupportedOperationException.class,
+        () -> {
+          cfs.obtainLock("foobar");
+        });
     cfs.close();
     dir.close();
   }
-
-  /**
-   * This test creates a compound file based on a large number of files of
-   * various length. The file content is generated randomly. The sizes range
-   * from 0 to 1Mb. Some of the sizes are selected to test the buffering
-   * logic in the file reading code. For this the chunk variable is set to
-   * the length of the buffer used internally by the compound file logic.
+
+  /**
+   * This test creates a compound file based on a large number of files of various length. The file
+   * content is generated randomly. The sizes range from 0 to 1Mb. Some of the sizes are selected to
+   * test the buffering logic in the file reading code. For this the chunk variable is set to the
+   * length of the buffer used internally by the compound file logic.
   */
  public void testRandomFiles() throws IOException {
    Directory dir = newDirectory();
@@ -350,18 +360,18 @@ public abstract class BaseCompoundFormatTestCase extends BaseIndexFileFormatTest
    createRandomFile(dir, segment + ".big5", 3 * chunk - 1, segId);
    createRandomFile(dir, segment + ".big6", 3 * chunk + 1, segId);
    createRandomFile(dir, segment + ".big7", 1000 * chunk, segId);
-
+
    List<String> files = new ArrayList<>();
    for (String file : dir.listAll()) {
      if (file.startsWith(segment)) {
        files.add(file);
      }
    }
-
+
    si.setFiles(files);
    si.getCodec().compoundFormat().write(dir, si, IOContext.DEFAULT);
    Directory cfs = si.getCodec().compoundFormat().getCompoundReader(dir, si, IOContext.DEFAULT);
-
+
    for (String file : files) {
      IndexInput check = dir.openInput(file, newIOContext(random()));
      IndexInput test = cfs.openInput(file, newIOContext(random()));
@@ -373,14 +383,14 @@ public abstract class BaseCompoundFormatTestCase extends BaseIndexFileFormatTest
    cfs.close();
    dir.close();
  }
-
+
  // Make sure we don't somehow use more than 1 descriptor
  // when reading a CFS with many subs:
  public void testManySubFiles() throws IOException {
    final MockDirectoryWrapper dir = newMockFSDirectory(createTempDir("CFSManySubFiles"));
-
+
    final int FILE_COUNT = atLeast(500);
-
+
    List<String> files = new ArrayList<>();
    SegmentInfo si = newSegmentInfo(dir, "_123");
    for (int fileIdx = 0; fileIdx < FILE_COUNT; fileIdx++) {
@@ -392,70 +402,71 @@ public abstract class BaseCompoundFormatTestCase extends BaseIndexFileFormatTest
        CodecUtil.writeFooter(out);
      }
    }
-
+
    assertEquals(0, dir.getFileHandleCount());
-
+
    si.setFiles(files);
    si.getCodec().compoundFormat().write(dir, si, IOContext.DEFAULT);
    Directory cfs = si.getCodec().compoundFormat().getCompoundReader(dir, si, IOContext.DEFAULT);
-
+
    final IndexInput[] ins = new IndexInput[FILE_COUNT];
    for (int fileIdx = 0; fileIdx < FILE_COUNT; fileIdx++) {
      ins[fileIdx] = cfs.openInput("_123." + fileIdx, newIOContext(random()));
      CodecUtil.checkIndexHeader(ins[fileIdx], "Foo", 0, 0, si.getId(), "suffix");
    }
-
+
    assertEquals(1, dir.getFileHandleCount());
    for (int fileIdx = 0; fileIdx < FILE_COUNT; fileIdx++) {
      assertEquals((byte) fileIdx, ins[fileIdx].readByte());
    }
-
+
    assertEquals(1, dir.getFileHandleCount());
-
-    for(int fileIdx=0;fileIdx

-    expectThrows(IOException.class, () -> {
-      cr.openInput("bogus", newIOContext(random()));
-    });
-
+    expectThrows(
+        IOException.class,
+        () -> {
+          cr.openInput("bogus", newIOContext(random()));
+        });
+
    cr.close();
    dir.close();
  }
-
+
  public void testReadPastEOF() throws IOException {
    Directory dir = newDirectory();
    Directory cr = createLargeCFS(dir);
@@ -624,65 +638,85 @@ public abstract class BaseCompoundFormatTestCase extends BaseIndexFileFormatTest
    is.readBytes(b, 0, 10);

    // Single byte read past end of file
-    expectThrows(IOException.class, () -> {
-      is.readByte();
-    });
+    expectThrows(
+        IOException.class,
+        () -> {
+          is.readByte();
+        });

    is.seek(is.length() - 10);

    // Block read past end of file
-    expectThrows(IOException.class, () -> {
-      is.readBytes(b, 0, 50);
-    });
-
+    expectThrows(
+        IOException.class,
+        () -> {
+          is.readBytes(b, 0, 50);
+        });
+
    is.close();
    cr.close();
    dir.close();
  }
-
+
  /** Returns a new fake segment */
  protected static SegmentInfo newSegmentInfo(Directory dir, String name) {
    Version minVersion = random().nextBoolean() ? null : Version.LATEST;
-    return new SegmentInfo(dir, Version.LATEST, minVersion, name, 10000, false, Codec.getDefault(), Collections.emptyMap(), StringHelper.randomId(), Collections.emptyMap(), null);
+    return new SegmentInfo(
+        dir,
+        Version.LATEST,
+        minVersion,
+        name,
+        10000,
+        false,
+        Codec.getDefault(),
+        Collections.emptyMap(),
+        StringHelper.randomId(),
+        Collections.emptyMap(),
+        null);
  }
-
+
  /** Creates a file of the specified size with random data. */
-  protected static void createRandomFile(Directory dir, String name, int size, byte[] segId) throws IOException {
+  protected static void createRandomFile(Directory dir, String name, int size, byte[] segId)
+      throws IOException {
    Random rnd = random();
    try (IndexOutput os = dir.createOutput(name, newIOContext(random()))) {
      CodecUtil.writeIndexHeader(os, "Foo", 0, segId, "suffix");
-      for (int i=0; i

    while (remainder > 0) {
      int readLen = (int) Math.min(remainder, expectedBuffer.length);
@@ -692,55 +726,57 @@ public abstract class BaseCompoundFormatTestCase extends BaseIndexFileFormatTest
      remainder -= readLen;
    }
  }
-
-  protected static void assertSameStreams(String msg, IndexInput expected, IndexInput actual, long seekTo) throws IOException {
+
+  protected static void assertSameStreams(
+      String msg, IndexInput expected, IndexInput actual, long seekTo) throws IOException {
    if (seekTo >= 0 && seekTo < expected.length()) {
      expected.seek(seekTo);
      actual.seek(seekTo);
      assertSameStreams(msg + ", seek(mid)", expected, actual);
    }
  }
-
-  protected static void assertSameSeekBehavior(String msg, IndexInput expected, IndexInput actual) throws IOException {
+
+  protected static void assertSameSeekBehavior(String msg, IndexInput expected, IndexInput actual)
+      throws IOException {
    // seek to 0
    long point = 0;
    assertSameStreams(msg + ", seek(0)", expected, actual, point);
-
+
    // seek to middle
    point = expected.length() / 2l;
    assertSameStreams(msg + ", seek(mid)", expected, actual, point);
-
+
    // seek to end - 2
    point = expected.length() - 2;
    assertSameStreams(msg + ", seek(end-2)", expected, actual, point);
-
+
    // seek to end - 1
    point = expected.length() - 1;
    assertSameStreams(msg + ", seek(end-1)", expected, actual, point);
-
+
    // seek to the end
    point = expected.length();
    assertSameStreams(msg + ", seek(end)", expected, actual, point);
-
+
    // seek past end
    point = expected.length() + 1;
    assertSameStreams(msg + ", seek(end+1)", expected, actual, point);
  }
-
-  protected static void assertEqualArrays(String msg, byte[] expected, byte[] test, int start, int len) {
+
+  protected static void assertEqualArrays(
+      String msg, byte[] expected, byte[] test, int start, int len) {
    assertNotNull(msg + " null expected", expected);
    assertNotNull(msg + " null test", test);
-
-    for (int i=start; i

    List<String> files = new ArrayList<>();
@@ -749,7 +785,7 @@ public abstract class BaseCompoundFormatTestCase extends BaseIndexFileFormatTest
      createSequenceFile(dir, "_123.f" + i, (byte) 0, 2000, si.getId(), "suffix");
      files.add("_123.f" + i);
    }
-
+
    si.setFiles(files);
    si.getCodec().compoundFormat().write(dir, si, IOContext.DEFAULT);
    Directory cfs = si.getCodec().compoundFormat().getCompoundReader(dir, si, IOContext.DEFAULT);
@@ -766,19 +802,22 @@ public abstract class BaseCompoundFormatTestCase extends BaseIndexFileFormatTest
    assumeTrue("test does not work with CFS", true);
  }

-  // LUCENE-6311: make sure the resource name inside a compound file confesses that it's inside a compound file
+  // LUCENE-6311: make sure the resource name inside a compound file confesses that it's inside a
+  // compound file
  public void testResourceNameInsideCompoundFile() throws Exception {
    Directory dir = newDirectory();
    String subFile = "_123.xyz";
    SegmentInfo si = newSegmentInfo(dir, "_123");
    createSequenceFile(dir, subFile, (byte) 0, 10, si.getId(), "suffix");
-
+
    si.setFiles(Collections.singletonList(subFile));
    si.getCodec().compoundFormat().write(dir, si, IOContext.DEFAULT);
    Directory cfs = si.getCodec().compoundFormat().getCompoundReader(dir, si, IOContext.DEFAULT);

    IndexInput in = cfs.openInput(subFile, IOContext.DEFAULT);
    String desc = in.toString();
-    assertTrue("resource description hides that it's inside a compound file: " + desc, desc.contains("[slice=" + subFile + "]"));
+    assertTrue(
+        "resource description hides that it's inside a compound file: " + desc,
+        desc.contains("[slice=" + subFile + "]"));
    cfs.close();
    dir.close();
  }
@@ -789,14 +828,17 @@ public abstract class BaseCompoundFormatTestCase extends BaseIndexFileFormatTest

    // missing codec header
    try (IndexOutput os = dir.createOutput(subFile, newIOContext(random()))) {
-      for (int i=0; i < 1024; i++) {
+      for (int i = 0; i < 1024; i++) {
        os.writeByte((byte) i);
      }
    }

    SegmentInfo si = newSegmentInfo(dir, "_123");
    si.setFiles(Collections.singletonList(subFile));
-    Exception e = expectThrows(CorruptIndexException.class, () -> si.getCodec().compoundFormat().write(dir, si, IOContext.DEFAULT));
+    Exception e =
+        expectThrows(
+            CorruptIndexException.class,
+            () -> si.getCodec().compoundFormat().write(dir, si, IOContext.DEFAULT));
    assertTrue(e.getMessage().contains("codec header mismatch"));
    dir.close();
  }
@@ -809,7 +851,7 @@ public abstract class BaseCompoundFormatTestCase extends BaseIndexFileFormatTest
    SegmentInfo si = newSegmentInfo(dir, "_123");
    try (IndexOutput os = dir.createOutput(subFile, newIOContext(random()))) {
      CodecUtil.writeIndexHeader(os, "Foo", 0, si.getId(), "suffix");
-      for (int i=0; i < 1024; i++) {
+      for (int i = 0; i < 1024; i++) {
        os.writeByte((byte) i);
      }

@@ -818,11 +860,14 @@ public abstract class BaseCompoundFormatTestCase extends BaseIndexFileFormatTest
      os.writeInt(0);

      long checksum = os.getChecksum();
-      os.writeLong(checksum+1);
+      os.writeLong(checksum + 1);
    }

    si.setFiles(Collections.singletonList(subFile));
-    Exception e = expectThrows(CorruptIndexException.class, () -> si.getCodec().compoundFormat().write(dir, si, IOContext.DEFAULT));
+    Exception e =
+        expectThrows(
+            CorruptIndexException.class,
+            () -> si.getCodec().compoundFormat().write(dir, si, IOContext.DEFAULT));
    assertTrue(e.getMessage().contains("checksum failed (hardware problem?)"));
    dir.close();
  }
@@ -843,22 +888,26 @@ public abstract class BaseCompoundFormatTestCase extends BaseIndexFileFormatTest
    }

    si.setFiles(Collections.singletonList(subFile));
-
+
    FileTrackingDirectoryWrapper writeTrackingDir = new FileTrackingDirectoryWrapper(dir);
    si.getCodec().compoundFormat().write(writeTrackingDir, si, IOContext.DEFAULT);
    final Set<String> createdFiles = writeTrackingDir.getFiles();

    ReadBytesDirectoryWrapper readTrackingDir = new ReadBytesDirectoryWrapper(dir);
-    CompoundDirectory compoundDir = si.getCodec().compoundFormat().getCompoundReader(readTrackingDir, si, IOContext.READ);
+    CompoundDirectory compoundDir =
+        si.getCodec().compoundFormat().getCompoundReader(readTrackingDir, si, IOContext.READ);
    compoundDir.checkIntegrity();
-    Map<String,FixedBitSet> readBytes = readTrackingDir.getReadBytes();
+    Map<String, FixedBitSet> readBytes = readTrackingDir.getReadBytes();
    assertEquals(createdFiles, readBytes.keySet());
    for (Map.Entry<String, FixedBitSet> entry : readBytes.entrySet()) {
      final String file = entry.getKey();
      final FixedBitSet set = entry.getValue().clone();
      set.flip(0, set.length());
      final int next = set.nextSetBit(0);
-      assertEquals("Byte at offset " + next + " of " + file + " was not read", DocIdSetIterator.NO_MORE_DOCS, next);
+      assertEquals(
+          "Byte at offset " + next + " of " + file + " was not read",
+          DocIdSetIterator.NO_MORE_DOCS,
+          next);
    }
    compoundDir.close();
    dir.close();
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseCompressingDocValuesFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseCompressingDocValuesFormatTestCase.java
index 17a9d192cbe..fc6d23c6a55 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseCompressingDocValuesFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseCompressingDocValuesFormatTestCase.java
@@ -16,10 +16,10 @@
  */
 package org.apache.lucene.index;

+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
-
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.NumericDocValuesField;
@@ -28,8 +28,6 @@ import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.util.packed.PackedInts;

-import com.carrotsearch.randomizedtesting.generators.RandomPicks;
-
 /** Extends {@link BaseDocValuesFormatTestCase} to add compression checks. */
 public abstract class BaseCompressingDocValuesFormatTestCase extends BaseDocValuesFormatTestCase {

diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java
index c66142d69b0..db0688fba1d 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java
@@ -16,6 +16,10 @@
  */
 package org.apache.lucene.index;

+import static org.apache.lucene.index.SortedSetDocValues.NO_MORE_ORDS;
+import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS;
+
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.PrintStream;
@@ -25,14 +29,13 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
-import java.util.Map.Entry;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeSet;
 import java.util.concurrent.CountDownLatch;
 import java.util.function.LongSupplier;
 import java.util.function.Supplier;
-
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.Codec;
@@ -65,22 +68,16 @@ import org.apache.lucene.util.BytesRefHash;
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.TestUtil;
-
-import com.carrotsearch.randomizedtesting.generators.RandomPicks;
 import org.apache.lucene.util.automaton.CompiledAutomaton;
 import org.apache.lucene.util.automaton.RegExp;

-import static org.apache.lucene.index.SortedSetDocValues.NO_MORE_ORDS;
-import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS;
-
 /**
- * Abstract class to do basic tests for a docvalues format.
- * NOTE: This test focuses on the docvalues impl, nothing else.
- * The [stretch] goal is for this test to be
- * so thorough in testing a new DocValuesFormat that if this
- * test passes, then all Lucene/Solr tests should also pass. Ie,
- * if there is some bug in a given DocValuesFormat that this
- * test fails to catch then this test needs to be improved! */
+ * Abstract class to do basic tests for a docvalues format. NOTE: This test focuses on the docvalues
+ * impl, nothing else. The [stretch] goal is for this test to be so thorough in testing a new
+ * DocValuesFormat that if this test passes, then all Lucene/Solr tests should also pass. Ie, if
+ * there is some bug in a given DocValuesFormat that this test fails to catch then this test needs
+ * to be improved!
+ */
 public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTestCase {

   @Override
@@ -88,15 +85,20 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     if (usually()) {
       doc.add(new NumericDocValuesField("ndv", random().nextInt(1 << 12)));
       doc.add(new BinaryDocValuesField("bdv", new BytesRef(TestUtil.randomSimpleString(random()))));
-      doc.add(new SortedDocValuesField("sdv", new BytesRef(TestUtil.randomSimpleString(random(), 2))));
+      doc.add(
+          new SortedDocValuesField("sdv", new BytesRef(TestUtil.randomSimpleString(random(), 2))));
     }
     int numValues = random().nextInt(5);
     for (int i = 0; i < numValues; ++i) {
-      doc.add(new SortedSetDocValuesField("ssdv", new BytesRef(TestUtil.randomSimpleString(random(), 2))));
+      doc.add(
+          new SortedSetDocValuesField(
+              "ssdv", new BytesRef(TestUtil.randomSimpleString(random(), 2))));
     }
     numValues = random().nextInt(5);
     for (int i = 0; i < numValues; ++i) {
-      doc.add(new SortedNumericDocValuesField("sndv", TestUtil.nextLong(random(), Long.MIN_VALUE, Long.MAX_VALUE)));
+      doc.add(
+          new SortedNumericDocValuesField(
+              "sndv", TestUtil.nextLong(random(), Long.MIN_VALUE, Long.MAX_VALUE)));
     }
   }
@@ -104,14 +106,15 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     Directory directory = newDirectory();
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory);
     Document doc = new Document();
-    String longTerm = "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm" +
-        "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
+    String longTerm =
+        "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm"
+            + "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
     String text = "This is the text to be indexed. " + longTerm;
     doc.add(newTextField("fieldname", text, Field.Store.YES));
     doc.add(new NumericDocValuesField("dv", 5));
     iwriter.addDocument(doc);
     iwriter.close();
-
+
     // Now search the index:
     IndexReader ireader = DirectoryReader.open(directory); // read-only=true
     IndexSearcher isearcher = new IndexSearcher(ireader);
@@ -139,13 +142,14 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     Directory directory = newDirectory();
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory);
     Document doc = new Document();
-    String longTerm = "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
+    String longTerm =
+        "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
     String text = "This is the text to be indexed. " + longTerm;
     doc.add(newTextField("fieldname", text, Field.Store.YES));
     doc.add(new FloatDocValuesField("dv", 5.7f));
     iwriter.addDocument(doc);
     iwriter.close();
-
+
     // Now search the index:
     IndexReader ireader = DirectoryReader.open(directory); // read-only=true
     IndexSearcher isearcher = new IndexSearcher(ireader);
@@ -160,7 +164,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     Document hitDoc = isearcher.doc(docID);
     assertEquals(text, hitDoc.get("fieldname"));
     assert ireader.leaves().size() == 1;
-
+
     NumericDocValues dv = ireader.leaves().get(0).reader().getNumericDocValues("dv");
     assertEquals(docID, dv.advance(docID));
     assertEquals(Float.floatToRawIntBits(5.7f), dv.longValue());
@@ -169,19 +173,20 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     ireader.close();
     directory.close();
   }
-
+
   public void testTwoNumbers() throws IOException {
     Directory directory = newDirectory();
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory);
     Document doc = new Document();
-    String longTerm = "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
+    String longTerm =
+        "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
     String text = "This is the text to be indexed. " + longTerm;
     doc.add(newTextField("fieldname", text, Field.Store.YES));
     doc.add(new NumericDocValuesField("dv1", 5));
     doc.add(new NumericDocValuesField("dv2", 17));
     iwriter.addDocument(doc);
     iwriter.close();
-
+
     // Now search the index:
     IndexReader ireader = DirectoryReader.open(directory); // read-only=true
     IndexSearcher isearcher = new IndexSearcher(ireader);
@@ -212,14 +217,15 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     Directory directory = newDirectory();
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory);
     Document doc = new Document();
-    String longTerm = "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
+    String longTerm =
+        "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
     String text = "This is the text to be indexed. " + longTerm;
     doc.add(newTextField("fieldname", text, Field.Store.YES));
     doc.add(new BinaryDocValuesField("dv1", new BytesRef(longTerm)));
     doc.add(new BinaryDocValuesField("dv2", new BytesRef(text)));
     iwriter.addDocument(doc);
     iwriter.close();
-
+
     // Now search the index:
     IndexReader ireader = DirectoryReader.open(directory); // read-only=true
     IndexSearcher isearcher = new IndexSearcher(ireader);
@@ -247,20 +253,20 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     ireader.close();
     directory.close();
   }
-
+
   public void testVariouslyCompressibleBinaryValues() throws IOException {
     Directory directory = newDirectory();
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory);
     int numDocs = 1 + random().nextInt(100);

-    HashMap<Integer, BytesRef> writtenValues = new HashMap<>(numDocs);
-
-    // Small vocabulary ranges will be highly compressible
+    HashMap<Integer, BytesRef> writtenValues = new HashMap<>(numDocs);
+
+    // Small vocabulary ranges will be highly compressible
     int vocabRange = 1 + random().nextInt(Byte.MAX_VALUE - 1);

     for (int i = 0; i < numDocs; i++) {
       Document doc = new Document();
-
+
       // Generate random-sized byte array with random choice of bytes in vocab range
       byte[] value = new byte[500 + random().nextInt(1024)];
       for (int j = 0; j < value.length; j++) {
@@ -297,20 +303,21 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     ireader.close();
     directory.close();
-  }
+  }

   public void testTwoFieldsMixed() throws IOException {
     Directory directory = newDirectory();
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory);
     Document doc = new Document();
-    String longTerm = "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
+    String longTerm =
+        "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
     String text = "This is the text to be indexed. " + longTerm;
     doc.add(newTextField("fieldname", text, Field.Store.YES));
     doc.add(new NumericDocValuesField("dv1", 5));
     doc.add(new BinaryDocValuesField("dv2", new BytesRef("hello world")));
     iwriter.addDocument(doc);
     iwriter.close();
-
+
     // Now search the index:
     IndexReader ireader = DirectoryReader.open(directory); // read-only=true
     IndexSearcher isearcher = new IndexSearcher(ireader);
@@ -336,12 +343,13 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     ireader.close();
     directory.close();
   }
-
+
   public void testThreeFieldsMixed() throws IOException {
     Directory directory = newDirectory();
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory);
     Document doc = new Document();
-    String longTerm = "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
+    String longTerm =
+        "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
     String text = "This is the text to be indexed. " + longTerm;
     doc.add(newTextField("fieldname", text, Field.Store.YES));
     doc.add(new SortedDocValuesField("dv1", new BytesRef("hello hello")));
@@ -349,7 +357,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     doc.add(new BinaryDocValuesField("dv3", new BytesRef("hello world")));
     iwriter.addDocument(doc);
     iwriter.close();
-
+
     // Now search the index:
     IndexReader ireader = DirectoryReader.open(directory); // read-only=true
     IndexSearcher isearcher = new IndexSearcher(ireader);
@@ -380,12 +388,13 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     ireader.close();
     directory.close();
   }
-
+
   public void testThreeFieldsMixed2() throws IOException {
     Directory directory = newDirectory();
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory);
     Document doc = new Document();
-    String longTerm = "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
+    String longTerm =
+        "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
     String text = "This is the text to be indexed. " + longTerm;
     doc.add(newTextField("fieldname", text, Field.Store.YES));
     doc.add(new BinaryDocValuesField("dv1", new BytesRef("hello world")));
@@ -393,7 +402,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     doc.add(new NumericDocValuesField("dv3", 5));
     iwriter.addDocument(doc);
     iwriter.close();
-
+
     // Now search the index:
     IndexReader ireader = DirectoryReader.open(directory); // read-only=true
     IndexSearcher isearcher = new IndexSearcher(ireader);
@@ -425,7 +434,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     ireader.close();
     directory.close();
   }
-
+
   public void testTwoDocumentsNumeric() throws IOException {
     Analyzer analyzer = new MockAnalyzer(random());

@@ -441,7 +450,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     iwriter.addDocument(doc);
     iwriter.forceMerge(1);
     iwriter.close();
-
+
     // Now search the index:
     IndexReader ireader = DirectoryReader.open(directory); // read-only=true
     assert ireader.leaves().size() == 1;
@@ -454,7 +463,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     ireader.close();
     directory.close();
   }
-
+
   public void testTwoDocumentsMerged() throws IOException {
     Analyzer analyzer = new MockAnalyzer(random());

@@ -473,12 +482,12 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     iwriter.addDocument(doc);
     iwriter.forceMerge(1);
     iwriter.close();
-
+
     // Now search the index:
     IndexReader ireader = DirectoryReader.open(directory); // read-only=true
     assert ireader.leaves().size() == 1;
     NumericDocValues dv = ireader.leaves().get(0).reader().getNumericDocValues("dv");
-    for(int i=0;i<2;i++) {
+    for (int i = 0; i < 2; i++) {
       Document doc2 = ireader.leaves().get(0).reader().document(i);
       long expected;
       if (doc2.get("id").equals("0")) {
@@ -509,7 +518,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     iwriter.addDocument(doc);
     iwriter.forceMerge(1);
     iwriter.close();
-
+
     // Now search the index:
     IndexReader ireader = DirectoryReader.open(directory); // read-only=true
     assert ireader.leaves().size() == 1;
@@ -522,7 +531,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     ireader.close();
     directory.close();
   }
-
+
   public void testBigNumericRange2() throws IOException {
     Analyzer analyzer = new MockAnalyzer(random());

@@ -538,7 +547,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     iwriter.addDocument(doc);
     iwriter.forceMerge(1);
     iwriter.close();
-
+
     // Now search the index:
     IndexReader ireader = DirectoryReader.open(directory); // read-only=true
     assert ireader.leaves().size() == 1;
@@ -551,7 +560,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     ireader.close();
     directory.close();
   }
-
+
   public void testBytes() throws IOException {
     Analyzer analyzer = new MockAnalyzer(random());

@@ -559,13 +568,14 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     IndexWriterConfig conf = newIndexWriterConfig(analyzer);
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
     Document doc = new Document();
-    String longTerm = "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
+    String longTerm =
+        "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
     String text = "This is the text to be indexed. " + longTerm;
     doc.add(newTextField("fieldname", text, Field.Store.YES));
     doc.add(new BinaryDocValuesField("dv", new BytesRef("hello world")));
     iwriter.addDocument(doc);
     iwriter.close();
-
+
     // Now search the index:
     IndexReader ireader = DirectoryReader.open(directory); // read-only=true
     IndexSearcher isearcher = new IndexSearcher(ireader);
@@ -588,7 +598,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     ireader.close();
     directory.close();
   }
-
+
   public void testBytesTwoDocumentsMerged() throws IOException {
     Analyzer analyzer = new MockAnalyzer(random());

@@ -607,12 +617,12 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     iwriter.addDocument(doc);
     iwriter.forceMerge(1);
     iwriter.close();
-
+
     // Now search the index:
     IndexReader ireader = DirectoryReader.open(directory); // read-only=true
     assert ireader.leaves().size() == 1;
     BinaryDocValues dv = ireader.leaves().get(0).reader().getBinaryDocValues("dv");
-    for(int i=0;i<2;i++) {
+    for (int i = 0; i < 2; i++) {
       Document doc2 = ireader.leaves().get(0).reader().document(i);
       String expected;
       if (doc2.get("id").equals("0")) {
@@ -627,17 +637,17 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     ireader.close();
     directory.close();
   }
-
+
   public void testBytesMergeAwayAllValues() throws IOException {
     Directory directory = newDirectory();
     Analyzer analyzer = new MockAnalyzer(random());
     IndexWriterConfig iwconfig = newIndexWriterConfig(analyzer);
     iwconfig.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig);
-
+
     Document doc = new Document();
     doc.add(new StringField("id", "0", Field.Store.NO));
-    iwriter.addDocument(doc);
+    iwriter.addDocument(doc);
     doc = new Document();
     doc.add(new StringField("id", "1", Field.Store.NO));
     doc.add(new BinaryDocValuesField("field", new BytesRef("hi")));
@@ -645,13 +655,13 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     iwriter.commit();
     iwriter.deleteDocuments(new Term("id", "1"));
     iwriter.forceMerge(1);
-
+
     DirectoryReader ireader = iwriter.getReader();
     iwriter.close();
-
+
     BinaryDocValues dv = getOnlyLeafReader(ireader).getBinaryDocValues("field");
     assertEquals(NO_MORE_DOCS, dv.nextDoc());
-
+
     ireader.close();
     directory.close();
   }
@@ -663,13 +673,14 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     IndexWriterConfig conf = newIndexWriterConfig(analyzer);
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
     Document doc = new Document();
-    String longTerm = "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
+    String longTerm =
+        "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
     String text = "This is the text to be indexed. " + longTerm;
     doc.add(newTextField("fieldname", text, Field.Store.YES));
     doc.add(new SortedDocValuesField("dv", new BytesRef("hello world")));
     iwriter.addDocument(doc);
     iwriter.close();
-
+
     // Now search the index:
     IndexReader ireader = DirectoryReader.open(directory); // read-only=true
     IndexSearcher isearcher = new IndexSearcher(ireader);
@@ -710,7 +721,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     iwriter.addDocument(doc);
     iwriter.forceMerge(1);
     iwriter.close();
-
+
     // Now search the index:
     IndexReader ireader = DirectoryReader.open(directory); // read-only=true
     assert ireader.leaves().size() == 1;
@@ -726,7 +737,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     ireader.close();
     directory.close();
   }
-
+
   public void testSortedBytesThreeDocuments() throws IOException {
     Analyzer analyzer = new MockAnalyzer(random());

@@ -745,7 +756,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     iwriter.addDocument(doc);
     iwriter.forceMerge(1);
     iwriter.close();
-
+
     // Now search the index:
     IndexReader ireader = DirectoryReader.open(directory); // read-only=true
     assert ireader.leaves().size() == 1;
@@ -784,7 +795,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     iwriter.addDocument(doc);
     iwriter.forceMerge(1);
     iwriter.close();
-
+
     // Now search the index:
     IndexReader ireader = DirectoryReader.open(directory); // read-only=true
     assert ireader.leaves().size() == 1;
@@ -795,7 +806,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     assertEquals(new BytesRef("hello world 1"), scratch);
     scratch = dv.lookupOrd(1);
     assertEquals(new BytesRef("hello world 2"), scratch);
-    for(int i=0;i<2;i++) {
+    for (int i = 0; i < 2; i++) {
       Document doc2 = ireader.leaves().get(0).reader().document(i);
       String expected;
       if (doc2.get("id").equals("0")) {
@@ -813,17 +824,17 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     ireader.close();
     directory.close();
   }
-
+
   public void testSortedMergeAwayAllValues() throws IOException {
     Directory directory = newDirectory();
     Analyzer analyzer = new MockAnalyzer(random());
     IndexWriterConfig iwconfig = newIndexWriterConfig(analyzer);
     iwconfig.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig);
-
+
     Document doc = new Document();
     doc.add(new StringField("id", "0", Field.Store.NO));
-    iwriter.addDocument(doc);
+    iwriter.addDocument(doc);
     doc = new Document();
     doc.add(new StringField("id", "1", Field.Store.NO));
     doc.add(new SortedDocValuesField("field", new BytesRef("hello")));
@@ -831,13 +842,13 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     iwriter.commit();
     iwriter.deleteDocuments(new Term("id", "1"));
     iwriter.forceMerge(1);
-
+
     DirectoryReader ireader = iwriter.getReader();
     iwriter.close();
-
+
     SortedDocValues dv = getOnlyLeafReader(ireader).getSortedDocValues("field");
     assertEquals(NO_MORE_DOCS, dv.nextDoc());
-
+
     ireader.close();
     directory.close();
   }
@@ -853,7 +864,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     doc.add(new BinaryDocValuesField("dv", new BytesRef("hello\nworld\r1")));
     iwriter.addDocument(doc);
     iwriter.close();
-
+
     // Now search the index:
     IndexReader ireader = DirectoryReader.open(directory); // read-only=true
     assert ireader.leaves().size() == 1;
@@ -878,7 +889,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     // 2nd doc missing the DV field
     iwriter.addDocument(new Document());
     iwriter.close();
-
+
     // Now search the index:
     IndexReader ireader = DirectoryReader.open(directory); // read-only=true
     assert ireader.leaves().size() == 1;
@@ -890,18 +901,18 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     ireader.close();
     directory.close();
   }
-
+
   public void testSortedTermsEnum() throws IOException {
     Directory directory = newDirectory();
     Analyzer analyzer = new MockAnalyzer(random());
     IndexWriterConfig iwconfig = newIndexWriterConfig(analyzer);
     iwconfig.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig);
-
+
     Document doc = new Document();
     doc.add(new SortedDocValuesField("field", new BytesRef("hello")));
     iwriter.addDocument(doc);
-
+
     doc = new Document();
     doc.add(new SortedDocValuesField("field", new BytesRef("world")));
     iwriter.addDocument(doc);
@@ -910,15 +921,15 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     doc.add(new SortedDocValuesField("field", new BytesRef("beer")));
     iwriter.addDocument(doc);
     iwriter.forceMerge(1);
-
+
     DirectoryReader ireader = iwriter.getReader();
     iwriter.close();

     SortedDocValues dv = getOnlyLeafReader(ireader).getSortedDocValues("field");
     assertEquals(3, dv.getValueCount());
-
+
     TermsEnum termsEnum = dv.termsEnum();
-
+
     // next()
     assertEquals("beer", termsEnum.next().utf8ToString());
     assertEquals(0, termsEnum.ord());
@@ -926,7 +937,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     assertEquals(1, termsEnum.ord());
     assertEquals("world", termsEnum.next().utf8ToString());
     assertEquals(2, termsEnum.ord());
-
+
     // seekCeil()
     assertEquals(SeekStatus.NOT_FOUND, termsEnum.seekCeil(new BytesRef("ha!")));
     assertEquals("hello", termsEnum.term().utf8ToString());
@@ -937,7 +948,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     assertEquals(SeekStatus.END, termsEnum.seekCeil(new BytesRef("zzz")));
     assertEquals(SeekStatus.NOT_FOUND, termsEnum.seekCeil(new BytesRef("aba")));
     assertEquals(0, termsEnum.ord());
-
+
     // seekExact()
     assertTrue(termsEnum.seekExact(new BytesRef("beer")));
     assertEquals("beer", termsEnum.term().utf8ToString());
@@ -978,7 +989,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     ireader.close();
     directory.close();
   }
-
+
   public void testEmptySortedBytes() throws IOException {
     Analyzer analyzer = new MockAnalyzer(random());

@@ -994,7 +1005,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     iwriter.addDocument(doc);
     iwriter.forceMerge(1);
     iwriter.close();
-
+
     // Now search the index:
     IndexReader ireader = DirectoryReader.open(directory); // read-only=true
     assert ireader.leaves().size() == 1;
@@ -1009,7 +1020,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     ireader.close();
     directory.close();
   }
-
+
   public void testEmptyBytes() throws IOException {
     Analyzer analyzer = new MockAnalyzer(random());

@@ -1025,7 +1036,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     iwriter.addDocument(doc);
     iwriter.forceMerge(1);
     iwriter.close();
-
+
     // Now search the index:
     IndexReader ireader = DirectoryReader.open(directory); // read-only=true
     assert ireader.leaves().size() == 1;
@@ -1038,7 +1049,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     ireader.close();
     directory.close();
   }
-
+
   public void testVeryLargeButLegalBytes() throws IOException {
     Analyzer analyzer = new MockAnalyzer(random());

@@ -1053,7 +1064,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     doc.add(new BinaryDocValuesField("dv", b));
     iwriter.addDocument(doc);
     iwriter.close();
-
+
     // Now search the index:
     IndexReader ireader = DirectoryReader.open(directory); // read-only=true
     assert ireader.leaves().size() == 1;
@@ -1064,7 +1075,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     ireader.close();
     directory.close();
   }
-
+
   public void testVeryLargeButLegalSortedBytes() throws IOException {
     Analyzer analyzer = new MockAnalyzer(random());

@@ -1079,7 +1090,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     doc.add(new SortedDocValuesField("dv", b));
     iwriter.addDocument(doc);
     iwriter.close();
-
+
     // Now search the index:
     IndexReader ireader = DirectoryReader.open(directory); // read-only=true
     assert ireader.leaves().size() == 1;
@@ -1089,7 +1100,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     ireader.close();
     directory.close();
   }
-
+
   public void testCodecUsesOwnBytes() throws IOException {
     Analyzer analyzer = new MockAnalyzer(random());

@@ -1101,7 +1112,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     doc.add(new BinaryDocValuesField("dv", new BytesRef("boo!")));
     iwriter.addDocument(doc);
     iwriter.close();
-
+
     // Now search the index:
     IndexReader ireader = DirectoryReader.open(directory); // read-only=true
     assert ireader.leaves().size() == 1;
@@ -1112,7 +1123,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     ireader.close();
     directory.close();
   }
-
+
   public void testCodecUsesOwnSortedBytes() throws IOException {
     Analyzer analyzer = new MockAnalyzer(random());

@@ -1124,7 +1135,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     doc.add(new SortedDocValuesField("dv", new BytesRef("boo!")));
     iwriter.addDocument(doc);
     iwriter.close();
-
+
     // Now search the index:
     IndexReader ireader = DirectoryReader.open(directory); // read-only=true
     assert ireader.leaves().size() == 1;
@@ -1137,7 +1148,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     ireader.close();
     directory.close();
   }
-
+
   /*
    * Simple test case to show how to use the API
    */
@@ -1160,7 +1171,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes

     DirectoryReader reader = DirectoryReader.open(dir);
     assertEquals(1, reader.leaves().size());
-
+
     IndexSearcher searcher = new IndexSearcher(reader);

     BooleanQuery.Builder query = new BooleanQuery.Builder();
@@ -1182,7 +1193,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     reader.close();
     dir.close();
   }
-
+
   public void testRandomSortedBytes() throws IOException {
     Directory dir = newDirectory();
     IndexWriterConfig cfg = newIndexWriterConfig(new MockAnalyzer(random()));
@@ -1241,7 +1252,8 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes

     for (Entry<String, String> entry : entrySet) {
       // pk lookup
-      PostingsEnum termPostingsEnum = TestUtil.docs(random(), reader, "id", new BytesRef(entry.getKey()), null, 0);
+      PostingsEnum termPostingsEnum =
+          TestUtil.docs(random(), reader, "id", new BytesRef(entry.getKey()), null, 0);
       int docId = termPostingsEnum.nextDoc();
       expected = new BytesRef(entry.getValue());
       docValues = MultiDocValues.getSortedValues(reader, "field");
@@ -1258,7 +1270,9 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
   private void doTestNumericsVsStoredFields(double density, LongSupplier longs) throws Exception {
     doTestNumericsVsStoredFields(density, longs, 256);
   }
-  private void doTestNumericsVsStoredFields(double density, LongSupplier longs, int minDocs) throws Exception {
+
+  private void doTestNumericsVsStoredFields(double density, LongSupplier longs, int minDocs)
+      throws Exception {
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
@@ -1269,9 +1283,9 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     doc.add(idField);
     doc.add(storedField);
     doc.add(dvField);
-
+
     // index some docs
-    int numDocs = atLeast((int) (minDocs*1.172));
+    int numDocs = atLeast((int) (minDocs * 1.172));
     // numDocs should be always > 256 so that in case of a codec that optimizes
     // for numbers of values <= 256, all storage layouts are tested
     assert numDocs > 256;
@@ -1289,9 +1303,9 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
         writer.commit();
       }
     }
-
+
     // delete some docs
-    int numDeletions = random().nextInt(numDocs/10);
+    int numDeletions = random().nextInt(numDocs / 10);
     for (int i = 0; i < numDeletions; i++) {
       int id = random().nextInt(numDocs);
       writer.deleteDocuments(new Term("id", Integer.toString(id)));
@@ -1330,11 +1344,12 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     ir.close();
   }

-  private void doTestSortedNumericsVsStoredFields(LongSupplier counts, LongSupplier values) throws Exception {
+  private void doTestSortedNumericsVsStoredFields(LongSupplier counts, LongSupplier values)
+      throws Exception {
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
-
+
     // index some docs
     int numDocs = atLeast(300);
     // numDocs should be always > 256 so that in case of a codec that optimizes
@@ -1343,7 +1358,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
     for (int i = 0; i < numDocs; i++) {
       Document doc = new Document();
       doc.add(new StringField("id", Integer.toString(i), Field.Store.NO));
-
+
       int valueCount = (int) counts.getAsLong();
       long valueArray[] = new long[valueCount];
       for (int j = 0; j < valueCount; j++) {
@@ -1360,9 +1375,9 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
         writer.commit();
       }
     }
-
+
     // delete some docs
-    int numDeletions = random().nextInt(numDocs/10);
+    int numDeletions = random().nextInt(numDocs / 10);
     for (int i = 0; i < numDeletions; i++) {
       int id = random().nextInt(numDocs);
writer.deleteDocuments(new Term("id", Integer.toString(id))); } - + // compare DirectoryReader ir = writer.getReader(); TestUtil.checkReader(ir); @@ -1526,7 +1546,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes assertEquals(DocIdSetIterator.NO_MORE_DOCS, docValues.docID()); } ir.close(); - + // compare again writer.forceMerge(1); ir = writer.getReader(); @@ -1551,7 +1571,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes writer.close(); dir.close(); } - + public void testBinaryFixedLengthVsStoredFields() throws Exception { doTestBinaryFixedLengthVsStoredFields(1); } @@ -1564,11 +1584,13 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes int numIterations = atLeast(1); for (int i = 0; i < numIterations; i++) { int fixedLength = TestUtil.nextInt(random(), 0, 10); - doTestBinaryVsStoredFields(density, () -> { - byte buffer[] = new byte[fixedLength]; - random().nextBytes(buffer); - return buffer; - }); + doTestBinaryVsStoredFields( + density, + () -> { + byte buffer[] = new byte[fixedLength]; + random().nextBytes(buffer); + return buffer; + }); } } @@ -1583,16 +1605,19 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes public void doTestBinaryVariableLengthVsStoredFields(double density) throws Exception { int numIterations = atLeast(1); for (int i = 0; i < numIterations; i++) { - doTestBinaryVsStoredFields(density, () -> { - final int length = random().nextInt(10); - byte buffer[] = new byte[length]; - random().nextBytes(buffer); - return buffer; - }); + doTestBinaryVsStoredFields( + density, + () -> { + final int length = random().nextInt(10); + byte buffer[] = new byte[length]; + random().nextBytes(buffer); + return buffer; + }); } } - - protected void doTestSortedVsStoredFields(int numDocs, double density, Supplier bytes) throws Exception { + + protected void doTestSortedVsStoredFields(int numDocs, double density, Supplier bytes) + throws Exception { Directory dir = newFSDirectory(createTempDir("dvduel")); IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random())); RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf); @@ -1603,7 +1628,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes doc.add(idField); doc.add(storedField); doc.add(dvField); - + // index some docs for (int i = 0; i < numDocs; i++) { if (random().nextDouble() > density) { @@ -1619,14 +1644,14 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes writer.commit(); } } - + // delete some docs - int numDeletions = random().nextInt(numDocs/10); + int numDeletions = random().nextInt(numDocs / 10); for (int i = 0; i < numDeletions; i++) { int id = random().nextInt(numDocs); writer.deleteDocuments(new Term("id", Integer.toString(id))); } - + // compare DirectoryReader ir = writer.getReader(); TestUtil.checkReader(ir); @@ -1648,7 +1673,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes } ir.close(); writer.forceMerge(1); - + // compare again ir = writer.getReader(); TestUtil.checkReader(ir); @@ -1672,7 +1697,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes writer.close(); dir.close(); } - + public void testSortedFixedLengthVsStoredFields() throws Exception { int numIterations = atLeast(1); for (int i = 0; i < numIterations; i++) { @@ -1680,7 +1705,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes 
doTestSortedVsStoredFields(atLeast(300), 1, fixedLength, fixedLength); } } - + public void testSparseSortedFixedLengthVsStoredFields() throws Exception { int numIterations = atLeast(1); for (int i = 0; i < numIterations; i++) { @@ -1688,7 +1713,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes doTestSortedVsStoredFields(atLeast(300), random().nextDouble(), fixedLength, fixedLength); } } - + public void testSortedVariableLengthVsStoredFields() throws Exception { int numIterations = atLeast(1); for (int i = 0; i < numIterations; i++) { @@ -1703,358 +1728,362 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes } } - protected void doTestSortedVsStoredFields(int numDocs, double density, int minLength, int maxLength) throws Exception { - doTestSortedVsStoredFields(numDocs, density, () -> { - int length = TestUtil.nextInt(random(), minLength, maxLength); - byte[] buffer = new byte[length]; - random().nextBytes(buffer); - return buffer; - }); + protected void doTestSortedVsStoredFields( + int numDocs, double density, int minLength, int maxLength) throws Exception { + doTestSortedVsStoredFields( + numDocs, + density, + () -> { + int length = TestUtil.nextInt(random(), minLength, maxLength); + byte[] buffer = new byte[length]; + random().nextBytes(buffer); + return buffer; + }); } public void testSortedSetOneValue() throws IOException { Directory directory = newDirectory(); RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory); - + Document doc = new Document(); doc.add(new SortedSetDocValuesField("field", new BytesRef("hello"))); iwriter.addDocument(doc); - + DirectoryReader ireader = iwriter.getReader(); iwriter.close(); - + SortedSetDocValues dv = getOnlyLeafReader(ireader).getSortedSetDocValues("field"); assertEquals(0, dv.nextDoc()); assertEquals(0, dv.nextOrd()); assertEquals(NO_MORE_ORDS, dv.nextOrd()); - + BytesRef bytes = dv.lookupOrd(0); assertEquals(new BytesRef("hello"), bytes); ireader.close(); directory.close(); } - + public void testSortedSetTwoFields() throws IOException { Directory directory = newDirectory(); RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory); - + Document doc = new Document(); doc.add(new SortedSetDocValuesField("field", new BytesRef("hello"))); doc.add(new SortedSetDocValuesField("field2", new BytesRef("world"))); iwriter.addDocument(doc); - + DirectoryReader ireader = iwriter.getReader(); iwriter.close(); - + SortedSetDocValues dv = getOnlyLeafReader(ireader).getSortedSetDocValues("field"); assertEquals(0, dv.nextDoc()); - + assertEquals(0, dv.nextOrd()); assertEquals(NO_MORE_ORDS, dv.nextOrd()); - + BytesRef bytes = dv.lookupOrd(0); assertEquals(new BytesRef("hello"), bytes); - + dv = getOnlyLeafReader(ireader).getSortedSetDocValues("field2"); assertEquals(0, dv.nextDoc()); assertEquals(0, dv.nextOrd()); assertEquals(NO_MORE_ORDS, dv.nextOrd()); - + bytes = dv.lookupOrd(0); assertEquals(new BytesRef("world"), bytes); - + ireader.close(); directory.close(); } - + public void testSortedSetTwoDocumentsMerged() throws IOException { Directory directory = newDirectory(); Analyzer analyzer = new MockAnalyzer(random()); IndexWriterConfig iwconfig = newIndexWriterConfig(analyzer); iwconfig.setMergePolicy(newLogMergePolicy()); RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig); - + Document doc = new Document(); doc.add(new SortedSetDocValuesField("field", new BytesRef("hello"))); iwriter.addDocument(doc); iwriter.commit(); - + doc = new 
Document(); doc.add(new SortedSetDocValuesField("field", new BytesRef("world"))); iwriter.addDocument(doc); iwriter.forceMerge(1); - + DirectoryReader ireader = iwriter.getReader(); iwriter.close(); SortedSetDocValues dv = getOnlyLeafReader(ireader).getSortedSetDocValues("field"); assertEquals(2, dv.getValueCount()); - + assertEquals(0, dv.nextDoc()); assertEquals(0, dv.nextOrd()); assertEquals(NO_MORE_ORDS, dv.nextOrd()); - + BytesRef bytes = dv.lookupOrd(0); assertEquals(new BytesRef("hello"), bytes); - + assertEquals(1, dv.nextDoc()); assertEquals(1, dv.nextOrd()); assertEquals(NO_MORE_ORDS, dv.nextOrd()); - + bytes = dv.lookupOrd(1); - assertEquals(new BytesRef("world"), bytes); + assertEquals(new BytesRef("world"), bytes); ireader.close(); directory.close(); } - + public void testSortedSetTwoValues() throws IOException { Directory directory = newDirectory(); RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory); - + Document doc = new Document(); doc.add(new SortedSetDocValuesField("field", new BytesRef("hello"))); doc.add(new SortedSetDocValuesField("field", new BytesRef("world"))); iwriter.addDocument(doc); - + DirectoryReader ireader = iwriter.getReader(); iwriter.close(); - + SortedSetDocValues dv = getOnlyLeafReader(ireader).getSortedSetDocValues("field"); assertEquals(0, dv.nextDoc()); - + assertEquals(0, dv.nextOrd()); assertEquals(1, dv.nextOrd()); assertEquals(NO_MORE_ORDS, dv.nextOrd()); - + BytesRef bytes = dv.lookupOrd(0); assertEquals(new BytesRef("hello"), bytes); - + bytes = dv.lookupOrd(1); assertEquals(new BytesRef("world"), bytes); ireader.close(); directory.close(); } - + public void testSortedSetTwoValuesUnordered() throws IOException { Directory directory = newDirectory(); RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory); - + Document doc = new Document(); doc.add(new SortedSetDocValuesField("field", new BytesRef("world"))); doc.add(new SortedSetDocValuesField("field", new BytesRef("hello"))); iwriter.addDocument(doc); - + DirectoryReader ireader = iwriter.getReader(); iwriter.close(); - + SortedSetDocValues dv = getOnlyLeafReader(ireader).getSortedSetDocValues("field"); assertEquals(0, dv.nextDoc()); - + assertEquals(0, dv.nextOrd()); assertEquals(1, dv.nextOrd()); assertEquals(NO_MORE_ORDS, dv.nextOrd()); - + BytesRef bytes = dv.lookupOrd(0); assertEquals(new BytesRef("hello"), bytes); - + bytes = dv.lookupOrd(1); assertEquals(new BytesRef("world"), bytes); ireader.close(); directory.close(); } - + public void testSortedSetThreeValuesTwoDocs() throws IOException { Directory directory = newDirectory(); Analyzer analyzer = new MockAnalyzer(random()); IndexWriterConfig iwconfig = newIndexWriterConfig(analyzer); iwconfig.setMergePolicy(newLogMergePolicy()); RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig); - + Document doc = new Document(); doc.add(new SortedSetDocValuesField("field", new BytesRef("hello"))); doc.add(new SortedSetDocValuesField("field", new BytesRef("world"))); iwriter.addDocument(doc); iwriter.commit(); - + doc = new Document(); doc.add(new SortedSetDocValuesField("field", new BytesRef("hello"))); doc.add(new SortedSetDocValuesField("field", new BytesRef("beer"))); iwriter.addDocument(doc); iwriter.forceMerge(1); - + DirectoryReader ireader = iwriter.getReader(); iwriter.close(); SortedSetDocValues dv = getOnlyLeafReader(ireader).getSortedSetDocValues("field"); assertEquals(3, dv.getValueCount()); - + assertEquals(0, dv.nextDoc()); assertEquals(1, dv.nextOrd()); assertEquals(2, 
dv.nextOrd()); assertEquals(NO_MORE_ORDS, dv.nextOrd()); - + assertEquals(1, dv.nextDoc()); assertEquals(0, dv.nextOrd()); assertEquals(1, dv.nextOrd()); assertEquals(NO_MORE_ORDS, dv.nextOrd()); - + BytesRef bytes = dv.lookupOrd(0); assertEquals(new BytesRef("beer"), bytes); - + bytes = dv.lookupOrd(1); assertEquals(new BytesRef("hello"), bytes); - + bytes = dv.lookupOrd(2); assertEquals(new BytesRef("world"), bytes); ireader.close(); directory.close(); } - + public void testSortedSetTwoDocumentsLastMissing() throws IOException { Directory directory = newDirectory(); Analyzer analyzer = new MockAnalyzer(random()); IndexWriterConfig iwconfig = newIndexWriterConfig(analyzer); iwconfig.setMergePolicy(newLogMergePolicy()); RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig); - + Document doc = new Document(); doc.add(new SortedSetDocValuesField("field", new BytesRef("hello"))); iwriter.addDocument(doc); - + doc = new Document(); iwriter.addDocument(doc); iwriter.forceMerge(1); DirectoryReader ireader = iwriter.getReader(); iwriter.close(); - + SortedSetDocValues dv = getOnlyLeafReader(ireader).getSortedSetDocValues("field"); assertEquals(1, dv.getValueCount()); - + assertEquals(0, dv.nextDoc()); assertEquals(0, dv.nextOrd()); assertEquals(NO_MORE_ORDS, dv.nextOrd()); - + BytesRef bytes = dv.lookupOrd(0); assertEquals(new BytesRef("hello"), bytes); - + ireader.close(); directory.close(); } - + public void testSortedSetTwoDocumentsLastMissingMerge() throws IOException { Directory directory = newDirectory(); Analyzer analyzer = new MockAnalyzer(random()); IndexWriterConfig iwconfig = newIndexWriterConfig(analyzer); iwconfig.setMergePolicy(newLogMergePolicy()); RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig); - + Document doc = new Document(); doc.add(new SortedSetDocValuesField("field", new BytesRef("hello"))); iwriter.addDocument(doc); iwriter.commit(); - + doc = new Document(); iwriter.addDocument(doc); iwriter.forceMerge(1); - + DirectoryReader ireader = iwriter.getReader(); iwriter.close(); - + SortedSetDocValues dv = getOnlyLeafReader(ireader).getSortedSetDocValues("field"); assertEquals(1, dv.getValueCount()); assertEquals(0, dv.nextDoc()); assertEquals(0, dv.nextOrd()); assertEquals(NO_MORE_ORDS, dv.nextOrd()); - + BytesRef bytes = dv.lookupOrd(0); assertEquals(new BytesRef("hello"), bytes); - + ireader.close(); directory.close(); } - + public void testSortedSetTwoDocumentsFirstMissing() throws IOException { Directory directory = newDirectory(); Analyzer analyzer = new MockAnalyzer(random()); IndexWriterConfig iwconfig = newIndexWriterConfig(analyzer); iwconfig.setMergePolicy(newLogMergePolicy()); RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig); - + Document doc = new Document(); iwriter.addDocument(doc); - + doc = new Document(); doc.add(new SortedSetDocValuesField("field", new BytesRef("hello"))); iwriter.addDocument(doc); - + iwriter.forceMerge(1); DirectoryReader ireader = iwriter.getReader(); iwriter.close(); - + SortedSetDocValues dv = getOnlyLeafReader(ireader).getSortedSetDocValues("field"); assertEquals(1, dv.getValueCount()); assertEquals(1, dv.nextDoc()); assertEquals(0, dv.nextOrd()); assertEquals(NO_MORE_ORDS, dv.nextOrd()); - + BytesRef bytes = dv.lookupOrd(0); assertEquals(new BytesRef("hello"), bytes); - + ireader.close(); directory.close(); } - + public void testSortedSetTwoDocumentsFirstMissingMerge() throws IOException { Directory directory = newDirectory(); Analyzer analyzer 
= new MockAnalyzer(random()); IndexWriterConfig iwconfig = newIndexWriterConfig(analyzer); iwconfig.setMergePolicy(newLogMergePolicy()); RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig); - + Document doc = new Document(); iwriter.addDocument(doc); iwriter.commit(); - + doc = new Document(); doc.add(new SortedSetDocValuesField("field", new BytesRef("hello"))); iwriter.addDocument(doc); iwriter.forceMerge(1); - + DirectoryReader ireader = iwriter.getReader(); iwriter.close(); - + SortedSetDocValues dv = getOnlyLeafReader(ireader).getSortedSetDocValues("field"); assertEquals(1, dv.getValueCount()); assertEquals(1, dv.nextDoc()); assertEquals(0, dv.nextOrd()); assertEquals(NO_MORE_ORDS, dv.nextOrd()); - + BytesRef bytes = dv.lookupOrd(0); assertEquals(new BytesRef("hello"), bytes); - + ireader.close(); directory.close(); } - + public void testSortedSetMergeAwayAllValues() throws IOException { Directory directory = newDirectory(); Analyzer analyzer = new MockAnalyzer(random()); IndexWriterConfig iwconfig = newIndexWriterConfig(analyzer); iwconfig.setMergePolicy(newLogMergePolicy()); RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig); - + Document doc = new Document(); doc.add(new StringField("id", "0", Field.Store.NO)); - iwriter.addDocument(doc); + iwriter.addDocument(doc); doc = new Document(); doc.add(new StringField("id", "1", Field.Store.NO)); doc.add(new SortedSetDocValuesField("field", new BytesRef("hello"))); @@ -2062,38 +2091,38 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes iwriter.commit(); iwriter.deleteDocuments(new Term("id", "1")); iwriter.forceMerge(1); - + DirectoryReader ireader = iwriter.getReader(); iwriter.close(); - + SortedSetDocValues dv = getOnlyLeafReader(ireader).getSortedSetDocValues("field"); assertEquals(0, dv.getValueCount()); - + ireader.close(); directory.close(); } - + public void testSortedSetTermsEnum() throws IOException { Directory directory = newDirectory(); Analyzer analyzer = new MockAnalyzer(random()); IndexWriterConfig iwconfig = newIndexWriterConfig(analyzer); iwconfig.setMergePolicy(newLogMergePolicy()); RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig); - + Document doc = new Document(); doc.add(new SortedSetDocValuesField("field", new BytesRef("hello"))); doc.add(new SortedSetDocValuesField("field", new BytesRef("world"))); doc.add(new SortedSetDocValuesField("field", new BytesRef("beer"))); iwriter.addDocument(doc); - + DirectoryReader ireader = iwriter.getReader(); iwriter.close(); SortedSetDocValues dv = getOnlyLeafReader(ireader).getSortedSetDocValues("field"); assertEquals(3, dv.getValueCount()); - + TermsEnum termsEnum = dv.termsEnum(); - + // next() assertEquals("beer", termsEnum.next().utf8ToString()); assertEquals(0, termsEnum.ord()); @@ -2101,7 +2130,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes assertEquals(1, termsEnum.ord()); assertEquals("world", termsEnum.next().utf8ToString()); assertEquals(2, termsEnum.ord()); - + // seekCeil() assertEquals(SeekStatus.NOT_FOUND, termsEnum.seekCeil(new BytesRef("ha!"))); assertEquals("hello", termsEnum.term().utf8ToString()); @@ -2110,7 +2139,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes assertEquals("beer", termsEnum.term().utf8ToString()); assertEquals(0, termsEnum.ord()); assertEquals(SeekStatus.END, termsEnum.seekCeil(new BytesRef("zzz"))); - + // seekExact() 
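// For reference, the per-document iteration contract that the SortedSetDocValues
// assertions in these tests exercise, as a minimal self-contained sketch (assuming the
// Lucene 8.x API; "reader" and "field" are illustrative placeholders, not names from
// this patch):
//
//   SortedSetDocValues values = getOnlyLeafReader(reader).getSortedSetDocValues("field");
//   for (int doc = values.nextDoc();
//       doc != DocIdSetIterator.NO_MORE_DOCS;
//       doc = values.nextDoc()) {
//     // ords come back in increasing order per document, terminated by NO_MORE_ORDS
//     for (long ord = values.nextOrd();
//         ord != SortedSetDocValues.NO_MORE_ORDS;
//         ord = values.nextOrd()) {
//       BytesRef term = values.lookupOrd(ord); // ord -> term; ords are sorted by term
//     }
//   }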
assertTrue(termsEnum.seekExact(new BytesRef("beer"))); assertEquals("beer", termsEnum.term().utf8ToString()); @@ -2152,7 +2181,9 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes directory.close(); } - protected void doTestSortedSetVsStoredFields(int numDocs, int minLength, int maxLength, int maxValuesPerDoc, int maxUniqueValues) throws Exception { + protected void doTestSortedSetVsStoredFields( + int numDocs, int minLength, int maxLength, int maxValuesPerDoc, int maxUniqueValues) + throws Exception { Directory dir = newFSDirectory(createTempDir("dvduel")); IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random())); RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf); @@ -2196,9 +2227,9 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes writer.commit(); } } - + // delete some docs - int numDeletions = random().nextInt(numDocs/10); + int numDeletions = random().nextInt(numDocs / 10); if (VERBOSE) { System.out.println("\nTEST: now delete " + numDeletions + " docs"); } @@ -2206,7 +2237,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes int id = random().nextInt(numDocs); writer.deleteDocuments(new Term("id", Integer.toString(id))); } - + // compare if (VERBOSE) { System.out.println("\nTEST: now get reader"); @@ -2244,7 +2275,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes System.out.println("TEST: force merge"); } writer.forceMerge(1); - + // compare again ir = writer.getReader(); TestUtil.checkReader(ir); @@ -2282,7 +2313,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes } dir.close(); } - + public void testSortedSetFixedLengthVsStoredFields() throws Exception { int numIterations = atLeast(1); for (int i = 0; i < numIterations; i++) { @@ -2290,24 +2321,18 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes doTestSortedSetVsStoredFields(atLeast(300), fixedLength, fixedLength, 16, 100); } } - + public void testSortedNumericsSingleValuedVsStoredFields() throws Exception { int numIterations = atLeast(1); for (int i = 0; i < numIterations; i++) { - doTestSortedNumericsVsStoredFields( - () -> 1, - random()::nextLong - ); + doTestSortedNumericsVsStoredFields(() -> 1, random()::nextLong); } } - + public void testSortedNumericsSingleValuedMissingVsStoredFields() throws Exception { int numIterations = atLeast(1); for (int i = 0; i < numIterations; i++) { - doTestSortedNumericsVsStoredFields( - () -> random().nextBoolean() ? 0 : 1, - random()::nextLong - ); + doTestSortedNumericsVsStoredFields(() -> random().nextBoolean() ? 
0 : 1, random()::nextLong); } } @@ -2315,9 +2340,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes int numIterations = atLeast(1); for (int i = 0; i < numIterations; i++) { doTestSortedNumericsVsStoredFields( - () -> TestUtil.nextLong(random(), 0, 50), - random()::nextLong - ); + () -> TestUtil.nextLong(random(), 0, 50), random()::nextLong); } } @@ -2329,9 +2352,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes int numIterations = atLeast(1); for (int i = 0; i < numIterations; i++) { doTestSortedNumericsVsStoredFields( - () -> TestUtil.nextLong(random(), 0, 6), - () -> values[random().nextInt(values.length)] - ); + () -> TestUtil.nextLong(random(), 0, 6), () -> values[random().nextInt(values.length)]); } } @@ -2349,7 +2370,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes doTestSortedSetVsStoredFields(atLeast(300), fixedLength, fixedLength, 1, 100); } } - + public void testSortedSetVariableLengthSingleValuedVsStoredFields() throws Exception { int numIterations = atLeast(1); for (int i = 0; i < numIterations; i++) { @@ -2396,11 +2417,12 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes private void doTestGCDCompression(double density) throws Exception { int numIterations = atLeast(1); for (int i = 0; i < numIterations; i++) { - final long min = - (((long) random().nextInt(1 << 30)) << 32); + final long min = -(((long) random().nextInt(1 << 30)) << 32); final long mul = random().nextInt() & 0xFFFFFFFFL; - final LongSupplier longs = () -> { - return min + mul * random().nextInt(1 << 20); - }; + final LongSupplier longs = + () -> { + return min + mul * random().nextInt(1 << 20); + }; doTestNumericsVsStoredFields(density, longs); } } @@ -2418,13 +2440,14 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes // the GCD of 0 and MIN_VALUE is negative int numIterations = atLeast(1); for (int i = 0; i < numIterations; i++) { - final LongSupplier longs = () -> { - return random().nextBoolean() ? 0 : Long.MIN_VALUE; - }; + final LongSupplier longs = + () -> { + return random().nextBoolean() ? 
0 : Long.MIN_VALUE; + }; doTestNumericsVsStoredFields(1, longs); } } - + public void testTwoNumbersOneMissing() throws IOException { Directory directory = newDirectory(); IndexWriterConfig conf = newIndexWriterConfig(null); @@ -2439,7 +2462,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes iw.addDocument(doc); iw.forceMerge(1); iw.close(); - + IndexReader ir = DirectoryReader.open(directory); assertEquals(1, ir.leaves().size()); LeafReader ar = ir.leaves().get(0).reader(); @@ -2450,7 +2473,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes ir.close(); directory.close(); } - + public void testTwoNumbersOneMissingWithMerging() throws IOException { Directory directory = newDirectory(); IndexWriterConfig conf = newIndexWriterConfig(null); @@ -2466,7 +2489,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes iw.addDocument(doc); iw.forceMerge(1); iw.close(); - + IndexReader ir = DirectoryReader.open(directory); assertEquals(1, ir.leaves().size()); LeafReader ar = ir.leaves().get(0).reader(); @@ -2477,7 +2500,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes ir.close(); directory.close(); } - + public void testThreeNumbersOneMissingWithMerging() throws IOException { Directory directory = newDirectory(); IndexWriterConfig conf = newIndexWriterConfig(null); @@ -2497,7 +2520,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes iw.addDocument(doc); iw.forceMerge(1); iw.close(); - + IndexReader ir = DirectoryReader.open(directory); assertEquals(1, ir.leaves().size()); LeafReader ar = ir.leaves().get(0).reader(); @@ -2509,7 +2532,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes ir.close(); directory.close(); } - + public void testTwoBytesOneMissing() throws IOException { Directory directory = newDirectory(); IndexWriterConfig conf = newIndexWriterConfig(null); @@ -2524,7 +2547,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes iw.addDocument(doc); iw.forceMerge(1); iw.close(); - + IndexReader ir = DirectoryReader.open(directory); assertEquals(1, ir.leaves().size()); LeafReader ar = ir.leaves().get(0).reader(); @@ -2535,7 +2558,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes ir.close(); directory.close(); } - + public void testTwoBytesOneMissingWithMerging() throws IOException { Directory directory = newDirectory(); IndexWriterConfig conf = newIndexWriterConfig(null); @@ -2551,7 +2574,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes iw.addDocument(doc); iw.forceMerge(1); iw.close(); - + IndexReader ir = DirectoryReader.open(directory); assertEquals(1, ir.leaves().size()); LeafReader ar = ir.leaves().get(0).reader(); @@ -2562,7 +2585,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes ir.close(); directory.close(); } - + public void testThreeBytesOneMissingWithMerging() throws IOException { Directory directory = newDirectory(); IndexWriterConfig conf = newIndexWriterConfig(null); @@ -2582,7 +2605,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes iw.addDocument(doc); iw.forceMerge(1); iw.close(); - + IndexReader ir = DirectoryReader.open(directory); assertEquals(1, ir.leaves().size()); LeafReader ar = ir.leaves().get(0).reader(); @@ -2595,7 +2618,7 @@ public abstract class BaseDocValuesFormatTestCase extends 
BaseIndexFileFormatTes ir.close(); directory.close(); } - + /** Tests dv against stored fields with threads (binary/numeric/sorted, no missing) */ public void testThreads() throws Exception { Directory dir = newDirectory(); @@ -2614,7 +2637,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes doc.add(dvSortedField); doc.add(storedNumericField); doc.add(dvNumericField); - + // index some docs int numDocs = atLeast(300); for (int i = 0; i < numDocs; i++) { @@ -2633,51 +2656,52 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes writer.commit(); } } - + // delete some docs - int numDeletions = random().nextInt(numDocs/10); + int numDeletions = random().nextInt(numDocs / 10); for (int i = 0; i < numDeletions; i++) { int id = random().nextInt(numDocs); writer.deleteDocuments(new Term("id", Integer.toString(id))); } writer.close(); - + // compare final DirectoryReader ir = DirectoryReader.open(dir); int numThreads = TestUtil.nextInt(random(), 2, 7); Thread threads[] = new Thread[numThreads]; final CountDownLatch startingGun = new CountDownLatch(1); - + for (int i = 0; i < threads.length; i++) { - threads[i] = new Thread() { - @Override - public void run() { - try { - startingGun.await(); - for (LeafReaderContext context : ir.leaves()) { - LeafReader r = context.reader(); - BinaryDocValues binaries = r.getBinaryDocValues("dvBin"); - SortedDocValues sorted = r.getSortedDocValues("dvSorted"); - NumericDocValues numerics = r.getNumericDocValues("dvNum"); - for (int j = 0; j < r.maxDoc(); j++) { - BytesRef binaryValue = r.document(j).getBinaryValue("storedBin"); - assertEquals(j, binaries.nextDoc()); - BytesRef scratch = binaries.binaryValue(); - assertEquals(binaryValue, scratch); - assertEquals(j, sorted.nextDoc()); - scratch = sorted.binaryValue(); - assertEquals(binaryValue, scratch); - String expected = r.document(j).get("storedNum"); - assertEquals(j, numerics.nextDoc()); - assertEquals(Long.parseLong(expected), numerics.longValue()); + threads[i] = + new Thread() { + @Override + public void run() { + try { + startingGun.await(); + for (LeafReaderContext context : ir.leaves()) { + LeafReader r = context.reader(); + BinaryDocValues binaries = r.getBinaryDocValues("dvBin"); + SortedDocValues sorted = r.getSortedDocValues("dvSorted"); + NumericDocValues numerics = r.getNumericDocValues("dvNum"); + for (int j = 0; j < r.maxDoc(); j++) { + BytesRef binaryValue = r.document(j).getBinaryValue("storedBin"); + assertEquals(j, binaries.nextDoc()); + BytesRef scratch = binaries.binaryValue(); + assertEquals(binaryValue, scratch); + assertEquals(j, sorted.nextDoc()); + scratch = sorted.binaryValue(); + assertEquals(binaryValue, scratch); + String expected = r.document(j).get("storedNum"); + assertEquals(j, numerics.nextDoc()); + assertEquals(Long.parseLong(expected), numerics.longValue()); + } + } + TestUtil.checkReader(ir); + } catch (Exception e) { + throw new RuntimeException(e); } } - TestUtil.checkReader(ir); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - }; + }; threads[i].start(); } startingGun.countDown(); @@ -2687,7 +2711,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes ir.close(); dir.close(); } - + /** Tests dv against stored fields with threads (all types + missing) */ @Nightly public void testThreads2() throws Exception { @@ -2700,7 +2724,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes Field dvSortedField = new 
SortedDocValuesField("dvSorted", new BytesRef()); Field storedNumericField = new StoredField("storedNum", ""); Field dvNumericField = new NumericDocValuesField("dvNum", 0); - + // index some docs int numDocs = TestUtil.nextInt(random(), 1025, 2047); for (int i = 0; i < numDocs; i++) { @@ -2748,86 +2772,88 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes writer.commit(); } } - + // delete some docs - int numDeletions = random().nextInt(numDocs/10); + int numDeletions = random().nextInt(numDocs / 10); for (int i = 0; i < numDeletions; i++) { int id = random().nextInt(numDocs); writer.deleteDocuments(new Term("id", Integer.toString(id))); } writer.close(); - + // compare final DirectoryReader ir = DirectoryReader.open(dir); int numThreads = TestUtil.nextInt(random(), 2, 7); Thread threads[] = new Thread[numThreads]; final CountDownLatch startingGun = new CountDownLatch(1); - + for (int i = 0; i < threads.length; i++) { - threads[i] = new Thread() { - @Override - public void run() { - try { - startingGun.await(); - for (LeafReaderContext context : ir.leaves()) { - LeafReader r = context.reader(); - BinaryDocValues binaries = r.getBinaryDocValues("dvBin"); - SortedDocValues sorted = r.getSortedDocValues("dvSorted"); - NumericDocValues numerics = r.getNumericDocValues("dvNum"); - SortedSetDocValues sortedSet = r.getSortedSetDocValues("dvSortedSet"); - SortedNumericDocValues sortedNumeric = r.getSortedNumericDocValues("dvSortedNumeric"); - for (int j = 0; j < r.maxDoc(); j++) { - BytesRef binaryValue = r.document(j).getBinaryValue("storedBin"); - if (binaryValue != null) { - if (binaries != null) { - assertEquals(j, binaries.nextDoc()); - BytesRef scratch = binaries.binaryValue(); - assertEquals(binaryValue, scratch); - assertEquals(j, sorted.nextDoc()); - scratch = sorted.binaryValue(); - assertEquals(binaryValue, scratch); - } - } - - String number = r.document(j).get("storedNum"); - if (number != null) { - if (numerics != null) { - assertEquals(j, numerics.advance(j)); - assertEquals(Long.parseLong(number), numerics.longValue()); - } - } - - String values[] = r.document(j).getValues("storedSortedSet"); - if (values.length > 0) { - assertNotNull(sortedSet); - assertEquals(j, sortedSet.nextDoc()); - for (int k = 0; k < values.length; k++) { - long ord = sortedSet.nextOrd(); - assertTrue(ord != SortedSetDocValues.NO_MORE_ORDS); - BytesRef value = sortedSet.lookupOrd(ord); - assertEquals(values[k], value.utf8ToString()); - } - assertEquals(SortedSetDocValues.NO_MORE_ORDS, sortedSet.nextOrd()); - } - - String numValues[] = r.document(j).getValues("storedSortedNumeric"); - if (numValues.length > 0) { - assertNotNull(sortedNumeric); - assertEquals(j, sortedNumeric.nextDoc()); - assertEquals(numValues.length, sortedNumeric.docValueCount()); - for (int k = 0; k < numValues.length; k++) { - long v = sortedNumeric.nextValue(); - assertEquals(numValues[k], Long.toString(v)); + threads[i] = + new Thread() { + @Override + public void run() { + try { + startingGun.await(); + for (LeafReaderContext context : ir.leaves()) { + LeafReader r = context.reader(); + BinaryDocValues binaries = r.getBinaryDocValues("dvBin"); + SortedDocValues sorted = r.getSortedDocValues("dvSorted"); + NumericDocValues numerics = r.getNumericDocValues("dvNum"); + SortedSetDocValues sortedSet = r.getSortedSetDocValues("dvSortedSet"); + SortedNumericDocValues sortedNumeric = + r.getSortedNumericDocValues("dvSortedNumeric"); + for (int j = 0; j < r.maxDoc(); j++) { + BytesRef binaryValue = 
r.document(j).getBinaryValue("storedBin"); + if (binaryValue != null) { + if (binaries != null) { + assertEquals(j, binaries.nextDoc()); + BytesRef scratch = binaries.binaryValue(); + assertEquals(binaryValue, scratch); + assertEquals(j, sorted.nextDoc()); + scratch = sorted.binaryValue(); + assertEquals(binaryValue, scratch); + } + } + + String number = r.document(j).get("storedNum"); + if (number != null) { + if (numerics != null) { + assertEquals(j, numerics.advance(j)); + assertEquals(Long.parseLong(number), numerics.longValue()); + } + } + + String values[] = r.document(j).getValues("storedSortedSet"); + if (values.length > 0) { + assertNotNull(sortedSet); + assertEquals(j, sortedSet.nextDoc()); + for (int k = 0; k < values.length; k++) { + long ord = sortedSet.nextOrd(); + assertTrue(ord != SortedSetDocValues.NO_MORE_ORDS); + BytesRef value = sortedSet.lookupOrd(ord); + assertEquals(values[k], value.utf8ToString()); + } + assertEquals(SortedSetDocValues.NO_MORE_ORDS, sortedSet.nextOrd()); + } + + String numValues[] = r.document(j).getValues("storedSortedNumeric"); + if (numValues.length > 0) { + assertNotNull(sortedNumeric); + assertEquals(j, sortedNumeric.nextDoc()); + assertEquals(numValues.length, sortedNumeric.docValueCount()); + for (int k = 0; k < numValues.length; k++) { + long v = sortedNumeric.nextValue(); + assertEquals(numValues[k], Long.toString(v)); + } + } } } + TestUtil.checkReader(ir); + } catch (Exception e) { + throw new RuntimeException(e); } } - TestUtil.checkReader(ir); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - }; + }; threads[i].start(); } startingGun.countDown(); @@ -2837,62 +2863,73 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes ir.close(); dir.close(); } - + @Nightly public void testThreads3() throws Exception { Directory dir = newFSDirectory(createTempDir()); IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random())); RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf); - + int numSortedSets = random().nextInt(21); int numBinaries = random().nextInt(21); int numSortedNums = random().nextInt(21); - + int numDocs = TestUtil.nextInt(random(), 2025, 2047); for (int i = 0; i < numDocs; i++) { Document doc = new Document(); - + for (int j = 0; j < numSortedSets; j++) { - doc.add(new SortedSetDocValuesField("ss" + j, new BytesRef(TestUtil.randomSimpleString(random())))); - doc.add(new SortedSetDocValuesField("ss" + j, new BytesRef(TestUtil.randomSimpleString(random())))); + doc.add( + new SortedSetDocValuesField( + "ss" + j, new BytesRef(TestUtil.randomSimpleString(random())))); + doc.add( + new SortedSetDocValuesField( + "ss" + j, new BytesRef(TestUtil.randomSimpleString(random())))); } - + for (int j = 0; j < numBinaries; j++) { - doc.add(new BinaryDocValuesField("b" + j, new BytesRef(TestUtil.randomSimpleString(random())))); + doc.add( + new BinaryDocValuesField("b" + j, new BytesRef(TestUtil.randomSimpleString(random())))); } - + for (int j = 0; j < numSortedNums; j++) { - doc.add(new SortedNumericDocValuesField("sn" + j, TestUtil.nextLong(random(), Long.MIN_VALUE, Long.MAX_VALUE))); - doc.add(new SortedNumericDocValuesField("sn" + j, TestUtil.nextLong(random(), Long.MIN_VALUE, Long.MAX_VALUE))); + doc.add( + new SortedNumericDocValuesField( + "sn" + j, TestUtil.nextLong(random(), Long.MIN_VALUE, Long.MAX_VALUE))); + doc.add( + new SortedNumericDocValuesField( + "sn" + j, TestUtil.nextLong(random(), Long.MIN_VALUE, Long.MAX_VALUE))); } writer.addDocument(doc); } 
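// The concurrency pattern used throughout these testThreads* cases is a "starting gun":
// every thread blocks on a shared CountDownLatch so that all of them hit the codec at
// the same time. A minimal sketch of the pattern (names are illustrative):
//
//   final CountDownLatch startingGun = new CountDownLatch(1);
//   Thread[] threads = new Thread[4];
//   for (int t = 0; t < threads.length; t++) {
//     threads[t] =
//         new Thread(
//             () -> {
//               try {
//                 startingGun.await(); // park until every thread has been started
//                 // ... exercise the shared DirectoryReader here ...
//               } catch (Exception e) {
//                 throw new RuntimeException(e);
//               }
//             });
//     threads[t].start();
//   }
//   startingGun.countDown(); // release all threads at once
//   for (Thread t : threads) {
//     t.join();
//   }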
writer.close(); - + // now check with threads for (int i = 0; i < 10; i++) { final DirectoryReader r = DirectoryReader.open(dir); final CountDownLatch startingGun = new CountDownLatch(1); Thread threads[] = new Thread[TestUtil.nextInt(random(), 4, 10)]; for (int tid = 0; tid < threads.length; tid++) { - threads[tid] = new Thread() { - @Override - public void run() { - try { - ByteArrayOutputStream bos = new ByteArrayOutputStream(1024); - PrintStream infoStream = new PrintStream(bos, false, IOUtils.UTF_8); - startingGun.await(); - for (LeafReaderContext leaf : r.leaves()) { - DocValuesStatus status = CheckIndex.testDocValues((SegmentReader)leaf.reader(), infoStream, true); - if (status.error != null) { - throw status.error; + threads[tid] = + new Thread() { + @Override + public void run() { + try { + ByteArrayOutputStream bos = new ByteArrayOutputStream(1024); + PrintStream infoStream = new PrintStream(bos, false, IOUtils.UTF_8); + startingGun.await(); + for (LeafReaderContext leaf : r.leaves()) { + DocValuesStatus status = + CheckIndex.testDocValues((SegmentReader) leaf.reader(), infoStream, true); + if (status.error != null) { + throw status.error; + } + } + } catch (Throwable e) { + throw new RuntimeException(e); } } - } catch (Throwable e) { - throw new RuntimeException(e); - } - } - }; + }; } for (int tid = 0; tid < threads.length; tid++) { threads[tid].start(); @@ -2911,16 +2948,16 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes public void testEmptyBinaryValueOnPageSizes() throws Exception { // Test larger and larger power-of-two sized values, // followed by empty string value: - for(int i=0;i<20;i++) { + for (int i = 0; i < 20; i++) { if (i > 14 && codecAcceptsHugeBinaryValues("field") == false) { break; } Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir); BytesRef bytes = new BytesRef(); - bytes.bytes = new byte[1< missingSet = new HashSet<>(); - for(int i=0;i sparseChance) { @@ -3501,15 +3542,15 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes // Now search the index: IndexReader r = w.getReader(); BitSet missing = new FixedBitSet(r.maxDoc()); - for(int docID=0;docID attributes = infos2.fieldInfo("field").attributes(); + Map attributes = infos2.fieldInfo("field").attributes(); // shouldn't be able to modify attributes - expectThrows(UnsupportedOperationException.class, () -> { - attributes.put("bogus", "bogus"); - }); + expectThrows( + UnsupportedOperationException.class, + () -> { + attributes.put("bogus", "bogus"); + }); dir.close(); } - - /** - * Test field infos write that hits exception immediately on open. - * make sure we get our exception back, no file handle leaks, etc. + + /** + * Test field infos write that hits exception immediately on open. make sure we get our exception + * back, no file handle leaks, etc. 
*/ public void testExceptionOnCreateOutput() throws Exception { - Failure fail = new Failure() { - @Override - public void eval(MockDirectoryWrapper dir) throws IOException { - if (doFail && callStackContainsAnyOf("createOutput")) { - throw new FakeIOException(); - } - } - }; - + Failure fail = + new Failure() { + @Override + public void eval(MockDirectoryWrapper dir) throws IOException { + if (doFail && callStackContainsAnyOf("createOutput")) { + throw new FakeIOException(); + } + } + }; + MockDirectoryWrapper dir = newMockDirectory(); dir.failOn(fail); Codec codec = getCodec(); @@ -117,30 +117,33 @@ public abstract class BaseFieldInfoFormatTestCase extends BaseIndexFileFormatTes fi.setIndexOptions(TextField.TYPE_STORED.indexOptions()); addAttributes(fi); FieldInfos infos = builder.finish(); - + fail.setDoFail(); - expectThrows(FakeIOException.class, () -> { - codec.fieldInfosFormat().write(dir, segmentInfo, "", infos, IOContext.DEFAULT); - }); + expectThrows( + FakeIOException.class, + () -> { + codec.fieldInfosFormat().write(dir, segmentInfo, "", infos, IOContext.DEFAULT); + }); fail.clearDoFail(); - + dir.close(); } - - /** - * Test field infos write that hits exception on close. - * make sure we get our exception back, no file handle leaks, etc. + + /** + * Test field infos write that hits exception on close. make sure we get our exception back, no + * file handle leaks, etc. */ public void testExceptionOnCloseOutput() throws Exception { - Failure fail = new Failure() { - @Override - public void eval(MockDirectoryWrapper dir) throws IOException { - if (doFail && callStackContainsAnyOf("close")) { - throw new FakeIOException(); - } - } - }; - + Failure fail = + new Failure() { + @Override + public void eval(MockDirectoryWrapper dir) throws IOException { + if (doFail && callStackContainsAnyOf("close")) { + throw new FakeIOException(); + } + } + }; + MockDirectoryWrapper dir = newMockDirectory(); dir.failOn(fail); Codec codec = getCodec(); @@ -150,30 +153,33 @@ public abstract class BaseFieldInfoFormatTestCase extends BaseIndexFileFormatTes fi.setIndexOptions(TextField.TYPE_STORED.indexOptions()); addAttributes(fi); FieldInfos infos = builder.finish(); - + fail.setDoFail(); - expectThrows(FakeIOException.class, () -> { - codec.fieldInfosFormat().write(dir, segmentInfo, "", infos, IOContext.DEFAULT); - }); + expectThrows( + FakeIOException.class, + () -> { + codec.fieldInfosFormat().write(dir, segmentInfo, "", infos, IOContext.DEFAULT); + }); fail.clearDoFail(); - + dir.close(); } - - /** - * Test field infos read that hits exception immediately on open. - * make sure we get our exception back, no file handle leaks, etc. + + /** + * Test field infos read that hits exception immediately on open. make sure we get our exception + * back, no file handle leaks, etc. 
*/ public void testExceptionOnOpenInput() throws Exception { - Failure fail = new Failure() { - @Override - public void eval(MockDirectoryWrapper dir) throws IOException { - if (doFail && callStackContainsAnyOf("openInput")) { - throw new FakeIOException(); - } - } - }; - + Failure fail = + new Failure() { + @Override + public void eval(MockDirectoryWrapper dir) throws IOException { + if (doFail && callStackContainsAnyOf("openInput")) { + throw new FakeIOException(); + } + } + }; + MockDirectoryWrapper dir = newMockDirectory(); dir.failOn(fail); Codec codec = getCodec(); @@ -184,30 +190,33 @@ public abstract class BaseFieldInfoFormatTestCase extends BaseIndexFileFormatTes addAttributes(fi); FieldInfos infos = builder.finish(); codec.fieldInfosFormat().write(dir, segmentInfo, "", infos, IOContext.DEFAULT); - + fail.setDoFail(); - expectThrows(FakeIOException.class, () -> { - codec.fieldInfosFormat().read(dir, segmentInfo, "", IOContext.DEFAULT); - }); + expectThrows( + FakeIOException.class, + () -> { + codec.fieldInfosFormat().read(dir, segmentInfo, "", IOContext.DEFAULT); + }); fail.clearDoFail(); - + dir.close(); } - - /** - * Test field infos read that hits exception on close. - * make sure we get our exception back, no file handle leaks, etc. + + /** + * Test field infos read that hits exception on close. make sure we get our exception back, no + * file handle leaks, etc. */ public void testExceptionOnCloseInput() throws Exception { - Failure fail = new Failure() { - @Override - public void eval(MockDirectoryWrapper dir) throws IOException { - if (doFail && callStackContainsAnyOf("close")) { - throw new FakeIOException(); - } - } - }; - + Failure fail = + new Failure() { + @Override + public void eval(MockDirectoryWrapper dir) throws IOException { + if (doFail && callStackContainsAnyOf("close")) { + throw new FakeIOException(); + } + } + }; + MockDirectoryWrapper dir = newMockDirectory(); dir.failOn(fail); Codec codec = getCodec(); @@ -218,24 +227,26 @@ public abstract class BaseFieldInfoFormatTestCase extends BaseIndexFileFormatTes addAttributes(fi); FieldInfos infos = builder.finish(); codec.fieldInfosFormat().write(dir, segmentInfo, "", infos, IOContext.DEFAULT); - + fail.setDoFail(); - expectThrows(FakeIOException.class, () -> { - codec.fieldInfosFormat().read(dir, segmentInfo, "", IOContext.DEFAULT); - }); + expectThrows( + FakeIOException.class, + () -> { + codec.fieldInfosFormat().read(dir, segmentInfo, "", IOContext.DEFAULT); + }); fail.clearDoFail(); - + dir.close(); } - + // TODO: more tests - + /** Test field infos read/write with random fields, with different values. 
*/ public void testRandom() throws Exception { Directory dir = newDirectory(); Codec codec = getCodec(); SegmentInfo segmentInfo = newSegmentInfo(dir, "_123"); - + // generate a bunch of fields int numFields = atLeast(2000); Set<String> fieldNames = new HashSet<>(); @@ -249,12 +260,13 @@ public abstract class BaseFieldInfoFormatTestCase extends BaseIndexFileFormatTes IndexOptions indexOptions = fieldType.indexOptions(); if (indexOptions != IndexOptions.NONE) { fi.setIndexOptions(indexOptions); - if (fieldType.omitNorms()) { + if (fieldType.omitNorms()) { fi.setOmitsNorms(); } } fi.setDocValuesType(fieldType.docValuesType()); - if (fieldType.indexOptions() != IndexOptions.NONE && fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0) { + if (fieldType.indexOptions() != IndexOptions.NONE + && fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0) { if (random().nextBoolean()) { fi.setStorePayloads(); } @@ -267,15 +279,15 @@ public abstract class BaseFieldInfoFormatTestCase extends BaseIndexFileFormatTes assertEquals(infos, infos2); dir.close(); } - + private final IndexableFieldType randomFieldType(Random r) { FieldType type = new FieldType(); - + if (r.nextBoolean()) { IndexOptions values[] = IndexOptions.values(); type.setIndexOptions(values[r.nextInt(values.length)]); type.setOmitNorms(r.nextBoolean()); - + if (r.nextBoolean()) { type.setStoreTermVectors(true); if (type.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0) { @@ -287,32 +299,29 @@ public abstract class BaseFieldInfoFormatTestCase extends BaseIndexFileFormatTes } } } - + if (r.nextBoolean()) { DocValuesType values[] = getDocValuesTypes(); type.setDocValuesType(values[r.nextInt(values.length)]); } - + return type; } - - /** - * Hook to add any codec attributes to fieldinfo - * instances added in this test. - */ - protected void addAttributes(FieldInfo fi) { - } - - /** - * Docvalues types to test. - * @deprecated only for Only available to ancient codecs can - * limit this to the subset of types they support. + + /** Hook to add any codec attributes to fieldinfo instances added in this test. */ + protected void addAttributes(FieldInfo fi) {} + + /** + * Docvalues types to test. + * + * @deprecated Only available so that ancient codecs can limit this to the subset of types + * they support. */ @Deprecated protected DocValuesType[] getDocValuesTypes() { return DocValuesType.values(); } - + /** equality for entirety of fieldinfos */ protected void assertEquals(FieldInfos expected, FieldInfos actual) { assertEquals(expected.size(), actual.size()); @@ -322,7 +331,7 @@ public abstract class BaseFieldInfoFormatTestCase extends BaseIndexFileFormatTes assertEquals(expectedField, actualField); } } - + /** equality for two individual fieldinfo objects */ protected void assertEquals(FieldInfo expected, FieldInfo actual) { assertEquals(expected.number, actual.number); @@ -335,13 +344,24 @@ public abstract class BaseFieldInfoFormatTestCase extends BaseIndexFileFormatTes assertEquals(expected.omitsNorms(), actual.omitsNorms()); assertEquals(expected.getDocValuesGen(), actual.getDocValuesGen()); } - + /** Returns a new fake segment */ protected static SegmentInfo newSegmentInfo(Directory dir, String name) { Version minVersion = random().nextBoolean() ?
null : Version.LATEST; - return new SegmentInfo(dir, Version.LATEST, minVersion, name, 10000, false, Codec.getDefault(), Collections.emptyMap(), StringHelper.randomId(), Collections.emptyMap(), null); + return new SegmentInfo( + dir, + Version.LATEST, + minVersion, + name, + 10000, + false, + Codec.getDefault(), + Collections.emptyMap(), + StringHelper.randomId(), + Collections.emptyMap(), + null); } - + @Override protected void addRandomFields(Document doc) { doc.add(new StoredField("foobar", TestUtil.randomSimpleString(random()))); diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseIndexFileFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseIndexFileFormatTestCase.java index 5a5e4fb4fb1..74fb4bf2c9d 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseIndexFileFormatTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseIndexFileFormatTestCase.java @@ -34,7 +34,6 @@ import java.util.Set; import java.util.TreeSet; import java.util.concurrent.ConcurrentHashMap; import java.util.function.IntConsumer; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.codecs.Codec; @@ -77,13 +76,12 @@ import org.apache.lucene.util.StringHelper; import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.Version; -/** - * Common tests to all index formats. - */ +/** Common tests to all index formats. */ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { // metadata or Directory-level objects - private static final Set<Class<?>> EXCLUDED_CLASSES = Collections.newSetFromMap(new IdentityHashMap<Class<?>,Boolean>()); + private static final Set<Class<?>> EXCLUDED_CLASSES = + Collections.newSetFromMap(new IdentityHashMap<Class<?>, Boolean>()); static { // Directory objects, don't take into account eg.
the NIO buffers @@ -118,7 +116,11 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { this.root = root; } - public long accumulateObject(Object o, long shallowSize, Map<java.lang.reflect.Field, Object> fieldValues, Collection<Object> queue) { + public long accumulateObject( + Object o, + long shallowSize, + Map<java.lang.reflect.Field, Object> fieldValues, + Collection<Object> queue) { for (Class<?> clazz = o.getClass(); clazz != null; clazz = clazz.getSuperclass()) { if (EXCLUDED_CLASSES.contains(clazz) && o != root) { return 0; @@ -134,7 +136,7 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { queue.addAll((Collection) o); v = (long) coll.size() * RamUsageEstimator.NUM_BYTES_OBJECT_REF; } else if (o instanceof Map) { - final Map<?,?> map = (Map<?,?>) o; + final Map<?, ?> map = (Map<?, ?>) o; queue.addAll(map.keySet()); queue.addAll(map.values()); v = 2L * map.size() * RamUsageEstimator.NUM_BYTES_OBJECT_REF; @@ -142,7 +144,8 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { List<Object> references = new ArrayList<>(); v = super.accumulateObject(o, shallowSize, fieldValues, references); for (Object r : references) { - // AssertingCodec adds Thread references to make sure objects are consumed in the right thread + // AssertingCodec adds Thread references to make sure objects are consumed in the right + // thread if (r instanceof Thread == false) { queue.add(r); } @@ -152,14 +155,13 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { } @Override - public long accumulateArray(Object array, long shallowSize, - List<Object> values, Collection<Object> queue) { + public long accumulateArray( + Object array, long shallowSize, List<Object> values, Collection<Object> queue) { long v = super.accumulateArray(array, shallowSize, values, queue); // System.out.println(array.getClass() + "=" + v); return v; } - - }; + } /** Returns the codec to run tests against */ protected abstract Codec getCodec(); @@ -172,7 +174,8 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { /** Set the created version of the given {@link Directory} and return it. */ protected final <D extends Directory> D applyCreatedVersionMajor(D d) throws IOException { if (SegmentInfos.getLastCommitGeneration(d) != -1) { - throw new IllegalArgumentException("Cannot set the created version on a Directory that already has segments"); + throw new IllegalArgumentException( + "Cannot set the created version on a Directory that already has segments"); } if (getCreatedVersionMajor() != Version.LATEST.major || random().nextBoolean()) { new SegmentInfos(getCreatedVersionMajor()).commit(d); @@ -202,7 +205,8 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { for (String file : d.listAll()) { if (IndexFileNames.CODEC_FILE_PATTERN.matcher(file).matches()) { final String ext = IndexFileNames.getExtension(file); - final long previousLength = bytesUsedByExtension.containsKey(ext) ? bytesUsedByExtension.get(ext) : 0; + final long previousLength = + bytesUsedByExtension.containsKey(ext) ? bytesUsedByExtension.get(ext) : 0; bytesUsedByExtension.put(ext, previousLength + d.fileLength(file)); } } @@ -212,20 +216,26 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { } /** - * Return the list of extensions that should be excluded from byte counts when - * comparing indices that store the same content. + * Return the list of extensions that should be excluded from byte counts when comparing indices + * that store the same content.
*/ protected Collection excludedExtensionsFromByteCounts() { - return new HashSet(Arrays.asList(new String[] { - // segment infos store various pieces of information that don't solely depend - // on the content of the index in the diagnostics (such as a timestamp) so we - // exclude this file from the bytes counts - "si", - // lock files are 0 bytes (one directory in the test could be RAMDir, the other FSDir) - "lock" })); + return new HashSet( + Arrays.asList( + new String[] { + // segment infos store various pieces of information that don't solely depend + // on the content of the index in the diagnostics (such as a timestamp) so we + // exclude this file from the bytes counts + "si", + // lock files are 0 bytes (one directory in the test could be RAMDir, the other FSDir) + "lock" + })); } - /** The purpose of this test is to make sure that bulk merge doesn't accumulate useless data over runs. */ + /** + * The purpose of this test is to make sure that bulk merge doesn't accumulate useless data over + * runs. + */ public void testMergeStability() throws Exception { assumeTrue("merge is not stable", mergeIsStable()); Directory dir = applyCreatedVersionMajor(newDirectory()); @@ -234,7 +244,10 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { // do not use RIW which will change things up! MergePolicy mp = newTieredMergePolicy(); mp.setNoCFSRatio(0); - IndexWriterConfig cfg = new IndexWriterConfig(new MockAnalyzer(random())).setUseCompoundFile(false).setMergePolicy(mp); + IndexWriterConfig cfg = + new IndexWriterConfig(new MockAnalyzer(random())) + .setUseCompoundFile(false) + .setMergePolicy(mp); IndexWriter w = new IndexWriter(dir, cfg); final int numDocs = atLeast(500); for (int i = 0; i < numDocs; ++i) { @@ -250,7 +263,10 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { Directory dir2 = applyCreatedVersionMajor(newDirectory()); mp = newTieredMergePolicy(); mp.setNoCFSRatio(0); - cfg = new IndexWriterConfig(new MockAnalyzer(random())).setUseCompoundFile(false).setMergePolicy(mp); + cfg = + new IndexWriterConfig(new MockAnalyzer(random())) + .setUseCompoundFile(false) + .setMergePolicy(mp); w = new IndexWriter(dir2, cfg); TestUtil.addIndexesSlowly(w, reader); @@ -314,14 +330,18 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { final long absoluteError = Math.abs(measuredBytes - reportedBytes); final double relativeError = (double) absoluteError / measuredBytes; - final String message = String.format(Locale.ROOT, - "RamUsageTester reports %d bytes but ramBytesUsed() returned %d (%.1f error). " + - " [Measured: %d, %d. Reported: %d, %d]", - measuredBytes, - reportedBytes, - (100 * relativeError), - act1, act2, - reported1, reported2); + final String message = + String.format( + Locale.ROOT, + "RamUsageTester reports %d bytes but ramBytesUsed() returned %d (%.1f error). " + + " [Measured: %d, %d. 
Reported: %d, %d]", + measuredBytes, + reportedBytes, + (100 * relativeError), + act1, + act2, + reported1, + reported2); assertTrue(message, relativeError < 0.20d || absoluteError < 1000); @@ -329,12 +349,13 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { reader2.close(); dir.close(); } - + /** Calls close multiple times on closeable codec apis */ public void testMultiClose() throws IOException { // first make a one doc index Directory oneDocIndex = applyCreatedVersionMajor(newDirectory()); - IndexWriter iw = new IndexWriter(oneDocIndex, new IndexWriterConfig(new MockAnalyzer(random()))); + IndexWriter iw = + new IndexWriter(oneDocIndex, new IndexWriterConfig(new MockAnalyzer(random()))); Document oneDoc = new Document(); FieldType customType = new FieldType(TextField.TYPE_STORED); customType.setStoreTermVectors(true); @@ -344,69 +365,97 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { iw.addDocument(oneDoc); LeafReader oneDocReader = getOnlyLeafReader(DirectoryReader.open(iw)); iw.close(); - + // now feed to codec apis manually - // we use FSDir, things like ramdir are not guaranteed to cause fails if you write to them after close(), etc + // we use FSDir, things like ramdir are not guaranteed to cause fails if you write to them after + // close(), etc Directory dir = newFSDirectory(createTempDir("justSoYouGetSomeChannelErrors")); Codec codec = getCodec(); - - SegmentInfo segmentInfo = new SegmentInfo(dir, Version.LATEST, Version.LATEST, "_0", 1, false, codec, Collections.emptyMap(), StringHelper.randomId(), Collections.emptyMap(), null); + + SegmentInfo segmentInfo = + new SegmentInfo( + dir, + Version.LATEST, + Version.LATEST, + "_0", + 1, + false, + codec, + Collections.emptyMap(), + StringHelper.randomId(), + Collections.emptyMap(), + null); FieldInfo proto = oneDocReader.getFieldInfos().fieldInfo("field"); - FieldInfo field = new FieldInfo(proto.name, proto.number, proto.hasVectors(), proto.omitsNorms(), proto.hasPayloads(), - proto.getIndexOptions(), proto.getDocValuesType(), proto.getDocValuesGen(), new HashMap<>(), - proto.getPointDimensionCount(), proto.getPointIndexDimensionCount(), proto.getPointNumBytes(), - proto.getVectorDimension(), proto.getVectorSearchStrategy(), proto.isSoftDeletesField()); + FieldInfo field = + new FieldInfo( + proto.name, + proto.number, + proto.hasVectors(), + proto.omitsNorms(), + proto.hasPayloads(), + proto.getIndexOptions(), + proto.getDocValuesType(), + proto.getDocValuesGen(), + new HashMap<>(), + proto.getPointDimensionCount(), + proto.getPointIndexDimensionCount(), + proto.getPointNumBytes(), + proto.getVectorDimension(), + proto.getVectorSearchStrategy(), + proto.isSoftDeletesField()); - FieldInfos fieldInfos = new FieldInfos(new FieldInfo[] { field } ); + FieldInfos fieldInfos = new FieldInfos(new FieldInfo[] {field}); + + SegmentWriteState writeState = + new SegmentWriteState( + null, dir, segmentInfo, fieldInfos, null, new IOContext(new FlushInfo(1, 20))); - SegmentWriteState writeState = new SegmentWriteState(null, dir, - segmentInfo, fieldInfos, - null, new IOContext(new FlushInfo(1, 20))); - SegmentReadState readState = new SegmentReadState(dir, segmentInfo, fieldInfos, IOContext.READ); // PostingsFormat - NormsProducer fakeNorms = new NormsProducer() { + NormsProducer fakeNorms = + new NormsProducer() { - @Override - public void close() throws IOException {} + @Override + public void close() throws IOException {} - @Override - public long ramBytesUsed() { - return 0; - } + @Override + 
public long ramBytesUsed() { + return 0; + } - @Override - public NumericDocValues getNorms(FieldInfo field) throws IOException { - if (field.hasNorms() == false) { - return null; - } - return oneDocReader.getNormValues(field.name); - } + @Override + public NumericDocValues getNorms(FieldInfo field) throws IOException { + if (field.hasNorms() == false) { + return null; + } + return oneDocReader.getNormValues(field.name); + } - @Override - public void checkIntegrity() throws IOException {} - - }; + @Override + public void checkIntegrity() throws IOException {} + }; try (FieldsConsumer consumer = codec.postingsFormat().fieldsConsumer(writeState)) { - final Fields fields = new Fields() { - TreeSet indexedFields = new TreeSet<>(FieldInfos.getIndexedFields(oneDocReader)); + final Fields fields = + new Fields() { + TreeSet indexedFields = + new TreeSet<>(FieldInfos.getIndexedFields(oneDocReader)); - @Override - public Iterator iterator() { - return indexedFields.iterator(); - } + @Override + public Iterator iterator() { + return indexedFields.iterator(); + } - @Override - public Terms terms(String field) throws IOException { - return oneDocReader.terms(field); - } + @Override + public Terms terms(String field) throws IOException { + return oneDocReader.terms(field); + } - @Override - public int size() { - return indexedFields.size(); - } - }; + @Override + public int size() { + return indexedFields.size(); + } + }; consumer.write(fields, fakeNorms); IOUtils.close(consumer); IOUtils.close(consumer); @@ -415,58 +464,59 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { IOUtils.close(producer); IOUtils.close(producer); } - + // DocValuesFormat try (DocValuesConsumer consumer = codec.docValuesFormat().fieldsConsumer(writeState)) { - consumer.addNumericField(field, - new EmptyDocValuesProducer() { - @Override - public NumericDocValues getNumeric(FieldInfo field) { - return new NumericDocValues() { - int docID = -1; - - @Override - public int docID() { - return docID; - } - - @Override - public int nextDoc() { - docID++; - if (docID == 1) { - docID = NO_MORE_DOCS; - } - return docID; - } + consumer.addNumericField( + field, + new EmptyDocValuesProducer() { + @Override + public NumericDocValues getNumeric(FieldInfo field) { + return new NumericDocValues() { + int docID = -1; - @Override - public int advance(int target) { - if (docID <= 0 && target == 0) { - docID = 0; - } else { - docID = NO_MORE_DOCS; - } - return docID; - } + @Override + public int docID() { + return docID; + } - @Override - public boolean advanceExact(int target) throws IOException { - docID = target; - return target == 0; - } + @Override + public int nextDoc() { + docID++; + if (docID == 1) { + docID = NO_MORE_DOCS; + } + return docID; + } - @Override - public long cost() { - return 1; - } + @Override + public int advance(int target) { + if (docID <= 0 && target == 0) { + docID = 0; + } else { + docID = NO_MORE_DOCS; + } + return docID; + } - @Override - public long longValue() { - return 5; - } - }; - } - }); + @Override + public boolean advanceExact(int target) throws IOException { + docID = target; + return target == 0; + } + + @Override + public long cost() { + return 1; + } + + @Override + public long longValue() { + return 5; + } + }; + } + }); IOUtils.close(consumer); IOUtils.close(consumer); } @@ -474,71 +524,70 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { IOUtils.close(producer); IOUtils.close(producer); } - + // NormsFormat try (NormsConsumer consumer = 
codec.normsFormat().normsConsumer(writeState)) { - consumer.addNormsField(field, - new NormsProducer() { - @Override - public NumericDocValues getNorms(FieldInfo field) { - return new NumericDocValues() { - int docID = -1; - - @Override - public int docID() { - return docID; - } - - @Override - public int nextDoc() { - docID++; - if (docID == 1) { - docID = NO_MORE_DOCS; - } - return docID; - } + consumer.addNormsField( + field, + new NormsProducer() { + @Override + public NumericDocValues getNorms(FieldInfo field) { + return new NumericDocValues() { + int docID = -1; - @Override - public int advance(int target) { - if (docID <= 0 && target == 0) { - docID = 0; - } else { - docID = NO_MORE_DOCS; - } - return docID; - } + @Override + public int docID() { + return docID; + } - @Override - public boolean advanceExact(int target) throws IOException { - docID = target; - return target == 0; - } + @Override + public int nextDoc() { + docID++; + if (docID == 1) { + docID = NO_MORE_DOCS; + } + return docID; + } - @Override - public long cost() { - return 1; - } + @Override + public int advance(int target) { + if (docID <= 0 && target == 0) { + docID = 0; + } else { + docID = NO_MORE_DOCS; + } + return docID; + } - @Override - public long longValue() { - return 5; - } - }; - } + @Override + public boolean advanceExact(int target) throws IOException { + docID = target; + return target == 0; + } - @Override - public void checkIntegrity() { - } + @Override + public long cost() { + return 1; + } - @Override - public void close() { - } + @Override + public long longValue() { + return 5; + } + }; + } - @Override - public long ramBytesUsed() { - return 0; - } - }); + @Override + public void checkIntegrity() {} + + @Override + public void close() {} + + @Override + public long ramBytesUsed() { + return 0; + } + }); IOUtils.close(consumer); IOUtils.close(consumer); } @@ -546,9 +595,10 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { IOUtils.close(producer); IOUtils.close(producer); } - + // TermVectorsFormat - try (TermVectorsWriter consumer = codec.termVectorsFormat().vectorsWriter(dir, segmentInfo, writeState.context)) { + try (TermVectorsWriter consumer = + codec.termVectorsFormat().vectorsWriter(dir, segmentInfo, writeState.context)) { consumer.startDocument(1); consumer.startField(field, 1, false, false, false); consumer.startTerm(new BytesRef("testing"), 2); @@ -559,13 +609,15 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { IOUtils.close(consumer); IOUtils.close(consumer); } - try (TermVectorsReader producer = codec.termVectorsFormat().vectorsReader(dir, segmentInfo, fieldInfos, readState.context)) { + try (TermVectorsReader producer = + codec.termVectorsFormat().vectorsReader(dir, segmentInfo, fieldInfos, readState.context)) { IOUtils.close(producer); IOUtils.close(producer); } - + // StoredFieldsFormat - try (StoredFieldsWriter consumer = codec.storedFieldsFormat().fieldsWriter(dir, segmentInfo, writeState.context)) { + try (StoredFieldsWriter consumer = + codec.storedFieldsFormat().fieldsWriter(dir, segmentInfo, writeState.context)) { consumer.startDocument(); consumer.writeField(field, customField); consumer.finishDocument(); @@ -573,14 +625,15 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { IOUtils.close(consumer); IOUtils.close(consumer); } - try (StoredFieldsReader producer = codec.storedFieldsFormat().fieldsReader(dir, segmentInfo, fieldInfos, readState.context)) { + try (StoredFieldsReader producer = + 
codec.storedFieldsFormat().fieldsReader(dir, segmentInfo, fieldInfos, readState.context)) { IOUtils.close(producer); IOUtils.close(producer); } - + IOUtils.close(oneDocReader, oneDocIndex, dir); } - + /** Tests exception handling on write and openInput/createOutput */ // TODO: this is really not ideal. each BaseXXXTestCase should have unit tests doing this. // but we use this shotgun approach to prevent bugs in the meantime: it just ensures the @@ -591,31 +644,31 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { dir.setThrottling(MockDirectoryWrapper.Throttling.NEVER); dir.setUseSlowOpenClosers(false); dir.setRandomIOExceptionRate(0.001); // more rare - + // log all exceptions we hit, in case we fail (for debugging) ByteArrayOutputStream exceptionLog = new ByteArrayOutputStream(); PrintStream exceptionStream = new PrintStream(exceptionLog, true, "UTF-8"); - //PrintStream exceptionStream = System.out; - + // PrintStream exceptionStream = System.out; + Analyzer analyzer = new MockAnalyzer(random()); - + IndexWriterConfig conf = newIndexWriterConfig(analyzer); // just for now, try to keep this test reproducible conf.setMergeScheduler(new SerialMergeScheduler()); conf.setCodec(getCodec()); - + int numDocs = atLeast(500); - + IndexWriter iw = new IndexWriter(dir, conf); try { boolean allowAlreadyClosed = false; for (int i = 0; i < numDocs; i++) { dir.setRandomIOExceptionRateOnOpen(0.02); // turn on exceptions for openInput/createOutput - + Document doc = new Document(); doc.add(newStringField("id", Integer.toString(i), Field.Store.NO)); addRandomFields(doc); - + // single doc try { iw.addDocument(doc); @@ -623,7 +676,8 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { iw.deleteDocuments(new Term("id", Integer.toString(i))); } catch (AlreadyClosedException ace) { // OK: writer was closed by abort; we just reopen now: - dir.setRandomIOExceptionRateOnOpen(0.0); // disable exceptions on openInput until next iteration + dir.setRandomIOExceptionRateOnOpen( + 0.0); // disable exceptions on openInput until next iteration assertTrue(iw.isDeleterClosed()); assertTrue(allowAlreadyClosed); allowAlreadyClosed = false; @@ -631,7 +685,7 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { // just for now, try to keep this test reproducible conf.setMergeScheduler(new SerialMergeScheduler()); conf.setCodec(getCodec()); - iw = new IndexWriter(dir, conf); + iw = new IndexWriter(dir, conf); } catch (IOException e) { handleFakeIOException(e, exceptionStream); allowAlreadyClosed = true; @@ -644,14 +698,16 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { DirectoryReader ir = null; try { ir = DirectoryReader.open(iw, random().nextBoolean(), false); - dir.setRandomIOExceptionRateOnOpen(0.0); // disable exceptions on openInput until next iteration + dir.setRandomIOExceptionRateOnOpen( + 0.0); // disable exceptions on openInput until next iteration TestUtil.checkReader(ir); } finally { IOUtils.closeWhileHandlingException(ir); } } else { - dir.setRandomIOExceptionRateOnOpen(0.0); // disable exceptions on openInput until next iteration: - // or we make slowExists angry and trip a scarier assert! + dir.setRandomIOExceptionRateOnOpen( + 0.0); // disable exceptions on openInput until next iteration: + // or we make slowExists angry and trip a scarier assert! 
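// (editor's note, not part of the original patch: the 0.02 / 0.0 toggling of
// setRandomIOExceptionRateOnOpen seen above drives MockDirectoryWrapper's fault injection;
// openInput/createOutput only throw simulated IOExceptions while the rate is non-zero, so the
// test resets it to 0.0 before operations that must succeed, such as the commit below)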
iw.commit(); } if (DirectoryReader.indexExists(dir)) { @@ -659,7 +715,8 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { } } catch (AlreadyClosedException ace) { // OK: writer was closed by abort; we just reopen now: - dir.setRandomIOExceptionRateOnOpen(0.0); // disable exceptions on openInput until next iteration + dir.setRandomIOExceptionRateOnOpen( + 0.0); // disable exceptions on openInput until next iteration assertTrue(iw.isDeleterClosed()); assertTrue(allowAlreadyClosed); allowAlreadyClosed = false; @@ -667,23 +724,25 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { // just for now, try to keep this test reproducible conf.setMergeScheduler(new SerialMergeScheduler()); conf.setCodec(getCodec()); - iw = new IndexWriter(dir, conf); + iw = new IndexWriter(dir, conf); } catch (IOException e) { handleFakeIOException(e, exceptionStream); allowAlreadyClosed = true; } } } - + try { - dir.setRandomIOExceptionRateOnOpen(0.0); // disable exceptions on openInput until next iteration: - // or we make slowExists angry and trip a scarier assert! + dir.setRandomIOExceptionRateOnOpen( + 0.0); // disable exceptions on openInput until next iteration: + // or we make slowExists angry and trip a scarier assert! iw.close(); } catch (IOException e) { handleFakeIOException(e, exceptionStream); try { iw.rollback(); - } catch (Throwable t) {} + } catch (Throwable t) { + } } dir.close(); } catch (Throwable t) { @@ -693,13 +752,13 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { System.out.flush(); Rethrow.rethrow(t); } - + if (VERBOSE) { System.out.println("TEST PASSED: dumping fake-exception-log:..."); System.out.println(exceptionLog.toString("UTF-8")); } } - + private void handleFakeIOException(IOException e, PrintStream exceptionStream) { Throwable ex = e; while (ex != null) { @@ -710,13 +769,13 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { } ex = ex.getCause(); } - + Rethrow.rethrow(e); } /** - * Returns {@code false} if only the regular fields reader should be tested, - * and {@code true} if only the merge instance should be tested. + * Returns {@code false} if only the regular fields reader should be tested, and {@code true} if + * only the merge instance should be tested. */ protected boolean shouldTestMergeInstance() { return false; @@ -729,12 +788,11 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { return r; } - /** - * A directory that tracks created files that haven't been deleted. - */ + /** A directory that tracks created files that haven't been deleted. */ protected static class FileTrackingDirectoryWrapper extends FilterDirectory { - private final Set files = Collections.newSetFromMap(new ConcurrentHashMap()); + private final Set files = + Collections.newSetFromMap(new ConcurrentHashMap()); /** Sole constructor. 
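* <p>(Editor's note, not part of the patch: typical usage appears later in this file, e.g.
* {@code FileTrackingDirectoryWrapper dir = new FileTrackingDirectoryWrapper(newDirectory());}.)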
*/ FileTrackingDirectoryWrapper(Directory in) { @@ -764,7 +822,6 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { files.remove(name); super.deleteFile(name); } - } private static class ReadBytesIndexInputWrapper extends IndexInput { @@ -806,7 +863,8 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { @Override public IndexInput slice(String sliceDescription, long offset, long length) throws IOException { IndexInput slice = in.slice(sliceDescription, offset, length); - return new ReadBytesIndexInputWrapper(slice, o -> readByte.accept(Math.toIntExact(offset + o))); + return new ReadBytesIndexInputWrapper( + slice, o -> readByte.accept(Math.toIntExact(offset + o))); } @Override @@ -823,7 +881,6 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { } in.readBytes(b, offset, len); } - } /** A directory that tracks read bytes. */ @@ -844,7 +901,8 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { @Override public IndexInput openInput(String name, IOContext context) throws IOException { IndexInput in = super.openInput(name, context); - final FixedBitSet set = readBytes.computeIfAbsent(name, n -> new FixedBitSet(Math.toIntExact(in.length()))); + final FixedBitSet set = + readBytes.computeIfAbsent(name, n -> new FixedBitSet(Math.toIntExact(in.length()))); if (set.length() != in.length()) { throw new IllegalStateException(); } @@ -854,7 +912,8 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { @Override public ChecksumIndexInput openChecksumInput(String name, IOContext context) throws IOException { ChecksumIndexInput in = super.openChecksumInput(name, context); - final FixedBitSet set = readBytes.computeIfAbsent(name, n -> new FixedBitSet(Math.toIntExact(in.length()))); + final FixedBitSet set = + readBytes.computeIfAbsent(name, n -> new FixedBitSet(Math.toIntExact(in.length()))); if (set.length() != in.length()) { throw new IllegalStateException(); } @@ -874,7 +933,8 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { } @Override - public IndexInput slice(String sliceDescription, long offset, long length) throws IOException { + public IndexInput slice(String sliceDescription, long offset, long length) + throws IOException { throw new UnsupportedOperationException(); } @@ -906,15 +966,20 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { } @Override - public IndexOutput createTempOutput(String prefix, String suffix, IOContext context) throws IOException { + public IndexOutput createTempOutput(String prefix, String suffix, IOContext context) + throws IOException { throw new UnsupportedOperationException(); } } - /** This test is a best effort at verifying that checkIntegrity doesn't miss any files. It tests that the - * combination of opening a reader and calling checkIntegrity on it reads all bytes of all files. */ + /** + * This test is a best effort at verifying that checkIntegrity doesn't miss any files. It tests + * that the combination of opening a reader and calling checkIntegrity on it reads all bytes of + * all files. 
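+ * <p>(Editor's sketch, not part of the patch: conceptually, {@code ReadBytesDirectoryWrapper}
+ * below records every byte read into a per-file {@code FixedBitSet}, roughly:)
+ *
+ * <pre>{@code
+ * // hypothetical fragment; fileName/offset/len are illustrative names
+ * FixedBitSet set = readBytes.get(fileName);
+ * set.set(offset, offset + len); // afterwards, any clear bit marks an unread byte
+ * }</pre>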
+ */ public void testCheckIntegrityReadsAllBytes() throws Exception { - assumeFalse("SimpleText doesn't store checksums of its files", getCodec() instanceof SimpleTextCodec); + assumeFalse( + "SimpleText doesn't store checksums of its files", getCodec() instanceof SimpleTextCodec); FileTrackingDirectoryWrapper dir = new FileTrackingDirectoryWrapper(newDirectory()); applyCreatedVersionMajor(dir); @@ -937,7 +1002,8 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { Map readBytesMap = readBytesWrapperDir.getReadBytes(); - Set unreadFiles = new HashSet<>(dir.getFiles());System.out.println(Arrays.toString(dir.listAll())); + Set unreadFiles = new HashSet<>(dir.getFiles()); + System.out.println(Arrays.toString(dir.listAll())); unreadFiles.removeAll(readBytesMap.keySet()); unreadFiles.remove(IndexWriter.WRITE_LOCK_NAME); assertTrue("Some files have not been open: " + unreadFiles, unreadFiles.isEmpty()); @@ -949,7 +1015,14 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase { unreadBytes.flip(0, unreadBytes.length()); int unread = unreadBytes.nextSetBit(0); if (unread != Integer.MAX_VALUE) { - messages.add("Offset " + unread + " of file " + name + "(" + unreadBytes.length() + "bytes) was not read."); + messages.add( + "Offset " + + unread + + " of file " + + name + + "(" + + unreadBytes.length() + + "bytes) was not read."); } } assertTrue(String.join("\n", messages), messages.isEmpty()); diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseLiveDocsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseLiveDocsFormatTestCase.java index 4f15bef21bd..74bc4def1b1 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseLiveDocsFormatTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseLiveDocsFormatTestCase.java @@ -18,7 +18,6 @@ package org.apache.lucene.index; import java.io.IOException; import java.util.Collections; - import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.LiveDocsFormat; import org.apache.lucene.store.Directory; @@ -30,10 +29,7 @@ import org.apache.lucene.util.StringHelper; import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.Version; -/** - * Abstract class that performs basic testing of a codec's - * {@link LiveDocsFormat}. - */ +/** Abstract class that performs basic testing of a codec's {@link LiveDocsFormat}. 
*/ public abstract class BaseLiveDocsFormatTestCase extends LuceneTestCase { /** Returns the codec to run tests against */ @@ -78,7 +74,8 @@ public abstract class BaseLiveDocsFormatTestCase extends LuceneTestCase { testSerialization(IndexWriter.MAX_DOCS, IndexWriter.MAX_DOCS - 7, false); } - private void testSerialization(int maxDoc, int numLiveDocs, boolean fixedBitSet) throws IOException { + private void testSerialization(int maxDoc, int numLiveDocs, boolean fixedBitSet) + throws IOException { final Codec codec = Codec.getDefault(); final LiveDocsFormat format = codec.liveDocsFormat(); @@ -107,24 +104,35 @@ public abstract class BaseLiveDocsFormatTestCase extends LuceneTestCase { bits = liveDocs; } else { // Make sure the impl doesn't only work with a FixedBitSet - bits = new Bits() { + bits = + new Bits() { - @Override - public boolean get(int index) { - return liveDocs.get(index); - } + @Override + public boolean get(int index) { + return liveDocs.get(index); + } - @Override - public int length() { - return liveDocs.length(); - } - - }; + @Override + public int length() { + return liveDocs.length(); + } + }; } final Directory dir = newDirectory(); - final SegmentInfo si = new SegmentInfo(dir, Version.LATEST, Version.LATEST, "foo", maxDoc, random().nextBoolean(), - codec, Collections.emptyMap(), StringHelper.randomId(), Collections.emptyMap(), null); + final SegmentInfo si = + new SegmentInfo( + dir, + Version.LATEST, + Version.LATEST, + "foo", + maxDoc, + random().nextBoolean(), + codec, + Collections.emptyMap(), + StringHelper.randomId(), + Collections.emptyMap(), + null); SegmentCommitInfo sci = new SegmentCommitInfo(si, 0, 0, 0, -1, -1, StringHelper.randomId()); format.writeLiveDocs(bits, dir, sci, maxDoc - numLiveDocs, IOContext.DEFAULT); diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseMergePolicyTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseMergePolicyTestCase.java index 730bff734d4..1fa1cd465d6 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseMergePolicyTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseMergePolicyTestCase.java @@ -27,7 +27,6 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.function.ToIntFunction; - import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.index.MergePolicy.MergeContext; @@ -45,46 +44,49 @@ import org.apache.lucene.util.StringHelper; import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.Version; -/** - * Base test case for {@link MergePolicy}. - */ +/** Base test case for {@link MergePolicy}. */ public abstract class BaseMergePolicyTestCase extends LuceneTestCase { - + /** Create a new {@link MergePolicy} instance. */ protected abstract MergePolicy mergePolicy(); /** - * Assert that the given segment infos match expectations of the merge - * policy, assuming segments that have only been either flushed or merged with - * this merge policy. + * Assert that the given segment infos match expectations of the merge policy, assuming segments + * that have only been either flushed or merged with this merge policy. 
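+ * <p>(Editor's sketch of a minimal override, assuming a hypothetical per-segment size bound
+ * {@code MAX_EXPECTED_SEGMENT_BYTES}; illustrative only, not part of the patch:)
+ *
+ * <pre>{@code
+ * @Override
+ * protected void assertSegmentInfos(MergePolicy policy, SegmentInfos infos) throws IOException {
+ *   for (SegmentCommitInfo sci : infos) {
+ *     // every flushed or merged segment should respect the policy's ceiling
+ *     assertTrue(sci.sizeInBytes() <= MAX_EXPECTED_SEGMENT_BYTES);
+ *   }
+ * }
+ * }</pre>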
*/ - protected abstract void assertSegmentInfos(MergePolicy policy, SegmentInfos infos) throws IOException; + protected abstract void assertSegmentInfos(MergePolicy policy, SegmentInfos infos) + throws IOException; - /** - * Assert that the given merge matches expectations of the merge policy. - */ - protected abstract void assertMerge(MergePolicy policy, MergeSpecification merge) throws IOException; + /** Assert that the given merge matches expectations of the merge policy. */ + protected abstract void assertMerge(MergePolicy policy, MergeSpecification merge) + throws IOException; public void testForceMergeNotNeeded() throws IOException { try (Directory dir = newDirectory()) { final AtomicBoolean mayMerge = new AtomicBoolean(true); - final MergeScheduler mergeScheduler = new SerialMergeScheduler() { - @Override - synchronized public void merge(MergeSource mergeSource, MergeTrigger trigger) throws IOException { - if (mayMerge.get() == false) { - MergePolicy.OneMerge merge = mergeSource.getNextMerge(); - if (merge != null) { - System.out.println("TEST: we should not need any merging, yet merge policy returned merge " + merge); - throw new AssertionError(); + final MergeScheduler mergeScheduler = + new SerialMergeScheduler() { + @Override + public synchronized void merge(MergeSource mergeSource, MergeTrigger trigger) + throws IOException { + if (mayMerge.get() == false) { + MergePolicy.OneMerge merge = mergeSource.getNextMerge(); + if (merge != null) { + System.out.println( + "TEST: we should not need any merging, yet merge policy returned merge " + + merge); + throw new AssertionError(); + } } - } - super.merge(mergeSource, trigger); - } - }; + super.merge(mergeSource, trigger); + } + }; MergePolicy mp = mergePolicy(); - assumeFalse("this test cannot tolerate random forceMerges", mp.toString().contains("MockRandomMergePolicy")); + assumeFalse( + "this test cannot tolerate random forceMerges", + mp.toString().contains("MockRandomMergePolicy")); mp.setNoCFSRatio(random().nextBoolean() ? 0 : 1); IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random())); @@ -105,7 +107,11 @@ public abstract class BaseMergePolicyTestCase extends LuceneTestCase { final int maxNumSegments = i == 0 ? 
1 : TestUtil.nextInt(random(), 1, 10); mayMerge.set(segmentCount > maxNumSegments); if (VERBOSE) { - System.out.println("TEST: now forceMerge(maxNumSegments=" + maxNumSegments + ") vs segmentCount=" + segmentCount); + System.out.println( + "TEST: now forceMerge(maxNumSegments=" + + maxNumSegments + + ") vs segmentCount=" + + segmentCount); } writer.forceMerge(maxNumSegments); } @@ -116,7 +122,8 @@ public abstract class BaseMergePolicyTestCase extends LuceneTestCase { public void testFindForcedDeletesMerges() throws IOException { MergePolicy mp = mergePolicy(); if (mp instanceof FilterMergePolicy) { - assumeFalse("test doesn't work with MockRandomMP", + assumeFalse( + "test doesn't work with MockRandomMP", ((FilterMergePolicy) mp).in instanceof MockRandomMergePolicy); } SegmentInfos infos = new SegmentInfos(Version.LATEST.major); @@ -124,43 +131,45 @@ public abstract class BaseMergePolicyTestCase extends LuceneTestCase { MergePolicy.MergeContext context = new MockMergeContext(s -> 0); int numSegs = random().nextInt(10); for (int i = 0; i < numSegs; i++) { - SegmentInfo info = new SegmentInfo( - directory, // dir - Version.LATEST, // version - Version.LATEST, // min version - TestUtil.randomSimpleString(random()), // name - random().nextInt(Integer.MAX_VALUE), // maxDoc - random().nextBoolean(), // isCompoundFile - null, // codec - Collections.emptyMap(), // diagnostics - TestUtil.randomSimpleString(// id - random(), - StringHelper.ID_LENGTH, - StringHelper.ID_LENGTH).getBytes(StandardCharsets.US_ASCII), - Collections.emptyMap(), // attributes - null /* indexSort */); + SegmentInfo info = + new SegmentInfo( + directory, // dir + Version.LATEST, // version + Version.LATEST, // min version + TestUtil.randomSimpleString(random()), // name + random().nextInt(Integer.MAX_VALUE), // maxDoc + random().nextBoolean(), // isCompoundFile + null, // codec + Collections.emptyMap(), // diagnostics + TestUtil.randomSimpleString( // id + random(), StringHelper.ID_LENGTH, StringHelper.ID_LENGTH) + .getBytes(StandardCharsets.US_ASCII), + Collections.emptyMap(), // attributes + null /* indexSort */); info.setFiles(Collections.emptyList()); - infos.add(new SegmentCommitInfo(info, random().nextInt(1), 0, -1, -1, -1, StringHelper.randomId())); + infos.add( + new SegmentCommitInfo( + info, random().nextInt(1), 0, -1, -1, -1, StringHelper.randomId())); } - MergePolicy.MergeSpecification forcedDeletesMerges = mp.findForcedDeletesMerges(infos, context); + MergePolicy.MergeSpecification forcedDeletesMerges = + mp.findForcedDeletesMerges(infos, context); if (forcedDeletesMerges != null) { assertEquals(0, forcedDeletesMerges.merges.size()); } } } - /** - * Simple mock merge context for tests - */ + /** Simple mock merge context for tests */ public static final class MockMergeContext implements MergePolicy.MergeContext { private final ToIntFunction numDeletesFunc; - private final InfoStream infoStream = new NullInfoStream() { - @Override - public boolean isEnabled(String component) { - // otherwise tests that simulate merging may bottleneck on generating messages - return false; - } - }; + private final InfoStream infoStream = + new NullInfoStream() { + @Override + public boolean isEnabled(String component) { + // otherwise tests that simulate merging may bottleneck on generating messages + return false; + } + }; private Set mergingSegments = Collections.emptySet(); @@ -194,100 +203,116 @@ public abstract class BaseMergePolicyTestCase extends LuceneTestCase { } /** - * Make a new {@link SegmentCommitInfo} with the given 
{@code maxDoc}, - * {@code numDeletedDocs} and {@code sizeInBytes}, which are usually the - * numbers that merge policies care about. + * Make a new {@link SegmentCommitInfo} with the given {@code maxDoc}, {@code numDeletedDocs} and + * {@code sizeInBytes}, which are usually the numbers that merge policies care about. */ - protected static SegmentCommitInfo makeSegmentCommitInfo(String name, int maxDoc, int numDeletedDocs, double sizeMB, String source) { + protected static SegmentCommitInfo makeSegmentCommitInfo( + String name, int maxDoc, int numDeletedDocs, double sizeMB, String source) { if (name.startsWith("_") == false) { throw new IllegalArgumentException("name must start with an _, got " + name); } byte[] id = new byte[StringHelper.ID_LENGTH]; random().nextBytes(id); - SegmentInfo info = new SegmentInfo(FAKE_DIRECTORY, Version.LATEST, Version.LATEST, - name, maxDoc, false, TestUtil.getDefaultCodec(), Collections.emptyMap(), id, - Collections.singletonMap(IndexWriter.SOURCE, source), null); - info.setFiles(Collections.singleton(name + "_size=" + Long.toString((long) (sizeMB * 1024 * 1024)) + ".fake")); + SegmentInfo info = + new SegmentInfo( + FAKE_DIRECTORY, + Version.LATEST, + Version.LATEST, + name, + maxDoc, + false, + TestUtil.getDefaultCodec(), + Collections.emptyMap(), + id, + Collections.singletonMap(IndexWriter.SOURCE, source), + null); + info.setFiles( + Collections.singleton( + name + "_size=" + Long.toString((long) (sizeMB * 1024 * 1024)) + ".fake")); return new SegmentCommitInfo(info, numDeletedDocs, 0, 0, 0, 0, StringHelper.randomId()); } /** A directory that computes the length of a file based on its name. */ - private static final Directory FAKE_DIRECTORY = new Directory() { + private static final Directory FAKE_DIRECTORY = + new Directory() { - @Override - public String[] listAll() throws IOException { - throw new UnsupportedOperationException(); - } + @Override + public String[] listAll() throws IOException { + throw new UnsupportedOperationException(); + } - @Override - public void deleteFile(String name) throws IOException { - throw new UnsupportedOperationException(); - } + @Override + public void deleteFile(String name) throws IOException { + throw new UnsupportedOperationException(); + } - @Override - public long fileLength(String name) throws IOException { - if (name.endsWith(".liv")) { - return 0L; - } - if (name.endsWith(".fake") == false) { - throw new IllegalArgumentException(name); - } - int startIndex = name.indexOf("_size=") + "_size=".length(); - int endIndex = name.length() - ".fake".length(); - return Long.parseLong(name.substring(startIndex, endIndex)); - } + @Override + public long fileLength(String name) throws IOException { + if (name.endsWith(".liv")) { + return 0L; + } + if (name.endsWith(".fake") == false) { + throw new IllegalArgumentException(name); + } + int startIndex = name.indexOf("_size=") + "_size=".length(); + int endIndex = name.length() - ".fake".length(); + return Long.parseLong(name.substring(startIndex, endIndex)); + } - @Override - public IndexOutput createOutput(String name, IOContext context) throws IOException { - throw new UnsupportedOperationException(); - } + @Override + public IndexOutput createOutput(String name, IOContext context) throws IOException { + throw new UnsupportedOperationException(); + } - @Override - public IndexOutput createTempOutput(String prefix, String suffix, IOContext context) throws IOException { - throw new UnsupportedOperationException(); - } + @Override + public IndexOutput 
createTempOutput(String prefix, String suffix, IOContext context) + throws IOException { + throw new UnsupportedOperationException(); + } - @Override - public void sync(Collection names) throws IOException { - throw new UnsupportedOperationException(); - } + @Override + public void sync(Collection names) throws IOException { + throw new UnsupportedOperationException(); + } - @Override - public void rename(String source, String dest) throws IOException { - throw new UnsupportedOperationException(); - } + @Override + public void rename(String source, String dest) throws IOException { + throw new UnsupportedOperationException(); + } - @Override - public void syncMetaData() throws IOException { - throw new UnsupportedOperationException(); - } + @Override + public void syncMetaData() throws IOException { + throw new UnsupportedOperationException(); + } - @Override - public IndexInput openInput(String name, IOContext context) throws IOException { - throw new UnsupportedOperationException(); - } + @Override + public IndexInput openInput(String name, IOContext context) throws IOException { + throw new UnsupportedOperationException(); + } - @Override - public Lock obtainLock(String name) throws IOException { - throw new UnsupportedOperationException(); - } + @Override + public Lock obtainLock(String name) throws IOException { + throw new UnsupportedOperationException(); + } - @Override - public void close() throws IOException { - throw new UnsupportedOperationException(); - } + @Override + public void close() throws IOException { + throw new UnsupportedOperationException(); + } - @Override - public Set getPendingDeletions() throws IOException { - throw new UnsupportedOperationException(); - } - }; + @Override + public Set getPendingDeletions() throws IOException { + throw new UnsupportedOperationException(); + } + }; /** - * Apply a merge to a {@link SegmentInfos} instance, accumulating the number - * of written bytes into {@code stats}. + * Apply a merge to a {@link SegmentInfos} instance, accumulating the number of written bytes into + * {@code stats}. */ - protected static SegmentInfos applyMerge(SegmentInfos infos, OneMerge merge, String mergedSegmentName, IOStats stats) throws IOException { + protected static SegmentInfos applyMerge( + SegmentInfos infos, OneMerge merge, String mergedSegmentName, IOStats stats) + throws IOException { LinkedHashSet scis = new LinkedHashSet<>(infos.asList()); int newMaxDoc = 0; double newSize = 0; @@ -301,19 +326,16 @@ public abstract class BaseMergePolicyTestCase extends LuceneTestCase { SegmentInfos newInfos = new SegmentInfos(Version.LATEST.major); newInfos.addAll(scis); // Now add the merged segment - newInfos.add(makeSegmentCommitInfo(mergedSegmentName, newMaxDoc, 0, newSize, IndexWriter.SOURCE_MERGE)); + newInfos.add( + makeSegmentCommitInfo(mergedSegmentName, newMaxDoc, 0, newSize, IndexWriter.SOURCE_MERGE)); stats.mergeBytesWritten += newSize * 1024 * 1024; return newInfos; } - /** - * Apply {@code numDeletes} uniformly across all segments of {@code infos}. - */ + /** Apply {@code numDeletes} uniformly across all segments of {@code infos}. 
*/ protected static SegmentInfos applyDeletes(SegmentInfos infos, int numDeletes) { List infoList = infos.asList(); - int totalNumDocs = infoList.stream() - .mapToInt(s -> s.info.maxDoc() - s.getDelCount()) - .sum(); + int totalNumDocs = infoList.stream().mapToInt(s -> s.info.maxDoc() - s.getDelCount()).sum(); if (numDeletes > totalNumDocs) { throw new IllegalArgumentException("More deletes than documents"); } @@ -326,12 +348,21 @@ public abstract class BaseMergePolicyTestCase extends LuceneTestCase { if (i == infoList.size() - 1) { segDeletes = numDeletes; } else { - segDeletes = Math.min(numDeletes, (int) Math.ceil(w * (sci.info.maxDoc() - sci.getDelCount()))); + segDeletes = + Math.min(numDeletes, (int) Math.ceil(w * (sci.info.maxDoc() - sci.getDelCount()))); } int newDelCount = sci.getDelCount() + segDeletes; assert newDelCount <= sci.info.maxDoc(); if (newDelCount < sci.info.maxDoc()) { // drop fully deleted segments - SegmentCommitInfo newInfo = new SegmentCommitInfo(sci.info, sci.getDelCount() + segDeletes, 0, sci.getDelGen() + 1, sci.getFieldInfosGen(), sci.getDocValuesGen(), StringHelper.randomId()); + SegmentCommitInfo newInfo = + new SegmentCommitInfo( + sci.info, + sci.getDelCount() + segDeletes, + 0, + sci.getDelGen() + 1, + sci.getFieldInfosGen(), + sci.getDocValuesGen(), + StringHelper.randomId()); newInfoList.add(newInfo); } numDeletes -= segDeletes; @@ -342,19 +373,17 @@ public abstract class BaseMergePolicyTestCase extends LuceneTestCase { return newInfos; } - /** - * Simulate an append-only use-case, ie. there are no deletes. - */ + /** Simulate an append-only use-case, ie. there are no deletes. */ public void testSimulateAppendOnly() throws IOException { doTestSimulateAppendOnly(mergePolicy(), 100_000_000, 10_000); } /** - * Simulate an append-only use-case, ie. there are no deletes. - * {@code totalDocs} exist in the index in the end, and flushes contribute at most - * {@code maxDocsPerFlush} documents. + * Simulate an append-only use-case, ie. there are no deletes. {@code totalDocs} exist in the + * index in the end, and flushes contribute at most {@code maxDocsPerFlush} documents. 
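+ * <p>(Editor's sketch of how a subclass might drive this simulation with a concrete policy;
+ * the test name and policy choice are illustrative, not part of the patch:)
+ *
+ * <pre>{@code
+ * public void testSimulateAppendOnlyHuge() throws IOException {
+ *   doTestSimulateAppendOnly(new TieredMergePolicy(), 100_000_000, 10_000);
+ * }
+ * }</pre>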
*/ - protected void doTestSimulateAppendOnly(MergePolicy mergePolicy, int totalDocs, int maxDocsPerFlush) throws IOException { + protected void doTestSimulateAppendOnly( + MergePolicy mergePolicy, int totalDocs, int maxDocsPerFlush) throws IOException { IOStats stats = new IOStats(); AtomicLong segNameGenerator = new AtomicLong(); MergeContext mergeContext = new MockMergeContext(SegmentCommitInfo::getDelCount); @@ -365,14 +394,22 @@ public abstract class BaseMergePolicyTestCase extends LuceneTestCase { numDocs += flushDocCount; double flushSizeMB = flushDocCount * avgDocSizeMB; stats.flushBytesWritten += flushSizeMB * 1024 * 1024; - segmentInfos.add(makeSegmentCommitInfo("_" + segNameGenerator.getAndIncrement(), flushDocCount, 0, flushSizeMB, IndexWriter.SOURCE_FLUSH)); + segmentInfos.add( + makeSegmentCommitInfo( + "_" + segNameGenerator.getAndIncrement(), + flushDocCount, + 0, + flushSizeMB, + IndexWriter.SOURCE_FLUSH)); - MergeSpecification merges = mergePolicy.findMerges(MergeTrigger.SEGMENT_FLUSH, segmentInfos, mergeContext); + MergeSpecification merges = + mergePolicy.findMerges(MergeTrigger.SEGMENT_FLUSH, segmentInfos, mergeContext); while (merges != null) { assertTrue(merges.merges.size() > 0); assertMerge(mergePolicy, merges); for (OneMerge oneMerge : merges.merges) { - segmentInfos = applyMerge(segmentInfos, oneMerge, "_" + segNameGenerator.getAndIncrement(), stats); + segmentInfos = + applyMerge(segmentInfos, oneMerge, "_" + segNameGenerator.getAndIncrement(), stats); } merges = mergePolicy.findMerges(MergeTrigger.MERGE_FINISHED, segmentInfos, mergeContext); } @@ -380,24 +417,26 @@ public abstract class BaseMergePolicyTestCase extends LuceneTestCase { } if (VERBOSE) { - System.out.println("Write amplification for append-only: " + (double) (stats.flushBytesWritten + stats.mergeBytesWritten) / stats.flushBytesWritten); + System.out.println( + "Write amplification for append-only: " + + (double) (stats.flushBytesWritten + stats.mergeBytesWritten) + / stats.flushBytesWritten); } } - /** - * Simulate an update use-case where documents are uniformly updated across segments. - */ + /** Simulate an update use-case where documents are uniformly updated across segments. */ public void testSimulateUpdates() throws IOException { int numDocs = atLeast(1_000_000); doTestSimulateUpdates(mergePolicy(), numDocs, 2500); } /** - * Simulate an update use-case where documents are uniformly updated across segments. - * {@code totalDocs} exist in the index in the end, and flushes contribute at most - * {@code maxDocsPerFlush} documents. + * Simulate an update use-case where documents are uniformly updated across segments. {@code + * totalDocs} exist in the index in the end, and flushes contribute at most {@code + * maxDocsPerFlush} documents. 
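+ * <p>(Editor's sketch, analogous to the append-only case above; the policy choice is
+ * illustrative, not part of the patch: {@code doTestSimulateUpdates(new TieredMergePolicy(),
+ * atLeast(1_000_000), 2500);})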
*/ - protected void doTestSimulateUpdates(MergePolicy mergePolicy, int totalDocs, int maxDocsPerFlush) throws IOException { + protected void doTestSimulateUpdates(MergePolicy mergePolicy, int totalDocs, int maxDocsPerFlush) + throws IOException { IOStats stats = new IOStats(); AtomicLong segNameGenerator = new AtomicLong(); MergeContext mergeContext = new MockMergeContext(SegmentCommitInfo::getDelCount); @@ -407,7 +446,7 @@ public abstract class BaseMergePolicyTestCase extends LuceneTestCase { final int flushDocCount; if (usually()) { // reasonable value - flushDocCount = TestUtil.nextInt(random(), maxDocsPerFlush/2, maxDocsPerFlush); + flushDocCount = TestUtil.nextInt(random(), maxDocsPerFlush / 2, maxDocsPerFlush); } else { // crazy value flushDocCount = TestUtil.nextInt(random(), 1, maxDocsPerFlush); @@ -418,12 +457,20 @@ public abstract class BaseMergePolicyTestCase extends LuceneTestCase { segmentInfos = applyDeletes(segmentInfos, delCount); double flushSize = flushDocCount * avgDocSizeMB; stats.flushBytesWritten += flushSize * 1024 * 1024; - segmentInfos.add(makeSegmentCommitInfo("_" + segNameGenerator.getAndIncrement(), flushDocCount, 0, flushSize, IndexWriter.SOURCE_FLUSH)); - MergeSpecification merges = mergePolicy.findMerges(MergeTrigger.SEGMENT_FLUSH, segmentInfos, mergeContext); + segmentInfos.add( + makeSegmentCommitInfo( + "_" + segNameGenerator.getAndIncrement(), + flushDocCount, + 0, + flushSize, + IndexWriter.SOURCE_FLUSH)); + MergeSpecification merges = + mergePolicy.findMerges(MergeTrigger.SEGMENT_FLUSH, segmentInfos, mergeContext); while (merges != null) { assertMerge(mergePolicy, merges); for (OneMerge oneMerge : merges.merges) { - segmentInfos = applyMerge(segmentInfos, oneMerge, "_" + segNameGenerator.getAndIncrement(), stats); + segmentInfos = + applyMerge(segmentInfos, oneMerge, "_" + segNameGenerator.getAndIncrement(), stats); } merges = mergePolicy.findMerges(MergeTrigger.MERGE_FINISHED, segmentInfos, mergeContext); } @@ -431,14 +478,14 @@ public abstract class BaseMergePolicyTestCase extends LuceneTestCase { } if (VERBOSE) { - System.out.println("Write amplification for update: " + (double) (stats.flushBytesWritten + stats.mergeBytesWritten) / stats.flushBytesWritten); - int totalDelCount = segmentInfos.asList().stream() - .mapToInt(SegmentCommitInfo::getDelCount) - .sum(); - int totalMaxDoc = segmentInfos.asList().stream() - .map(s -> s.info) - .mapToInt(SegmentInfo::maxDoc) - .sum(); + System.out.println( + "Write amplification for update: " + + (double) (stats.flushBytesWritten + stats.mergeBytesWritten) + / stats.flushBytesWritten); + int totalDelCount = + segmentInfos.asList().stream().mapToInt(SegmentCommitInfo::getDelCount).sum(); + int totalMaxDoc = + segmentInfos.asList().stream().map(s -> s.info).mapToInt(SegmentInfo::maxDoc).sum(); System.out.println("Final live ratio: " + (1 - (double) totalDelCount / totalMaxDoc)); } } diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseNormsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseNormsFormatTestCase.java index a308f17eb2d..defd35e1a08 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseNormsFormatTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseNormsFormatTestCase.java @@ -16,13 +16,14 @@ */ package org.apache.lucene.index; +import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS; + import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Random; import 
java.util.concurrent.CountDownLatch; import java.util.function.LongSupplier; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; @@ -40,16 +41,12 @@ import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.TestUtil; -import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS; - /** - * Abstract class to do basic tests for a norms format. - * NOTE: This test focuses on the norms impl, nothing else. - * The [stretch] goal is for this test to be - * so thorough in testing a new NormsFormat that if this - * test passes, then all Lucene/Solr tests should also pass. Ie, - * if there is some bug in a given NormsFormat that this - * test fails to catch then this test needs to be improved! */ + * Abstract class to do basic tests for a norms format. NOTE: This test focuses on the norms impl, + * nothing else. The [stretch] goal is for this test to be so thorough in testing a new NormsFormat + * that if this test passes, then all Lucene/Solr tests should also pass. Ie, if there is some bug + * in a given NormsFormat that this test fails to catch then this test needs to be improved! + */ public abstract class BaseNormsFormatTestCase extends BaseIndexFileFormatTestCase { /** Whether the codec supports sparse values. */ @@ -61,12 +58,14 @@ public abstract class BaseNormsFormatTestCase extends BaseIndexFileFormatTestCas int iterations = atLeast(1); final Random r = random(); for (int i = 0; i < iterations; i++) { - doTestNormsVersusDocValues(1, new LongSupplier() { - @Override - public long getAsLong() { - return TestUtil.nextLong(r, Byte.MIN_VALUE, Byte.MAX_VALUE); - } - }); + doTestNormsVersusDocValues( + 1, + new LongSupplier() { + @Override + public long getAsLong() { + return TestUtil.nextLong(r, Byte.MIN_VALUE, Byte.MAX_VALUE); + } + }); } } @@ -75,12 +74,14 @@ public abstract class BaseNormsFormatTestCase extends BaseIndexFileFormatTestCas int iterations = atLeast(1); final Random r = random(); for (int i = 0; i < iterations; i++) { - doTestNormsVersusDocValues(random().nextDouble(), new LongSupplier() { - @Override - public long getAsLong() { - return TestUtil.nextLong(r, Byte.MIN_VALUE, Byte.MAX_VALUE); - } - }); + doTestNormsVersusDocValues( + random().nextDouble(), + new LongSupplier() { + @Override + public long getAsLong() { + return TestUtil.nextLong(r, Byte.MIN_VALUE, Byte.MAX_VALUE); + } + }); } } @@ -88,12 +89,14 @@ public abstract class BaseNormsFormatTestCase extends BaseIndexFileFormatTestCas int iterations = atLeast(1); final Random r = random(); for (int i = 0; i < iterations; i++) { - doTestNormsVersusDocValues(1, new LongSupplier() { - @Override - public long getAsLong() { - return TestUtil.nextLong(r, Short.MIN_VALUE, Short.MAX_VALUE); - } - }); + doTestNormsVersusDocValues( + 1, + new LongSupplier() { + @Override + public long getAsLong() { + return TestUtil.nextLong(r, Short.MIN_VALUE, Short.MAX_VALUE); + } + }); } } @@ -102,12 +105,14 @@ public abstract class BaseNormsFormatTestCase extends BaseIndexFileFormatTestCas int iterations = atLeast(1); final Random r = random(); for (int i = 0; i < iterations; i++) { - doTestNormsVersusDocValues(random().nextDouble(), new LongSupplier() { - @Override - public long getAsLong() { - return TestUtil.nextLong(r, Short.MIN_VALUE, Short.MAX_VALUE); - } - }); + doTestNormsVersusDocValues( + random().nextDouble(), + new LongSupplier() { + @Override + public long getAsLong() { + return 
TestUtil.nextLong(r, Short.MIN_VALUE, Short.MAX_VALUE); + } + }); } } @@ -115,12 +120,14 @@ public abstract class BaseNormsFormatTestCase extends BaseIndexFileFormatTestCas int iterations = atLeast(1); final Random r = random(); for (int i = 0; i < iterations; i++) { - doTestNormsVersusDocValues(1, new LongSupplier() { - @Override - public long getAsLong() { - return TestUtil.nextLong(r, Long.MIN_VALUE, Long.MAX_VALUE); - } - }); + doTestNormsVersusDocValues( + 1, + new LongSupplier() { + @Override + public long getAsLong() { + return TestUtil.nextLong(r, Long.MIN_VALUE, Long.MAX_VALUE); + } + }); } } @@ -129,12 +136,14 @@ public abstract class BaseNormsFormatTestCase extends BaseIndexFileFormatTestCas int iterations = atLeast(1); final Random r = random(); for (int i = 0; i < iterations; i++) { - doTestNormsVersusDocValues(random().nextDouble(), new LongSupplier() { - @Override - public long getAsLong() { - return TestUtil.nextLong(r, Long.MIN_VALUE, Long.MAX_VALUE); - } - }); + doTestNormsVersusDocValues( + random().nextDouble(), + new LongSupplier() { + @Override + public long getAsLong() { + return TestUtil.nextLong(r, Long.MIN_VALUE, Long.MAX_VALUE); + } + }); } } @@ -142,17 +151,22 @@ public abstract class BaseNormsFormatTestCase extends BaseIndexFileFormatTestCas int iterations = atLeast(1); final Random r = random(); for (int i = 0; i < iterations; i++) { - doTestNormsVersusDocValues(1, new LongSupplier() { - @Override - public long getAsLong() { - int thingToDo = r.nextInt(3); - switch (thingToDo) { - case 0: return Long.MIN_VALUE; - case 1: return Long.MAX_VALUE; - default: return TestUtil.nextLong(r, Long.MIN_VALUE, Long.MAX_VALUE); - } - } - }); + doTestNormsVersusDocValues( + 1, + new LongSupplier() { + @Override + public long getAsLong() { + int thingToDo = r.nextInt(3); + switch (thingToDo) { + case 0: + return Long.MIN_VALUE; + case 1: + return Long.MAX_VALUE; + default: + return TestUtil.nextLong(r, Long.MIN_VALUE, Long.MAX_VALUE); + } + } + }); } } @@ -161,17 +175,22 @@ public abstract class BaseNormsFormatTestCase extends BaseIndexFileFormatTestCas int iterations = atLeast(1); final Random r = random(); for (int i = 0; i < iterations; i++) { - doTestNormsVersusDocValues(random().nextDouble(), new LongSupplier() { - @Override - public long getAsLong() { - int thingToDo = r.nextInt(3); - switch (thingToDo) { - case 0: return Long.MIN_VALUE; - case 1: return Long.MAX_VALUE; - default: return TestUtil.nextLong(r, Long.MIN_VALUE, Long.MAX_VALUE); - } - } - }); + doTestNormsVersusDocValues( + random().nextDouble(), + new LongSupplier() { + @Override + public long getAsLong() { + int thingToDo = r.nextInt(3); + switch (thingToDo) { + case 0: + return Long.MIN_VALUE; + case 1: + return Long.MAX_VALUE; + default: + return TestUtil.nextLong(r, Long.MIN_VALUE, Long.MAX_VALUE); + } + } + }); } } @@ -179,12 +198,14 @@ public abstract class BaseNormsFormatTestCase extends BaseIndexFileFormatTestCas int iterations = atLeast(1); final Random r = random(); for (int i = 0; i < iterations; i++) { - doTestNormsVersusDocValues(1, new LongSupplier() { - @Override - public long getAsLong() { - return r.nextBoolean() ? 20 : 3; - } - }); + doTestNormsVersusDocValues( + 1, + new LongSupplier() { + @Override + public long getAsLong() { + return r.nextBoolean() ? 
20 : 3; + } + }); } } @@ -193,12 +214,14 @@ public abstract class BaseNormsFormatTestCase extends BaseIndexFileFormatTestCas int iterations = atLeast(1); final Random r = random(); for (int i = 0; i < iterations; i++) { - doTestNormsVersusDocValues(random().nextDouble(), new LongSupplier() { - @Override - public long getAsLong() { - return r.nextBoolean() ? 20 : 3; - } - }); + doTestNormsVersusDocValues( + random().nextDouble(), + new LongSupplier() { + @Override + public long getAsLong() { + return r.nextBoolean() ? 20 : 3; + } + }); } } @@ -206,12 +229,14 @@ public abstract class BaseNormsFormatTestCase extends BaseIndexFileFormatTestCas int iterations = atLeast(1); final Random r = random(); for (int i = 0; i < iterations; i++) { - doTestNormsVersusDocValues(1, new LongSupplier() { - @Override - public long getAsLong() { - return r.nextBoolean() ? 1000000L : -5000; - } - }); + doTestNormsVersusDocValues( + 1, + new LongSupplier() { + @Override + public long getAsLong() { + return r.nextBoolean() ? 1000000L : -5000; + } + }); } } @@ -220,24 +245,28 @@ public abstract class BaseNormsFormatTestCase extends BaseIndexFileFormatTestCas int iterations = atLeast(1); final Random r = random(); for (int i = 0; i < iterations; i++) { - doTestNormsVersusDocValues(random().nextDouble(), new LongSupplier() { - @Override - public long getAsLong() { - return r.nextBoolean() ? 1000000L : -5000; - } - }); + doTestNormsVersusDocValues( + random().nextDouble(), + new LongSupplier() { + @Override + public long getAsLong() { + return r.nextBoolean() ? 1000000L : -5000; + } + }); } } public void testAllZeros() throws Exception { int iterations = atLeast(1); for (int i = 0; i < iterations; i++) { - doTestNormsVersusDocValues(1, new LongSupplier() { - @Override - public long getAsLong() { - return 0; - } - }); + doTestNormsVersusDocValues( + 1, + new LongSupplier() { + @Override + public long getAsLong() { + return 0; + } + }); } } @@ -245,12 +274,14 @@ public abstract class BaseNormsFormatTestCase extends BaseIndexFileFormatTestCas assumeTrue("Requires sparse norms support", codecSupportsSparsity()); int iterations = atLeast(1); for (int i = 0; i < iterations; i++) { - doTestNormsVersusDocValues(random().nextDouble(), new LongSupplier() { - @Override - public long getAsLong() { - return 0; - } - }); + doTestNormsVersusDocValues( + random().nextDouble(), + new LongSupplier() { + @Override + public long getAsLong() { + return 0; + } + }); } } @@ -258,26 +289,32 @@ public abstract class BaseNormsFormatTestCase extends BaseIndexFileFormatTestCas int iterations = atLeast(1); final Random r = random(); for (int i = 0; i < iterations; i++) { - doTestNormsVersusDocValues(1, new LongSupplier() { - @Override - public long getAsLong() { - return r.nextInt(100) == 0 ? TestUtil.nextLong(r, Byte.MIN_VALUE, Byte.MAX_VALUE) : 0; - } - }); + doTestNormsVersusDocValues( + 1, + new LongSupplier() { + @Override + public long getAsLong() { + return r.nextInt(100) == 0 ? TestUtil.nextLong(r, Byte.MIN_VALUE, Byte.MAX_VALUE) : 0; + } + }); } } - + public void testOutliers() throws Exception { int iterations = atLeast(1); final Random r = random(); for (int i = 0; i < iterations; i++) { final long commonValue = TestUtil.nextLong(r, Byte.MIN_VALUE, Byte.MAX_VALUE); - doTestNormsVersusDocValues(1, new LongSupplier() { - @Override - public long getAsLong() { - return r.nextInt(100) == 0 ? 
TestUtil.nextLong(r, Byte.MIN_VALUE, Byte.MAX_VALUE) : commonValue; - } - }); + doTestNormsVersusDocValues( + 1, + new LongSupplier() { + @Override + public long getAsLong() { + return r.nextInt(100) == 0 + ? TestUtil.nextLong(r, Byte.MIN_VALUE, Byte.MAX_VALUE) + : commonValue; + } + }); } } @@ -287,12 +324,16 @@ public abstract class BaseNormsFormatTestCase extends BaseIndexFileFormatTestCas final Random r = random(); for (int i = 0; i < iterations; i++) { final long commonValue = TestUtil.nextLong(r, Byte.MIN_VALUE, Byte.MAX_VALUE); - doTestNormsVersusDocValues(random().nextDouble(), new LongSupplier() { - @Override - public long getAsLong() { - return r.nextInt(100) == 0 ? TestUtil.nextLong(r, Byte.MIN_VALUE, Byte.MAX_VALUE) : commonValue; - } - }); + doTestNormsVersusDocValues( + random().nextDouble(), + new LongSupplier() { + @Override + public long getAsLong() { + return r.nextInt(100) == 0 + ? TestUtil.nextLong(r, Byte.MIN_VALUE, Byte.MAX_VALUE) + : commonValue; + } + }); } } @@ -302,12 +343,14 @@ public abstract class BaseNormsFormatTestCase extends BaseIndexFileFormatTestCas for (int i = 0; i < iterations; i++) { final long commonValue = TestUtil.nextLong(r, Byte.MIN_VALUE, Byte.MAX_VALUE); final long uncommonValue = TestUtil.nextLong(r, Byte.MIN_VALUE, Byte.MAX_VALUE); - doTestNormsVersusDocValues(1, new LongSupplier() { - @Override - public long getAsLong() { - return r.nextInt(100) == 0 ? uncommonValue : commonValue; - } - }); + doTestNormsVersusDocValues( + 1, + new LongSupplier() { + @Override + public long getAsLong() { + return r.nextInt(100) == 0 ? uncommonValue : commonValue; + } + }); } } @@ -318,12 +361,14 @@ public abstract class BaseNormsFormatTestCase extends BaseIndexFileFormatTestCas for (int i = 0; i < iterations; i++) { final long commonValue = TestUtil.nextLong(r, Byte.MIN_VALUE, Byte.MAX_VALUE); final long uncommonValue = TestUtil.nextLong(r, Byte.MIN_VALUE, Byte.MAX_VALUE); - doTestNormsVersusDocValues(random().nextDouble(), new LongSupplier() { - @Override - public long getAsLong() { - return r.nextInt(100) == 0 ? uncommonValue : commonValue; - } - }); + doTestNormsVersusDocValues( + random().nextDouble(), + new LongSupplier() { + @Override + public long getAsLong() { + return r.nextInt(100) == 0 ? uncommonValue : commonValue; + } + }); } } @@ -339,12 +384,16 @@ public abstract class BaseNormsFormatTestCase extends BaseIndexFileFormatTestCas for (int j = 0; j < numOtherValues; ++j) { otherValues[j] = TestUtil.nextLong(r, Byte.MIN_VALUE, Byte.MAX_VALUE); } - doTestNormsVersusDocValues(1, new LongSupplier() { - @Override - public long getAsLong() { - return r.nextInt(100) == 0 ? otherValues[r.nextInt(numOtherValues - 1)] : commonValues[r.nextInt(N - 1)]; - } - }); + doTestNormsVersusDocValues( + 1, + new LongSupplier() { + @Override + public long getAsLong() { + return r.nextInt(100) == 0 + ? otherValues[r.nextInt(numOtherValues - 1)] + : commonValues[r.nextInt(N - 1)]; + } + }); } public void testSparseNCommon() throws Exception { @@ -360,17 +409,19 @@ public abstract class BaseNormsFormatTestCase extends BaseIndexFileFormatTestCas for (int j = 0; j < numOtherValues; ++j) { otherValues[j] = TestUtil.nextLong(r, Byte.MIN_VALUE, Byte.MAX_VALUE); } - doTestNormsVersusDocValues(random().nextDouble(), new LongSupplier() { - @Override - public long getAsLong() { - return r.nextInt(100) == 0 ? 
otherValues[r.nextInt(numOtherValues - 1)] : commonValues[r.nextInt(N - 1)]; - } - }); + doTestNormsVersusDocValues( + random().nextDouble(), + new LongSupplier() { + @Override + public long getAsLong() { + return r.nextInt(100) == 0 + ? otherValues[r.nextInt(numOtherValues - 1)] + : commonValues[r.nextInt(N - 1)]; + } + }); } - /** - * a more thorough n-common that tests all low bpv - */ + /** a more thorough n-common that tests all low bpv */ @Nightly public void testNCommonBig() throws Exception { final int iterations = atLeast(1); @@ -388,19 +439,21 @@ public abstract class BaseNormsFormatTestCase extends BaseIndexFileFormatTestCas for (int j = 0; j < numOtherValues; ++j) { otherValues[j] = TestUtil.nextLong(r, Byte.MIN_VALUE, Byte.MAX_VALUE); } - doTestNormsVersusDocValues(1, new LongSupplier() { - @Override - public long getAsLong() { - return r.nextInt(100) == 0 ? otherValues[r.nextInt(numOtherValues - 1)] : commonValues[r.nextInt(N - 1)]; - } - }); + doTestNormsVersusDocValues( + 1, + new LongSupplier() { + @Override + public long getAsLong() { + return r.nextInt(100) == 0 + ? otherValues[r.nextInt(numOtherValues - 1)] + : commonValues[r.nextInt(N - 1)]; + } + }); } } } - /** - * a more thorough n-common that tests all low bpv and sparse docs - */ + /** a more thorough n-common that tests all low bpv and sparse docs */ @Nightly public void testSparseNCommonBig() throws Exception { assumeTrue("Requires sparse norms support", codecSupportsSparsity()); @@ -419,12 +472,16 @@ public abstract class BaseNormsFormatTestCase extends BaseIndexFileFormatTestCas for (int j = 0; j < numOtherValues; ++j) { otherValues[j] = TestUtil.nextLong(r, Byte.MIN_VALUE, Byte.MAX_VALUE); } - doTestNormsVersusDocValues(random().nextDouble(), new LongSupplier() { - @Override - public long getAsLong() { - return r.nextInt(100) == 0 ? otherValues[r.nextInt(numOtherValues - 1)] : commonValues[r.nextInt(N - 1)]; - } - }); + doTestNormsVersusDocValues( + random().nextDouble(), + new LongSupplier() { + @Override + public long getAsLong() { + return r.nextInt(100) == 0 + ? 
otherValues[r.nextInt(numOtherValues - 1)] + : commonValues[r.nextInt(N - 1)]; + } + }); } } } @@ -449,7 +506,7 @@ public abstract class BaseNormsFormatTestCase extends BaseIndexFileFormatTestCas for (int i = 0; i < numDocsWithField; i++) { norms[i] = longs.getAsLong(); } - + Directory dir = applyCreatedVersionMajor(newDirectory()); Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); IndexWriterConfig conf = newIndexWriterConfig(analyzer); @@ -463,7 +520,7 @@ public abstract class BaseNormsFormatTestCase extends BaseIndexFileFormatTestCas doc.add(idField); doc.add(indexedField); doc.add(dvField); - + for (int i = 0, j = 0; i < numDocs; i++) { idField.setStringValue(Integer.toString(i)); if (docsWithField.get(i) == false) { @@ -481,27 +538,27 @@ public abstract class BaseNormsFormatTestCase extends BaseIndexFileFormatTestCas writer.commit(); } } - + // delete some docs - int numDeletions = random().nextInt(numDocs/20); + int numDeletions = random().nextInt(numDocs / 20); for (int i = 0; i < numDeletions; i++) { int id = random().nextInt(numDocs); writer.deleteDocuments(new Term("id", Integer.toString(id))); } - + writer.commit(); - + // compare DirectoryReader ir = maybeWrapWithMergingReader(DirectoryReader.open(dir)); checkNormsVsDocValues(ir); ir.close(); - + writer.forceMerge(1); - + // compare again ir = maybeWrapWithMergingReader(DirectoryReader.open(dir)); checkNormsVsDocValues(ir); - + writer.close(); ir.close(); dir.close(); @@ -514,7 +571,9 @@ public abstract class BaseNormsFormatTestCase extends BaseIndexFileFormatTestCas NumericDocValues actual = r.getNormValues("indexed"); assertEquals(expected == null, actual == null); if (expected != null) { - for (int d = expected.nextDoc(); d != DocIdSetIterator.NO_MORE_DOCS; d = expected.nextDoc()) { + for (int d = expected.nextDoc(); + d != DocIdSetIterator.NO_MORE_DOCS; + d = expected.nextDoc()) { assertEquals(d, actual.nextDoc()); assertEquals("doc " + d, expected.longValue(), actual.longValue()); } @@ -522,11 +581,11 @@ public abstract class BaseNormsFormatTestCase extends BaseIndexFileFormatTestCas } } } - + static class CannedNormSimilarity extends Similarity { final long norms[]; int index = 0; - + CannedNormSimilarity(long norms[]) { this.norms = norms; } @@ -543,7 +602,8 @@ public abstract class BaseNormsFormatTestCase extends BaseIndexFileFormatTestCas } @Override - public SimScorer scorer(float boost, CollectionStatistics collectionStats, TermStatistics... termStats) { + public SimScorer scorer( + float boost, CollectionStatistics collectionStats, TermStatistics... termStats) { throw new UnsupportedOperationException(); } } @@ -552,39 +612,39 @@ public abstract class BaseNormsFormatTestCase extends BaseIndexFileFormatTestCas protected void addRandomFields(Document doc) { // TODO: improve doc.add(new TextField("foobar", TestUtil.randomSimpleString(random()), Field.Store.NO)); - } @Override public void testMergeStability() throws Exception { // TODO: can we improve this base test to just have subclasses declare the extensions to check, - // rather than a blacklist to exclude? we need to index stuff to get norms, but we dont care about testing + // rather than a blacklist to exclude? we need to index stuff to get norms, but we dont care + // about testing // the PFs actually doing that... assumeTrue("The MockRandom PF randomizes content on the fly, so we can't check it", false); } - + // TODO: test thread safety (e.g. across different fields) explicitly here /* * LUCENE-6006: Tests undead norms. 
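// ---------------------------------------------------------------------------
// Editor's aside (not part of the patch): a hedged sketch of the lockstep
// iteration pattern checkNormsVsDocValues relies on. In this API generation,
// NumericDocValues is itself a DocIdSetIterator, so advancing the "expected"
// doc-values side and asserting the "actual" norms side lands on the same doc
// compares the two document by document. The field names "dv" and "indexed"
// match the test above; the class and method names are mine.
import java.io.IOException;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.search.DocIdSetIterator;

final class NormsVsDocValues {
  static void assertSameValues(LeafReader r) throws IOException {
    NumericDocValues expected = r.getNumericDocValues("dv");
    NumericDocValues actual = r.getNormValues("indexed");
    if (expected == null) {
      assert actual == null; // both absent, or both present
      return;
    }
    for (int d = expected.nextDoc();
        d != DocIdSetIterator.NO_MORE_DOCS;
        d = expected.nextDoc()) {
      assert actual.nextDoc() == d; // values must exist for exactly the same docs
      assert actual.longValue() == expected.longValue();
    }
  }
}
// ---------------------------------------------------------------------------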
- * ..... - * C C / - * /< / - * ___ __________/_#__=o - * /(- /(\_\________ \ - * \ ) \ )_ \o \ - * /|\ /|\ |' | - * | _| - * /o __\ - * / ' | - * / / | - * /_/\______| - * ( _( < - * \ \ \ - * \ \ | - * \____\____\ - * ____\_\__\_\ - * /` /` o\ + * ..... + * C C / + * /< / + * ___ __________/_#__=o + * /(- /(\_\________ \ + * \ ) \ )_ \o \ + * /|\ /|\ |' | + * | _| + * /o __\ + * / ' | + * / / | + * /_/\______| + * ( _( < + * \ \ \ + * \ \ | + * \____\____\ + * ____\_\__\_\ + * /` /` o\ * |___ |_______| * */ @@ -593,30 +653,31 @@ public abstract class BaseNormsFormatTestCase extends BaseIndexFileFormatTestCas RandomIndexWriter w = new RandomIndexWriter(random(), dir); int numDocs = atLeast(500); List toDelete = new ArrayList<>(); - for(int i=0;i docValues = new ArrayList<>(); List docIDs = new ArrayList<>(); - for(int docID=0;docID= 0; + + if (max.compareTo(queryMin[dim]) < 0 || min.compareTo(queryMax[dim]) > 0) { + return Relation.CELL_OUTSIDE_QUERY; + } else if (min.compareTo(queryMin[dim]) < 0 + || max.compareTo(queryMax[dim]) > 0) { + crosses = true; + } + } + + if (crosses) { + return Relation.CELL_CROSSES_QUERY; + } else { + return Relation.CELL_INSIDE_QUERY; } } - - //System.out.println(" yes"); - hits.set(docBase+docID); - } - - @Override - public Relation compare(byte[] minPacked, byte[] maxPacked) { - boolean crosses = false; - for(int dim=0;dim= 0; - - if (max.compareTo(queryMin[dim]) < 0 || min.compareTo(queryMax[dim]) > 0) { - return Relation.CELL_OUTSIDE_QUERY; - } else if (min.compareTo(queryMin[dim]) < 0 || max.compareTo(queryMax[dim]) > 0) { - crosses = true; - } - } - - if (crosses) { - return Relation.CELL_CROSSES_QUERY; - } else { - return Relation.CELL_INSIDE_QUERY; - } - } - }); + }); } - for(int docID=0;docID 0) { expected = false; @@ -502,7 +523,7 @@ public abstract class BasePointsFormatTestCase extends BaseIndexFileFormatTestCa } } r.close(); - } + } } public void testRandomBinaryTiny() throws Exception { @@ -520,18 +541,19 @@ public abstract class BasePointsFormatTestCase extends BaseIndexFileFormatTestCa } private void doTestRandomBinary(int count) throws Exception { - int numDocs = TestUtil.nextInt(random(), count, count*2); + int numDocs = TestUtil.nextInt(random(), count, count * 2); int numBytesPerDim = TestUtil.nextInt(random(), 2, PointValues.MAX_NUM_BYTES); int numDataDims = TestUtil.nextInt(random(), 1, PointValues.MAX_INDEX_DIMENSIONS); int numIndexDims = TestUtil.nextInt(random(), 1, numDataDims); byte[][][] docValues = new byte[numDocs][][]; - for(int docID=0;docID 0) { + if (Arrays.compareUnsigned( + docValues[ord][dim], 0, numBytesPerDim, expectedMaxValues[dim], 0, numBytesPerDim) + > 0) { System.arraycopy(docValues[ord][dim], 0, expectedMaxValues[dim], 0, numBytesPerDim); } } @@ -637,7 +687,7 @@ public abstract class BasePointsFormatTestCase extends BaseIndexFileFormatTestCa } } w = new RandomIndexWriter(random(), dir, iwc); - addIndexesAt = TestUtil.nextInt(random(), 1, numValues-1); + addIndexesAt = TestUtil.nextInt(random(), 1, numValues - 1); } else { saveW = null; saveDir = null; @@ -652,7 +702,7 @@ public abstract class BasePointsFormatTestCase extends BaseIndexFileFormatTestCa Document doc = null; int lastID = -1; - for(int ord=0;ord 0) { - System.arraycopy(leafMaxValues, dim*numBytesPerDim, maxValues, dim*numBytesPerDim, numBytesPerDim); + if (Arrays.compareUnsigned( + leafMaxValues, + dim * numBytesPerDim, + dim * numBytesPerDim + numBytesPerDim, + maxValues, + dim * numBytesPerDim, + dim * numBytesPerDim + numBytesPerDim) + > 0) { + 
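// ---------------------------------------------------------------------------
// Editor's aside (not part of the patch): the cell/query relation logic the
// IntersectVisitor above implements, distilled into plain Java over longs (the
// test does the same thing over BigInteger and, later, packed bytes). A cell is
// OUTSIDE if any dimension's range misses the query range, INSIDE if every
// dimension is fully contained, and CROSSES otherwise. Names are illustrative.
final class RelateCell {
  enum Relation { CELL_INSIDE_QUERY, CELL_CROSSES_QUERY, CELL_OUTSIDE_QUERY }

  static Relation relate(long[] cellMin, long[] cellMax, long[] qMin, long[] qMax) {
    boolean crosses = false;
    for (int dim = 0; dim < cellMin.length; dim++) {
      if (cellMax[dim] < qMin[dim] || cellMin[dim] > qMax[dim]) {
        return Relation.CELL_OUTSIDE_QUERY; // disjoint in this dimension
      }
      if (cellMin[dim] < qMin[dim] || cellMax[dim] > qMax[dim]) {
        crosses = true; // overlaps but is not contained in this dimension
      }
    }
    return crosses ? Relation.CELL_CROSSES_QUERY : Relation.CELL_INSIDE_QUERY;
  }
}
// Note the asymmetry: OUTSIDE in any one dimension decides immediately, while
// INSIDE must hold in all dimensions at once.
// ---------------------------------------------------------------------------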
System.arraycopy( + leafMaxValues, + dim * numBytesPerDim, + maxValues, + dim * numBytesPerDim, + numBytesPerDim); } } } byte[] scratch = new byte[numBytesPerDim]; - for(int dim=0;dim 0) { + // System.out.println(" query_outside_cell"); + return Relation.CELL_OUTSIDE_QUERY; + } else if (Arrays.compareUnsigned( + minPacked, + dim * numBytesPerDim, + dim * numBytesPerDim + numBytesPerDim, + queryMin[dim], + 0, + numBytesPerDim) + < 0 + || Arrays.compareUnsigned( + maxPacked, + dim * numBytesPerDim, + dim * numBytesPerDim + numBytesPerDim, + queryMax[dim], + 0, + numBytesPerDim) + > 0) { + crosses = true; + } + } - @Override - public Relation compare(byte[] minPacked, byte[] maxPacked) { - boolean crosses = false; - //System.out.println("compare"); - for(int dim=0;dim 0) { - //System.out.println(" query_outside_cell"); - return Relation.CELL_OUTSIDE_QUERY; - } else if (Arrays.compareUnsigned(minPacked, dim * numBytesPerDim, dim * numBytesPerDim + numBytesPerDim, queryMin[dim], 0, numBytesPerDim) < 0 || - Arrays.compareUnsigned(maxPacked, dim * numBytesPerDim, dim * numBytesPerDim + numBytesPerDim, queryMax[dim], 0, numBytesPerDim) > 0) { - crosses = true; + if (crosses) { + // System.out.println(" query_crosses_cell"); + return Relation.CELL_CROSSES_QUERY; + } else { + // System.out.println(" cell_inside_query"); + return Relation.CELL_INSIDE_QUERY; } } - - if (crosses) { - //System.out.println(" query_crosses_cell"); - return Relation.CELL_CROSSES_QUERY; - } else { - //System.out.println(" cell_inside_query"); - return Relation.CELL_INSIDE_QUERY; - } - } - }); + }); } BitSet expected = new BitSet(); - for(int ord=0;ord 0) { + if (Arrays.compareUnsigned(x, 0, numBytesPerDim, queryMin[dim], 0, numBytesPerDim) < 0 + || Arrays.compareUnsigned(x, 0, numBytesPerDim, queryMax[dim], 0, numBytesPerDim) + > 0) { matches = false; break; } @@ -906,7 +1035,7 @@ public abstract class BasePointsFormatTestCase extends BaseIndexFileFormatTestCa int limit = Math.max(expected.length(), hits.length()); int failCount = 0; int successCount = 0; - for(int id=0;id termFreqs = new ConcurrentHashMap<>(); + final Map termFreqs = new ConcurrentHashMap<>(); final AtomicLong sumDocFreq = new AtomicLong(); final AtomicLong sumTotalTermFreq = new AtomicLong(); @@ -376,104 +422,58 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest // TODO: would be better to use / delegate to the current // Codec returned by getCodec() - iwc.setCodec(new FilterCodec(getCodec().getName(), getCodec()) { - @Override - public PostingsFormat postingsFormat() { + iwc.setCodec( + new FilterCodec(getCodec().getName(), getCodec()) { + @Override + public PostingsFormat postingsFormat() { - final PostingsFormat defaultPostingsFormat = delegate.postingsFormat(); + final PostingsFormat defaultPostingsFormat = delegate.postingsFormat(); - final Thread mainThread = Thread.currentThread(); + final Thread mainThread = Thread.currentThread(); - // A PF that counts up some stats and then in - // the end we verify the stats match what the - // final IndexReader says, just to exercise the - // new freedom of iterating the postings more - // than once at flush/merge: + // A PF that counts up some stats and then in + // the end we verify the stats match what the + // final IndexReader says, just to exercise the + // new freedom of iterating the postings more + // than once at flush/merge: - return new PostingsFormat(defaultPostingsFormat.getName()) { + return new PostingsFormat(defaultPostingsFormat.getName()) { - @Override - 
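// ---------------------------------------------------------------------------
// Editor's aside (not part of the patch): a standalone demo of the ranged
// unsigned comparison the test applies to packed per-dimension values. Each
// dimension occupies a fixed numBytesPerDim slice of one packed byte array, and
// Arrays.compareUnsigned(a, aFrom, aTo, b, bFrom, bTo) compares a single slice
// without copying it out first. Values below are illustrative.
import java.util.Arrays;

public class PackedCompareDemo {
  public static void main(String[] args) {
    int numBytesPerDim = 4;
    // Two dimensions packed back to back: dim 0 = 0x00000001, dim 1 = 0xFF000000.
    byte[] packed = {0, 0, 0, 1, (byte) 0xFF, 0, 0, 0};
    byte[] queryMax = {0, 0, 0, 2};
    int dim = 1;
    int from = dim * numBytesPerDim;
    int to = from + numBytesPerDim;
    // 0xFF compares as 255 unsigned, so dim 1 exceeds queryMax here:
    int cmp = Arrays.compareUnsigned(packed, from, to, queryMax, 0, numBytesPerDim);
    System.out.println(cmp > 0); // prints: true
  }
}
// Unsigned, big-endian byte order is what makes lexicographic byte comparison
// agree with numeric order on the packed values.
// ---------------------------------------------------------------------------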
public FieldsConsumer fieldsConsumer(final SegmentWriteState state) throws IOException { + @Override + public FieldsConsumer fieldsConsumer(final SegmentWriteState state) + throws IOException { - final FieldsConsumer fieldsConsumer = defaultPostingsFormat.fieldsConsumer(state); + final FieldsConsumer fieldsConsumer = defaultPostingsFormat.fieldsConsumer(state); - return new FieldsConsumer() { - @Override - public void write(Fields fields, NormsProducer norms) throws IOException { - fieldsConsumer.write(fields, norms); + return new FieldsConsumer() { + @Override + public void write(Fields fields, NormsProducer norms) throws IOException { + fieldsConsumer.write(fields, norms); - boolean isMerge = state.context.context == IOContext.Context.MERGE; + boolean isMerge = state.context.context == IOContext.Context.MERGE; - // We only use one thread for flushing - // in this test: - assert isMerge || Thread.currentThread() == mainThread; + // We only use one thread for flushing + // in this test: + assert isMerge || Thread.currentThread() == mainThread; - // We iterate the provided TermsEnum - // twice, so we excercise this new freedom - // with the inverted API; if - // addOnSecondPass is true, we add up - // term stats on the 2nd iteration: - boolean addOnSecondPass = random().nextBoolean(); + // We iterate the provided TermsEnum + // twice, so we excercise this new freedom + // with the inverted API; if + // addOnSecondPass is true, we add up + // term stats on the 2nd iteration: + boolean addOnSecondPass = random().nextBoolean(); - //System.out.println("write isMerge=" + isMerge + " 2ndPass=" + addOnSecondPass); + // System.out.println("write isMerge=" + isMerge + " 2ndPass=" + + // addOnSecondPass); - // Gather our own stats: - Terms terms = fields.terms("body"); - assert terms != null; + // Gather our own stats: + Terms terms = fields.terms("body"); + assert terms != null; - TermsEnum termsEnum = terms.iterator(); - PostingsEnum docs = null; - while(termsEnum.next() != null) { - BytesRef term = termsEnum.term(); - // TODO: also sometimes ask for payloads/offsets? 
- boolean noPositions = random().nextBoolean(); - if (noPositions) { - docs = termsEnum.postings(docs, PostingsEnum.FREQS); - } else { - docs = termsEnum.postings(null, PostingsEnum.POSITIONS); - } - int docFreq = 0; - long totalTermFreq = 0; - while (docs.nextDoc() != PostingsEnum.NO_MORE_DOCS) { - docFreq++; - totalTermFreq += docs.freq(); - int limit = TestUtil.nextInt(random(), 1, docs.freq()); - if (!noPositions) { - for (int i = 0; i < limit; i++) { - docs.nextPosition(); - } - } - } - - String termString = term.utf8ToString(); - - // During merge we should only see terms - // we had already seen during a - // previous flush: - assertTrue(isMerge==false || termFreqs.containsKey(termString)); - - if (isMerge == false) { - if (addOnSecondPass == false) { - TermFreqs tf = termFreqs.get(termString); - if (tf == null) { - tf = new TermFreqs(); - termFreqs.put(termString, tf); - } - tf.docFreq += docFreq; - tf.totalTermFreq += totalTermFreq; - sumDocFreq.addAndGet(docFreq); - sumTotalTermFreq.addAndGet(totalTermFreq); - } else if (termFreqs.containsKey(termString) == false) { - // Add placeholder (2nd pass will - // set its counts): - termFreqs.put(termString, new TermFreqs()); - } - } - } - - // Also test seeking the TermsEnum: - for(String term : termFreqs.keySet()) { - if (termsEnum.seekExact(new BytesRef(term))) { + TermsEnum termsEnum = terms.iterator(); + PostingsEnum docs = null; + while (termsEnum.next() != null) { + BytesRef term = termsEnum.term(); // TODO: also sometimes ask for payloads/offsets? boolean noPositions = random().nextBoolean(); if (noPositions) { @@ -481,7 +481,6 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest } else { docs = termsEnum.postings(null, PostingsEnum.POSITIONS); } - int docFreq = 0; long totalTermFreq = 0; while (docs.nextDoc() != PostingsEnum.NO_MORE_DOCS) { @@ -495,45 +494,96 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest } } - if (isMerge == false && addOnSecondPass) { - TermFreqs tf = termFreqs.get(term); - assert tf != null; - tf.docFreq += docFreq; - tf.totalTermFreq += totalTermFreq; - sumDocFreq.addAndGet(docFreq); - sumTotalTermFreq.addAndGet(totalTermFreq); + String termString = term.utf8ToString(); + + // During merge we should only see terms + // we had already seen during a + // previous flush: + assertTrue(isMerge == false || termFreqs.containsKey(termString)); + + if (isMerge == false) { + if (addOnSecondPass == false) { + TermFreqs tf = termFreqs.get(termString); + if (tf == null) { + tf = new TermFreqs(); + termFreqs.put(termString, tf); + } + tf.docFreq += docFreq; + tf.totalTermFreq += totalTermFreq; + sumDocFreq.addAndGet(docFreq); + sumTotalTermFreq.addAndGet(totalTermFreq); + } else if (termFreqs.containsKey(termString) == false) { + // Add placeholder (2nd pass will + // set its counts): + termFreqs.put(termString, new TermFreqs()); + } } + } - //System.out.println(" term=" + term + " docFreq=" + docFreq + " ttDF=" + termToDocFreq.get(term)); - assertTrue(docFreq <= termFreqs.get(term).docFreq); - assertTrue(totalTermFreq <= termFreqs.get(term).totalTermFreq); + // Also test seeking the TermsEnum: + for (String term : termFreqs.keySet()) { + if (termsEnum.seekExact(new BytesRef(term))) { + // TODO: also sometimes ask for payloads/offsets? 
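// ---------------------------------------------------------------------------
// Editor's aside (not part of the patch): a hedged sketch of the core
// stats-gathering loop the wrapped PostingsFormat performs above: walk every
// term, replay its postings, and tally docFreq/totalTermFreq so the totals can
// later be checked against what the final IndexReader reports. The real test
// additionally tracks a per-term TermFreqs map, randomizes position
// consumption, and sometimes defers the tally to a second pass; this sketch
// shows only the aggregate. Class and method names are mine.
import java.io.IOException;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;

final class TermStatsTally {
  /** Returns {sumDocFreq, sumTotalTermFreq} for the given terms. */
  static long[] tally(Terms terms) throws IOException {
    long sumDocFreq = 0, sumTotalTermFreq = 0;
    TermsEnum termsEnum = terms.iterator();
    PostingsEnum docs = null;
    while (termsEnum.next() != null) {
      docs = termsEnum.postings(docs, PostingsEnum.FREQS); // reuse the enum
      while (docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
        sumDocFreq++;
        sumTotalTermFreq += docs.freq();
      }
    }
    return new long[] {sumDocFreq, sumTotalTermFreq};
  }
}
// ---------------------------------------------------------------------------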
+ boolean noPositions = random().nextBoolean(); + if (noPositions) { + docs = termsEnum.postings(docs, PostingsEnum.FREQS); + } else { + docs = termsEnum.postings(null, PostingsEnum.POSITIONS); + } + + int docFreq = 0; + long totalTermFreq = 0; + while (docs.nextDoc() != PostingsEnum.NO_MORE_DOCS) { + docFreq++; + totalTermFreq += docs.freq(); + int limit = TestUtil.nextInt(random(), 1, docs.freq()); + if (!noPositions) { + for (int i = 0; i < limit; i++) { + docs.nextPosition(); + } + } + } + + if (isMerge == false && addOnSecondPass) { + TermFreqs tf = termFreqs.get(term); + assert tf != null; + tf.docFreq += docFreq; + tf.totalTermFreq += totalTermFreq; + sumDocFreq.addAndGet(docFreq); + sumTotalTermFreq.addAndGet(totalTermFreq); + } + + // System.out.println(" term=" + term + " docFreq=" + docFreq + " ttDF=" + + // termToDocFreq.get(term)); + assertTrue(docFreq <= termFreqs.get(term).docFreq); + assertTrue(totalTermFreq <= termFreqs.get(term).totalTermFreq); + } + } + + // Also test seekCeil + for (int iter = 0; iter < 10; iter++) { + BytesRef term = new BytesRef(TestUtil.randomRealisticUnicodeString(random())); + SeekStatus status = termsEnum.seekCeil(term); + if (status == SeekStatus.NOT_FOUND) { + assertTrue(term.compareTo(termsEnum.term()) < 0); + } } } - // Also test seekCeil - for(int iter=0;iter<10;iter++) { - BytesRef term = new BytesRef(TestUtil.randomRealisticUnicodeString(random())); - SeekStatus status = termsEnum.seekCeil(term); - if (status == SeekStatus.NOT_FOUND) { - assertTrue(term.compareTo(termsEnum.term()) < 0); - } + @Override + public void close() throws IOException { + fieldsConsumer.close(); } - } + }; + } - @Override - public void close() throws IOException { - fieldsConsumer.close(); - } - }; - } - - @Override - public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException { - return defaultPostingsFormat.fieldsProducer(state); - } - }; - } - }); + @Override + public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException { + return defaultPostingsFormat.fieldsProducer(state); + } + }; + } + }); RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); @@ -558,7 +608,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest TermsEnum termsEnum = terms.iterator(); long termCount = 0; boolean supportsOrds = true; - while(termsEnum.next() != null) { + while (termsEnum.next() != null) { BytesRef term = termsEnum.term(); assertEquals(termFreqs.get(term.utf8ToString()).docFreq, termsEnum.docFreq()); assertEquals(termFreqs.get(term.utf8ToString()).totalTermFreq, termsEnum.totalTermFreq()); @@ -581,14 +631,14 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest r.close(); dir.close(); } - + protected void assertReused(String field, PostingsEnum p1, PostingsEnum p2) { // if its not DirectPF, we should always reuse. This one has trouble. 
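// ---------------------------------------------------------------------------
// Editor's aside (not part of the patch): a hedged sketch of the seekCeil
// contract the loop above exercises. seekCeil positions the enum on the
// smallest term >= the target: FOUND means an exact hit, NOT_FOUND means the
// enum now sits on the next larger term (hence the test's compareTo check),
// and END means no term >= target exists. Names are illustrative.
import java.io.IOException;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;

final class SeekCeilDemo {
  static void check(TermsEnum termsEnum, BytesRef target) throws IOException {
    switch (termsEnum.seekCeil(target)) {
      case FOUND:
        assert target.equals(termsEnum.term()); // landed exactly on the target
        break;
      case NOT_FOUND:
        assert target.compareTo(termsEnum.term()) < 0; // landed just past it
        break;
      case END:
        break; // enum exhausted; term() is not defined in this state
    }
  }
}
// ---------------------------------------------------------------------------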
if (!"Direct".equals(TestUtil.getPostingsFormat(field))) { assertSame(p1, p2); } } - + public void testPostingsEnumDocsOnly() throws Exception { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); @@ -597,14 +647,14 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest doc.add(new StringField("foo", "bar", Field.Store.NO)); iw.addDocument(doc); DirectoryReader reader = DirectoryReader.open(iw); - + // sugar method (FREQS) PostingsEnum postings = getOnlyLeafReader(reader).postings(new Term("foo", "bar")); assertEquals(-1, postings.docID()); assertEquals(0, postings.nextDoc()); assertEquals(1, postings.freq()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings.nextDoc()); - + // termsenum reuse (FREQS) TermsEnum termsEnum = getOnlyLeafReader(reader).terms("foo").iterator(); termsEnum.seekExact(new BytesRef("bar")); @@ -616,9 +666,9 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest assertEquals(0, postings.nextDoc()); assertEquals(1, postings.freq()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings.nextDoc()); - + // asking for any flags: ok - for (int flag : new int[] { NONE, FREQS, POSITIONS, PAYLOADS, OFFSETS, ALL }) { + for (int flag : new int[] {NONE, FREQS, POSITIONS, PAYLOADS, OFFSETS, ALL}) { postings = termsEnum.postings(null, flag); assertEquals(-1, postings.docID()); assertEquals(0, postings.nextDoc()); @@ -634,20 +684,22 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest assertEquals(1, postings2.freq()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings2.nextDoc()); } - + iw.close(); reader.close(); dir.close(); } - + public void testPostingsEnumFreqs() throws Exception { Directory dir = newDirectory(); - IndexWriterConfig iwc = new IndexWriterConfig(new Analyzer() { - @Override - protected TokenStreamComponents createComponents(String fieldName) { - return new TokenStreamComponents(new MockTokenizer()); - } - }); + IndexWriterConfig iwc = + new IndexWriterConfig( + new Analyzer() { + @Override + protected TokenStreamComponents createComponents(String fieldName) { + return new TokenStreamComponents(new MockTokenizer()); + } + }); IndexWriter iw = new IndexWriter(dir, iwc); Document doc = new Document(); FieldType ft = new FieldType(TextField.TYPE_NOT_STORED); @@ -655,14 +707,14 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest doc.add(new Field("foo", "bar bar", ft)); iw.addDocument(doc); DirectoryReader reader = DirectoryReader.open(iw); - + // sugar method (FREQS) PostingsEnum postings = getOnlyLeafReader(reader).postings(new Term("foo", "bar")); assertEquals(-1, postings.docID()); assertEquals(0, postings.nextDoc()); assertEquals(2, postings.freq()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings.nextDoc()); - + // termsenum reuse (FREQS) TermsEnum termsEnum = getOnlyLeafReader(reader).terms("foo").iterator(); termsEnum.seekExact(new BytesRef("bar")); @@ -674,7 +726,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest assertEquals(0, postings2.nextDoc()); assertEquals(2, postings2.freq()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings2.nextDoc()); - + // asking for docs only: ok PostingsEnum docsOnly = termsEnum.postings(null, PostingsEnum.NONE); assertEquals(-1, docsOnly.docID()); @@ -692,9 +744,9 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest // we don't define what it is, but if its something else, we should look into 
it? assertTrue(docsOnly.freq() == 1 || docsOnly.freq() == 2); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsOnly2.nextDoc()); - + // asking for any flags: ok - for (int flag : new int[] { NONE, FREQS, POSITIONS, PAYLOADS, OFFSETS, ALL }) { + for (int flag : new int[] {NONE, FREQS, POSITIONS, PAYLOADS, OFFSETS, ALL}) { postings = termsEnum.postings(null, flag); assertEquals(-1, postings.docID()); assertEquals(0, postings.nextDoc()); @@ -714,33 +766,35 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest } assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings2.nextDoc()); } - + iw.close(); reader.close(); dir.close(); } - + public void testPostingsEnumPositions() throws Exception { Directory dir = newDirectory(); - IndexWriterConfig iwc = new IndexWriterConfig(new Analyzer() { - @Override - protected TokenStreamComponents createComponents(String fieldName) { - return new TokenStreamComponents(new MockTokenizer()); - } - }); + IndexWriterConfig iwc = + new IndexWriterConfig( + new Analyzer() { + @Override + protected TokenStreamComponents createComponents(String fieldName) { + return new TokenStreamComponents(new MockTokenizer()); + } + }); IndexWriter iw = new IndexWriter(dir, iwc); Document doc = new Document(); doc.add(new TextField("foo", "bar bar", Field.Store.NO)); iw.addDocument(doc); DirectoryReader reader = DirectoryReader.open(iw); - + // sugar method (FREQS) PostingsEnum postings = getOnlyLeafReader(reader).postings(new Term("foo", "bar")); assertEquals(-1, postings.docID()); assertEquals(0, postings.nextDoc()); assertEquals(2, postings.freq()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings.nextDoc()); - + // termsenum reuse (FREQS) TermsEnum termsEnum = getOnlyLeafReader(reader).terms("foo").iterator(); termsEnum.seekExact(new BytesRef("bar")); @@ -752,7 +806,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest assertEquals(0, postings2.nextDoc()); assertEquals(2, postings2.freq()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings2.nextDoc()); - + // asking for docs only: ok PostingsEnum docsOnly = termsEnum.postings(null, PostingsEnum.NONE); assertEquals(-1, docsOnly.docID()); @@ -770,9 +824,10 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest // we don't define what it is, but if its something else, we should look into it? 
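// ---------------------------------------------------------------------------
// Editor's aside (not part of the patch): a hedged sketch of the flag handshake
// these testPostingsEnum* methods verify. A caller asks termsEnum.postings(
// reuse, flags) only for the features it needs; the codec may hand back an enum
// with more capabilities than requested, but never fewer than were indexed.
// Asking for NONE on a freq-indexed field is legal, and freq() is then
// unspecified (the tests above accept either 1 or the true frequency). Class
// and method names are mine.
import java.io.IOException;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;

final class PostingsFlagsDemo {
  static void consumeWithEachFlag(TermsEnum termsEnum) throws IOException {
    for (int flag :
        new int[] {
          PostingsEnum.NONE, PostingsEnum.FREQS, PostingsEnum.POSITIONS,
          PostingsEnum.PAYLOADS, PostingsEnum.OFFSETS, PostingsEnum.ALL
        }) {
      PostingsEnum pe = termsEnum.postings(null, flag);
      while (pe.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
        pe.freq(); // safe to call, but only meaningful when FREQS+ was requested
      }
    }
  }
}
// ---------------------------------------------------------------------------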
assertTrue(docsOnly2.freq() == 1 || docsOnly2.freq() == 2); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsOnly2.nextDoc()); - + // asking for positions, ok - PostingsEnum docsAndPositionsEnum = getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.POSITIONS); + PostingsEnum docsAndPositionsEnum = + getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.POSITIONS); assertEquals(-1, docsAndPositionsEnum.docID()); assertEquals(0, docsAndPositionsEnum.nextDoc()); assertEquals(2, docsAndPositionsEnum.freq()); @@ -785,9 +840,10 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest assertEquals(-1, docsAndPositionsEnum.endOffset()); assertNull(docsAndPositionsEnum.getPayload()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum.nextDoc()); - + // now reuse the positions - PostingsEnum docsAndPositionsEnum2 = termsEnum.postings(docsAndPositionsEnum, PostingsEnum.POSITIONS); + PostingsEnum docsAndPositionsEnum2 = + termsEnum.postings(docsAndPositionsEnum, PostingsEnum.POSITIONS); assertReused("foo", docsAndPositionsEnum, docsAndPositionsEnum2); assertEquals(-1, docsAndPositionsEnum2.docID()); assertEquals(0, docsAndPositionsEnum2.nextDoc()); @@ -801,9 +857,10 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest assertEquals(-1, docsAndPositionsEnum2.endOffset()); assertNull(docsAndPositionsEnum2.getPayload()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - + // payloads, offsets, etc don't cause an error if they aren't there - docsAndPositionsEnum = getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.PAYLOADS); + docsAndPositionsEnum = + getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.PAYLOADS); assertNotNull(docsAndPositionsEnum); // but make sure they work assertEquals(-1, docsAndPositionsEnum.docID()); @@ -833,8 +890,9 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest assertEquals(-1, docsAndPositionsEnum2.endOffset()); assertNull(docsAndPositionsEnum2.getPayload()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - - docsAndPositionsEnum = getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.OFFSETS); + + docsAndPositionsEnum = + getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.OFFSETS); assertNotNull(docsAndPositionsEnum); assertEquals(-1, docsAndPositionsEnum.docID()); assertEquals(0, docsAndPositionsEnum.nextDoc()); @@ -863,8 +921,9 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest assertEquals(-1, docsAndPositionsEnum2.endOffset()); assertNull(docsAndPositionsEnum2.getPayload()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - - docsAndPositionsEnum = getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.ALL); + + docsAndPositionsEnum = + getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.ALL); assertNotNull(docsAndPositionsEnum); assertEquals(-1, docsAndPositionsEnum.docID()); assertEquals(0, docsAndPositionsEnum.nextDoc()); @@ -892,20 +951,22 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest assertEquals(-1, docsAndPositionsEnum2.endOffset()); assertNull(docsAndPositionsEnum2.getPayload()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - + iw.close(); reader.close(); dir.close(); } - + public void testPostingsEnumOffsets() throws 
Exception { Directory dir = newDirectory(); - IndexWriterConfig iwc = new IndexWriterConfig(new Analyzer() { - @Override - protected TokenStreamComponents createComponents(String fieldName) { - return new TokenStreamComponents(new MockTokenizer()); - } - }); + IndexWriterConfig iwc = + new IndexWriterConfig( + new Analyzer() { + @Override + protected TokenStreamComponents createComponents(String fieldName) { + return new TokenStreamComponents(new MockTokenizer()); + } + }); IndexWriter iw = new IndexWriter(dir, iwc); Document doc = new Document(); FieldType ft = new FieldType(TextField.TYPE_NOT_STORED); @@ -913,14 +974,14 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest doc.add(new Field("foo", "bar bar", ft)); iw.addDocument(doc); DirectoryReader reader = DirectoryReader.open(iw); - + // sugar method (FREQS) PostingsEnum postings = getOnlyLeafReader(reader).postings(new Term("foo", "bar")); assertEquals(-1, postings.docID()); assertEquals(0, postings.nextDoc()); assertEquals(2, postings.freq()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings.nextDoc()); - + // termsenum reuse (FREQS) TermsEnum termsEnum = getOnlyLeafReader(reader).terms("foo").iterator(); termsEnum.seekExact(new BytesRef("bar")); @@ -932,7 +993,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest assertEquals(0, postings2.nextDoc()); assertEquals(2, postings2.freq()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings2.nextDoc()); - + // asking for docs only: ok PostingsEnum docsOnly = termsEnum.postings(null, PostingsEnum.NONE); assertEquals(-1, docsOnly.docID()); @@ -950,9 +1011,10 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest // we don't define what it is, but if its something else, we should look into it? assertTrue(docsOnly2.freq() == 1 || docsOnly2.freq() == 2); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsOnly2.nextDoc()); - + // asking for positions, ok - PostingsEnum docsAndPositionsEnum = getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.POSITIONS); + PostingsEnum docsAndPositionsEnum = + getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.POSITIONS); assertEquals(-1, docsAndPositionsEnum.docID()); assertEquals(0, docsAndPositionsEnum.nextDoc()); assertEquals(2, docsAndPositionsEnum.freq()); @@ -967,27 +1029,31 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest assertTrue(docsAndPositionsEnum.endOffset() == -1 || docsAndPositionsEnum.endOffset() == 7); assertNull(docsAndPositionsEnum.getPayload()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum.nextDoc()); - + // now reuse the positions - PostingsEnum docsAndPositionsEnum2 = termsEnum.postings(docsAndPositionsEnum, PostingsEnum.POSITIONS); + PostingsEnum docsAndPositionsEnum2 = + termsEnum.postings(docsAndPositionsEnum, PostingsEnum.POSITIONS); assertReused("foo", docsAndPositionsEnum, docsAndPositionsEnum2); assertEquals(-1, docsAndPositionsEnum2.docID()); assertEquals(0, docsAndPositionsEnum2.nextDoc()); assertEquals(2, docsAndPositionsEnum2.freq()); assertEquals(0, docsAndPositionsEnum2.nextPosition()); // we don't define what it is, but if its something else, we should look into it? 
- assertTrue(docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 0); + assertTrue( + docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 0); assertTrue(docsAndPositionsEnum2.endOffset() == -1 || docsAndPositionsEnum2.endOffset() == 3); assertNull(docsAndPositionsEnum2.getPayload()); assertEquals(1, docsAndPositionsEnum2.nextPosition()); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 4); + assertTrue( + docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 4); assertTrue(docsAndPositionsEnum2.endOffset() == -1 || docsAndPositionsEnum2.endOffset() == 7); assertNull(docsAndPositionsEnum2.getPayload()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - + // payloads don't cause an error if they aren't there - docsAndPositionsEnum = getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.PAYLOADS); + docsAndPositionsEnum = + getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.PAYLOADS); assertNotNull(docsAndPositionsEnum); // but make sure they work assertEquals(-1, docsAndPositionsEnum.docID()); @@ -1012,17 +1078,20 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest assertEquals(2, docsAndPositionsEnum2.freq()); assertEquals(0, docsAndPositionsEnum2.nextPosition()); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 0); + assertTrue( + docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 0); assertTrue(docsAndPositionsEnum2.endOffset() == -1 || docsAndPositionsEnum2.endOffset() == 3); assertNull(docsAndPositionsEnum2.getPayload()); assertEquals(1, docsAndPositionsEnum2.nextPosition()); // we don't define what it is, but if its something else, we should look into it? 
- assertTrue(docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 4); + assertTrue( + docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 4); assertTrue(docsAndPositionsEnum2.endOffset() == -1 || docsAndPositionsEnum2.endOffset() == 7); assertNull(docsAndPositionsEnum2.getPayload()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - - docsAndPositionsEnum = getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.OFFSETS); + + docsAndPositionsEnum = + getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.OFFSETS); assertNotNull(docsAndPositionsEnum); assertEquals(-1, docsAndPositionsEnum.docID()); assertEquals(0, docsAndPositionsEnum.nextDoc()); @@ -1051,8 +1120,9 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest assertEquals(7, docsAndPositionsEnum2.endOffset()); assertNull(docsAndPositionsEnum2.getPayload()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - - docsAndPositionsEnum = getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.ALL); + + docsAndPositionsEnum = + getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.ALL); assertNotNull(docsAndPositionsEnum); assertEquals(-1, docsAndPositionsEnum.docID()); assertEquals(0, docsAndPositionsEnum.nextDoc()); @@ -1080,12 +1150,12 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest assertEquals(7, docsAndPositionsEnum2.endOffset()); assertNull(docsAndPositionsEnum2.getPayload()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - + iw.close(); reader.close(); dir.close(); } - + public void testPostingsEnumPayloads() throws Exception { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); @@ -1098,14 +1168,14 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest doc.add(new TextField("foo", new CannedTokenStream(token1, token2))); iw.addDocument(doc); DirectoryReader reader = DirectoryReader.open(iw); - + // sugar method (FREQS) PostingsEnum postings = getOnlyLeafReader(reader).postings(new Term("foo", "bar")); assertEquals(-1, postings.docID()); assertEquals(0, postings.nextDoc()); assertEquals(2, postings.freq()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings.nextDoc()); - + // termsenum reuse (FREQS) TermsEnum termsEnum = getOnlyLeafReader(reader).terms("foo").iterator(); termsEnum.seekExact(new BytesRef("bar")); @@ -1117,7 +1187,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest assertEquals(0, postings2.nextDoc()); assertEquals(2, postings2.freq()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings2.nextDoc()); - + // asking for docs only: ok PostingsEnum docsOnly = termsEnum.postings(null, PostingsEnum.NONE); assertEquals(-1, docsOnly.docID()); @@ -1135,9 +1205,10 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest // we don't define what it is, but if its something else, we should look into it? 
assertTrue(docsOnly2.freq() == 1 || docsOnly2.freq() == 2); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsOnly2.nextDoc()); - + // asking for positions, ok - PostingsEnum docsAndPositionsEnum = getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.POSITIONS); + PostingsEnum docsAndPositionsEnum = + getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.POSITIONS); assertEquals(-1, docsAndPositionsEnum.docID()); assertEquals(0, docsAndPositionsEnum.nextDoc()); assertEquals(2, docsAndPositionsEnum.freq()); @@ -1145,16 +1216,21 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest assertEquals(-1, docsAndPositionsEnum.startOffset()); assertEquals(-1, docsAndPositionsEnum.endOffset()); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum.getPayload() == null || new BytesRef("pay1").equals(docsAndPositionsEnum.getPayload())); + assertTrue( + docsAndPositionsEnum.getPayload() == null + || new BytesRef("pay1").equals(docsAndPositionsEnum.getPayload())); assertEquals(1, docsAndPositionsEnum.nextPosition()); assertEquals(-1, docsAndPositionsEnum.startOffset()); assertEquals(-1, docsAndPositionsEnum.endOffset()); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum.getPayload() == null || new BytesRef("pay2").equals(docsAndPositionsEnum.getPayload())); + assertTrue( + docsAndPositionsEnum.getPayload() == null + || new BytesRef("pay2").equals(docsAndPositionsEnum.getPayload())); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum.nextDoc()); - + // now reuse the positions - PostingsEnum docsAndPositionsEnum2 = termsEnum.postings(docsAndPositionsEnum, PostingsEnum.POSITIONS); + PostingsEnum docsAndPositionsEnum2 = + termsEnum.postings(docsAndPositionsEnum, PostingsEnum.POSITIONS); assertReused("foo", docsAndPositionsEnum, docsAndPositionsEnum2); assertEquals(-1, docsAndPositionsEnum2.docID()); assertEquals(0, docsAndPositionsEnum2.nextDoc()); @@ -1163,16 +1239,21 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest assertEquals(-1, docsAndPositionsEnum2.startOffset()); assertEquals(-1, docsAndPositionsEnum2.endOffset()); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum2.getPayload() == null || new BytesRef("pay1").equals(docsAndPositionsEnum2.getPayload())); + assertTrue( + docsAndPositionsEnum2.getPayload() == null + || new BytesRef("pay1").equals(docsAndPositionsEnum2.getPayload())); assertEquals(1, docsAndPositionsEnum2.nextPosition()); assertEquals(-1, docsAndPositionsEnum2.startOffset()); assertEquals(-1, docsAndPositionsEnum2.endOffset()); // we don't define what it is, but if its something else, we should look into it? 
- assertTrue(docsAndPositionsEnum2.getPayload() == null || new BytesRef("pay2").equals(docsAndPositionsEnum2.getPayload())); + assertTrue( + docsAndPositionsEnum2.getPayload() == null + || new BytesRef("pay2").equals(docsAndPositionsEnum2.getPayload())); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - + // payloads - docsAndPositionsEnum = getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.PAYLOADS); + docsAndPositionsEnum = + getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.PAYLOADS); assertNotNull(docsAndPositionsEnum); assertEquals(-1, docsAndPositionsEnum.docID()); assertEquals(0, docsAndPositionsEnum.nextDoc()); @@ -1201,8 +1282,9 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest assertEquals(-1, docsAndPositionsEnum2.endOffset()); assertEquals(new BytesRef("pay2"), docsAndPositionsEnum2.getPayload()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - - docsAndPositionsEnum = getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.OFFSETS); + + docsAndPositionsEnum = + getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.OFFSETS); assertNotNull(docsAndPositionsEnum); assertEquals(-1, docsAndPositionsEnum.docID()); assertEquals(0, docsAndPositionsEnum.nextDoc()); @@ -1211,12 +1293,16 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest assertEquals(-1, docsAndPositionsEnum.startOffset()); assertEquals(-1, docsAndPositionsEnum.endOffset()); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum.getPayload() == null || new BytesRef("pay1").equals(docsAndPositionsEnum.getPayload())); + assertTrue( + docsAndPositionsEnum.getPayload() == null + || new BytesRef("pay1").equals(docsAndPositionsEnum.getPayload())); assertEquals(1, docsAndPositionsEnum.nextPosition()); assertEquals(-1, docsAndPositionsEnum.startOffset()); assertEquals(-1, docsAndPositionsEnum.endOffset()); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum.getPayload() == null || new BytesRef("pay2").equals(docsAndPositionsEnum.getPayload())); + assertTrue( + docsAndPositionsEnum.getPayload() == null + || new BytesRef("pay2").equals(docsAndPositionsEnum.getPayload())); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum.nextDoc()); // reuse docsAndPositionsEnum2 = termsEnum.postings(docsAndPositionsEnum, PostingsEnum.OFFSETS); @@ -1228,15 +1314,20 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest assertEquals(-1, docsAndPositionsEnum2.startOffset()); assertEquals(-1, docsAndPositionsEnum2.endOffset()); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum2.getPayload() == null || new BytesRef("pay1").equals(docsAndPositionsEnum2.getPayload())); + assertTrue( + docsAndPositionsEnum2.getPayload() == null + || new BytesRef("pay1").equals(docsAndPositionsEnum2.getPayload())); assertEquals(1, docsAndPositionsEnum2.nextPosition()); assertEquals(-1, docsAndPositionsEnum2.startOffset()); assertEquals(-1, docsAndPositionsEnum2.endOffset()); // we don't define what it is, but if its something else, we should look into it? 
- assertTrue(docsAndPositionsEnum2.getPayload() == null || new BytesRef("pay2").equals(docsAndPositionsEnum2.getPayload())); + assertTrue( + docsAndPositionsEnum2.getPayload() == null + || new BytesRef("pay2").equals(docsAndPositionsEnum2.getPayload())); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - - docsAndPositionsEnum = getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.ALL); + + docsAndPositionsEnum = + getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.ALL); assertNotNull(docsAndPositionsEnum); assertEquals(-1, docsAndPositionsEnum.docID()); assertEquals(0, docsAndPositionsEnum.nextDoc()); @@ -1264,12 +1355,12 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest assertEquals(-1, docsAndPositionsEnum2.endOffset()); assertEquals(new BytesRef("pay2"), docsAndPositionsEnum2.getPayload()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - + iw.close(); reader.close(); dir.close(); } - + public void testPostingsEnumAll() throws Exception { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); @@ -1284,14 +1375,14 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest doc.add(new Field("foo", new CannedTokenStream(token1, token2), ft)); iw.addDocument(doc); DirectoryReader reader = DirectoryReader.open(iw); - + // sugar method (FREQS) PostingsEnum postings = getOnlyLeafReader(reader).postings(new Term("foo", "bar")); assertEquals(-1, postings.docID()); assertEquals(0, postings.nextDoc()); assertEquals(2, postings.freq()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings.nextDoc()); - + // termsenum reuse (FREQS) TermsEnum termsEnum = getOnlyLeafReader(reader).terms("foo").iterator(); termsEnum.seekExact(new BytesRef("bar")); @@ -1303,7 +1394,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest assertEquals(0, postings2.nextDoc()); assertEquals(2, postings2.freq()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings2.nextDoc()); - + // asking for docs only: ok PostingsEnum docsOnly = termsEnum.postings(null, PostingsEnum.NONE); assertEquals(-1, docsOnly.docID()); @@ -1321,9 +1412,10 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest // we don't define what it is, but if its something else, we should look into it? assertTrue(docsOnly2.freq() == 1 || docsOnly2.freq() == 2); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsOnly2.nextDoc()); - + // asking for positions, ok - PostingsEnum docsAndPositionsEnum = getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.POSITIONS); + PostingsEnum docsAndPositionsEnum = + getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.POSITIONS); assertEquals(-1, docsAndPositionsEnum.docID()); assertEquals(0, docsAndPositionsEnum.nextDoc()); assertEquals(2, docsAndPositionsEnum.freq()); @@ -1332,37 +1424,49 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest assertTrue(docsAndPositionsEnum.startOffset() == -1 || docsAndPositionsEnum.startOffset() == 0); assertTrue(docsAndPositionsEnum.endOffset() == -1 || docsAndPositionsEnum.endOffset() == 3); // we don't define what it is, but if its something else, we should look into it? 
- assertTrue(docsAndPositionsEnum.getPayload() == null || new BytesRef("pay1").equals(docsAndPositionsEnum.getPayload())); + assertTrue( + docsAndPositionsEnum.getPayload() == null + || new BytesRef("pay1").equals(docsAndPositionsEnum.getPayload())); assertEquals(1, docsAndPositionsEnum.nextPosition()); // we don't define what it is, but if its something else, we should look into it? assertTrue(docsAndPositionsEnum.startOffset() == -1 || docsAndPositionsEnum.startOffset() == 4); assertTrue(docsAndPositionsEnum.endOffset() == -1 || docsAndPositionsEnum.endOffset() == 7); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum.getPayload() == null || new BytesRef("pay2").equals(docsAndPositionsEnum.getPayload())); + assertTrue( + docsAndPositionsEnum.getPayload() == null + || new BytesRef("pay2").equals(docsAndPositionsEnum.getPayload())); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum.nextDoc()); - + // now reuse the positions - PostingsEnum docsAndPositionsEnum2 = termsEnum.postings(docsAndPositionsEnum, PostingsEnum.POSITIONS); + PostingsEnum docsAndPositionsEnum2 = + termsEnum.postings(docsAndPositionsEnum, PostingsEnum.POSITIONS); assertReused("foo", docsAndPositionsEnum, docsAndPositionsEnum2); assertEquals(-1, docsAndPositionsEnum2.docID()); assertEquals(0, docsAndPositionsEnum2.nextDoc()); assertEquals(2, docsAndPositionsEnum2.freq()); assertEquals(0, docsAndPositionsEnum2.nextPosition()); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 0); + assertTrue( + docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 0); assertTrue(docsAndPositionsEnum2.endOffset() == -1 || docsAndPositionsEnum2.endOffset() == 3); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum2.getPayload() == null || new BytesRef("pay1").equals(docsAndPositionsEnum2.getPayload())); + assertTrue( + docsAndPositionsEnum2.getPayload() == null + || new BytesRef("pay1").equals(docsAndPositionsEnum2.getPayload())); assertEquals(1, docsAndPositionsEnum2.nextPosition()); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 4); + assertTrue( + docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 4); assertTrue(docsAndPositionsEnum2.endOffset() == -1 || docsAndPositionsEnum2.endOffset() == 7); // we don't define what it is, but if its something else, we should look into it? 
- assertTrue(docsAndPositionsEnum2.getPayload() == null || new BytesRef("pay2").equals(docsAndPositionsEnum2.getPayload())); + assertTrue( + docsAndPositionsEnum2.getPayload() == null + || new BytesRef("pay2").equals(docsAndPositionsEnum2.getPayload())); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - + // payloads - docsAndPositionsEnum = getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.PAYLOADS); + docsAndPositionsEnum = + getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.PAYLOADS); assertNotNull(docsAndPositionsEnum); assertEquals(-1, docsAndPositionsEnum.docID()); assertEquals(0, docsAndPositionsEnum.nextDoc()); @@ -1386,17 +1490,20 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest assertEquals(2, docsAndPositionsEnum2.freq()); assertEquals(0, docsAndPositionsEnum2.nextPosition()); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 0); + assertTrue( + docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 0); assertTrue(docsAndPositionsEnum2.endOffset() == -1 || docsAndPositionsEnum2.endOffset() == 3); assertEquals(new BytesRef("pay1"), docsAndPositionsEnum2.getPayload()); assertEquals(1, docsAndPositionsEnum2.nextPosition()); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 4); + assertTrue( + docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 4); assertTrue(docsAndPositionsEnum2.endOffset() == -1 || docsAndPositionsEnum2.endOffset() == 7); assertEquals(new BytesRef("pay2"), docsAndPositionsEnum2.getPayload()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - - docsAndPositionsEnum = getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.OFFSETS); + + docsAndPositionsEnum = + getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.OFFSETS); assertNotNull(docsAndPositionsEnum); assertEquals(-1, docsAndPositionsEnum.docID()); assertEquals(0, docsAndPositionsEnum.nextDoc()); @@ -1405,12 +1512,16 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest assertEquals(0, docsAndPositionsEnum.startOffset()); assertEquals(3, docsAndPositionsEnum.endOffset()); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum.getPayload() == null || new BytesRef("pay1").equals(docsAndPositionsEnum.getPayload())); + assertTrue( + docsAndPositionsEnum.getPayload() == null + || new BytesRef("pay1").equals(docsAndPositionsEnum.getPayload())); assertEquals(1, docsAndPositionsEnum.nextPosition()); assertEquals(4, docsAndPositionsEnum.startOffset()); assertEquals(7, docsAndPositionsEnum.endOffset()); // we don't define what it is, but if its something else, we should look into it? 
- assertTrue(docsAndPositionsEnum.getPayload() == null || new BytesRef("pay2").equals(docsAndPositionsEnum.getPayload())); + assertTrue( + docsAndPositionsEnum.getPayload() == null + || new BytesRef("pay2").equals(docsAndPositionsEnum.getPayload())); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum.nextDoc()); // reuse docsAndPositionsEnum2 = termsEnum.postings(docsAndPositionsEnum, PostingsEnum.OFFSETS); @@ -1422,15 +1533,20 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest assertEquals(0, docsAndPositionsEnum2.startOffset()); assertEquals(3, docsAndPositionsEnum2.endOffset()); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum2.getPayload() == null || new BytesRef("pay1").equals(docsAndPositionsEnum2.getPayload())); + assertTrue( + docsAndPositionsEnum2.getPayload() == null + || new BytesRef("pay1").equals(docsAndPositionsEnum2.getPayload())); assertEquals(1, docsAndPositionsEnum2.nextPosition()); assertEquals(4, docsAndPositionsEnum2.startOffset()); assertEquals(7, docsAndPositionsEnum2.endOffset()); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum2.getPayload() == null || new BytesRef("pay2").equals(docsAndPositionsEnum2.getPayload())); + assertTrue( + docsAndPositionsEnum2.getPayload() == null + || new BytesRef("pay2").equals(docsAndPositionsEnum2.getPayload())); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - - docsAndPositionsEnum = getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.ALL); + + docsAndPositionsEnum = + getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.ALL); assertNotNull(docsAndPositionsEnum); assertEquals(-1, docsAndPositionsEnum.docID()); assertEquals(0, docsAndPositionsEnum.nextDoc()); @@ -1458,7 +1574,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest assertEquals(7, docsAndPositionsEnum2.endOffset()); assertEquals(new BytesRef("pay2"), docsAndPositionsEnum2.getPayload()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - + iw.close(); reader.close(); dir.close(); diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseSegmentInfoFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseSegmentInfoFormatTestCase.java index a3a5391055d..e86da77f1ed 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseSegmentInfoFormatTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseSegmentInfoFormatTestCase.java @@ -22,7 +22,6 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; - import org.apache.lucene.codecs.Codec; import org.apache.lucene.document.Document; import org.apache.lucene.document.StoredField; @@ -40,13 +39,11 @@ import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.Version; /** - * Abstract class to do basic tests for si format. - * NOTE: This test focuses on the si impl, nothing else. - * The [stretch] goal is for this test to be - * so thorough in testing a new si format that if this - * test passes, then all Lucene/Solr tests should also pass. Ie, - * if there is some bug in a given si Format that this - * test fails to catch then this test needs to be improved! */ + * Abstract class to do basic tests for si format. NOTE: This test focuses on the si impl, nothing + * else. 
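// ---------------------------------------------------------------------------
// Editor's aside (not part of the patch): a hedged sketch of the per-position
// consumption pattern the long assertion walls above pin down, assuming the
// enum was opened with at least PostingsEnum.POSITIONS. freq() bounds how many
// nextPosition() calls are legal per document, and startOffset()/endOffset()/
// getPayload() report -1/null when that data was not indexed, rather than
// throwing. Names are illustrative.
import java.io.IOException;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.BytesRef;

final class PositionConsumer {
  static void consume(PostingsEnum pe) throws IOException {
    while (pe.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
      int freq = pe.freq();
      for (int i = 0; i < freq; i++) { // at most freq() positions per doc
        int pos = pe.nextPosition();
        assert pos >= 0; // positions are non-negative when indexed
        int start = pe.startOffset(); // -1 when offsets were not indexed
        int end = pe.endOffset();
        assert (start == -1 && end == -1) || start <= end;
        BytesRef payload = pe.getPayload(); // null when no payload at this position
      }
    }
  }
}
// ---------------------------------------------------------------------------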
The [stretch] goal is for this test to be so thorough in testing a new si format that if + * this test passes, then all Lucene/Solr tests should also pass. Ie, if there is some bug in a + * given si Format that this test fails to catch then this test needs to be improved! + */ public abstract class BaseSegmentInfoFormatTestCase extends BaseIndexFileFormatTestCase { /** Whether this format records min versions. */ @@ -59,110 +56,183 @@ public abstract class BaseSegmentInfoFormatTestCase extends BaseIndexFileFormatT Directory dir = newDirectory(); Codec codec = getCodec(); byte id[] = StringHelper.randomId(); - SegmentInfo info = new SegmentInfo(dir, getVersions()[0], getVersions()[0], "_123", 1, false, codec, - Collections.emptyMap(), id, Collections.emptyMap(), null); + SegmentInfo info = + new SegmentInfo( + dir, + getVersions()[0], + getVersions()[0], + "_123", + 1, + false, + codec, + Collections.emptyMap(), + id, + Collections.emptyMap(), + null); info.setFiles(Collections.emptySet()); codec.segmentInfoFormat().write(dir, info, IOContext.DEFAULT); SegmentInfo info2 = codec.segmentInfoFormat().read(dir, "_123", id, IOContext.DEFAULT); assertEquals(info.files(), info2.files()); dir.close(); } - + /** Tests SI writer adds itself to files... */ public void testAddsSelfToFiles() throws Exception { Directory dir = newDirectory(); Codec codec = getCodec(); byte id[] = StringHelper.randomId(); - SegmentInfo info = new SegmentInfo(dir, getVersions()[0], getVersions()[0], "_123", 1, false, codec, - Collections.emptyMap(), id, Collections.emptyMap(), null); + SegmentInfo info = + new SegmentInfo( + dir, + getVersions()[0], + getVersions()[0], + "_123", + 1, + false, + codec, + Collections.emptyMap(), + id, + Collections.emptyMap(), + null); Set originalFiles = Collections.singleton("_123.a"); info.setFiles(originalFiles); codec.segmentInfoFormat().write(dir, info, IOContext.DEFAULT); - + Set modifiedFiles = info.files(); assertTrue(modifiedFiles.containsAll(originalFiles)); - assertTrue("did you forget to add yourself to files()", modifiedFiles.size() > originalFiles.size()); - + assertTrue( + "did you forget to add yourself to files()", modifiedFiles.size() > originalFiles.size()); + SegmentInfo info2 = codec.segmentInfoFormat().read(dir, "_123", id, IOContext.DEFAULT); assertEquals(info.files(), info2.files()); // files set should be immutable - expectThrows(UnsupportedOperationException.class, () -> { - info2.files().add("bogus"); - }); + expectThrows( + UnsupportedOperationException.class, + () -> { + info2.files().add("bogus"); + }); dir.close(); } - + /** Test diagnostics map */ public void testDiagnostics() throws Exception { Directory dir = newDirectory(); Codec codec = getCodec(); byte id[] = StringHelper.randomId(); - Map diagnostics = new HashMap<>(); + Map diagnostics = new HashMap<>(); diagnostics.put("key1", "value1"); diagnostics.put("key2", "value2"); - SegmentInfo info = new SegmentInfo(dir, getVersions()[0], getVersions()[0], "_123", 1, false, codec, - diagnostics, id, Collections.emptyMap(), null); + SegmentInfo info = + new SegmentInfo( + dir, + getVersions()[0], + getVersions()[0], + "_123", + 1, + false, + codec, + diagnostics, + id, + Collections.emptyMap(), + null); info.setFiles(Collections.emptySet()); codec.segmentInfoFormat().write(dir, info, IOContext.DEFAULT); SegmentInfo info2 = codec.segmentInfoFormat().read(dir, "_123", id, IOContext.DEFAULT); assertEquals(diagnostics, info2.getDiagnostics()); // diagnostics map should be immutable - 
expectThrows(UnsupportedOperationException.class, () -> { - info2.getDiagnostics().put("bogus", "bogus"); - }); + expectThrows( + UnsupportedOperationException.class, + () -> { + info2.getDiagnostics().put("bogus", "bogus"); + }); dir.close(); } - + /** Test attributes map */ public void testAttributes() throws Exception { Directory dir = newDirectory(); Codec codec = getCodec(); byte id[] = StringHelper.randomId(); - Map<String,String> attributes = new HashMap<>(); + Map<String, String> attributes = new HashMap<>(); attributes.put("key1", "value1"); attributes.put("key2", "value2"); - SegmentInfo info = new SegmentInfo(dir, getVersions()[0], getVersions()[0], "_123", 1, false, codec, - Collections.emptyMap(), id, attributes, null); + SegmentInfo info = + new SegmentInfo( + dir, + getVersions()[0], + getVersions()[0], + "_123", + 1, + false, + codec, + Collections.emptyMap(), + id, + attributes, + null); info.setFiles(Collections.emptySet()); codec.segmentInfoFormat().write(dir, info, IOContext.DEFAULT); SegmentInfo info2 = codec.segmentInfoFormat().read(dir, "_123", id, IOContext.DEFAULT); assertEquals(attributes, info2.getAttributes()); - + // attributes map should be immutable - expectThrows(UnsupportedOperationException.class, () -> { - info2.getAttributes().put("bogus", "bogus"); - }); + expectThrows( + UnsupportedOperationException.class, + () -> { + info2.getAttributes().put("bogus", "bogus"); + }); dir.close(); } - + /** Test unique ID */ public void testUniqueID() throws Exception { Codec codec = getCodec(); Directory dir = newDirectory(); byte id[] = StringHelper.randomId(); - SegmentInfo info = new SegmentInfo(dir, getVersions()[0], getVersions()[0], "_123", 1, false, codec, - Collections.emptyMap(), id, Collections.emptyMap(), null); + SegmentInfo info = + new SegmentInfo( + dir, + getVersions()[0], + getVersions()[0], + "_123", + 1, + false, + codec, + Collections.emptyMap(), + id, + Collections.emptyMap(), + null); info.setFiles(Collections.emptySet()); codec.segmentInfoFormat().write(dir, info, IOContext.DEFAULT); SegmentInfo info2 = codec.segmentInfoFormat().read(dir, "_123", id, IOContext.DEFAULT); assertIDEquals(id, info2.getId()); dir.close(); } - + /** Test versions */ public void testVersions() throws Exception { Codec codec = getCodec(); for (Version v : getVersions()) { - for (Version minV : new Version[] { v, null}) { + for (Version minV : new Version[] {v, null}) { Directory dir = newDirectory(); byte id[] = StringHelper.randomId(); - SegmentInfo info = new SegmentInfo(dir, v, minV, "_123", 1, false, codec, - Collections.emptyMap(), id, Collections.emptyMap(), null); + SegmentInfo info = + new SegmentInfo( + dir, + v, + minV, + "_123", + 1, + false, + codec, + Collections.emptyMap(), + id, + Collections.emptyMap(), + null); info.setFiles(Collections.emptySet()); codec.segmentInfoFormat().write(dir, info, IOContext.DEFAULT); SegmentInfo info2 = codec.segmentInfoFormat().read(dir, "_123", id, IOContext.DEFAULT); @@ -184,58 +254,71 @@ public abstract class BaseSegmentInfoFormatTestCase extends BaseIndexFileFormatT private SortField randomIndexSortField() { boolean reversed = random().nextBoolean(); SortField sortField; - switch(random().nextInt(10)) { + switch (random().nextInt(10)) { case 0: - sortField = new SortField(TestUtil.randomSimpleString(random()), SortField.Type.INT, reversed); + sortField = + new SortField(TestUtil.randomSimpleString(random()), SortField.Type.INT, reversed); if (random().nextBoolean()) { sortField.setMissingValue(random().nextInt()); } break; case 1: - sortField = new 
SortedNumericSortField(TestUtil.randomSimpleString(random()), SortField.Type.INT, reversed); + sortField = + new SortedNumericSortField( + TestUtil.randomSimpleString(random()), SortField.Type.INT, reversed); if (random().nextBoolean()) { sortField.setMissingValue(random().nextInt()); } break; case 2: - sortField = new SortField(TestUtil.randomSimpleString(random()), SortField.Type.LONG, reversed); + sortField = + new SortField(TestUtil.randomSimpleString(random()), SortField.Type.LONG, reversed); if (random().nextBoolean()) { sortField.setMissingValue(random().nextLong()); } break; case 3: - sortField = new SortedNumericSortField(TestUtil.randomSimpleString(random()), SortField.Type.LONG, reversed); + sortField = + new SortedNumericSortField( + TestUtil.randomSimpleString(random()), SortField.Type.LONG, reversed); if (random().nextBoolean()) { sortField.setMissingValue(random().nextLong()); } break; case 4: - sortField = new SortField(TestUtil.randomSimpleString(random()), SortField.Type.FLOAT, reversed); + sortField = + new SortField(TestUtil.randomSimpleString(random()), SortField.Type.FLOAT, reversed); if (random().nextBoolean()) { sortField.setMissingValue(random().nextFloat()); } break; case 5: - sortField = new SortedNumericSortField(TestUtil.randomSimpleString(random()), SortField.Type.FLOAT, reversed); + sortField = + new SortedNumericSortField( + TestUtil.randomSimpleString(random()), SortField.Type.FLOAT, reversed); if (random().nextBoolean()) { sortField.setMissingValue(random().nextFloat()); } break; case 6: - sortField = new SortField(TestUtil.randomSimpleString(random()), SortField.Type.DOUBLE, reversed); + sortField = + new SortField(TestUtil.randomSimpleString(random()), SortField.Type.DOUBLE, reversed); if (random().nextBoolean()) { sortField.setMissingValue(random().nextDouble()); } break; case 7: - sortField = new SortedNumericSortField(TestUtil.randomSimpleString(random()), SortField.Type.DOUBLE, reversed); + sortField = + new SortedNumericSortField( + TestUtil.randomSimpleString(random()), SortField.Type.DOUBLE, reversed); if (random().nextBoolean()) { sortField.setMissingValue(random().nextDouble()); } break; case 8: - sortField = new SortField(TestUtil.randomSimpleString(random()), SortField.Type.STRING, reversed); + sortField = + new SortField(TestUtil.randomSimpleString(random()), SortField.Type.STRING, reversed); if (random().nextBoolean()) { sortField.setMissingValue(SortField.STRING_LAST); } @@ -274,8 +357,19 @@ public abstract class BaseSegmentInfoFormatTestCase extends BaseIndexFileFormatT Directory dir = newDirectory(); Codec codec = getCodec(); byte id[] = StringHelper.randomId(); - SegmentInfo info = new SegmentInfo(dir, getVersions()[0], getVersions()[0], "_123", 1, false, codec, - Collections.emptyMap(), id, Collections.emptyMap(), sort); + SegmentInfo info = + new SegmentInfo( + dir, + getVersions()[0], + getVersions()[0], + "_123", + 1, + false, + codec, + Collections.emptyMap(), + id, + Collections.emptyMap(), + sort); info.setFiles(Collections.emptySet()); codec.segmentInfoFormat().write(dir, info, IOContext.DEFAULT); SegmentInfo info2 = codec.segmentInfoFormat().read(dir, "_123", id, IOContext.DEFAULT); @@ -284,135 +378,191 @@ public abstract class BaseSegmentInfoFormatTestCase extends BaseIndexFileFormatT } } - /** - * Test segment infos write that hits exception immediately on open. - * make sure we get our exception back, no file handle leaks, etc. + /** + * Test segment infos write that hits exception immediately on open. 
make sure we get our + * exception back, no file handle leaks, etc. */ public void testExceptionOnCreateOutput() throws Exception { - Failure fail = new Failure() { - @Override - public void eval(MockDirectoryWrapper dir) throws IOException { - if (doFail && callStackContainsAnyOf("createOutput")) { - throw new FakeIOException(); - } - } - }; - + Failure fail = + new Failure() { + @Override + public void eval(MockDirectoryWrapper dir) throws IOException { + if (doFail && callStackContainsAnyOf("createOutput")) { + throw new FakeIOException(); + } + } + }; + MockDirectoryWrapper dir = newMockDirectory(); dir.failOn(fail); Codec codec = getCodec(); byte id[] = StringHelper.randomId(); - SegmentInfo info = new SegmentInfo(dir, getVersions()[0], getVersions()[0], "_123", 1, false, codec, - Collections.emptyMap(), id, Collections.emptyMap(), null); + SegmentInfo info = + new SegmentInfo( + dir, + getVersions()[0], + getVersions()[0], + "_123", + 1, + false, + codec, + Collections.emptyMap(), + id, + Collections.emptyMap(), + null); info.setFiles(Collections.emptySet()); - + fail.setDoFail(); - expectThrows(FakeIOException.class, () -> { - codec.segmentInfoFormat().write(dir, info, IOContext.DEFAULT); - }); - fail.clearDoFail(); - - dir.close(); - } - - /** - * Test segment infos write that hits exception on close. - * make sure we get our exception back, no file handle leaks, etc. - */ - public void testExceptionOnCloseOutput() throws Exception { - Failure fail = new Failure() { - @Override - public void eval(MockDirectoryWrapper dir) throws IOException { - if (doFail && callStackContainsAnyOf("close")) { - throw new FakeIOException(); - } - } - }; - - MockDirectoryWrapper dir = newMockDirectory(); - dir.failOn(fail); - Codec codec = getCodec(); - byte id[] = StringHelper.randomId(); - SegmentInfo info = new SegmentInfo(dir, getVersions()[0], getVersions()[0], "_123", 1, false, codec, - Collections.emptyMap(), id, Collections.emptyMap(), null); - info.setFiles(Collections.emptySet()); - - fail.setDoFail(); - expectThrows(FakeIOException.class, () -> { - codec.segmentInfoFormat().write(dir, info, IOContext.DEFAULT); - }); - fail.clearDoFail(); - - dir.close(); - } - - /** - * Test segment infos read that hits exception immediately on open. - * make sure we get our exception back, no file handle leaks, etc. - */ - public void testExceptionOnOpenInput() throws Exception { - Failure fail = new Failure() { - @Override - public void eval(MockDirectoryWrapper dir) throws IOException { - if (doFail && callStackContainsAnyOf("openInput")) { - throw new FakeIOException(); - } - } - }; - - MockDirectoryWrapper dir = newMockDirectory(); - dir.failOn(fail); - Codec codec = getCodec(); - byte id[] = StringHelper.randomId(); - SegmentInfo info = new SegmentInfo(dir, getVersions()[0], getVersions()[0], "_123", 1, false, codec, - Collections.emptyMap(), id, Collections.emptyMap(), null); - info.setFiles(Collections.emptySet()); - codec.segmentInfoFormat().write(dir, info, IOContext.DEFAULT); - - fail.setDoFail(); - expectThrows(FakeIOException.class, () -> { - codec.segmentInfoFormat().read(dir, "_123", id, IOContext.DEFAULT); - }); - fail.clearDoFail(); - - dir.close(); - } - - /** - * Test segment infos read that hits exception on close - * make sure we get our exception back, no file handle leaks, etc. 
- */ - public void testExceptionOnCloseInput() throws Exception { - Failure fail = new Failure() { - @Override - public void eval(MockDirectoryWrapper dir) throws IOException { - if (doFail && callStackContainsAnyOf("close")) { - throw new FakeIOException(); - } - } - }; - - MockDirectoryWrapper dir = newMockDirectory(); - dir.failOn(fail); - Codec codec = getCodec(); - byte id[] = StringHelper.randomId(); - SegmentInfo info = new SegmentInfo(dir, getVersions()[0], getVersions()[0], "_123", 1, false, codec, - Collections.emptyMap(), id, Collections.emptyMap(), null); - info.setFiles(Collections.emptySet()); - codec.segmentInfoFormat().write(dir, info, IOContext.DEFAULT); - - fail.setDoFail(); - expectThrows(FakeIOException.class, () -> { - codec.segmentInfoFormat().read(dir, "_123", id, IOContext.DEFAULT); - }); + expectThrows( + FakeIOException.class, + () -> { + codec.segmentInfoFormat().write(dir, info, IOContext.DEFAULT); + }); fail.clearDoFail(); dir.close(); } - - /** - * Sets some otherwise hard-to-test properties: - * random segment names, ID values, document count, etc and round-trips + + /** + * Test segment infos write that hits exception on close. make sure we get our exception back, no + * file handle leaks, etc. + */ + public void testExceptionOnCloseOutput() throws Exception { + Failure fail = + new Failure() { + @Override + public void eval(MockDirectoryWrapper dir) throws IOException { + if (doFail && callStackContainsAnyOf("close")) { + throw new FakeIOException(); + } + } + }; + + MockDirectoryWrapper dir = newMockDirectory(); + dir.failOn(fail); + Codec codec = getCodec(); + byte id[] = StringHelper.randomId(); + SegmentInfo info = + new SegmentInfo( + dir, + getVersions()[0], + getVersions()[0], + "_123", + 1, + false, + codec, + Collections.emptyMap(), + id, + Collections.emptyMap(), + null); + info.setFiles(Collections.emptySet()); + + fail.setDoFail(); + expectThrows( + FakeIOException.class, + () -> { + codec.segmentInfoFormat().write(dir, info, IOContext.DEFAULT); + }); + fail.clearDoFail(); + + dir.close(); + } + + /** + * Test segment infos read that hits exception immediately on open. make sure we get our exception + * back, no file handle leaks, etc. + */ + public void testExceptionOnOpenInput() throws Exception { + Failure fail = + new Failure() { + @Override + public void eval(MockDirectoryWrapper dir) throws IOException { + if (doFail && callStackContainsAnyOf("openInput")) { + throw new FakeIOException(); + } + } + }; + + MockDirectoryWrapper dir = newMockDirectory(); + dir.failOn(fail); + Codec codec = getCodec(); + byte id[] = StringHelper.randomId(); + SegmentInfo info = + new SegmentInfo( + dir, + getVersions()[0], + getVersions()[0], + "_123", + 1, + false, + codec, + Collections.emptyMap(), + id, + Collections.emptyMap(), + null); + info.setFiles(Collections.emptySet()); + codec.segmentInfoFormat().write(dir, info, IOContext.DEFAULT); + + fail.setDoFail(); + expectThrows( + FakeIOException.class, + () -> { + codec.segmentInfoFormat().read(dir, "_123", id, IOContext.DEFAULT); + }); + fail.clearDoFail(); + + dir.close(); + } + + /** + * Test segment infos read that hits exception on close make sure we get our exception back, no + * file handle leaks, etc. 
+ */ + public void testExceptionOnCloseInput() throws Exception { + Failure fail = + new Failure() { + @Override + public void eval(MockDirectoryWrapper dir) throws IOException { + if (doFail && callStackContainsAnyOf("close")) { + throw new FakeIOException(); + } + } + }; + + MockDirectoryWrapper dir = newMockDirectory(); + dir.failOn(fail); + Codec codec = getCodec(); + byte id[] = StringHelper.randomId(); + SegmentInfo info = + new SegmentInfo( + dir, + getVersions()[0], + getVersions()[0], + "_123", + 1, + false, + codec, + Collections.emptyMap(), + id, + Collections.emptyMap(), + null); + info.setFiles(Collections.emptySet()); + codec.segmentInfoFormat().write(dir, info, IOContext.DEFAULT); + + fail.setDoFail(); + expectThrows( + FakeIOException.class, + () -> { + codec.segmentInfoFormat().read(dir, "_123", id, IOContext.DEFAULT); + }); + fail.clearDoFail(); + + dir.close(); + } + + /** + * Sets some otherwise hard-to-test properties: random segment names, ID values, document count, + * etc., and round-trips */ public void testRandom() throws Exception { Codec codec = getCodec(); @@ -421,7 +571,13 @@ public abstract class BaseSegmentInfoFormatTestCase extends BaseIndexFileFormatT Directory dir = newDirectory(); Version version = versions[random().nextInt(versions.length)]; long randomSegmentIndex = Math.abs(random().nextLong()); - String name = "_" + Long.toString(randomSegmentIndex != Long.MIN_VALUE ? randomSegmentIndex : random().nextInt(Integer.MAX_VALUE), Character.MAX_RADIX); + String name = + "_" + + Long.toString( + randomSegmentIndex != Long.MIN_VALUE + ? randomSegmentIndex + : random().nextInt(Integer.MAX_VALUE), + Character.MAX_RADIX); int docCount = TestUtil.nextInt(random(), 1, IndexWriter.MAX_DOCS); boolean isCompoundFile = random().nextBoolean(); Set<String> files = new HashSet<>(); @@ -431,32 +587,44 @@ public abstract class BaseSegmentInfoFormatTestCase extends BaseIndexFileFormatT files.add(file); dir.createOutput(file, IOContext.DEFAULT).close(); } - Map<String,String> diagnostics = new HashMap<>(); + Map<String, String> diagnostics = new HashMap<>(); int numDiags = random().nextInt(10); for (int j = 0; j < numDiags; j++) { - diagnostics.put(TestUtil.randomUnicodeString(random()), - TestUtil.randomUnicodeString(random())); + diagnostics.put( + TestUtil.randomUnicodeString(random()), TestUtil.randomUnicodeString(random())); } byte id[] = new byte[StringHelper.ID_LENGTH]; random().nextBytes(id); - - Map<String,String> attributes = new HashMap<>(); + + Map<String, String> attributes = new HashMap<>(); int numAttributes = random().nextInt(10); for (int j = 0; j < numAttributes; j++) { - attributes.put(TestUtil.randomUnicodeString(random()), - TestUtil.randomUnicodeString(random())); + attributes.put( + TestUtil.randomUnicodeString(random()), TestUtil.randomUnicodeString(random())); } - - SegmentInfo info = new SegmentInfo(dir, version, null, name, docCount, isCompoundFile, codec, diagnostics, id, attributes, null); + + SegmentInfo info = + new SegmentInfo( + dir, + version, + null, + name, + docCount, + isCompoundFile, + codec, + diagnostics, + id, + attributes, + null); info.setFiles(files); codec.segmentInfoFormat().write(dir, info, IOContext.DEFAULT); SegmentInfo info2 = codec.segmentInfoFormat().read(dir, name, id, IOContext.DEFAULT); assertEquals(info, info2); - + dir.close(); } } - + protected final void assertEquals(SegmentInfo expected, SegmentInfo actual) { assertSame(expected.dir, actual.dir); assertEquals(expected.name, actual.name); @@ -470,19 +638,20 @@ public abstract class BaseSegmentInfoFormatTestCase extends 
BaseIndexFileFormatT assertEquals(expected.getVersion(), actual.getVersion()); assertEquals(expected.getAttributes(), actual.getAttributes()); } - + /** Returns the versions this SI should test */ protected abstract Version[] getVersions(); - - /** - * assert that unique id is equal. + + /** + * assert that unique id is equal. + * * @deprecated only exists to be overridden by old codecs that didnt support this */ @Deprecated protected void assertIDEquals(byte expected[], byte actual[]) { assertArrayEquals(expected, actual); } - + @Override protected void addRandomFields(Document doc) { doc.add(new StoredField("foobar", TestUtil.randomSimpleString(random()))); diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java index 0c975cf9c78..d66bf88a109 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java @@ -16,6 +16,9 @@ */ package org.apache.lucene.index; +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; +import com.carrotsearch.randomizedtesting.generators.RandomPicks; +import com.carrotsearch.randomizedtesting.generators.RandomStrings; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.util.ArrayList; @@ -27,16 +30,15 @@ import java.util.List; import java.util.Map; import java.util.Random; import java.util.concurrent.atomic.AtomicReference; - import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.StoredFieldsFormat; import org.apache.lucene.codecs.simpletext.SimpleTextCodec; import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field.Store; import org.apache.lucene.document.Field; -import org.apache.lucene.document.IntPoint; +import org.apache.lucene.document.Field.Store; import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.IntPoint; import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.StoredField; import org.apache.lucene.document.StringField; @@ -47,20 +49,17 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; import org.apache.lucene.store.MMapDirectory; -import org.apache.lucene.store.MockDirectoryWrapper.Throttling; import org.apache.lucene.store.MockDirectoryWrapper; +import org.apache.lucene.store.MockDirectoryWrapper.Throttling; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.TestUtil; -import com.carrotsearch.randomizedtesting.generators.RandomNumbers; -import com.carrotsearch.randomizedtesting.generators.RandomPicks; -import com.carrotsearch.randomizedtesting.generators.RandomStrings; - /** - * Base class aiming at testing {@link StoredFieldsFormat stored fields formats}. - * To test a new format, all you need is to register a new {@link Codec} which - * uses it and extend this class and override {@link #getCodec()}. + * Base class aiming at testing {@link StoredFieldsFormat stored fields formats}. To test a new + * format, all you need is to register a new {@link Codec} which uses it and extend this class and + * override {@link #getCodec()}. 
+ * * @lucene.experimental */ public abstract class BaseStoredFieldsFormatTestCase extends BaseIndexFileFormatTestCase { @@ -76,8 +75,13 @@ public abstract class BaseStoredFieldsFormatTestCase extends BaseIndexFileFormat public void testRandomStoredFields() throws IOException { Directory dir = newDirectory(); Random rand = random(); - RandomIndexWriter w = new RandomIndexWriter(rand, dir, newIndexWriterConfig(new MockAnalyzer(random())).setMaxBufferedDocs(TestUtil.nextInt(rand, 5, 20))); - //w.w.setNoCFSRatio(0.0); + RandomIndexWriter w = + new RandomIndexWriter( + rand, + dir, + newIndexWriterConfig(new MockAnalyzer(random())) + .setMaxBufferedDocs(TestUtil.nextInt(rand, 5, 20))); + // w.w.setNoCFSRatio(0.0); final int docCount = atLeast(200); final int fieldCount = TestUtil.nextInt(rand, 1, 5); @@ -87,11 +91,11 @@ public abstract class BaseStoredFieldsFormatTestCase extends BaseIndexFileFormat customType.setTokenized(false); Field idField = newField("id", "", customType); - for(int i=0;i docs = new HashMap<>(); + final Map docs = new HashMap<>(); if (VERBOSE) { System.out.println("TEST: build index docCount=" + docCount); @@ -99,21 +103,21 @@ public abstract class BaseStoredFieldsFormatTestCase extends BaseIndexFileFormat FieldType customType2 = new FieldType(); customType2.setStored(true); - for(int i=0;i 0) { - final String delID = ""+rand.nextInt(i); + final String delID = "" + rand.nextInt(i); if (VERBOSE) { System.out.println("TEST: delete doc id=" + delID); } @@ -139,7 +143,7 @@ public abstract class BaseStoredFieldsFormatTestCase extends BaseIndexFileFormat if (docs.size() > 0) { String[] idsList = docs.keySet().toArray(new String[docs.size()]); - for(int x=0;x<2;x++) { + for (int x = 0; x < 2; x++) { DirectoryReader r = maybeWrapWithMergingReader(w.getReader()); IndexSearcher s = newSearcher(r); @@ -148,7 +152,7 @@ public abstract class BaseStoredFieldsFormatTestCase extends BaseIndexFileFormat } int num = atLeast(100); - for(int iter=0;iter[] typeAnswers = new Class[numDocs]; - for(int id=0;id fields = + Arrays.asList( + new Field("bytes", bytes, ft), + new Field("string", string, ft), + new StoredField("long", l), + new StoredField("int", i), + new StoredField("float", f), + new StoredField("double", d)); for (int k = 0; k < 100; ++k) { Document doc = new Document(); @@ -369,13 +375,13 @@ public abstract class BaseStoredFieldsFormatTestCase extends BaseIndexFileFormat iw.close(); dir.close(); } - + public void testEmptyDocs() throws IOException { Directory dir = newDirectory(); IndexWriterConfig iwConf = newIndexWriterConfig(new MockAnalyzer(random())); iwConf.setMaxBufferedDocs(RandomNumbers.randomIntBetween(random(), 2, 30)); RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwConf); - + // make sure that the fact that documents might be empty is not a problem final Document emptyDoc = new Document(); final int numDocs = random().nextBoolean() ? 
1 : atLeast(1000); @@ -390,17 +396,17 @@ public abstract class BaseStoredFieldsFormatTestCase extends BaseIndexFileFormat assertTrue(doc.getFields().isEmpty()); } rd.close(); - + iw.close(); dir.close(); } - + public void testConcurrentReads() throws Exception { Directory dir = newDirectory(); IndexWriterConfig iwConf = newIndexWriterConfig(new MockAnalyzer(random())); iwConf.setMaxBufferedDocs(RandomNumbers.randomIntBetween(random(), 2, 30)); RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwConf); - + // make sure the readers are properly cloned final Document doc = new Document(); final Field field = new StringField("fld", "", Store.YES); @@ -419,39 +425,42 @@ public abstract class BaseStoredFieldsFormatTestCase extends BaseIndexFileFormat final List readThreads = new ArrayList<>(); final AtomicReference ex = new AtomicReference<>(); for (int i = 0; i < concurrentReads; ++i) { - readThreads.add(new Thread() { + readThreads.add( + new Thread() { - int[] queries; + int[] queries; - { - queries = new int[readsPerThread]; - for (int i = 0; i < queries.length; ++i) { - queries[i] = random().nextInt(numDocs); - } - } - - @Override - public void run() { - for (int q : queries) { - final Query query = new TermQuery(new Term("fld", "" + q)); - try { - final TopDocs topDocs = searcher.search(query, 1); - if (topDocs.totalHits.value != 1) { - throw new IllegalStateException("Expected 1 hit, got " + topDocs.totalHits.value); + { + queries = new int[readsPerThread]; + for (int i = 0; i < queries.length; ++i) { + queries[i] = random().nextInt(numDocs); } - final Document sdoc = rd.document(topDocs.scoreDocs[0].doc); - if (sdoc == null || sdoc.get("fld") == null) { - throw new IllegalStateException("Could not find document " + q); - } - if (!Integer.toString(q).equals(sdoc.get("fld"))) { - throw new IllegalStateException("Expected " + q + ", but got " + sdoc.get("fld")); - } - } catch (Exception e) { - ex.compareAndSet(null, e); } - } - } - }); + + @Override + public void run() { + for (int q : queries) { + final Query query = new TermQuery(new Term("fld", "" + q)); + try { + final TopDocs topDocs = searcher.search(query, 1); + if (topDocs.totalHits.value != 1) { + throw new IllegalStateException( + "Expected 1 hit, got " + topDocs.totalHits.value); + } + final Document sdoc = rd.document(topDocs.scoreDocs[0].doc); + if (sdoc == null || sdoc.get("fld") == null) { + throw new IllegalStateException("Could not find document " + q); + } + if (!Integer.toString(q).equals(sdoc.get("fld"))) { + throw new IllegalStateException( + "Expected " + q + ", but got " + sdoc.get("fld")); + } + } catch (Exception e) { + ex.compareAndSet(null, e); + } + } + } + }); } for (Thread thread : readThreads) { thread.start(); @@ -463,11 +472,11 @@ public abstract class BaseStoredFieldsFormatTestCase extends BaseIndexFileFormat if (ex.get() != null) { throw ex.get(); } - + iw.close(); dir.close(); } - + private byte[] randomByteArray(int length, int max) { final byte[] result = new byte[length]; for (int i = 0; i < length; ++i) { @@ -475,7 +484,7 @@ public abstract class BaseStoredFieldsFormatTestCase extends BaseIndexFileFormat } return result; } - + public void testWriteReadMerge() throws IOException { // get another codec, other than the default: so we are merging segments across different codecs final Codec otherCodec; @@ -490,16 +499,15 @@ public abstract class BaseStoredFieldsFormatTestCase extends BaseIndexFileFormat RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwConf); final int docCount = 
atLeast(200); - final byte[][][] data = new byte [docCount][][]; + final byte[][][] data = new byte[docCount][][]; for (int i = 0; i < docCount; ++i) { - final int fieldCount = rarely() - ? RandomNumbers.randomIntBetween(random(), 1, 500) - : RandomNumbers.randomIntBetween(random(), 1, 5); + final int fieldCount = + rarely() + ? RandomNumbers.randomIntBetween(random(), 1, 500) + : RandomNumbers.randomIntBetween(random(), 1, 5); data[i] = new byte[fieldCount][]; for (int j = 0; j < fieldCount; ++j) { - final int length = rarely() - ? random().nextInt(1000) - : random().nextInt(10); + final int length = rarely() ? random().nextInt(1000) : random().nextInt(10); final int max = rarely() ? 256 : 2; data[i][j] = randomByteArray(length, max); } @@ -538,7 +546,7 @@ public abstract class BaseStoredFieldsFormatTestCase extends BaseIndexFileFormat for (int i = 0; i < 10; ++i) { final int min = random().nextInt(data.length); final int max = min + random().nextInt(20); - iw.deleteDocuments(IntPoint.newRangeQuery("id", min, max-1)); + iw.deleteDocuments(IntPoint.newRangeQuery("id", min, max - 1)); } iw.forceMerge(2); // force merges with deletions @@ -553,7 +561,7 @@ public abstract class BaseStoredFieldsFormatTestCase extends BaseIndexFileFormat if (doc == null) { continue; } - ++ numDocs; + ++numDocs; final int docId = doc.getField("id").numericValue().intValue(); assertEquals(data[docId].length + 1, doc.getFields().size()); for (int j = 0; j < data[docId].length; ++j) { @@ -569,7 +577,7 @@ public abstract class BaseStoredFieldsFormatTestCase extends BaseIndexFileFormat iw.deleteAll(); iw.commit(); iw.forceMerge(1); - + iw.close(); dir.close(); } @@ -595,18 +603,19 @@ public abstract class BaseStoredFieldsFormatTestCase extends BaseIndexFileFormat public CacheHelper getReaderCacheHelper() { return null; } - } private static class DummyFilterDirectoryReader extends FilterDirectoryReader { public DummyFilterDirectoryReader(DirectoryReader in) throws IOException { - super(in, new SubReaderWrapper() { - @Override - public LeafReader wrap(LeafReader reader) { - return new DummyFilterLeafReader(reader); - } - }); + super( + in, + new SubReaderWrapper() { + @Override + public LeafReader wrap(LeafReader reader) { + return new DummyFilterLeafReader(reader); + } + }); } @Override @@ -618,7 +627,6 @@ public abstract class BaseStoredFieldsFormatTestCase extends BaseIndexFileFormat public CacheHelper getReaderCacheHelper() { return null; } - } public void testMergeFilterReader() throws IOException { @@ -648,9 +656,10 @@ public abstract class BaseStoredFieldsFormatTestCase extends BaseIndexFileFormat } w.commit(); w.close(); - - DirectoryReader reader = new DummyFilterDirectoryReader(maybeWrapWithMergingReader(DirectoryReader.open(dir))); - + + DirectoryReader reader = + new DummyFilterDirectoryReader(maybeWrapWithMergingReader(DirectoryReader.open(dir))); + Directory dir2 = newDirectory(); w = new RandomIndexWriter(random(), dir2); TestUtil.addIndexesSlowly(w.w, reader); @@ -679,12 +688,13 @@ public abstract class BaseStoredFieldsFormatTestCase extends BaseIndexFileFormat @Nightly public void testBigDocuments() throws IOException { assumeWorkingMMapOnWindows(); - + // "big" as "much bigger than the chunk size" // for this test we force a FS dir // we can't just use newFSDirectory, because this test doesn't really index anything. 
// so if we get NRTCachingDir+SimpleText, we make massive stored fields and OOM (LUCENE-4484) - Directory dir = new MockDirectoryWrapper(random(), new MMapDirectory(createTempDir("testBigDocuments"))); + Directory dir = + new MockDirectoryWrapper(random(), new MMapDirectory(createTempDir("testBigDocuments"))); IndexWriterConfig iwConf = newIndexWriterConfig(new MockAnalyzer(random())); iwConf.setMaxBufferedDocs(RandomNumbers.randomIntBetween(random(), 2, 30)); RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwConf); @@ -705,13 +715,18 @@ public abstract class BaseStoredFieldsFormatTestCase extends BaseIndexFileFormat final FieldType onlyStored = new FieldType(StringField.TYPE_STORED); onlyStored.setIndexOptions(IndexOptions.NONE); - final Field smallField = new Field("fld", randomByteArray(random().nextInt(10), 256), onlyStored); + final Field smallField = + new Field("fld", randomByteArray(random().nextInt(10), 256), onlyStored); final int numFields = RandomNumbers.randomIntBetween(random(), 500000, 1000000); for (int i = 0; i < numFields; ++i) { bigDoc1.add(smallField); } - final Field bigField = new Field("fld", randomByteArray(RandomNumbers.randomIntBetween(random(), 1000000, 5000000), 2), onlyStored); + final Field bigField = + new Field( + "fld", + randomByteArray(RandomNumbers.randomIntBetween(random(), 1000000, 5000000), 2), + onlyStored); bigDoc2.add(bigField); final int numDocs = atLeast(5); @@ -750,7 +765,12 @@ public abstract class BaseStoredFieldsFormatTestCase extends BaseIndexFileFormat public void testBulkMergeWithDeletes() throws IOException { final int numDocs = atLeast(200); Directory dir = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter(random(), dir, newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(NoMergePolicy.INSTANCE)); + RandomIndexWriter w = + new RandomIndexWriter( + random(), + dir, + newIndexWriterConfig(new MockAnalyzer(random())) + .setMergePolicy(NoMergePolicy.INSTANCE)); for (int i = 0; i < numDocs; ++i) { Document doc = new Document(); doc.add(new StringField("id", Integer.toString(i), Store.YES)); @@ -787,7 +807,7 @@ public abstract class BaseStoredFieldsFormatTestCase extends BaseIndexFileFormat for (int j = 0; j < 10; j++) { iw.addDocument(doc); } - + DirectoryReader reader = maybeWrapWithMergingReader(DirectoryReader.open(iw)); // mix up fields explicitly if (random().nextBoolean()) { @@ -798,15 +818,15 @@ public abstract class BaseStoredFieldsFormatTestCase extends BaseIndexFileFormat TestUtil.addIndexesSlowly(adder, reader); adder.commit(); adder.close(); - + IOUtils.close(reader, iw, dir); } - + Directory everything = newDirectory(); IndexWriter iw = new IndexWriter(everything, new IndexWriterConfig(null)); iw.addIndexes(dirs); iw.forceMerge(1); - + LeafReader ir = getOnlyLeafReader(DirectoryReader.open(iw)); for (int i = 0; i < ir.maxDoc(); i++) { Document doc = ir.document(i); diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java index 4de16b6a9bd..5f958cf3eb7 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java @@ -23,6 +23,7 @@ import static org.apache.lucene.index.PostingsEnum.OFFSETS; import static org.apache.lucene.index.PostingsEnum.PAYLOADS; import static org.apache.lucene.index.PostingsEnum.POSITIONS; +import 
com.carrotsearch.randomizedtesting.generators.RandomPicks; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -33,9 +34,6 @@ import java.util.HashSet; import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicReference; - -import com.carrotsearch.randomizedtesting.generators.RandomPicks; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.CannedTokenStream; import org.apache.lucene.analysis.MockTokenizer; @@ -64,16 +62,15 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.TestUtil; /** - * Base class aiming at testing {@link TermVectorsFormat term vectors formats}. - * To test a new format, all you need is to register a new {@link Codec} which - * uses it and extend this class and override {@link #getCodec()}. + * Base class aiming at testing {@link TermVectorsFormat term vectors formats}. To test a new + * format, all you need is to register a new {@link Codec} which uses it and extend this class and + * override {@link #getCodec()}. + * * @lucene.experimental */ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatTestCase { - /** - * A combination of term vectors options. - */ + /** A combination of term vectors options. */ protected enum Options { NONE(false, false, false), POSITIONS(true, false, false), @@ -82,6 +79,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT POSITIONS_AND_PAYLOADS(true, false, true), POSITIONS_AND_OFFSETS_AND_PAYLOADS(true, true, true); final boolean positions, offsets, payloads; + private Options(boolean positions, boolean offsets, boolean payloads) { this.positions = positions; this.offsets = offsets; @@ -119,7 +117,8 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT } // custom impl to test cases that are forbidden by the default OffsetAttribute impl - private static class PermissiveOffsetAttributeImpl extends AttributeImpl implements OffsetAttribute { + private static class PermissiveOffsetAttributeImpl extends AttributeImpl + implements OffsetAttribute { int start, end; @@ -175,7 +174,6 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT reflector.reflect(OffsetAttribute.class, "startOffset", start); reflector.reflect(OffsetAttribute.class, "endOffset", end); } - } // TODO: use CannedTokenStream? @@ -216,7 +214,8 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT if (i == 0) { startOffsets[i] = TestUtil.nextInt(random(), 0, 1 << 16); } else { - startOffsets[i] = startOffsets[i-1] + TestUtil.nextInt(random(), 0, rarely() ? 1 << 16 : 20); + startOffsets[i] = + startOffsets[i - 1] + TestUtil.nextInt(random(), 0, rarely() ? 1 << 16 : 20); } endOffsets[i] = startOffsets[i] + TestUtil.nextInt(random(), 0, rarely() ? 
1 << 10 : 20); } @@ -330,7 +329,6 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT return false; } } - } /** Randomly generated document: call toDocument to index it */ @@ -340,7 +338,13 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT private final FieldType[] fieldTypes; private final RandomTokenStream[] tokenStreams; - protected RandomDocument(int fieldCount, int maxTermCount, Options options, String[] fieldNames, String[] sampleTerms, BytesRef[] sampleTermBytes) { + protected RandomDocument( + int fieldCount, + int maxTermCount, + Options options, + String[] fieldNames, + String[] sampleTerms, + BytesRef[] sampleTermBytes) { if (fieldCount > fieldNames.length) { throw new IllegalArgumentException(); } @@ -354,7 +358,9 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT this.fieldNames[i] = RandomPicks.randomFrom(random(), fieldNames); } while (usedFileNames.contains(this.fieldNames[i])); usedFileNames.add(this.fieldNames[i]); - tokenStreams[i] = new RandomTokenStream(TestUtil.nextInt(random(), 1, maxTermCount), sampleTerms, sampleTermBytes); + tokenStreams[i] = + new RandomTokenStream( + TestUtil.nextInt(random(), 1, maxTermCount), sampleTerms, sampleTermBytes); } } @@ -365,7 +371,6 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT } return doc; } - } /** Factory for generating random documents, call newDocument to generate each one */ @@ -393,7 +398,6 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT public RandomDocument newDocument(int fieldCount, int maxTermCount, Options options) { return new RandomDocument(fieldCount, maxTermCount, options, fieldNames, terms, termBytes); } - } protected void assertEquals(RandomDocument doc, Fields fields) throws IOException { @@ -457,7 +461,9 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT this.docsEnum.set(postingsEnum); PostingsEnum docsAndPositionsEnum = termsEnum.postings(null); - docsAndPositionsEnum = termsEnum.postings(random().nextBoolean() ? null : docsAndPositionsEnum, PostingsEnum.POSITIONS); + docsAndPositionsEnum = + termsEnum.postings( + random().nextBoolean() ? 
null : docsAndPositionsEnum, PostingsEnum.POSITIONS); if (terms.hasPositions() || terms.hasOffsets()) { assertEquals(0, docsAndPositionsEnum.nextDoc()); final int freq = docsAndPositionsEnum.freq(); @@ -485,7 +491,9 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT if (terms.hasOffsets()) { boolean foundOffset = false; for (int index : indexes) { - if (tk.termBytes[index].equals(termsEnum.term()) && tk.startOffsets[index] == docsAndPositionsEnum.startOffset() && tk.endOffsets[index] == docsAndPositionsEnum.endOffset()) { + if (tk.termBytes[index].equals(termsEnum.term()) + && tk.startOffsets[index] == docsAndPositionsEnum.startOffset() + && tk.endOffsets[index] == docsAndPositionsEnum.endOffset()) { foundOffset = true; break; } @@ -495,7 +503,8 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT if (terms.hasPayloads()) { boolean foundPayload = false; for (int index : indexes) { - if (tk.termBytes[index].equals(termsEnum.term()) && equals(tk.payloads[index], docsAndPositionsEnum.getPayload())) { + if (tk.termBytes[index].equals(termsEnum.term()) + && equals(tk.payloads[index], docsAndPositionsEnum.getPayload())) { foundPayload = true; break; } @@ -513,7 +522,8 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT if (random().nextBoolean()) { assertTrue(termsEnum.seekExact(RandomPicks.randomFrom(random(), tk.termBytes))); } else { - assertEquals(SeekStatus.FOUND, termsEnum.seekCeil(RandomPicks.randomFrom(random(), tk.termBytes))); + assertEquals( + SeekStatus.FOUND, termsEnum.seekCeil(RandomPicks.randomFrom(random(), tk.termBytes))); } } } @@ -540,7 +550,8 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT final Document emptyDoc = new Document(); final Directory dir = newDirectory(); final RandomIndexWriter writer = new RandomIndexWriter(random(), dir); - final RandomDocument doc = docFactory.newDocument(TestUtil.nextInt(random(), 1, 3), 20, options); + final RandomDocument doc = + docFactory.newDocument(TestUtil.nextInt(random(), 1, 3), 20, options); for (int i = 0; i < numDocs; ++i) { if (i == docWithVectors) { writer.addDocument(addId(doc.toDocument(), "42")); @@ -575,7 +586,8 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT } final Directory dir = newDirectory(); final RandomIndexWriter writer = new RandomIndexWriter(random(), dir); - final RandomDocument doc = docFactory.newDocument(TestUtil.nextInt(random(), 1, 2), atLeast(2000), options); + final RandomDocument doc = + docFactory.newDocument(TestUtil.nextInt(random(), 1, 2), atLeast(2000), options); writer.addDocument(doc.toDocument()); final IndexReader reader = writer.getReader(); assertEquals(doc, reader.getTermVectors(0)); @@ -592,7 +604,8 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT for (Options options : validOptions()) { final Directory dir = newDirectory(); final RandomIndexWriter writer = new RandomIndexWriter(random(), dir); - final RandomDocument doc = docFactory.newDocument(TestUtil.nextInt(random(), 20, fieldCount), 5, options); + final RandomDocument doc = + docFactory.newDocument(TestUtil.nextInt(random(), 20, fieldCount), 5, options); writer.addDocument(doc.toDocument()); final IndexReader reader = writer.getReader(); assertEquals(doc, reader.getTermVectors(0)); @@ -614,7 +627,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT final Directory dir = newDirectory(); final 
RandomIndexWriter writer = new RandomIndexWriter(random(), dir); final RandomDocument doc1 = docFactory.newDocument(numFields, 20, options1); - final RandomDocument doc2 = docFactory.newDocument(numFields, 20, options2); + final RandomDocument doc2 = docFactory.newDocument(numFields, 20, options2); writer.addDocument(addId(doc1.toDocument(), "1")); writer.addDocument(addId(doc2.toDocument(), "2")); final IndexReader reader = writer.getReader(); @@ -634,16 +647,20 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT final int numDocs = atLeast(50); final RandomDocument[] docs = new RandomDocument[numDocs]; for (int i = 0; i < numDocs; ++i) { - docs[i] = docFactory.newDocument(TestUtil.nextInt(random(), 1, 3), TestUtil.nextInt(random(), 10, 50), randomOptions()); + docs[i] = + docFactory.newDocument( + TestUtil.nextInt(random(), 1, 3), + TestUtil.nextInt(random(), 10, 50), + randomOptions()); } final Directory dir = newDirectory(); final RandomIndexWriter writer = new RandomIndexWriter(random(), dir); for (int i = 0; i < numDocs; ++i) { - writer.addDocument(addId(docs[i].toDocument(), ""+i)); + writer.addDocument(addId(docs[i].toDocument(), "" + i)); } final IndexReader reader = writer.getReader(); for (int i = 0; i < numDocs; ++i) { - final int docID = docID(reader, ""+i); + final int docID = docID(reader, "" + i); assertEquals(docs[i], reader.getTermVectors(docID)); } reader.close(); @@ -667,7 +684,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT final Directory dir = newDirectory(); final RandomIndexWriter writer = new RandomIndexWriter(random(), dir); for (int i = 0; i < numDocs; ++i) { - writer.addDocument(addId(docs[i].toDocument(), ""+i)); + writer.addDocument(addId(docs[i].toDocument(), "" + i)); if (rarely()) { writer.commit(); } @@ -680,7 +697,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT final IndexReader reader = writer.getReader(); for (int i = 0; i < numDocs; ++i) { if (!deletes.contains(i)) { - final int docID = docID(reader, ""+i); + final int docID = docID(reader, "" + i); assertEquals(docs[i], reader.getTermVectors(docID)); } } @@ -703,31 +720,32 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT final Directory dir = newDirectory(); final RandomIndexWriter writer = new RandomIndexWriter(random(), dir); for (int i = 0; i < numDocs; ++i) { - writer.addDocument(addId(docs[i].toDocument(), ""+i)); + writer.addDocument(addId(docs[i].toDocument(), "" + i)); } final IndexReader reader = writer.getReader(); for (int i = 0; i < numDocs; ++i) { - final int docID = docID(reader, ""+i); + final int docID = docID(reader, "" + i); assertEquals(docs[i], reader.getTermVectors(docID)); } final AtomicReference exception = new AtomicReference<>(); final Thread[] threads = new Thread[2]; for (int i = 0; i < threads.length; ++i) { - threads[i] = new Thread() { - @Override - public void run() { - try { - for (int i = 0; i < atLeast(100); ++i) { - final int idx = random().nextInt(numDocs); - final int docID = docID(reader, ""+idx); - assertEquals(docs[idx], reader.getTermVectors(docID)); + threads[i] = + new Thread() { + @Override + public void run() { + try { + for (int i = 0; i < atLeast(100); ++i) { + final int idx = random().nextInt(numDocs); + final int docID = docID(reader, "" + idx); + assertEquals(docs[idx], reader.getTermVectors(docID)); + } + } catch (Throwable t) { + exception.set(t); + } } - } catch (Throwable t) { - exception.set(t); - } - } - 
}; + }; } for (Thread thread : threads) { thread.start(); @@ -741,15 +759,17 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT assertNull("One thread threw an exception", exception.get()); } } - + public void testPostingsEnumFreqs() throws Exception { Directory dir = newDirectory(); - IndexWriterConfig iwc = new IndexWriterConfig(new Analyzer() { - @Override - protected TokenStreamComponents createComponents(String fieldName) { - return new TokenStreamComponents(new MockTokenizer()); - } - }); + IndexWriterConfig iwc = + new IndexWriterConfig( + new Analyzer() { + @Override + protected TokenStreamComponents createComponents(String fieldName) { + return new TokenStreamComponents(new MockTokenizer()); + } + }); IndexWriter iw = new IndexWriter(dir, iwc); Document doc = new Document(); FieldType ft = new FieldType(TextField.TYPE_NOT_STORED); @@ -757,19 +777,19 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT doc.add(new Field("foo", "bar bar", ft)); iw.addDocument(doc); DirectoryReader reader = DirectoryReader.open(iw); - + Terms terms = getOnlyLeafReader(reader).getTermVector(0, "foo"); TermsEnum termsEnum = terms.iterator(); assertNotNull(termsEnum); assertEquals(new BytesRef("bar"), termsEnum.next()); - + // simple use (FREQS) PostingsEnum postings = termsEnum.postings(null); assertEquals(-1, postings.docID()); assertEquals(0, postings.nextDoc()); assertEquals(2, postings.freq()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings.nextDoc()); - + // termsenum reuse (FREQS) PostingsEnum postings2 = termsEnum.postings(postings); assertNotNull(postings2); @@ -778,7 +798,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT assertEquals(0, postings2.nextDoc()); assertEquals(2, postings2.freq()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings2.nextDoc()); - + // asking for docs only: ok PostingsEnum docsOnly = termsEnum.postings(null, PostingsEnum.NONE); assertEquals(-1, docsOnly.docID()); @@ -795,9 +815,9 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT // we don't define what it is, but if its something else, we should look into it? 
assertTrue(docsOnly.freq() == 1 || docsOnly.freq() == 2); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsOnly2.nextDoc()); - + // asking for any flags: ok - for (int flag : new int[] { NONE, FREQS, POSITIONS, PAYLOADS, OFFSETS, ALL }) { + for (int flag : new int[] {NONE, FREQS, POSITIONS, PAYLOADS, OFFSETS, ALL}) { postings = termsEnum.postings(null, flag); assertEquals(-1, postings.docID()); assertEquals(0, postings.nextDoc()); @@ -816,20 +836,22 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT } assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings2.nextDoc()); } - + iw.close(); reader.close(); dir.close(); } - + public void testPostingsEnumPositions() throws Exception { Directory dir = newDirectory(); - IndexWriterConfig iwc = new IndexWriterConfig(new Analyzer() { - @Override - protected TokenStreamComponents createComponents(String fieldName) { - return new TokenStreamComponents(new MockTokenizer()); - } - }); + IndexWriterConfig iwc = + new IndexWriterConfig( + new Analyzer() { + @Override + protected TokenStreamComponents createComponents(String fieldName) { + return new TokenStreamComponents(new MockTokenizer()); + } + }); IndexWriter iw = new IndexWriter(dir, iwc); Document doc = new Document(); FieldType ft = new FieldType(TextField.TYPE_NOT_STORED); @@ -838,19 +860,19 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT doc.add(new Field("foo", "bar bar", ft)); iw.addDocument(doc); DirectoryReader reader = DirectoryReader.open(iw); - + Terms terms = getOnlyLeafReader(reader).getTermVector(0, "foo"); TermsEnum termsEnum = terms.iterator(); assertNotNull(termsEnum); assertEquals(new BytesRef("bar"), termsEnum.next()); - + // simple use (FREQS) PostingsEnum postings = termsEnum.postings(null); assertEquals(-1, postings.docID()); assertEquals(0, postings.nextDoc()); assertEquals(2, postings.freq()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings.nextDoc()); - + // termsenum reuse (FREQS) PostingsEnum postings2 = termsEnum.postings(postings); assertNotNull(postings2); @@ -859,7 +881,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT assertEquals(0, postings2.nextDoc()); assertEquals(2, postings2.freq()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings2.nextDoc()); - + // asking for docs only: ok PostingsEnum docsOnly = termsEnum.postings(null, PostingsEnum.NONE); assertEquals(-1, docsOnly.docID()); @@ -876,7 +898,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT // we don't define what it is, but if its something else, we should look into it? 
assertTrue(docsOnly2.freq() == 1 || docsOnly2.freq() == 2); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsOnly2.nextDoc()); - + // asking for positions, ok PostingsEnum docsAndPositionsEnum = termsEnum.postings(null, PostingsEnum.POSITIONS); assertEquals(-1, docsAndPositionsEnum.docID()); @@ -891,9 +913,10 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT assertEquals(-1, docsAndPositionsEnum.endOffset()); assertNull(docsAndPositionsEnum.getPayload()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum.nextDoc()); - + // now reuse the positions - PostingsEnum docsAndPositionsEnum2 = termsEnum.postings(docsAndPositionsEnum, PostingsEnum.POSITIONS); + PostingsEnum docsAndPositionsEnum2 = + termsEnum.postings(docsAndPositionsEnum, PostingsEnum.POSITIONS); assertEquals(-1, docsAndPositionsEnum2.docID()); assertEquals(0, docsAndPositionsEnum2.nextDoc()); assertEquals(2, docsAndPositionsEnum2.freq()); @@ -906,7 +929,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT assertEquals(-1, docsAndPositionsEnum2.endOffset()); assertNull(docsAndPositionsEnum2.getPayload()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - + // payloads, offsets, etc don't cause an error if they aren't there docsAndPositionsEnum = termsEnum.postings(null, PostingsEnum.PAYLOADS); assertNotNull(docsAndPositionsEnum); @@ -937,7 +960,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT assertEquals(-1, docsAndPositionsEnum2.endOffset()); assertNull(docsAndPositionsEnum2.getPayload()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - + docsAndPositionsEnum = termsEnum.postings(null, PostingsEnum.OFFSETS); assertNotNull(docsAndPositionsEnum); assertEquals(-1, docsAndPositionsEnum.docID()); @@ -966,7 +989,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT assertEquals(-1, docsAndPositionsEnum2.endOffset()); assertNull(docsAndPositionsEnum2.getPayload()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - + docsAndPositionsEnum = termsEnum.postings(null, PostingsEnum.ALL); assertNotNull(docsAndPositionsEnum); assertEquals(-1, docsAndPositionsEnum.docID()); @@ -994,20 +1017,22 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT assertEquals(-1, docsAndPositionsEnum2.endOffset()); assertNull(docsAndPositionsEnum2.getPayload()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - + iw.close(); reader.close(); dir.close(); } - + public void testPostingsEnumOffsets() throws Exception { Directory dir = newDirectory(); - IndexWriterConfig iwc = new IndexWriterConfig(new Analyzer() { - @Override - protected TokenStreamComponents createComponents(String fieldName) { - return new TokenStreamComponents(new MockTokenizer()); - } - }); + IndexWriterConfig iwc = + new IndexWriterConfig( + new Analyzer() { + @Override + protected TokenStreamComponents createComponents(String fieldName) { + return new TokenStreamComponents(new MockTokenizer()); + } + }); IndexWriter iw = new IndexWriter(dir, iwc); Document doc = new Document(); FieldType ft = new FieldType(TextField.TYPE_NOT_STORED); @@ -1017,19 +1042,19 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT doc.add(new Field("foo", "bar bar", ft)); iw.addDocument(doc); DirectoryReader reader = DirectoryReader.open(iw); - + Terms terms = 
getOnlyLeafReader(reader).getTermVector(0, "foo"); TermsEnum termsEnum = terms.iterator(); assertNotNull(termsEnum); assertEquals(new BytesRef("bar"), termsEnum.next()); - + // simple usage (FREQS) PostingsEnum postings = termsEnum.postings(null); assertEquals(-1, postings.docID()); assertEquals(0, postings.nextDoc()); assertEquals(2, postings.freq()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings.nextDoc()); - + // termsenum reuse (FREQS) PostingsEnum postings2 = termsEnum.postings(postings); assertNotNull(postings2); @@ -1038,7 +1063,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT assertEquals(0, postings2.nextDoc()); assertEquals(2, postings2.freq()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings2.nextDoc()); - + // asking for docs only: ok PostingsEnum docsOnly = termsEnum.postings(null, PostingsEnum.NONE); assertEquals(-1, docsOnly.docID()); @@ -1055,7 +1080,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT // we don't define what it is, but if its something else, we should look into it? assertTrue(docsOnly2.freq() == 1 || docsOnly2.freq() == 2); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsOnly2.nextDoc()); - + // asking for positions, ok PostingsEnum docsAndPositionsEnum = termsEnum.postings(null, PostingsEnum.POSITIONS); assertEquals(-1, docsAndPositionsEnum.docID()); @@ -1072,24 +1097,27 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT assertTrue(docsAndPositionsEnum.endOffset() == -1 || docsAndPositionsEnum.endOffset() == 7); assertNull(docsAndPositionsEnum.getPayload()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum.nextDoc()); - + // now reuse the positions - PostingsEnum docsAndPositionsEnum2 = termsEnum.postings(docsAndPositionsEnum, PostingsEnum.POSITIONS); + PostingsEnum docsAndPositionsEnum2 = + termsEnum.postings(docsAndPositionsEnum, PostingsEnum.POSITIONS); assertEquals(-1, docsAndPositionsEnum2.docID()); assertEquals(0, docsAndPositionsEnum2.nextDoc()); assertEquals(2, docsAndPositionsEnum2.freq()); assertEquals(0, docsAndPositionsEnum2.nextPosition()); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 0); + assertTrue( + docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 0); assertTrue(docsAndPositionsEnum2.endOffset() == -1 || docsAndPositionsEnum2.endOffset() == 3); assertNull(docsAndPositionsEnum2.getPayload()); assertEquals(1, docsAndPositionsEnum2.nextPosition()); // we don't define what it is, but if its something else, we should look into it? 
- assertTrue(docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 4); + assertTrue( + docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 4); assertTrue(docsAndPositionsEnum2.endOffset() == -1 || docsAndPositionsEnum2.endOffset() == 7); assertNull(docsAndPositionsEnum2.getPayload()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - + // payloads don't cause an error if they aren't there docsAndPositionsEnum = termsEnum.postings(null, PostingsEnum.PAYLOADS); assertNotNull(docsAndPositionsEnum); @@ -1115,16 +1143,18 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT assertEquals(2, docsAndPositionsEnum2.freq()); assertEquals(0, docsAndPositionsEnum2.nextPosition()); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 0); + assertTrue( + docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 0); assertTrue(docsAndPositionsEnum2.endOffset() == -1 || docsAndPositionsEnum2.endOffset() == 3); assertNull(docsAndPositionsEnum2.getPayload()); assertEquals(1, docsAndPositionsEnum2.nextPosition()); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 4); + assertTrue( + docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 4); assertTrue(docsAndPositionsEnum2.endOffset() == -1 || docsAndPositionsEnum2.endOffset() == 7); assertNull(docsAndPositionsEnum2.getPayload()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - + docsAndPositionsEnum = termsEnum.postings(null, PostingsEnum.OFFSETS); assertNotNull(docsAndPositionsEnum); assertEquals(-1, docsAndPositionsEnum.docID()); @@ -1153,7 +1183,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT assertEquals(7, docsAndPositionsEnum2.endOffset()); assertNull(docsAndPositionsEnum2.getPayload()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - + docsAndPositionsEnum = termsEnum.postings(null, PostingsEnum.ALL); assertNotNull(docsAndPositionsEnum); assertEquals(-1, docsAndPositionsEnum.docID()); @@ -1181,20 +1211,22 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT assertEquals(7, docsAndPositionsEnum2.endOffset()); assertNull(docsAndPositionsEnum2.getPayload()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - + iw.close(); reader.close(); dir.close(); } - + public void testPostingsEnumOffsetsWithoutPositions() throws Exception { Directory dir = newDirectory(); - IndexWriterConfig iwc = new IndexWriterConfig(new Analyzer() { - @Override - protected TokenStreamComponents createComponents(String fieldName) { - return new TokenStreamComponents(new MockTokenizer()); - } - }); + IndexWriterConfig iwc = + new IndexWriterConfig( + new Analyzer() { + @Override + protected TokenStreamComponents createComponents(String fieldName) { + return new TokenStreamComponents(new MockTokenizer()); + } + }); IndexWriter iw = new IndexWriter(dir, iwc); Document doc = new Document(); FieldType ft = new FieldType(TextField.TYPE_NOT_STORED); @@ -1203,19 +1235,19 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT doc.add(new Field("foo", "bar bar", ft)); 
iw.addDocument(doc); DirectoryReader reader = DirectoryReader.open(iw); - + Terms terms = getOnlyLeafReader(reader).getTermVector(0, "foo"); TermsEnum termsEnum = terms.iterator(); assertNotNull(termsEnum); assertEquals(new BytesRef("bar"), termsEnum.next()); - + // simple usage (FREQS) PostingsEnum postings = termsEnum.postings(null); assertEquals(-1, postings.docID()); assertEquals(0, postings.nextDoc()); assertEquals(2, postings.freq()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings.nextDoc()); - + // termsenum reuse (FREQS) PostingsEnum postings2 = termsEnum.postings(postings); assertNotNull(postings2); @@ -1224,7 +1256,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT assertEquals(0, postings2.nextDoc()); assertEquals(2, postings2.freq()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings2.nextDoc()); - + // asking for docs only: ok PostingsEnum docsOnly = termsEnum.postings(null, PostingsEnum.NONE); assertEquals(-1, docsOnly.docID()); @@ -1241,7 +1273,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT // we don't define what it is, but if its something else, we should look into it? assertTrue(docsOnly2.freq() == 1 || docsOnly2.freq() == 2); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsOnly2.nextDoc()); - + // asking for positions, ok PostingsEnum docsAndPositionsEnum = termsEnum.postings(null, PostingsEnum.POSITIONS); assertEquals(-1, docsAndPositionsEnum.docID()); @@ -1258,24 +1290,27 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT assertTrue(docsAndPositionsEnum.endOffset() == -1 || docsAndPositionsEnum.endOffset() == 7); assertNull(docsAndPositionsEnum.getPayload()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum.nextDoc()); - + // now reuse the positions - PostingsEnum docsAndPositionsEnum2 = termsEnum.postings(docsAndPositionsEnum, PostingsEnum.POSITIONS); + PostingsEnum docsAndPositionsEnum2 = + termsEnum.postings(docsAndPositionsEnum, PostingsEnum.POSITIONS); assertEquals(-1, docsAndPositionsEnum2.docID()); assertEquals(0, docsAndPositionsEnum2.nextDoc()); assertEquals(2, docsAndPositionsEnum2.freq()); assertEquals(-1, docsAndPositionsEnum2.nextPosition()); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 0); + assertTrue( + docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 0); assertTrue(docsAndPositionsEnum2.endOffset() == -1 || docsAndPositionsEnum2.endOffset() == 3); assertNull(docsAndPositionsEnum2.getPayload()); assertEquals(-1, docsAndPositionsEnum2.nextPosition()); // we don't define what it is, but if its something else, we should look into it? 
- assertTrue(docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 4); + assertTrue( + docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 4); assertTrue(docsAndPositionsEnum2.endOffset() == -1 || docsAndPositionsEnum2.endOffset() == 7); assertNull(docsAndPositionsEnum2.getPayload()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - + // payloads don't cause an error if they aren't there docsAndPositionsEnum = termsEnum.postings(null, PostingsEnum.PAYLOADS); assertNotNull(docsAndPositionsEnum); @@ -1301,16 +1336,18 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT assertEquals(2, docsAndPositionsEnum2.freq()); assertEquals(-1, docsAndPositionsEnum2.nextPosition()); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 0); + assertTrue( + docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 0); assertTrue(docsAndPositionsEnum2.endOffset() == -1 || docsAndPositionsEnum2.endOffset() == 3); assertNull(docsAndPositionsEnum2.getPayload()); assertEquals(-1, docsAndPositionsEnum2.nextPosition()); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 4); + assertTrue( + docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 4); assertTrue(docsAndPositionsEnum2.endOffset() == -1 || docsAndPositionsEnum2.endOffset() == 7); assertNull(docsAndPositionsEnum2.getPayload()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - + docsAndPositionsEnum = termsEnum.postings(null, PostingsEnum.OFFSETS); assertNotNull(docsAndPositionsEnum); assertEquals(-1, docsAndPositionsEnum.docID()); @@ -1339,7 +1376,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT assertEquals(7, docsAndPositionsEnum2.endOffset()); assertNull(docsAndPositionsEnum2.getPayload()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - + docsAndPositionsEnum = termsEnum.postings(null, PostingsEnum.ALL); assertNotNull(docsAndPositionsEnum); assertEquals(-1, docsAndPositionsEnum.docID()); @@ -1367,12 +1404,12 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT assertEquals(7, docsAndPositionsEnum2.endOffset()); assertNull(docsAndPositionsEnum2.getPayload()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - + iw.close(); reader.close(); dir.close(); } - + public void testPostingsEnumPayloads() throws Exception { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); @@ -1389,19 +1426,19 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT doc.add(new Field("foo", new CannedTokenStream(token1, token2), ft)); iw.addDocument(doc); DirectoryReader reader = DirectoryReader.open(iw); - + Terms terms = getOnlyLeafReader(reader).getTermVector(0, "foo"); TermsEnum termsEnum = terms.iterator(); assertNotNull(termsEnum); assertEquals(new BytesRef("bar"), termsEnum.next()); - + // sugar method (FREQS) PostingsEnum postings = termsEnum.postings(null); assertEquals(-1, postings.docID()); assertEquals(0, postings.nextDoc()); assertEquals(2, postings.freq()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings.nextDoc()); 
- + // termsenum reuse (FREQS) PostingsEnum postings2 = termsEnum.postings(postings); assertNotNull(postings2); @@ -1410,7 +1447,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT assertEquals(0, postings2.nextDoc()); assertEquals(2, postings2.freq()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings2.nextDoc()); - + // asking for docs only: ok PostingsEnum docsOnly = termsEnum.postings(null, PostingsEnum.NONE); assertEquals(-1, docsOnly.docID()); @@ -1427,7 +1464,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT // we don't define what it is, but if its something else, we should look into it? assertTrue(docsOnly2.freq() == 1 || docsOnly2.freq() == 2); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsOnly2.nextDoc()); - + // asking for positions, ok PostingsEnum docsAndPositionsEnum = termsEnum.postings(null, PostingsEnum.POSITIONS); assertEquals(-1, docsAndPositionsEnum.docID()); @@ -1437,16 +1474,21 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT assertEquals(-1, docsAndPositionsEnum.startOffset()); assertEquals(-1, docsAndPositionsEnum.endOffset()); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum.getPayload() == null || new BytesRef("pay1").equals(docsAndPositionsEnum.getPayload())); + assertTrue( + docsAndPositionsEnum.getPayload() == null + || new BytesRef("pay1").equals(docsAndPositionsEnum.getPayload())); assertEquals(1, docsAndPositionsEnum.nextPosition()); assertEquals(-1, docsAndPositionsEnum.startOffset()); assertEquals(-1, docsAndPositionsEnum.endOffset()); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum.getPayload() == null || new BytesRef("pay2").equals(docsAndPositionsEnum.getPayload())); + assertTrue( + docsAndPositionsEnum.getPayload() == null + || new BytesRef("pay2").equals(docsAndPositionsEnum.getPayload())); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum.nextDoc()); - + // now reuse the positions - PostingsEnum docsAndPositionsEnum2 = termsEnum.postings(docsAndPositionsEnum, PostingsEnum.POSITIONS); + PostingsEnum docsAndPositionsEnum2 = + termsEnum.postings(docsAndPositionsEnum, PostingsEnum.POSITIONS); assertEquals(-1, docsAndPositionsEnum2.docID()); assertEquals(0, docsAndPositionsEnum2.nextDoc()); assertEquals(2, docsAndPositionsEnum2.freq()); @@ -1454,14 +1496,18 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT assertEquals(-1, docsAndPositionsEnum2.startOffset()); assertEquals(-1, docsAndPositionsEnum2.endOffset()); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum2.getPayload() == null || new BytesRef("pay1").equals(docsAndPositionsEnum2.getPayload())); + assertTrue( + docsAndPositionsEnum2.getPayload() == null + || new BytesRef("pay1").equals(docsAndPositionsEnum2.getPayload())); assertEquals(1, docsAndPositionsEnum2.nextPosition()); assertEquals(-1, docsAndPositionsEnum2.startOffset()); assertEquals(-1, docsAndPositionsEnum2.endOffset()); // we don't define what it is, but if its something else, we should look into it? 
- assertTrue(docsAndPositionsEnum2.getPayload() == null || new BytesRef("pay2").equals(docsAndPositionsEnum2.getPayload())); + assertTrue( + docsAndPositionsEnum2.getPayload() == null + || new BytesRef("pay2").equals(docsAndPositionsEnum2.getPayload())); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - + // payloads docsAndPositionsEnum = termsEnum.postings(null, PostingsEnum.PAYLOADS); assertNotNull(docsAndPositionsEnum); @@ -1491,7 +1537,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT assertEquals(-1, docsAndPositionsEnum2.endOffset()); assertEquals(new BytesRef("pay2"), docsAndPositionsEnum2.getPayload()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - + docsAndPositionsEnum = termsEnum.postings(null, PostingsEnum.OFFSETS); assertNotNull(docsAndPositionsEnum); assertEquals(-1, docsAndPositionsEnum.docID()); @@ -1501,12 +1547,16 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT assertEquals(-1, docsAndPositionsEnum.startOffset()); assertEquals(-1, docsAndPositionsEnum.endOffset()); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum.getPayload() == null || new BytesRef("pay1").equals(docsAndPositionsEnum.getPayload())); + assertTrue( + docsAndPositionsEnum.getPayload() == null + || new BytesRef("pay1").equals(docsAndPositionsEnum.getPayload())); assertEquals(1, docsAndPositionsEnum.nextPosition()); assertEquals(-1, docsAndPositionsEnum.startOffset()); assertEquals(-1, docsAndPositionsEnum.endOffset()); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum.getPayload() == null || new BytesRef("pay2").equals(docsAndPositionsEnum.getPayload())); + assertTrue( + docsAndPositionsEnum.getPayload() == null + || new BytesRef("pay2").equals(docsAndPositionsEnum.getPayload())); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum.nextDoc()); // reuse docsAndPositionsEnum2 = termsEnum.postings(docsAndPositionsEnum, PostingsEnum.OFFSETS); @@ -1517,14 +1567,18 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT assertEquals(-1, docsAndPositionsEnum2.startOffset()); assertEquals(-1, docsAndPositionsEnum2.endOffset()); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum2.getPayload() == null || new BytesRef("pay1").equals(docsAndPositionsEnum2.getPayload())); + assertTrue( + docsAndPositionsEnum2.getPayload() == null + || new BytesRef("pay1").equals(docsAndPositionsEnum2.getPayload())); assertEquals(1, docsAndPositionsEnum2.nextPosition()); assertEquals(-1, docsAndPositionsEnum2.startOffset()); assertEquals(-1, docsAndPositionsEnum2.endOffset()); // we don't define what it is, but if its something else, we should look into it? 
- assertTrue(docsAndPositionsEnum2.getPayload() == null || new BytesRef("pay2").equals(docsAndPositionsEnum2.getPayload())); + assertTrue( + docsAndPositionsEnum2.getPayload() == null + || new BytesRef("pay2").equals(docsAndPositionsEnum2.getPayload())); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - + docsAndPositionsEnum = termsEnum.postings(null, PostingsEnum.ALL); assertNotNull(docsAndPositionsEnum); assertEquals(-1, docsAndPositionsEnum.docID()); @@ -1552,12 +1606,12 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT assertEquals(-1, docsAndPositionsEnum2.endOffset()); assertEquals(new BytesRef("pay2"), docsAndPositionsEnum2.getPayload()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - + iw.close(); reader.close(); dir.close(); } - + public void testPostingsEnumAll() throws Exception { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); @@ -1575,19 +1629,19 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT doc.add(new Field("foo", new CannedTokenStream(token1, token2), ft)); iw.addDocument(doc); DirectoryReader reader = DirectoryReader.open(iw); - + Terms terms = getOnlyLeafReader(reader).getTermVector(0, "foo"); TermsEnum termsEnum = terms.iterator(); assertNotNull(termsEnum); assertEquals(new BytesRef("bar"), termsEnum.next()); - + // sugar method (FREQS) PostingsEnum postings = termsEnum.postings(null); assertEquals(-1, postings.docID()); assertEquals(0, postings.nextDoc()); assertEquals(2, postings.freq()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings.nextDoc()); - + // termsenum reuse (FREQS) PostingsEnum postings2 = termsEnum.postings(postings); assertNotNull(postings2); @@ -1596,7 +1650,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT assertEquals(0, postings2.nextDoc()); assertEquals(2, postings2.freq()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings2.nextDoc()); - + // asking for docs only: ok PostingsEnum docsOnly = termsEnum.postings(null, PostingsEnum.NONE); assertEquals(-1, docsOnly.docID()); @@ -1613,7 +1667,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT // we don't define what it is, but if its something else, we should look into it? assertTrue(docsOnly2.freq() == 1 || docsOnly2.freq() == 2); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsOnly2.nextDoc()); - + // asking for positions, ok PostingsEnum docsAndPositionsEnum = termsEnum.postings(null, PostingsEnum.POSITIONS); assertEquals(-1, docsAndPositionsEnum.docID()); @@ -1624,34 +1678,45 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT assertTrue(docsAndPositionsEnum.startOffset() == -1 || docsAndPositionsEnum.startOffset() == 0); assertTrue(docsAndPositionsEnum.endOffset() == -1 || docsAndPositionsEnum.endOffset() == 3); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum.getPayload() == null || new BytesRef("pay1").equals(docsAndPositionsEnum.getPayload())); + assertTrue( + docsAndPositionsEnum.getPayload() == null + || new BytesRef("pay1").equals(docsAndPositionsEnum.getPayload())); assertEquals(1, docsAndPositionsEnum.nextPosition()); // we don't define what it is, but if its something else, we should look into it? 
assertTrue(docsAndPositionsEnum.startOffset() == -1 || docsAndPositionsEnum.startOffset() == 4); assertTrue(docsAndPositionsEnum.endOffset() == -1 || docsAndPositionsEnum.endOffset() == 7); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum.getPayload() == null || new BytesRef("pay2").equals(docsAndPositionsEnum.getPayload())); + assertTrue( + docsAndPositionsEnum.getPayload() == null + || new BytesRef("pay2").equals(docsAndPositionsEnum.getPayload())); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum.nextDoc()); - + // now reuse the positions - PostingsEnum docsAndPositionsEnum2 = termsEnum.postings(docsAndPositionsEnum, PostingsEnum.POSITIONS); + PostingsEnum docsAndPositionsEnum2 = + termsEnum.postings(docsAndPositionsEnum, PostingsEnum.POSITIONS); assertEquals(-1, docsAndPositionsEnum2.docID()); assertEquals(0, docsAndPositionsEnum2.nextDoc()); assertEquals(2, docsAndPositionsEnum2.freq()); assertEquals(0, docsAndPositionsEnum2.nextPosition()); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 0); + assertTrue( + docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 0); assertTrue(docsAndPositionsEnum2.endOffset() == -1 || docsAndPositionsEnum2.endOffset() == 3); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum2.getPayload() == null || new BytesRef("pay1").equals(docsAndPositionsEnum2.getPayload())); + assertTrue( + docsAndPositionsEnum2.getPayload() == null + || new BytesRef("pay1").equals(docsAndPositionsEnum2.getPayload())); assertEquals(1, docsAndPositionsEnum2.nextPosition()); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 4); + assertTrue( + docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 4); assertTrue(docsAndPositionsEnum2.endOffset() == -1 || docsAndPositionsEnum2.endOffset() == 7); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum2.getPayload() == null || new BytesRef("pay2").equals(docsAndPositionsEnum2.getPayload())); + assertTrue( + docsAndPositionsEnum2.getPayload() == null + || new BytesRef("pay2").equals(docsAndPositionsEnum2.getPayload())); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - + // payloads docsAndPositionsEnum = termsEnum.postings(null, PostingsEnum.PAYLOADS); assertNotNull(docsAndPositionsEnum); @@ -1676,16 +1741,18 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT assertEquals(2, docsAndPositionsEnum2.freq()); assertEquals(0, docsAndPositionsEnum2.nextPosition()); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 0); + assertTrue( + docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 0); assertTrue(docsAndPositionsEnum2.endOffset() == -1 || docsAndPositionsEnum2.endOffset() == 3); assertEquals(new BytesRef("pay1"), docsAndPositionsEnum2.getPayload()); assertEquals(1, docsAndPositionsEnum2.nextPosition()); // we don't define what it is, but if its something else, we should look into it? 
- assertTrue(docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 4); + assertTrue( + docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 4); assertTrue(docsAndPositionsEnum2.endOffset() == -1 || docsAndPositionsEnum2.endOffset() == 7); assertEquals(new BytesRef("pay2"), docsAndPositionsEnum2.getPayload()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - + docsAndPositionsEnum = termsEnum.postings(null, PostingsEnum.OFFSETS); assertNotNull(docsAndPositionsEnum); assertEquals(-1, docsAndPositionsEnum.docID()); @@ -1695,12 +1762,16 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT assertEquals(0, docsAndPositionsEnum.startOffset()); assertEquals(3, docsAndPositionsEnum.endOffset()); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum.getPayload() == null || new BytesRef("pay1").equals(docsAndPositionsEnum.getPayload())); + assertTrue( + docsAndPositionsEnum.getPayload() == null + || new BytesRef("pay1").equals(docsAndPositionsEnum.getPayload())); assertEquals(1, docsAndPositionsEnum.nextPosition()); assertEquals(4, docsAndPositionsEnum.startOffset()); assertEquals(7, docsAndPositionsEnum.endOffset()); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum.getPayload() == null || new BytesRef("pay2").equals(docsAndPositionsEnum.getPayload())); + assertTrue( + docsAndPositionsEnum.getPayload() == null + || new BytesRef("pay2").equals(docsAndPositionsEnum.getPayload())); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum.nextDoc()); // reuse docsAndPositionsEnum2 = termsEnum.postings(docsAndPositionsEnum, PostingsEnum.OFFSETS); @@ -1711,14 +1782,18 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT assertEquals(0, docsAndPositionsEnum2.startOffset()); assertEquals(3, docsAndPositionsEnum2.endOffset()); // we don't define what it is, but if its something else, we should look into it? - assertTrue(docsAndPositionsEnum2.getPayload() == null || new BytesRef("pay1").equals(docsAndPositionsEnum2.getPayload())); + assertTrue( + docsAndPositionsEnum2.getPayload() == null + || new BytesRef("pay1").equals(docsAndPositionsEnum2.getPayload())); assertEquals(1, docsAndPositionsEnum2.nextPosition()); assertEquals(4, docsAndPositionsEnum2.startOffset()); assertEquals(7, docsAndPositionsEnum2.endOffset()); // we don't define what it is, but if its something else, we should look into it? 
- assertTrue(docsAndPositionsEnum2.getPayload() == null || new BytesRef("pay2").equals(docsAndPositionsEnum2.getPayload())); + assertTrue( + docsAndPositionsEnum2.getPayload() == null + || new BytesRef("pay2").equals(docsAndPositionsEnum2.getPayload())); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - + docsAndPositionsEnum = termsEnum.postings(null, PostingsEnum.ALL); assertNotNull(docsAndPositionsEnum); assertEquals(-1, docsAndPositionsEnum.docID()); @@ -1746,10 +1821,9 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT assertEquals(7, docsAndPositionsEnum2.endOffset()); assertEquals(new BytesRef("pay2"), docsAndPositionsEnum2.getPayload()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc()); - + iw.close(); reader.close(); dir.close(); } - } diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseTestCheckIndex.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseTestCheckIndex.java index 21ccf3b777f..35c5458ece5 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseTestCheckIndex.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseTestCheckIndex.java @@ -21,7 +21,6 @@ import java.io.IOException; import java.io.PrintStream; import java.util.ArrayList; import java.util.List; - import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.FieldType; @@ -33,26 +32,25 @@ import org.apache.lucene.util.LineFileDocs; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; -/** - * Base class for CheckIndex tests. - */ +/** Base class for CheckIndex tests. */ public class BaseTestCheckIndex extends LuceneTestCase { public void testDeletedDocs(Directory dir) throws IOException { - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())) - .setMaxBufferedDocs(2)); - for(int i=0;i<19;i++) { + IndexWriter writer = + new IndexWriter( + dir, newIndexWriterConfig(new MockAnalyzer(random())).setMaxBufferedDocs(2)); + for (int i = 0; i < 19; i++) { Document doc = new Document(); FieldType customType = new FieldType(TextField.TYPE_STORED); customType.setStoreTermVectors(true); customType.setStoreTermVectorPositions(true); customType.setStoreTermVectorOffsets(true); - doc.add(newField("field", "aaa"+i, customType)); + doc.add(newField("field", "aaa" + i, customType)); writer.addDocument(doc); } writer.forceMerge(1); writer.commit(); - writer.deleteDocuments(new Term("field","aaa5")); + writer.deleteDocuments(new Term("field", "aaa5")); writer.close(); ByteArrayOutputStream bos = new ByteArrayOutputStream(1024); @@ -65,12 +63,12 @@ public class BaseTestCheckIndex extends LuceneTestCase { System.out.println(bos.toString(IOUtils.UTF_8)); fail(); } - + final CheckIndex.Status.SegmentInfoStatus seg = indexStatus.segmentInfos.get(0); assertTrue(seg.openReaderPassed); assertNotNull(seg.diagnostics); - + assertNotNull(seg.fieldNormStatus); assertNull(seg.fieldNormStatus.error); assertEquals(1, seg.fieldNormStatus.totFields); @@ -97,11 +95,11 @@ public class BaseTestCheckIndex extends LuceneTestCase { assertTrue(seg.diagnostics.size() > 0); final List onlySegments = new ArrayList<>(); onlySegments.add("_0"); - + assertTrue(checker.checkIndex(onlySegments).clean == true); checker.close(); } - + public void testChecksumsOnly(Directory dir) throws IOException { LineFileDocs lf = new LineFileDocs(random()); MockAnalyzer analyzer = new 
MockAnalyzer(random()); @@ -114,7 +112,7 @@ public class BaseTestCheckIndex extends LuceneTestCase { iw.commit(); iw.close(); lf.close(); - + ByteArrayOutputStream bos = new ByteArrayOutputStream(1024); CheckIndex checker = new CheckIndex(dir); checker.setInfoStream(new PrintStream(bos, false, IOUtils.UTF_8)); @@ -124,7 +122,7 @@ public class BaseTestCheckIndex extends LuceneTestCase { checker.close(); analyzer.close(); } - + public void testChecksumsOnlyVerbose(Directory dir) throws IOException { LineFileDocs lf = new LineFileDocs(random()); MockAnalyzer analyzer = new MockAnalyzer(random()); @@ -137,7 +135,7 @@ public class BaseTestCheckIndex extends LuceneTestCase { iw.commit(); iw.close(); lf.close(); - + ByteArrayOutputStream bos = new ByteArrayOutputStream(1024); CheckIndex checker = new CheckIndex(dir); checker.setInfoStream(new PrintStream(bos, true, IOUtils.UTF_8)); @@ -147,17 +145,19 @@ public class BaseTestCheckIndex extends LuceneTestCase { checker.close(); analyzer.close(); } - + public void testObtainsLock(Directory dir) throws IOException { IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null)); iw.addDocument(new Document()); iw.commit(); - + // keep IW open... should not be able to obtain write lock - expectThrows(LockObtainFailedException.class, () -> { - new CheckIndex(dir); - }); - + expectThrows( + LockObtainFailedException.class, + () -> { + new CheckIndex(dir); + }); + iw.close(); } } diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/DocHelper.java b/lucene/test-framework/src/java/org/apache/lucene/index/DocHelper.java index 418e5e69f68..dd26a5905ff 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/DocHelper.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/DocHelper.java @@ -21,7 +21,6 @@ import java.nio.charset.StandardCharsets; import java.util.HashMap; import java.util.Map; import java.util.Random; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; @@ -37,11 +36,12 @@ import org.apache.lucene.store.Directory; /** Helper functions for tests that handles documents */ public class DocHelper { - + public static final FieldType customType; public static final String FIELD_1_TEXT = "field one text"; public static final String TEXT_FIELD_1_KEY = "textField1"; public static Field textField1; + static { customType = new FieldType(TextField.TYPE_STORED); textField1 = new Field(TEXT_FIELD_1_KEY, FIELD_1_TEXT, customType); @@ -49,10 +49,11 @@ public class DocHelper { public static final FieldType customType2; public static final String FIELD_2_TEXT = "field field field two text"; - //Fields will be lexicographically sorted. So, the order is: field, text, two - public static final int [] FIELD_2_FREQS = {3, 1, 1}; + // Fields will be lexicographically sorted. 
So, the order is: field, text, two + public static final int[] FIELD_2_FREQS = {3, 1, 1}; public static final String TEXT_FIELD_2_KEY = "textField2"; public static Field textField2; + static { customType2 = new FieldType(TextField.TYPE_STORED); customType2.setStoreTermVectors(true); @@ -60,12 +61,12 @@ public class DocHelper { customType2.setStoreTermVectorOffsets(true); textField2 = new Field(TEXT_FIELD_2_KEY, FIELD_2_TEXT, customType2); } - + public static final FieldType customType3; public static final String FIELD_3_TEXT = "aaaNoNorms aaaNoNorms bbbNoNorms"; public static final String TEXT_FIELD_3_KEY = "textField3"; public static Field textField3; - + static { customType3 = new FieldType(TextField.TYPE_STORED); customType3.setOmitNorms(true); @@ -75,6 +76,7 @@ public class DocHelper { public static final String KEYWORD_TEXT = "Keyword"; public static final String KEYWORD_FIELD_KEY = "keyField"; public static Field keyField; + static { keyField = new StringField(KEYWORD_FIELD_KEY, KEYWORD_TEXT, Field.Store.YES); } @@ -83,6 +85,7 @@ public class DocHelper { public static final String NO_NORMS_TEXT = "omitNormsText"; public static final String NO_NORMS_KEY = "omitNorms"; public static Field noNormsField; + static { customType5 = new FieldType(TextField.TYPE_STORED); customType5.setOmitNorms(true); @@ -94,6 +97,7 @@ public class DocHelper { public static final String NO_TF_TEXT = "analyzed with no tf and positions"; public static final String NO_TF_KEY = "omitTermFreqAndPositions"; public static Field noTFField; + static { customType6 = new FieldType(TextField.TYPE_STORED); customType6.setIndexOptions(IndexOptions.DOCS); @@ -104,21 +108,23 @@ public class DocHelper { public static final String UNINDEXED_FIELD_TEXT = "unindexed field text"; public static final String UNINDEXED_FIELD_KEY = "unIndField"; public static Field unIndField; + static { customType7 = new FieldType(); customType7.setStored(true); unIndField = new Field(UNINDEXED_FIELD_KEY, UNINDEXED_FIELD_TEXT, customType7); } - public static final String UNSTORED_1_FIELD_TEXT = "unstored field text"; public static final String UNSTORED_FIELD_1_KEY = "unStoredField1"; - public static Field unStoredField1 = new TextField(UNSTORED_FIELD_1_KEY, UNSTORED_1_FIELD_TEXT, Field.Store.NO); + public static Field unStoredField1 = + new TextField(UNSTORED_FIELD_1_KEY, UNSTORED_1_FIELD_TEXT, Field.Store.NO); public static final FieldType customType8; public static final String UNSTORED_2_FIELD_TEXT = "unstored field text"; public static final String UNSTORED_FIELD_2_KEY = "unStoredField2"; public static Field unStoredField2; + static { customType8 = new FieldType(TextField.TYPE_NOT_STORED); customType8.setStoreTermVectors(true); @@ -126,101 +132,98 @@ public class DocHelper { } public static final String LAZY_FIELD_BINARY_KEY = "lazyFieldBinary"; - public static byte [] LAZY_FIELD_BINARY_BYTES; + public static byte[] LAZY_FIELD_BINARY_BYTES; public static Field lazyFieldBinary; public static final String LAZY_FIELD_KEY = "lazyField"; public static final String LAZY_FIELD_TEXT = "These are some field bytes"; public static Field lazyField = new Field(LAZY_FIELD_KEY, LAZY_FIELD_TEXT, customType); - + public static final String LARGE_LAZY_FIELD_KEY = "largeLazyField"; public static String LARGE_LAZY_FIELD_TEXT; public static Field largeLazyField; - - //From Issue 509 + + // From Issue 509 public static final String FIELD_UTF1_TEXT = "field one \u4e00text"; public static final String TEXT_FIELD_UTF1_KEY = "textField1Utf8"; public static Field 
textUtfField1 = new Field(TEXT_FIELD_UTF1_KEY, FIELD_UTF1_TEXT, customType); public static final String FIELD_UTF2_TEXT = "field field field \u4e00two text"; - //Fields will be lexicographically sorted. So, the order is: field, text, two - public static final int [] FIELD_UTF2_FREQS = {3, 1, 1}; + // Fields will be lexicographically sorted. So, the order is: field, text, two + public static final int[] FIELD_UTF2_FREQS = {3, 1, 1}; public static final String TEXT_FIELD_UTF2_KEY = "textField2Utf8"; public static Field textUtfField2 = new Field(TEXT_FIELD_UTF2_KEY, FIELD_UTF2_TEXT, customType2); - - - - - public static Map nameValues = null; + + public static Map nameValues = null; // ordered list of all the fields... // could use LinkedHashMap for this purpose if Java1.4 is OK - public static Field[] fields = new Field[] { - textField1, - textField2, - textField3, - keyField, - noNormsField, - noTFField, - unIndField, - unStoredField1, - unStoredField2, - textUtfField1, - textUtfField2, - lazyField, - lazyFieldBinary,//placeholder for binary field, since this is null. It must be second to last. - largeLazyField//placeholder for large field, since this is null. It must always be last - }; + public static Field[] fields = + new Field[] { + textField1, + textField2, + textField3, + keyField, + noNormsField, + noTFField, + unIndField, + unStoredField1, + unStoredField2, + textUtfField1, + textUtfField2, + lazyField, + // placeholder for binary field, since this is null. It must be second to last. + lazyFieldBinary, + // placeholder for large field, since this is null. It must always be last + largeLazyField + }; - public static Map all =new HashMap<>(); - public static Map indexed =new HashMap<>(); - public static Map stored =new HashMap<>(); - public static Map unstored=new HashMap<>(); - public static Map unindexed=new HashMap<>(); - public static Map termvector=new HashMap<>(); - public static Map notermvector=new HashMap<>(); - public static Map lazy= new HashMap<>(); - public static Map noNorms=new HashMap<>(); - public static Map noTf=new HashMap<>(); + public static Map all = new HashMap<>(); + public static Map indexed = new HashMap<>(); + public static Map stored = new HashMap<>(); + public static Map unstored = new HashMap<>(); + public static Map unindexed = new HashMap<>(); + public static Map termvector = new HashMap<>(); + public static Map notermvector = new HashMap<>(); + public static Map lazy = new HashMap<>(); + public static Map noNorms = new HashMap<>(); + public static Map noTf = new HashMap<>(); static { - //Initialize the large Lazy Field + // Initialize the large Lazy Field StringBuilder buffer = new StringBuilder(); - for (int i = 0; i < 10000; i++) - { + for (int i = 0; i < 10000; i++) { buffer.append("Lazily loading lengths of language in lieu of laughing "); } - + LAZY_FIELD_BINARY_BYTES = "These are some binary field bytes".getBytes(StandardCharsets.UTF_8); lazyFieldBinary = new StoredField(LAZY_FIELD_BINARY_KEY, LAZY_FIELD_BINARY_BYTES); fields[fields.length - 2] = lazyFieldBinary; LARGE_LAZY_FIELD_TEXT = buffer.toString(); largeLazyField = new Field(LARGE_LAZY_FIELD_KEY, LARGE_LAZY_FIELD_TEXT, customType); fields[fields.length - 1] = largeLazyField; - for (int i=0; i map, IndexableField field) { + private static void add(Map map, IndexableField field) { map.put(field.name(), field); } - - static - { + static { nameValues = new HashMap<>(); nameValues.put(TEXT_FIELD_1_KEY, FIELD_1_TEXT); nameValues.put(TEXT_FIELD_2_KEY, FIELD_2_TEXT); @@ -236,37 +239,45 @@ public 
class DocHelper { nameValues.put(LARGE_LAZY_FIELD_KEY, LARGE_LAZY_FIELD_TEXT); nameValues.put(TEXT_FIELD_UTF1_KEY, FIELD_UTF1_TEXT); nameValues.put(TEXT_FIELD_UTF2_KEY, FIELD_UTF2_TEXT); - } - - /** - * Adds the fields above to a document - * @param doc The document to write - */ - public static void setupDoc(Document doc) { - for (int i=0; i fields; private final boolean negate; private final FieldInfos fieldInfos; @@ -46,7 +45,7 @@ public final class FieldFilterLeafReader extends FilterLeafReader { } fieldInfos = new FieldInfos(filteredInfos.toArray(new FieldInfo[filteredInfos.size()])); } - + boolean hasField(String field) { return negate ^ fields.contains(field); } @@ -70,42 +69,45 @@ public final class FieldFilterLeafReader extends FilterLeafReader { @Override public void document(final int docID, final StoredFieldVisitor visitor) throws IOException { - super.document(docID, new StoredFieldVisitor() { - @Override - public void binaryField(FieldInfo fieldInfo, byte[] value) throws IOException { - visitor.binaryField(fieldInfo, value); - } + super.document( + docID, + new StoredFieldVisitor() { + @Override + public void binaryField(FieldInfo fieldInfo, byte[] value) throws IOException { + visitor.binaryField(fieldInfo, value); + } - @Override - public void stringField(FieldInfo fieldInfo, String value) throws IOException { - visitor.stringField(fieldInfo, Objects.requireNonNull(value, "String value should not be null")); - } + @Override + public void stringField(FieldInfo fieldInfo, String value) throws IOException { + visitor.stringField( + fieldInfo, Objects.requireNonNull(value, "String value should not be null")); + } - @Override - public void intField(FieldInfo fieldInfo, int value) throws IOException { - visitor.intField(fieldInfo, value); - } + @Override + public void intField(FieldInfo fieldInfo, int value) throws IOException { + visitor.intField(fieldInfo, value); + } - @Override - public void longField(FieldInfo fieldInfo, long value) throws IOException { - visitor.longField(fieldInfo, value); - } + @Override + public void longField(FieldInfo fieldInfo, long value) throws IOException { + visitor.longField(fieldInfo, value); + } - @Override - public void floatField(FieldInfo fieldInfo, float value) throws IOException { - visitor.floatField(fieldInfo, value); - } + @Override + public void floatField(FieldInfo fieldInfo, float value) throws IOException { + visitor.floatField(fieldInfo, value); + } - @Override - public void doubleField(FieldInfo fieldInfo, double value) throws IOException { - visitor.doubleField(fieldInfo, value); - } + @Override + public void doubleField(FieldInfo fieldInfo, double value) throws IOException { + visitor.doubleField(fieldInfo, value); + } - @Override - public Status needsField(FieldInfo fieldInfo) throws IOException { - return hasField(fieldInfo.name) ? visitor.needsField(fieldInfo) : Status.NO; - } - }); + @Override + public Status needsField(FieldInfo fieldInfo) throws IOException { + return hasField(fieldInfo.name) ? visitor.needsField(fieldInfo) : Status.NO; + } + }); } @Override @@ -122,12 +124,12 @@ public final class FieldFilterLeafReader extends FilterLeafReader { public SortedDocValues getSortedDocValues(String field) throws IOException { return hasField(field) ? super.getSortedDocValues(field) : null; } - + @Override public SortedNumericDocValues getSortedNumericDocValues(String field) throws IOException { return hasField(field) ? 
super.getSortedNumericDocValues(field) : null; } - + @Override public SortedSetDocValues getSortedSetDocValues(String field) throws IOException { return hasField(field) ? super.getSortedSetDocValues(field) : null; @@ -172,7 +174,6 @@ public final class FieldFilterLeafReader extends FilterLeafReader { public Terms terms(String field) throws IOException { return hasField(field) ? super.terms(field) : null; } - } @Override @@ -184,5 +185,4 @@ public final class FieldFilterLeafReader extends FilterLeafReader { public CacheHelper getReaderCacheHelper() { return null; } - } diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/ForceMergePolicy.java b/lucene/test-framework/src/java/org/apache/lucene/index/ForceMergePolicy.java index 7519b628916..d0460c0c0f5 100755 --- a/lucene/test-framework/src/java/org/apache/lucene/index/ForceMergePolicy.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/ForceMergePolicy.java @@ -20,9 +20,9 @@ import java.io.IOException; /** * A {@link MergePolicy} that only returns forced merges. - *
<p>
    - * NOTE: Use this policy if you wish to disallow background merges but wish to run optimize/forceMerge segment - * merges. + * + *
<p>
    NOTE: Use this policy if you wish to disallow background merges but wish to run + * optimize/forceMerge segment merges. * * @lucene.experimental */ @@ -34,9 +34,9 @@ public final class ForceMergePolicy extends FilterMergePolicy { } @Override - public MergeSpecification findMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos, MergeContext mergeContext) + public MergeSpecification findMerges( + MergeTrigger mergeTrigger, SegmentInfos segmentInfos, MergeContext mergeContext) throws IOException { return null; } - } diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/IndexWriterMaxDocsChanger.java b/lucene/test-framework/src/java/org/apache/lucene/index/IndexWriterMaxDocsChanger.java index 8dec9893483..8a3497174e1 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/IndexWriterMaxDocsChanger.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/IndexWriterMaxDocsChanger.java @@ -20,27 +20,29 @@ import org.apache.lucene.util.LuceneTestCase; /** * Accessor to make some package protected methods in {@link IndexWriter} available for testing. + * * @lucene.internal */ -public final class IndexWriterMaxDocsChanger { - +public final class IndexWriterMaxDocsChanger { + private IndexWriterMaxDocsChanger() {} - + /** - * Tells {@link IndexWriter} to enforce the specified limit as the maximum number of documents in one index; call - * {@link #restoreMaxDocs} once your test is done. + * Tells {@link IndexWriter} to enforce the specified limit as the maximum number of documents in + * one index; call {@link #restoreMaxDocs} once your test is done. + * * @see LuceneTestCase#setIndexWriterMaxDocs(int) */ public static void setMaxDocs(int limit) { IndexWriter.setMaxDocs(limit); } - /** + /** * Returns to the default {@link IndexWriter#MAX_DOCS} limit. + * * @see LuceneTestCase#restoreIndexWriterMaxDocs() */ public static void restoreMaxDocs() { IndexWriter.setMaxDocs(IndexWriter.MAX_DOCS); } - } diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/MergingCodecReader.java b/lucene/test-framework/src/java/org/apache/lucene/index/MergingCodecReader.java index 41c80ad823a..f94c7e8115e 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/MergingCodecReader.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/MergingCodecReader.java @@ -21,28 +21,30 @@ import org.apache.lucene.codecs.StoredFieldsReader; import org.apache.lucene.util.CloseableThreadLocal; /** - * {@link CodecReader} wrapper that performs all reads using the merging - * instance of the index formats. + * {@link CodecReader} wrapper that performs all reads using the merging instance of the index + * formats. 
*/ public class MergingCodecReader extends FilterCodecReader { - private final CloseableThreadLocal fieldsReader = new CloseableThreadLocal() { - @Override - protected StoredFieldsReader initialValue() { - return in.getFieldsReader().getMergeInstance(); - } - }; - private final CloseableThreadLocal normsReader = new CloseableThreadLocal() { - @Override - protected NormsProducer initialValue() { - NormsProducer norms = in.getNormsReader(); - if (norms == null) { - return null; - } else { - return norms.getMergeInstance(); - } - } - }; + private final CloseableThreadLocal fieldsReader = + new CloseableThreadLocal() { + @Override + protected StoredFieldsReader initialValue() { + return in.getFieldsReader().getMergeInstance(); + } + }; + private final CloseableThreadLocal normsReader = + new CloseableThreadLocal() { + @Override + protected NormsProducer initialValue() { + NormsProducer norms = in.getNormsReader(); + if (norms == null) { + return null; + } else { + return norms.getMergeInstance(); + } + } + }; // TODO: other formats too /** Wrap the given instance. */ @@ -71,5 +73,4 @@ public class MergingCodecReader extends FilterCodecReader { // same content, we can delegate return in.getReaderCacheHelper(); } - } diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/MergingDirectoryReaderWrapper.java b/lucene/test-framework/src/java/org/apache/lucene/index/MergingDirectoryReaderWrapper.java index d587bcd6ea2..3950c3dff69 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/MergingDirectoryReaderWrapper.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/MergingDirectoryReaderWrapper.java @@ -19,21 +19,22 @@ package org.apache.lucene.index; import java.io.IOException; /** - * {@link DirectoryReader} wrapper that uses the merge instances of the wrapped - * {@link CodecReader}s. - * NOTE: This class will fail to work if the leaves of the wrapped directory are - * not codec readers. + * {@link DirectoryReader} wrapper that uses the merge instances of the wrapped {@link + * CodecReader}s. NOTE: This class will fail to work if the leaves of the wrapped directory are not + * codec readers. */ public final class MergingDirectoryReaderWrapper extends FilterDirectoryReader { /** Wrap the given directory. 
*/ public MergingDirectoryReaderWrapper(DirectoryReader in) throws IOException { - super(in, new SubReaderWrapper() { - @Override - public LeafReader wrap(LeafReader reader) { - return new MergingCodecReader((CodecReader) reader); - } - }); + super( + in, + new SubReaderWrapper() { + @Override + public LeafReader wrap(LeafReader reader) { + return new MergingCodecReader((CodecReader) reader); + } + }); } @Override @@ -46,5 +47,4 @@ public final class MergingDirectoryReaderWrapper extends FilterDirectoryReader { // doesn't change the content: can delegate return in.getReaderCacheHelper(); } - } diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/MismatchedDirectoryReader.java b/lucene/test-framework/src/java/org/apache/lucene/index/MismatchedDirectoryReader.java index 7fb15810a79..d401f004e8f 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/MismatchedDirectoryReader.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/MismatchedDirectoryReader.java @@ -19,19 +19,16 @@ package org.apache.lucene.index; import java.io.IOException; import java.util.Random; -/** - * A {@link DirectoryReader} that wraps all its subreaders with - * {@link MismatchedLeafReader} - */ +/** A {@link DirectoryReader} that wraps all its subreaders with {@link MismatchedLeafReader} */ public class MismatchedDirectoryReader extends FilterDirectoryReader { static class MismatchedSubReaderWrapper extends SubReaderWrapper { final Random random; - + MismatchedSubReaderWrapper(Random random) { this.random = random; } - + @Override public LeafReader wrap(LeafReader reader) { return new MismatchedLeafReader(reader, random); diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/MismatchedLeafReader.java b/lucene/test-framework/src/java/org/apache/lucene/index/MismatchedLeafReader.java index 3fd866b6961..cced27df37f 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/MismatchedLeafReader.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/MismatchedLeafReader.java @@ -24,18 +24,18 @@ import java.util.Objects; import java.util.Random; /** - * Shuffles field numbers around to try to trip bugs where field numbers - * are assumed to always be consistent across segments. + * Shuffles field numbers around to try to trip bugs where field numbers are assumed to always be + * consistent across segments. */ public class MismatchedLeafReader extends FilterLeafReader { final FieldInfos shuffled; - + /** Creates a new reader which will renumber fields in {@code in} */ public MismatchedLeafReader(LeafReader in, Random random) { super(in); shuffled = shuffleInfos(in.getFieldInfos(), random); } - + @Override public FieldInfos getFieldInfos() { return shuffled; @@ -63,41 +63,41 @@ public class MismatchedLeafReader extends FilterLeafReader { shuffled.add(info); } Collections.shuffle(shuffled, random); - + // now renumber: for (int i = 0; i < shuffled.size(); i++) { FieldInfo oldInfo = shuffled.get(i); // TODO: should we introduce "gaps" too? 
- FieldInfo newInfo = new FieldInfo(oldInfo.name, // name - i, // number - oldInfo.hasVectors(), // storeTermVector - oldInfo.omitsNorms(), // omitNorms - oldInfo.hasPayloads(), // storePayloads - oldInfo.getIndexOptions(), // indexOptions - oldInfo.getDocValuesType(), // docValuesType - oldInfo.getDocValuesGen(), // dvGen - oldInfo.attributes(), // attributes - oldInfo.getPointDimensionCount(), // data dimension count - oldInfo.getPointIndexDimensionCount(), // index dimension count - oldInfo.getPointNumBytes(), // dimension numBytes - oldInfo.getVectorDimension(), // number of dimensions of the field's vector - oldInfo.getVectorSearchStrategy(), // distance function for calculating similarity of the field's vector - oldInfo.isSoftDeletesField()); // used as soft-deletes field + FieldInfo newInfo = + new FieldInfo( + oldInfo.name, // name + i, // number + oldInfo.hasVectors(), // storeTermVector + oldInfo.omitsNorms(), // omitNorms + oldInfo.hasPayloads(), // storePayloads + oldInfo.getIndexOptions(), // indexOptions + oldInfo.getDocValuesType(), // docValuesType + oldInfo.getDocValuesGen(), // dvGen + oldInfo.attributes(), // attributes + oldInfo.getPointDimensionCount(), // data dimension count + oldInfo.getPointIndexDimensionCount(), // index dimension count + oldInfo.getPointNumBytes(), // dimension numBytes + oldInfo.getVectorDimension(), // number of dimensions of the field's vector + // distance function for calculating similarity of the field's vector + oldInfo.getVectorSearchStrategy(), + oldInfo.isSoftDeletesField()); // used as soft-deletes field shuffled.set(i, newInfo); } - + return new FieldInfos(shuffled.toArray(new FieldInfo[shuffled.size()])); } - - /** - * StoredFieldsVisitor that remaps actual field numbers - * to our new shuffled ones. - */ + + /** StoredFieldsVisitor that remaps actual field numbers to our new shuffled ones. */ // TODO: its strange this part of our IR api exposes FieldInfo, // no other "user-accessible" codec apis do this? class MismatchedVisitor extends StoredFieldVisitor { final StoredFieldVisitor in; - + MismatchedVisitor(StoredFieldVisitor in) { this.in = in; } @@ -109,7 +109,8 @@ public class MismatchedLeafReader extends FilterLeafReader { @Override public void stringField(FieldInfo fieldInfo, String value) throws IOException { - in.stringField(renumber(fieldInfo), Objects.requireNonNull(value, "String value should not be null")); + in.stringField( + renumber(fieldInfo), Objects.requireNonNull(value, "String value should not be null")); } @Override @@ -136,7 +137,7 @@ public class MismatchedLeafReader extends FilterLeafReader { public Status needsField(FieldInfo fieldInfo) throws IOException { return in.needsField(renumber(fieldInfo)); } - + FieldInfo renumber(FieldInfo original) { FieldInfo renumbered = shuffled.fieldInfo(original.name); if (renumbered == null) { diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/MockRandomMergePolicy.java b/lucene/test-framework/src/java/org/apache/lucene/index/MockRandomMergePolicy.java index 92ffc732a29..3549bf30141 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/MockRandomMergePolicy.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/MockRandomMergePolicy.java @@ -23,13 +23,10 @@ import java.util.List; import java.util.Map; import java.util.Random; import java.util.Set; - import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; -/** - * MergePolicy that makes random decisions for testing. 
- */ +/** MergePolicy that makes random decisions for testing. */ public class MockRandomMergePolicy extends MergePolicy { private final Random random; boolean doNonBulkMerges = true; @@ -39,24 +36,25 @@ public class MockRandomMergePolicy extends MergePolicy { // unpredictably from threads: this.random = new Random(random.nextLong()); } - - /** - * Set to true if sometimes readers to be merged should be wrapped in a FilterReader - * to mixup bulk merging. + + /** + * Set to true if sometimes readers to be merged should be wrapped in a FilterReader to mixup bulk + * merging. */ public void setDoNonBulkMerges(boolean v) { doNonBulkMerges = v; } @Override - public MergeSpecification findMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos, MergeContext mergeContext) { + public MergeSpecification findMerges( + MergeTrigger mergeTrigger, SegmentInfos segmentInfos, MergeContext mergeContext) { MergeSpecification mergeSpec = null; - //System.out.println("MRMP: findMerges sis=" + segmentInfos); + // System.out.println("MRMP: findMerges sis=" + segmentInfos); List segments = new ArrayList<>(); final Set merging = mergeContext.getMergingSegments(); - for(SegmentCommitInfo sipc : segmentInfos) { + for (SegmentCommitInfo sipc : segmentInfos) { if (!merging.contains(sipc)) { segments.add(sipc); } @@ -72,7 +70,7 @@ public class MockRandomMergePolicy extends MergePolicy { mergeSpec = new MergeSpecification(); final int segsToMerge = TestUtil.nextInt(random, 1, numSegments); if (doNonBulkMerges && random.nextBoolean()) { - mergeSpec.add(new MockRandomOneMerge(segments.subList(0, segsToMerge),random.nextLong())); + mergeSpec.add(new MockRandomOneMerge(segments.subList(0, segsToMerge), random.nextLong())); } else { mergeSpec.add(new OneMerge(segments.subList(0, segsToMerge))); } @@ -83,39 +81,46 @@ public class MockRandomMergePolicy extends MergePolicy { @Override public MergeSpecification findForcedMerges( - SegmentInfos segmentInfos, int maxSegmentCount, Map segmentsToMerge, MergeContext mergeContext) - throws IOException { + SegmentInfos segmentInfos, + int maxSegmentCount, + Map segmentsToMerge, + MergeContext mergeContext) + throws IOException { final List eligibleSegments = new ArrayList<>(); - for(SegmentCommitInfo info : segmentInfos) { + for (SegmentCommitInfo info : segmentInfos) { if (segmentsToMerge.containsKey(info)) { eligibleSegments.add(info); } } - //System.out.println("MRMP: findMerges sis=" + segmentInfos + " eligible=" + eligibleSegments); + // System.out.println("MRMP: findMerges sis=" + segmentInfos + " eligible=" + eligibleSegments); MergeSpecification mergeSpec = null; - if (eligibleSegments.size() > 1 || (eligibleSegments.size() == 1 && isMerged(segmentInfos, eligibleSegments.get(0), mergeContext) == false)) { + if (eligibleSegments.size() > 1 + || (eligibleSegments.size() == 1 + && isMerged(segmentInfos, eligibleSegments.get(0), mergeContext) == false)) { mergeSpec = new MergeSpecification(); // Already shuffled having come out of a set but // shuffle again for good measure: Collections.shuffle(eligibleSegments, random); int upto = 0; - while(upto < eligibleSegments.size()) { - int max = Math.min(10, eligibleSegments.size()-upto); + while (upto < eligibleSegments.size()) { + int max = Math.min(10, eligibleSegments.size() - upto); int inc = max <= 2 ? 
max : TestUtil.nextInt(random, 2, max); if (doNonBulkMerges && random.nextBoolean()) { - mergeSpec.add(new MockRandomOneMerge(eligibleSegments.subList(upto, upto+inc), random.nextLong())); + mergeSpec.add( + new MockRandomOneMerge( + eligibleSegments.subList(upto, upto + inc), random.nextLong())); } else { - mergeSpec.add(new OneMerge(eligibleSegments.subList(upto, upto+inc))); + mergeSpec.add(new OneMerge(eligibleSegments.subList(upto, upto + inc))); } upto += inc; } } if (mergeSpec != null) { - for(OneMerge merge : mergeSpec.merges) { - for(SegmentCommitInfo info : merge.segments) { + for (OneMerge merge : mergeSpec.merges) { + for (SegmentCommitInfo info : merge.segments) { assert segmentsToMerge.containsKey(info); } } @@ -124,12 +129,15 @@ public class MockRandomMergePolicy extends MergePolicy { } @Override - public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos, MergeContext mergeContext) throws IOException { + public MergeSpecification findForcedDeletesMerges( + SegmentInfos segmentInfos, MergeContext mergeContext) throws IOException { return findMerges(null, segmentInfos, mergeContext); } @Override - public MergeSpecification findFullFlushMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos, MergeContext mergeContext) throws IOException { + public MergeSpecification findFullFlushMerges( + MergeTrigger mergeTrigger, SegmentInfos segmentInfos, MergeContext mergeContext) + throws IOException { MergeSpecification mergeSpecification = findMerges(null, segmentInfos, mergeContext); if (mergeSpecification == null) { return null; @@ -161,11 +169,13 @@ public class MockRandomMergePolicy extends MergePolicy { } @Override - public boolean useCompoundFile(SegmentInfos infos, SegmentCommitInfo mergedInfo, MergeContext mergeContext) throws IOException { + public boolean useCompoundFile( + SegmentInfos infos, SegmentCommitInfo mergedInfo, MergeContext mergeContext) + throws IOException { // 80% of the time we create CFS: return random.nextInt(5) != 1; } - + static class MockRandomOneMerge extends OneMerge { final Random r; @@ -184,29 +194,35 @@ public class MockRandomMergePolicy extends MergePolicy { if (thingToDo == 0) { // simple no-op FilterReader if (LuceneTestCase.VERBOSE) { - System.out.println("NOTE: MockRandomMergePolicy now swaps in a SlowCodecReaderWrapper for merging reader=" + reader); + System.out.println( + "NOTE: MockRandomMergePolicy now swaps in a SlowCodecReaderWrapper for merging reader=" + + reader); } - return SlowCodecReaderWrapper.wrap(new FilterLeafReader(new MergeReaderWrapper(reader)) { + return SlowCodecReaderWrapper.wrap( + new FilterLeafReader(new MergeReaderWrapper(reader)) { - @Override - public CacheHelper getCoreCacheHelper() { - return in.getCoreCacheHelper(); - } + @Override + public CacheHelper getCoreCacheHelper() { + return in.getCoreCacheHelper(); + } - @Override - public CacheHelper getReaderCacheHelper() { - return in.getReaderCacheHelper(); - } - }); + @Override + public CacheHelper getReaderCacheHelper() { + return in.getReaderCacheHelper(); + } + }); } else if (thingToDo == 1) { // renumber fields // NOTE: currently this only "blocks" bulk merges just by - // being a FilterReader. But it might find bugs elsewhere, + // being a FilterReader. But it might find bugs elsewhere, // and maybe the situation can be improved in the future. 
if (LuceneTestCase.VERBOSE) { - System.out.println("NOTE: MockRandomMergePolicy now swaps in a MismatchedLeafReader for merging reader=" + reader); + System.out.println( + "NOTE: MockRandomMergePolicy now swaps in a MismatchedLeafReader for merging reader=" + + reader); } - return SlowCodecReaderWrapper.wrap(new MismatchedLeafReader(new MergeReaderWrapper(reader), r)); + return SlowCodecReaderWrapper.wrap( + new MismatchedLeafReader(new MergeReaderWrapper(reader), r)); } else { // otherwise, reader is unchanged return reader; diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/OwnCacheKeyMultiReader.java b/lucene/test-framework/src/java/org/apache/lucene/index/OwnCacheKeyMultiReader.java index 346a4e65bc8..9f97228bbb6 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/OwnCacheKeyMultiReader.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/OwnCacheKeyMultiReader.java @@ -19,32 +19,28 @@ package org.apache.lucene.index; import java.io.IOException; import java.util.Set; import java.util.concurrent.CopyOnWriteArraySet; - import org.apache.lucene.util.IOUtils; -/** - * A {@link MultiReader} that has its own cache key, occasionally useful for - * testing purposes. - */ +/** A {@link MultiReader} that has its own cache key, occasionally useful for testing purposes. */ public final class OwnCacheKeyMultiReader extends MultiReader { private final Set readerClosedListeners = new CopyOnWriteArraySet<>(); - private final CacheHelper cacheHelper = new CacheHelper() { - private final CacheKey cacheKey = new CacheKey(); + private final CacheHelper cacheHelper = + new CacheHelper() { + private final CacheKey cacheKey = new CacheKey(); - @Override - public CacheKey getKey() { - return cacheKey; - } + @Override + public CacheKey getKey() { + return cacheKey; + } - @Override - public void addClosedListener(ClosedListener listener) { - ensureOpen(); - readerClosedListeners.add(listener); - } - - }; + @Override + public void addClosedListener(ClosedListener listener) { + ensureOpen(); + readerClosedListeners.add(listener); + } + }; /** Sole constructor. */ public OwnCacheKeyMultiReader(IndexReader... subReaders) throws IOException { @@ -58,9 +54,8 @@ public final class OwnCacheKeyMultiReader extends MultiReader { @Override void notifyReaderClosedListeners() throws IOException { - synchronized(readerClosedListeners) { + synchronized (readerClosedListeners) { IOUtils.applyToAll(readerClosedListeners, l -> l.onClose(cacheHelper.getKey())); } } - } diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/PerThreadPKLookup.java b/lucene/test-framework/src/java/org/apache/lucene/index/PerThreadPKLookup.java index c417a886450..51ba6ca753c 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/PerThreadPKLookup.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/PerThreadPKLookup.java @@ -21,16 +21,15 @@ import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.List; - import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; -/** Utility class to do efficient primary-key (only 1 doc contains the - * given term) lookups by segment, re-using the enums. This class is - * not thread safe, so it is the caller's job to create and use one - * instance of this per thread. Do not use this if a term may appear - * in more than one document! It will only return the first one it - * finds. 
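A usage sketch for the lookup class documented above; the reader, the "id" field name, and the lookup(BytesRef) call are assumptions based on the class's primary-key contract rather than lines shown in this hunk:

    // One instance per thread; it reuses the per-segment TermsEnums across calls.
    PerThreadPKLookup pk = new PerThreadPKLookup(reader, "id");
    int docID = pk.lookup(new BytesRef("doc-17")); // -1 when no document has this id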
*/ +/** + * Utility class to do efficient primary-key (only 1 doc contains the given term) lookups by + * segment, re-using the enums. This class is not thread safe, so it is the caller's job to create + * and use one instance of this per thread. Do not use this if a term may appear in more than one + * document! It will only return the first one it finds. + */ public class PerThreadPKLookup { protected final TermsEnum[] termsEnums; @@ -45,12 +44,14 @@ public class PerThreadPKLookup { List leaves = new ArrayList<>(r.leaves()); // Larger segments are more likely to have the id, so we sort largest to smallest by numDocs: - Collections.sort(leaves, new Comparator() { - @Override - public int compare(LeafReaderContext c1, LeafReaderContext c2) { - return c2.reader().numDocs() - c1.reader().numDocs(); - } - }); + Collections.sort( + leaves, + new Comparator() { + @Override + public int compare(LeafReaderContext c1, LeafReaderContext c2) { + return c2.reader().numDocs() - c1.reader().numDocs(); + } + }); termsEnums = new TermsEnum[leaves.size()]; postingsEnums = new PostingsEnum[leaves.size()]; @@ -58,7 +59,7 @@ public class PerThreadPKLookup { docBases = new int[leaves.size()]; int numSegs = 0; boolean hasDeletions = false; - for(int i=0;i max? - if (Arrays.compareUnsigned(other.maxPackedValue, offset, offset + bytesPerDim, maxPackedValue, offset, offset + bytesPerDim) > 0) { + if (Arrays.compareUnsigned( + other.maxPackedValue, + offset, + offset + bytesPerDim, + maxPackedValue, + offset, + offset + bytesPerDim) + > 0) { return false; } } @@ -62,14 +78,14 @@ public class PointsStackTracker { this.numDims = numDims; this.bytesPerDim = bytesPerDim; } - + public void onCompare(byte[] minPackedValue, byte[] maxPackedValue) { Cell cell = new Cell(minPackedValue, maxPackedValue); // Pop stack: - while (stack.size() > 0 && stack.get(stack.size()-1).contains(cell) == false) { - stack.remove(stack.size()-1); - //System.out.println(" pop"); + while (stack.size() > 0 && stack.get(stack.size() - 1).contains(cell) == false) { + stack.remove(stack.size() - 1); + // System.out.println(" pop"); } // Push stack: diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java b/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java index 05cd84fbf88..171384a56c7 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java @@ -25,7 +25,6 @@ import java.util.List; import java.util.Map; import java.util.Random; import java.util.Set; - import org.apache.lucene.codecs.DocValuesFormat; import org.apache.lucene.codecs.PointsFormat; import org.apache.lucene.codecs.PointsReader; @@ -55,34 +54,35 @@ import org.apache.lucene.util.bkd.BKDWriter; /** * Codec that assigns per-field random postings formats. - *
<p>
    - * The same field/format assignment will happen regardless of order, - * a hash is computed up front that determines the mapping. - * This means fields can be put into things like HashSets and added to - * documents in different orders and the test will still be deterministic - * and reproducable. + * + *
<p>
    The same field/format assignment will happen regardless of order, a hash is computed up front + * that determines the mapping. This means fields can be put into things like HashSets and added to + * documents in different orders and the test will still be deterministic and reproducable. */ public class RandomCodec extends AssertingCodec { /** Shuffled list of postings formats to use for new mappings */ private List formats = new ArrayList<>(); - + /** Shuffled list of docvalues formats to use for new mappings */ private List dvFormats = new ArrayList<>(); - + /** unique set of format names this codec knows about */ public Set formatNames = new HashSet<>(); - + /** unique set of docvalues format names this codec knows about */ public Set dvFormatNames = new HashSet<>(); public final Set avoidCodecs; /** memorized field to postingsformat mappings */ - // note: we have to sync this map even though it's just for debugging/toString, - // otherwise DWPT's .toString() calls that iterate over the map can + // note: we have to sync this map even though it's just for debugging/toString, + // otherwise DWPT's .toString() calls that iterate over the map can // cause concurrentmodificationexception if indexwriter's infostream is on - private Map previousMappings = Collections.synchronizedMap(new HashMap()); - private Map previousDVMappings = Collections.synchronizedMap(new HashMap()); + private Map previousMappings = + Collections.synchronizedMap(new HashMap()); + + private Map previousDVMappings = + Collections.synchronizedMap(new HashMap()); private final int perFieldSeed; // a little messy: randomize the default codec's parameters here. @@ -95,63 +95,70 @@ public class RandomCodec extends AssertingCodec { @Override public PointsFormat pointsFormat() { - return new AssertingPointsFormat(new PointsFormat() { - @Override - public PointsWriter fieldsWriter(SegmentWriteState writeState) throws IOException { - - // Randomize how BKDWriter chooses its splits: - - return new Lucene86PointsWriter(writeState, maxPointsInLeafNode, maxMBSortInHeap) { + return new AssertingPointsFormat( + new PointsFormat() { @Override - public void writeField(FieldInfo fieldInfo, PointsReader reader) throws IOException { + public PointsWriter fieldsWriter(SegmentWriteState writeState) throws IOException { - PointValues values = reader.getValues(fieldInfo.name); + // Randomize how BKDWriter chooses its splits: - BKDConfig config = new BKDConfig(fieldInfo.getPointDimensionCount(), - fieldInfo.getPointIndexDimensionCount(), - fieldInfo.getPointNumBytes(), - maxPointsInLeafNode); + return new Lucene86PointsWriter(writeState, maxPointsInLeafNode, maxMBSortInHeap) { + @Override + public void writeField(FieldInfo fieldInfo, PointsReader reader) throws IOException { + PointValues values = reader.getValues(fieldInfo.name); - try (BKDWriter writer = new RandomlySplittingBKDWriter(writeState.segmentInfo.maxDoc(), - writeState.directory, - writeState.segmentInfo.name, - config, - maxMBSortInHeap, - values.size(), - bkdSplitRandomSeed ^ fieldInfo.name.hashCode())) { - values.intersect(new IntersectVisitor() { - @Override - public void visit(int docID) { - throw new IllegalStateException(); - } + BKDConfig config = + new BKDConfig( + fieldInfo.getPointDimensionCount(), + fieldInfo.getPointIndexDimensionCount(), + fieldInfo.getPointNumBytes(), + maxPointsInLeafNode); - public void visit(int docID, byte[] packedValue) throws IOException { - writer.add(packedValue, docID); - } + try (BKDWriter writer = + new RandomlySplittingBKDWriter( + 
writeState.segmentInfo.maxDoc(), + writeState.directory, + writeState.segmentInfo.name, + config, + maxMBSortInHeap, + values.size(), + bkdSplitRandomSeed ^ fieldInfo.name.hashCode())) { + values.intersect( + new IntersectVisitor() { + @Override + public void visit(int docID) { + throw new IllegalStateException(); + } - @Override - public PointValues.Relation compare(byte[] minPackedValue, byte[] maxPackedValue) { - return PointValues.Relation.CELL_CROSSES_QUERY; - } - }); + public void visit(int docID, byte[] packedValue) throws IOException { + writer.add(packedValue, docID); + } - // We could have 0 points on merge since all docs with dimensional fields may be deleted: - Runnable finalizer = writer.finish(metaOut, indexOut, dataOut); - if (finalizer != null) { - metaOut.writeInt(fieldInfo.number); - finalizer.run(); + @Override + public PointValues.Relation compare( + byte[] minPackedValue, byte[] maxPackedValue) { + return PointValues.Relation.CELL_CROSSES_QUERY; + } + }); + + // We could have 0 points on merge since all docs with dimensional fields may be + // deleted: + Runnable finalizer = writer.finish(metaOut, indexOut, dataOut); + if (finalizer != null) { + metaOut.writeInt(fieldInfo.number); + finalizer.run(); + } } } + }; } - }; - } - @Override - public PointsReader fieldsReader(SegmentReadState readState) throws IOException { - return new Lucene86PointsReader(readState); - } - }); + @Override + public PointsReader fieldsReader(SegmentReadState readState) throws IOException { + return new Lucene86PointsReader(readState); + } + }); } @Override @@ -161,7 +168,7 @@ public class RandomCodec extends AssertingCodec { codec = formats.get(Math.abs(perFieldSeed ^ name.hashCode()) % formats.size()); previousMappings.put(name, codec); // Safety: - assert previousMappings.size() < 10000: "test went insane"; + assert previousMappings.size() < 10000 : "test went insane"; } return codec; } @@ -173,7 +180,7 @@ public class RandomCodec extends AssertingCodec { codec = dvFormats.get(Math.abs(perFieldSeed ^ name.hashCode()) % dvFormats.size()); previousDVMappings.put(name, codec); // Safety: - assert previousDVMappings.size() < 10000: "test went insane"; + assert previousDVMappings.size() < 10000 : "test went insane"; } return codec; } @@ -184,31 +191,41 @@ public class RandomCodec extends AssertingCodec { // TODO: make it possible to specify min/max iterms per // block via CL: int minItemsPerBlock = TestUtil.nextInt(random, 2, 100); - int maxItemsPerBlock = 2*(Math.max(2, minItemsPerBlock-1)) + random.nextInt(100); + int maxItemsPerBlock = 2 * (Math.max(2, minItemsPerBlock - 1)) + random.nextInt(100); int lowFreqCutoff = TestUtil.nextInt(random, 2, 100); maxPointsInLeafNode = TestUtil.nextInt(random, 16, 2048); - maxMBSortInHeap = 5.0 + (3*random.nextDouble()); + maxMBSortInHeap = 5.0 + (3 * random.nextDouble()); bkdSplitRandomSeed = random.nextInt(); - add(avoidCodecs, + add( + avoidCodecs, TestUtil.getDefaultPostingsFormat(minItemsPerBlock, maxItemsPerBlock), new FSTPostingsFormat(), - new DirectPostingsFormat(LuceneTestCase.rarely(random) ? 1 : (LuceneTestCase.rarely(random) ? Integer.MAX_VALUE : maxItemsPerBlock), - LuceneTestCase.rarely(random) ? 1 : (LuceneTestCase.rarely(random) ? Integer.MAX_VALUE : lowFreqCutoff)), - //TODO as a PostingsFormat which wraps others, we should allow TestBloomFilteredLucenePostings to be constructed - //with a choice of concrete PostingsFormats. Maybe useful to have a generic means of marking and dealing - //with such "wrapper" classes? 
- new TestBloomFilteredLucenePostings(), + new DirectPostingsFormat( + LuceneTestCase.rarely(random) + ? 1 + : (LuceneTestCase.rarely(random) ? Integer.MAX_VALUE : maxItemsPerBlock), + LuceneTestCase.rarely(random) + ? 1 + : (LuceneTestCase.rarely(random) ? Integer.MAX_VALUE : lowFreqCutoff)), + // TODO as a PostingsFormat which wraps others, we should allow + // TestBloomFilteredLucenePostings to be constructed + // with a choice of concrete PostingsFormats. Maybe useful to have a generic means of + // marking and dealing + // with such "wrapper" classes? + new TestBloomFilteredLucenePostings(), new MockRandomPostingsFormat(random), new BlockTreeOrdsPostingsFormat(minItemsPerBlock, maxItemsPerBlock), new LuceneFixedGap(TestUtil.nextInt(random, 1, 1000)), new LuceneVarGapFixedInterval(TestUtil.nextInt(random, 1, 1000)), - new LuceneVarGapDocFreqInterval(TestUtil.nextInt(random, 1, 100), TestUtil.nextInt(random, 1, 1000)), + new LuceneVarGapDocFreqInterval( + TestUtil.nextInt(random, 1, 100), TestUtil.nextInt(random, 1, 1000)), TestUtil.getDefaultPostingsFormat(), new AssertingPostingsFormat()); - - addDocValues(avoidCodecs, + + addDocValues( + avoidCodecs, TestUtil.getDefaultDocValuesFormat(), new Lucene80DocValuesFormat(Lucene80DocValuesFormat.Mode.BEST_COMPRESSION), new AssertingDocValuesFormat()); @@ -226,7 +243,7 @@ public class RandomCodec extends AssertingCodec { } public RandomCodec(Random random) { - this(random, Collections. emptySet()); + this(random, Collections.emptySet()); } private final void add(Set avoidCodecs, PostingsFormat... postings) { @@ -237,7 +254,7 @@ public class RandomCodec extends AssertingCodec { } } } - + private final void addDocValues(Set avoidCodecs, DocValuesFormat... docvalues) { for (DocValuesFormat d : docvalues) { if (!avoidCodecs.contains(d.getName())) { @@ -249,20 +266,34 @@ public class RandomCodec extends AssertingCodec { @Override public String toString() { - return super.toString() + ": " + previousMappings.toString() + - ", docValues:" + previousDVMappings.toString() + - ", maxPointsInLeafNode=" + maxPointsInLeafNode + - ", maxMBSortInHeap=" + maxMBSortInHeap; + return super.toString() + + ": " + + previousMappings.toString() + + ", docValues:" + + previousDVMappings.toString() + + ", maxPointsInLeafNode=" + + maxPointsInLeafNode + + ", maxMBSortInHeap=" + + maxMBSortInHeap; } - /** Just like {@link BKDWriter} except it evilly picks random ways to split cells on - * recursion to try to provoke geo APIs that get upset at fun rectangles. */ + /** + * Just like {@link BKDWriter} except it evilly picks random ways to split cells on recursion to + * try to provoke geo APIs that get upset at fun rectangles. 
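The randomly-splitting BKD writer described above is wired in through RandomCodec's points format, so a test exercises it simply by installing the codec; a minimal sketch, assuming the usual test-framework setup:

    Random random = new Random(7); // illustrative seed
    IndexWriterConfig conf = new IndexWriterConfig(new MockAnalyzer(random));
    conf.setCodec(new RandomCodec(random)); // per-field formats picked by hashing field names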
+ */ private static class RandomlySplittingBKDWriter extends BKDWriter { final Random random; - public RandomlySplittingBKDWriter(int maxDoc, Directory tempDir, String tempFileNamePrefix, BKDConfig config, double maxMBSortInHeap, - long totalPointCount, int randomSeed) throws IOException { + public RandomlySplittingBKDWriter( + int maxDoc, + Directory tempDir, + String tempFileNamePrefix, + BKDConfig config, + double maxMBSortInHeap, + long totalPointCount, + int randomSeed) + throws IOException { super(maxDoc, tempDir, tempFileNamePrefix, config, maxMBSortInHeap, totalPointCount); this.random = new Random(randomSeed); } @@ -272,9 +303,13 @@ public class RandomCodec extends AssertingCodec { return singleValuePerDoc && (new Random(randomSeed).nextBoolean()); } - private static boolean getRandomLongOrds(long totalPointCount, boolean singleValuePerDoc, int randomSeed) { - // Always use long ords if we have too many points, but sometimes randomly use it anyway when singleValuePerDoc is false: - return totalPointCount > Integer.MAX_VALUE || (getRandomSingleValuePerDoc(singleValuePerDoc, randomSeed) == false && new Random(randomSeed).nextBoolean()); + private static boolean getRandomLongOrds( + long totalPointCount, boolean singleValuePerDoc, int randomSeed) { + // Always use long ords if we have too many points, but sometimes randomly use it anyway when + // singleValuePerDoc is false: + return totalPointCount > Integer.MAX_VALUE + || (getRandomSingleValuePerDoc(singleValuePerDoc, randomSeed) == false + && new Random(randomSeed).nextBoolean()); } private static long getRandomOfflineSorterBufferMB(int randomSeed) { @@ -287,7 +322,8 @@ public class RandomCodec extends AssertingCodec { @Override protected int split(byte[] minPackedValue, byte[] maxPackedValue, int[] parentDims) { - // BKD normally defaults by the widest dimension, to try to make as squarish cells as possible, but we just pick a random one ;) + // BKD normally defaults by the widest dimension, to try to make as squarish cells as + // possible, but we just pick a random one ;) return random.nextInt(config.numIndexDims); } } diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java b/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java index 7c70480a865..acf21776313 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java @@ -23,7 +23,6 @@ import java.util.Iterator; import java.util.List; import java.util.Random; import java.util.concurrent.CopyOnWriteArrayList; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Field; @@ -37,12 +36,11 @@ import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.NullInfoStream; import org.apache.lucene.util.TestUtil; -/** Silly class that randomizes the indexing experience. EG - * it may swap in a different merge policy/scheduler; may - * commit periodically; may or may not forceMerge in the end, - * may flush by doc count instead of RAM, etc. +/** + * Silly class that randomizes the indexing experience. EG it may swap in a different merge + * policy/scheduler; may commit periodically; may or may not forceMerge in the end, may flush by doc + * count instead of RAM, etc. 
*/ - public class RandomIndexWriter implements Closeable { public final IndexWriter w; @@ -55,24 +53,33 @@ public class RandomIndexWriter implements Closeable { private final double softDeletesRatio; private final LiveIndexWriterConfig config; - /** Returns an indexwriter that randomly mixes up thread scheduling (by yielding at test points) */ - public static IndexWriter mockIndexWriter(Directory dir, IndexWriterConfig conf, Random r) throws IOException { + /** + * Returns an indexwriter that randomly mixes up thread scheduling (by yielding at test points) + */ + public static IndexWriter mockIndexWriter(Directory dir, IndexWriterConfig conf, Random r) + throws IOException { // Randomly calls Thread.yield so we mixup thread scheduling final Random random = new Random(r.nextLong()); - return mockIndexWriter(r, dir, conf, new TestPoint() { - @Override - public void apply(String message) { - if (random.nextInt(4) == 2) - Thread.yield(); - } - }); + return mockIndexWriter( + r, + dir, + conf, + new TestPoint() { + @Override + public void apply(String message) { + if (random.nextInt(4) == 2) Thread.yield(); + } + }); } - + /** Returns an indexwriter that enables the specified test point */ - public static IndexWriter mockIndexWriter(Random r, Directory dir, IndexWriterConfig conf, TestPoint testPoint) throws IOException { + public static IndexWriter mockIndexWriter( + Random r, Directory dir, IndexWriterConfig conf, TestPoint testPoint) throws IOException { conf.setInfoStream(new TestPointInfoStream(conf.getInfoStream(), testPoint)); DirectoryReader reader = null; - if (r.nextBoolean() && DirectoryReader.indexExists(dir) && conf.getOpenMode() != IndexWriterConfig.OpenMode.CREATE) { + if (r.nextBoolean() + && DirectoryReader.indexExists(dir) + && conf.getOpenMode() != IndexWriterConfig.OpenMode.CREATE) { if (LuceneTestCase.VERBOSE) { System.out.println("RIW: open writer from reader"); } @@ -83,12 +90,13 @@ public class RandomIndexWriter implements Closeable { IndexWriter iw; boolean success = false; try { - iw = new IndexWriter(dir, conf) { - @Override - protected boolean isEnableTestPoints() { - return true; - } - }; + iw = + new IndexWriter(dir, conf) { + @Override + protected boolean isEnableTestPoints() { + return true; + } + }; success = true; } finally { if (reader != null) { @@ -104,30 +112,34 @@ public class RandomIndexWriter implements Closeable { /** create a RandomIndexWriter with a random config: Uses MockAnalyzer */ public RandomIndexWriter(Random r, Directory dir) throws IOException { - this(r, dir, LuceneTestCase.newIndexWriterConfig(r, new MockAnalyzer(r)), true, r.nextBoolean()); + this( + r, dir, LuceneTestCase.newIndexWriterConfig(r, new MockAnalyzer(r)), true, r.nextBoolean()); } - + /** create a RandomIndexWriter with a random config */ public RandomIndexWriter(Random r, Directory dir, Analyzer a) throws IOException { this(r, dir, LuceneTestCase.newIndexWriterConfig(r, a)); } - + /** create a RandomIndexWriter with the provided config */ public RandomIndexWriter(Random r, Directory dir, IndexWriterConfig c) throws IOException { this(r, dir, c, false, r.nextBoolean()); } /** create a RandomIndexWriter with the provided config */ - public RandomIndexWriter(Random r, Directory dir, IndexWriterConfig c, boolean useSoftDeletes) throws IOException { + public RandomIndexWriter(Random r, Directory dir, IndexWriterConfig c, boolean useSoftDeletes) + throws IOException { this(r, dir, c, false, useSoftDeletes); } - - private RandomIndexWriter(Random r, Directory dir, IndexWriterConfig 
c, boolean closeAnalyzer, boolean useSoftDeletes) throws IOException { + + private RandomIndexWriter( + Random r, Directory dir, IndexWriterConfig c, boolean closeAnalyzer, boolean useSoftDeletes) + throws IOException { // TODO: this should be solved in a different way; Random should not be shared (!). this.r = new Random(r.nextLong()); if (useSoftDeletes) { c.setSoftDeletesField("___soft_deletes"); - softDeletesRatio = 1.d / (double)1 + r.nextInt(10); + softDeletesRatio = 1.d / (double) 1 + r.nextInt(10); } else { softDeletesRatio = 0d; } @@ -146,10 +158,11 @@ public class RandomIndexWriter implements Closeable { // Make sure we sometimes test indices that don't get // any forced merges: doRandomForceMerge = !(c.getMergePolicy() instanceof NoMergePolicy) && r.nextBoolean(); - } - + } + /** * Adds a Document. + * * @see IndexWriter#addDocument(Iterable) */ public long addDocument(final Iterable doc) throws IOException { @@ -160,39 +173,41 @@ public class RandomIndexWriter implements Closeable { // (but we need to clone them), and only when // getReader, commit, etc. are called, we do an // addDocuments? Would be better testing. - seqNo = w.addDocuments(new Iterable>() { + seqNo = + w.addDocuments( + new Iterable>() { - @Override - public Iterator> iterator() { - return new Iterator>() { + @Override + public Iterator> iterator() { + return new Iterator>() { - boolean done; - - @Override - public boolean hasNext() { - return !done; - } + boolean done; - @Override - public void remove() { - throw new UnsupportedOperationException(); - } + @Override + public boolean hasNext() { + return !done; + } - @Override - public Iterable next() { - if (done) { - throw new IllegalStateException(); - } - done = true; - return doc; - } - }; - } - }); + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + + @Override + public Iterable next() { + if (done) { + throw new IllegalStateException(); + } + done = true; + return doc; + } + }; + } + }); } else { seqNo = w.addDocument(doc); } - + maybeFlushOrCommit(); return seqNo; @@ -224,29 +239,35 @@ public class RandomIndexWriter implements Closeable { private void flushAllBuffersSequentially() throws IOException { if (LuceneTestCase.VERBOSE) { - System.out.println("RIW.add/updateDocument: now flushing the largest writer at docCount=" + docCount); + System.out.println( + "RIW.add/updateDocument: now flushing the largest writer at docCount=" + docCount); } int threadPoolSize = w.docWriter.perThreadPool.size(); - int numFlushes = Math.min(1, r.nextInt(threadPoolSize+1)); + int numFlushes = Math.min(1, r.nextInt(threadPoolSize + 1)); for (int i = 0; i < numFlushes; i++) { if (w.flushNextBuffer() == false) { break; // stop once we didn't flush anything } } } - - public long addDocuments(Iterable> docs) throws IOException { + + public long addDocuments(Iterable> docs) + throws IOException { LuceneTestCase.maybeChangeLiveIndexWriterConfig(r, config); long seqNo = w.addDocuments(docs); maybeFlushOrCommit(); return seqNo; } - public long updateDocuments(Term delTerm, Iterable> docs) throws IOException { + public long updateDocuments( + Term delTerm, Iterable> docs) + throws IOException { LuceneTestCase.maybeChangeLiveIndexWriterConfig(r, config); long seqNo; if (useSoftDeletes()) { - seqNo = w.softUpdateDocuments(delTerm, docs, new NumericDocValuesField(config.getSoftDeletesField(), 1)); + seqNo = + w.softUpdateDocuments( + delTerm, docs, new NumericDocValuesField(config.getSoftDeletesField(), 1)); } else { seqNo = 
w.updateDocuments(delTerm, docs); } @@ -260,16 +281,22 @@ public class RandomIndexWriter implements Closeable { /** * Updates a document. + * * @see IndexWriter#updateDocument(Term, Iterable) */ - public long updateDocument(Term t, final Iterable doc) throws IOException { + public long updateDocument(Term t, final Iterable doc) + throws IOException { LuceneTestCase.maybeChangeLiveIndexWriterConfig(r, config); final long seqNo; if (useSoftDeletes()) { if (r.nextInt(5) == 3) { - seqNo = w.softUpdateDocuments(t, Arrays.asList(doc), new NumericDocValuesField(config.getSoftDeletesField(), 1)); + seqNo = + w.softUpdateDocuments( + t, Arrays.asList(doc), new NumericDocValuesField(config.getSoftDeletesField(), 1)); } else { - seqNo = w.softUpdateDocument(t, doc, new NumericDocValuesField(config.getSoftDeletesField(), 1)); + seqNo = + w.softUpdateDocument( + t, doc, new NumericDocValuesField(config.getSoftDeletesField(), 1)); } } else { if (r.nextInt(5) == 3) { @@ -282,7 +309,7 @@ public class RandomIndexWriter implements Closeable { return seqNo; } - + public long addIndexes(Directory... dirs) throws IOException { LuceneTestCase.maybeChangeLiveIndexWriterConfig(r, config); return w.addIndexes(dirs); @@ -292,22 +319,22 @@ public class RandomIndexWriter implements Closeable { LuceneTestCase.maybeChangeLiveIndexWriterConfig(r, config); return w.addIndexes(readers); } - + public long updateNumericDocValue(Term term, String field, Long value) throws IOException { LuceneTestCase.maybeChangeLiveIndexWriterConfig(r, config); return w.updateNumericDocValue(term, field, value); } - + public long updateBinaryDocValue(Term term, String field, BytesRef value) throws IOException { LuceneTestCase.maybeChangeLiveIndexWriterConfig(r, config); return w.updateBinaryDocValue(term, field, value); } - + public long updateDocValues(Term term, Field... 
updates) throws IOException { LuceneTestCase.maybeChangeLiveIndexWriterConfig(r, config); return w.updateDocValues(term, updates); } - + public long deleteDocuments(Term term) throws IOException { LuceneTestCase.maybeChangeLiveIndexWriterConfig(r, config); return w.deleteDocuments(term); @@ -321,18 +348,20 @@ public class RandomIndexWriter implements Closeable { public long commit() throws IOException { return commit(r.nextInt(10) == 0); } - + public long commit(boolean flushConcurrently) throws IOException { LuceneTestCase.maybeChangeLiveIndexWriterConfig(r, config); if (flushConcurrently) { List throwableList = new CopyOnWriteArrayList<>(); - Thread thread = new Thread(() -> { - try { - flushAllBuffersSequentially(); - } catch (Throwable e) { - throwableList.add(e); - } - }); + Thread thread = + new Thread( + () -> { + try { + flushAllBuffersSequentially(); + } catch (Throwable e) { + throwableList.add(e); + } + }); thread.start(); try { return w.commit(); @@ -353,14 +382,13 @@ public class RandomIndexWriter implements Closeable { primary.addSuppressed(throwableList.get(i)); } if (primary instanceof IOException) { - throw (IOException)primary; + throw (IOException) primary; } else if (primary instanceof RuntimeException) { - throw (RuntimeException)primary; + throw (RuntimeException) primary; } else { throw new AssertionError(primary); } } - } return w.commit(); } @@ -416,7 +444,8 @@ public class RandomIndexWriter implements Closeable { } w.forceMerge(limit); if (limit == 1 || (config.getMergePolicy() instanceof TieredMergePolicy) == false) { - assert !doRandomForceMergeAssert || w.getSegmentCount() <= limit : "limit=" + limit + " actual=" + w.getSegmentCount(); + assert !doRandomForceMergeAssert || w.getSegmentCount() <= limit + : "limit=" + limit + " actual=" + w.getSegmentCount(); } } else { if (LuceneTestCase.VERBOSE) { @@ -427,7 +456,8 @@ public class RandomIndexWriter implements Closeable { } } - public DirectoryReader getReader(boolean applyDeletions, boolean writeAllDeletes) throws IOException { + public DirectoryReader getReader(boolean applyDeletions, boolean writeAllDeletes) + throws IOException { LuceneTestCase.maybeChangeLiveIndexWriterConfig(r, config); getReaderCalled = true; if (r.nextInt(20) == 2) { @@ -462,6 +492,7 @@ public class RandomIndexWriter implements Closeable { /** * Close this writer. + * * @see IndexWriter#close() */ @Override @@ -476,7 +507,8 @@ public class RandomIndexWriter implements Closeable { if (getReaderCalled == false && r.nextInt(8) == 2 && w.isClosed() == false) { doRandomForceMerge(); if (config.getCommitOnClose() == false) { - // index may have changed, must commit the changes, or otherwise they are discarded by the call to close() + // index may have changed, must commit the changes, or otherwise they are discarded by the + // call to close() w.commit(); } } @@ -492,22 +524,23 @@ public class RandomIndexWriter implements Closeable { /** * Forces a forceMerge. - *
<p>
    - * NOTE: this should be avoided in tests unless absolutely necessary, - * as it will result in less test coverage. + * + *
<p>
    NOTE: this should be avoided in tests unless absolutely necessary, as it will result in less + * test coverage. + * * @see IndexWriter#forceMerge(int) */ public void forceMerge(int maxSegmentCount) throws IOException { LuceneTestCase.maybeChangeLiveIndexWriterConfig(r, config); w.forceMerge(maxSegmentCount); } - + static final class TestPointInfoStream extends InfoStream { private final InfoStream delegate; private final TestPoint testPoint; - + public TestPointInfoStream(InfoStream delegate, TestPoint testPoint) { - this.delegate = delegate == null ? new NullInfoStream(): delegate; + this.delegate = delegate == null ? new NullInfoStream() : delegate; this.testPoint = testPoint; } @@ -525,13 +558,13 @@ public class RandomIndexWriter implements Closeable { delegate.message(component, message); } } - + @Override public boolean isEnabled(String component) { return "TP".equals(component) || delegate.isEnabled(component); } } - + /** Writes all in-memory segments to the {@link Directory}. */ public final void flush() throws IOException { w.flush(); @@ -539,7 +572,8 @@ public class RandomIndexWriter implements Closeable { /** * Simple interface that is executed for each TP {@link InfoStream} component - * message. See also {@link RandomIndexWriter#mockIndexWriter(Random, Directory, IndexWriterConfig, TestPoint)} + * message. See also {@link RandomIndexWriter#mockIndexWriter(Random, Directory, + * IndexWriterConfig, TestPoint)} */ public interface TestPoint { void apply(String message); diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/RandomPostingsTester.java b/lucene/test-framework/src/java/org/apache/lucene/index/RandomPostingsTester.java index b420e7f251d..cc8a7a78518 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/RandomPostingsTester.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/RandomPostingsTester.java @@ -41,7 +41,6 @@ import java.util.SortedMap; import java.util.TreeMap; import java.util.function.IntToLongFunction; import java.util.stream.Collectors; - import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.FieldsConsumer; import org.apache.lucene.codecs.FieldsProducer; @@ -99,7 +98,7 @@ public class RandomPostingsTester { private long totalPayloadBytes; // Holds all postings: - private Map> fields; + private Map> fields; private FieldInfos fieldInfos; @@ -127,13 +126,26 @@ public class RandomPostingsTester { continue; } - fieldInfoArray[fieldUpto] = new FieldInfo(field, fieldUpto, false, false, true, - IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS, - DocValuesType.NONE, -1, new HashMap<>(), - 0, 0, 0, 0, VectorValues.SearchStrategy.NONE, false); + fieldInfoArray[fieldUpto] = + new FieldInfo( + field, + fieldUpto, + false, + false, + true, + IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS, + DocValuesType.NONE, + -1, + new HashMap<>(), + 0, + 0, + 0, + 0, + VectorValues.SearchStrategy.NONE, + false); fieldUpto++; - SortedMap postings = new TreeMap<>(); + SortedMap postings = new TreeMap<>(); fields.put(field, postings); Set seenTerms = new HashSet<>(); @@ -146,7 +158,8 @@ public class RandomPostingsTester { while (postings.size() < numTerms) { int termUpto = postings.size(); - // Cannot contain surrogates else default Java string sort order (by UTF16 code unit) is different from Lucene: + // Cannot contain surrogates else default Java string sort order (by UTF16 code unit) is + // different from Lucene: String term = TestUtil.randomSimpleString(random); if (seenTerms.contains(term)) { continue; 
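A sketch of wiring a custom RandomIndexWriter.TestPoint through mockIndexWriter, per the TestPoint javadoc in the hunk above; the java.util.concurrent.atomic.AtomicInteger counter and the lambda body are illustrative assumptions:

    AtomicInteger testPointHits = new AtomicInteger();
    IndexWriter w =
        RandomIndexWriter.mockIndexWriter(
            random, dir, conf, message -> testPointHits.incrementAndGet());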
@@ -175,7 +188,7 @@ public class RandomPostingsTester { PostingsEnum postingsEnum = getSeedPostings(term, termSeed, IndexOptions.DOCS, true); int doc; int lastDoc = 0; - while((doc = postingsEnum.nextDoc()) != PostingsEnum.NO_MORE_DOCS) { + while ((doc = postingsEnum.nextDoc()) != PostingsEnum.NO_MORE_DOCS) { lastDoc = doc; } maxDoc = Math.max(lastDoc, maxDoc); @@ -183,7 +196,7 @@ public class RandomPostingsTester { // assign ords long ord = 0; - for(SeedAndOrd ent : postings.values()) { + for (SeedAndOrd ent : postings.values()) { ent.ord = ord++; } } @@ -194,20 +207,26 @@ public class RandomPostingsTester { maxDoc++; allTerms = new ArrayList<>(); - for(Map.Entry> fieldEnt : fields.entrySet()) { + for (Map.Entry> fieldEnt : fields.entrySet()) { String field = fieldEnt.getKey(); long ord = 0; - for(Map.Entry termEnt : fieldEnt.getValue().entrySet()) { + for (Map.Entry termEnt : fieldEnt.getValue().entrySet()) { allTerms.add(new FieldAndTerm(field, termEnt.getKey(), ord++)); } } if (LuceneTestCase.VERBOSE) { - System.out.println("TEST: done init postings; " + allTerms.size() + " total terms, across " + fieldInfos.size() + " fields"); + System.out.println( + "TEST: done init postings; " + + allTerms.size() + + " total terms, across " + + fieldInfos.size() + + " fields"); } } - public static SeedPostings getSeedPostings(String term, long seed, IndexOptions options, boolean allowPayloads) { + public static SeedPostings getSeedPostings( + String term, long seed, IndexOptions options, boolean allowPayloads) { int minDocFreq, maxDocFreq; if (term.startsWith("big_")) { minDocFreq = LuceneTestCase.RANDOM_MULTIPLIER * 50000; @@ -226,8 +245,7 @@ public class RandomPostingsTester { return new SeedPostings(seed, minDocFreq, maxDocFreq, options, allowPayloads); } - /** Given the same random seed this always enumerates the - * same random postings */ + /** Given the same random seed this always enumerates the same random postings */ public static class SeedPostings extends PostingsEnum { // Used only to generate docIDs; this way if you pull w/ // or w/o positions you get the same docID sequence: @@ -252,7 +270,8 @@ public class RandomPostingsTester { private int posSpacing; private int posUpto; - public SeedPostings(long seed, int minDocFreq, int maxDocFreq, IndexOptions options, boolean allowPayloads) { + public SeedPostings( + long seed, int minDocFreq, int maxDocFreq, IndexOptions options, boolean allowPayloads) { random = new Random(seed); docRandom = new Random(random.nextLong()); docFreq = TestUtil.nextInt(random, minDocFreq, maxDocFreq); @@ -276,7 +295,7 @@ public class RandomPostingsTester { @Override public int nextDoc() { - while(true) { + while (true) { _nextDoc(); return docID; } @@ -287,7 +306,7 @@ public class RandomPostingsTester { docID = 0; } // Must consume random: - while(posUpto < freq) { + while (posUpto < freq) { nextPosition(); } @@ -338,7 +357,7 @@ public class RandomPostingsTester { return -1; } assert posUpto < freq; - + if (posUpto == 0 && random.nextBoolean()) { // Sometimes index pos = 0 } else if (posSpacing == 1) { @@ -350,16 +369,16 @@ public class RandomPostingsTester { if (payloadSize != 0) { if (fixedPayloads) { payload.length = payloadSize; - random.nextBytes(payload.bytes); + random.nextBytes(payload.bytes); } else { int thisPayloadSize = random.nextInt(payloadSize); if (thisPayloadSize != 0) { payload.length = payloadSize; - random.nextBytes(payload.bytes); + random.nextBytes(payload.bytes); } else { payload.length = 0; } - } + } } else { payload.length = 0; } @@ 
-394,11 +413,11 @@ public class RandomPostingsTester { public int advance(int target) throws IOException { return slowAdvance(target); } - + @Override public long cost() { return docFreq; - } + } } /** Holds one field, term and ord. */ @@ -424,12 +443,16 @@ public class RandomPostingsTester { } private static class SeedFields extends Fields { - final Map> fields; + final Map> fields; final FieldInfos fieldInfos; final IndexOptions maxAllowed; final boolean allowPayloads; - public SeedFields(Map> fields, FieldInfos fieldInfos, IndexOptions maxAllowed, boolean allowPayloads) { + public SeedFields( + Map> fields, + FieldInfos fieldInfos, + IndexOptions maxAllowed, + boolean allowPayloads) { this.fields = fields; this.fieldInfos = fieldInfos; this.maxAllowed = maxAllowed; @@ -443,7 +466,7 @@ public class RandomPostingsTester { @Override public Terms terms(String field) { - SortedMap terms = fields.get(field); + SortedMap terms = fields.get(field); if (terms == null) { return null; } else { @@ -458,12 +481,16 @@ public class RandomPostingsTester { } private static class SeedTerms extends Terms { - final SortedMap terms; + final SortedMap terms; final FieldInfo fieldInfo; final IndexOptions maxAllowed; final boolean allowPayloads; - public SeedTerms(SortedMap terms, FieldInfo fieldInfo, IndexOptions maxAllowed, boolean allowPayloads) { + public SeedTerms( + SortedMap terms, + FieldInfo fieldInfo, + IndexOptions maxAllowed, + boolean allowPayloads) { this.terms = terms; this.fieldInfo = fieldInfo; this.maxAllowed = maxAllowed; @@ -505,14 +532,17 @@ public class RandomPostingsTester { @Override public boolean hasOffsets() { - return fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0; + return fieldInfo + .getIndexOptions() + .compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) + >= 0; } - + @Override public boolean hasPositions() { return fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0; } - + @Override public boolean hasPayloads() { return allowPayloads && fieldInfo.hasPayloads(); @@ -520,15 +550,16 @@ public class RandomPostingsTester { } private static class SeedTermsEnum extends BaseTermsEnum { - final SortedMap terms; + final SortedMap terms; final IndexOptions maxAllowed; final boolean allowPayloads; - private Iterator> iterator; + private Iterator> iterator; - private Map.Entry current; + private Map.Entry current; - public SeedTermsEnum(SortedMap terms, IndexOptions maxAllowed, boolean allowPayloads) { + public SeedTermsEnum( + SortedMap terms, IndexOptions maxAllowed, boolean allowPayloads) { this.terms = terms; this.maxAllowed = maxAllowed; this.allowPayloads = allowPayloads; @@ -540,7 +571,7 @@ public class RandomPostingsTester { @Override public SeekStatus seekCeil(BytesRef text) { - SortedMap tailMap = terms.tailMap(text); + SortedMap tailMap = terms.tailMap(text); if (tailMap.isEmpty()) { return SeekStatus.END; } else { @@ -595,17 +626,20 @@ public class RandomPostingsTester { if (maxAllowed.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) { return null; } - if (PostingsEnum.featureRequested(flags, PostingsEnum.OFFSETS) && maxAllowed.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) < 0) { + if (PostingsEnum.featureRequested(flags, PostingsEnum.OFFSETS) + && maxAllowed.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) < 0) { return null; } if (PostingsEnum.featureRequested(flags, PostingsEnum.PAYLOADS) && allowPayloads == false) { return null; } } - 
if (PostingsEnum.featureRequested(flags, PostingsEnum.FREQS) && maxAllowed.compareTo(IndexOptions.DOCS_AND_FREQS) < 0) { + if (PostingsEnum.featureRequested(flags, PostingsEnum.FREQS) + && maxAllowed.compareTo(IndexOptions.DOCS_AND_FREQS) < 0) { return null; } - return getSeedPostings(current.getKey().utf8ToString(), current.getValue().seed, maxAllowed, allowPayloads); + return getSeedPostings( + current.getKey().utf8ToString(), current.getValue().seed, maxAllowed, allowPayloads); } @Override @@ -623,8 +657,26 @@ public class RandomPostingsTester { // maxAllowed = the "highest" we can index, but we will still // randomly index at lower IndexOption - public FieldsProducer buildIndex(Codec codec, Directory dir, IndexOptions maxAllowed, boolean allowPayloads, boolean alwaysTestMax) throws IOException { - SegmentInfo segmentInfo = new SegmentInfo(dir, Version.LATEST, Version.LATEST, "_0", maxDoc, false, codec, Collections.emptyMap(), StringHelper.randomId(), new HashMap<>(), null); + public FieldsProducer buildIndex( + Codec codec, + Directory dir, + IndexOptions maxAllowed, + boolean allowPayloads, + boolean alwaysTestMax) + throws IOException { + SegmentInfo segmentInfo = + new SegmentInfo( + dir, + Version.LATEST, + Version.LATEST, + "_0", + maxDoc, + false, + codec, + Collections.emptyMap(), + StringHelper.randomId(), + new HashMap<>(), + null); int maxIndexOption = Arrays.asList(IndexOptions.values()).indexOf(maxAllowed); if (LuceneTestCase.VERBOSE) { @@ -634,98 +686,114 @@ public class RandomPostingsTester { // TODO use allowPayloads FieldInfo[] newFieldInfoArray = new FieldInfo[fields.size()]; - for(int fieldUpto=0;fieldUpto= 0 && allowPayloads; + IndexOptions indexOptions = + IndexOptions.values()[ + alwaysTestMax ? maxIndexOption : TestUtil.nextInt(random, 1, maxIndexOption)]; + boolean doPayloads = + indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0 && allowPayloads; - newFieldInfoArray[fieldUpto] = new FieldInfo(oldFieldInfo.name, - fieldUpto, - false, - false, - doPayloads, - indexOptions, - DocValuesType.NONE, - -1, - new HashMap<>(), - 0, 0, 0, 0, VectorValues.SearchStrategy.NONE, false); + newFieldInfoArray[fieldUpto] = + new FieldInfo( + oldFieldInfo.name, + fieldUpto, + false, + false, + doPayloads, + indexOptions, + DocValuesType.NONE, + -1, + new HashMap<>(), + 0, + 0, + 0, + 0, + VectorValues.SearchStrategy.NONE, + false); } FieldInfos newFieldInfos = new FieldInfos(newFieldInfoArray); // Estimate that flushed segment size will be 25% of // what we use in RAM: - long bytes = totalPostings * 8 + totalPayloadBytes; + long bytes = totalPostings * 8 + totalPayloadBytes; - SegmentWriteState writeState = new SegmentWriteState(null, dir, - segmentInfo, newFieldInfos, - null, new IOContext(new FlushInfo(maxDoc, bytes))); + SegmentWriteState writeState = + new SegmentWriteState( + null, + dir, + segmentInfo, + newFieldInfos, + null, + new IOContext(new FlushInfo(maxDoc, bytes))); Fields seedFields = new SeedFields(fields, newFieldInfos, maxAllowed, allowPayloads); - NormsProducer fakeNorms = new NormsProducer() { + NormsProducer fakeNorms = + new NormsProducer() { - @Override - public void close() throws IOException {} + @Override + public void close() throws IOException {} - @Override - public long ramBytesUsed() { - return 0; - } + @Override + public long ramBytesUsed() { + return 0; + } - @Override - public NumericDocValues getNorms(FieldInfo field) throws IOException { - if (newFieldInfos.fieldInfo(field.number).hasNorms()) { - return new NumericDocValues() 
{ - - int doc = -1; - - @Override - public int nextDoc() throws IOException { - if (++doc == segmentInfo.maxDoc()) { - return doc = NO_MORE_DOCS; - } - return doc; - } - - @Override - public int docID() { - return doc; - } - - @Override - public long cost() { - return segmentInfo.maxDoc(); - } - - @Override - public int advance(int target) throws IOException { - return doc = target >= segmentInfo.maxDoc() ? DocIdSetIterator.NO_MORE_DOCS : target; - } - - @Override - public boolean advanceExact(int target) throws IOException { - doc = target; - return true; - } - - @Override - public long longValue() throws IOException { - return DOC_TO_NORM.applyAsLong(doc); - } - }; - } else { - return null; - } - } + @Override + public NumericDocValues getNorms(FieldInfo field) throws IOException { + if (newFieldInfos.fieldInfo(field.number).hasNorms()) { + return new NumericDocValues() { - @Override - public void checkIntegrity() throws IOException {} - - }; + int doc = -1; + + @Override + public int nextDoc() throws IOException { + if (++doc == segmentInfo.maxDoc()) { + return doc = NO_MORE_DOCS; + } + return doc; + } + + @Override + public int docID() { + return doc; + } + + @Override + public long cost() { + return segmentInfo.maxDoc(); + } + + @Override + public int advance(int target) throws IOException { + return doc = + target >= segmentInfo.maxDoc() ? DocIdSetIterator.NO_MORE_DOCS : target; + } + + @Override + public boolean advanceExact(int target) throws IOException { + doc = target; + return true; + } + + @Override + public long longValue() throws IOException { + return DOC_TO_NORM.applyAsLong(doc); + } + }; + } else { + return null; + } + } + + @Override + public void checkIntegrity() throws IOException {} + }; FieldsConsumer consumer = codec.postingsFormat().fieldsConsumer(writeState); boolean success = false; try { @@ -741,32 +809,33 @@ public class RandomPostingsTester { if (LuceneTestCase.VERBOSE) { System.out.println("TEST: after indexing: files="); - for(String file : dir.listAll()) { + for (String file : dir.listAll()) { System.out.println(" " + file + ": " + dir.fileLength(file) + " bytes"); } } currentFieldInfos = newFieldInfos; - SegmentReadState readState = new SegmentReadState(dir, segmentInfo, newFieldInfos, IOContext.READ); + SegmentReadState readState = + new SegmentReadState(dir, segmentInfo, newFieldInfos, IOContext.READ); return codec.postingsFormat().fieldsProducer(readState); } - private void verifyEnum(Random random, - ThreadState threadState, - String field, - BytesRef term, - TermsEnum termsEnum, + private void verifyEnum( + Random random, + ThreadState threadState, + String field, + BytesRef term, + TermsEnum termsEnum, - // Maximum options (docs/freqs/positions/offsets) to test: - IndexOptions maxTestOptions, + // Maximum options (docs/freqs/positions/offsets) to test: + IndexOptions maxTestOptions, + IndexOptions maxIndexOptions, + EnumSet
<p>
    All other filesystem operations are passed thru as normal. */ public class DisableFsyncFS extends FilterFileSystemProvider { - - /** - * Create a new instance, wrapping {@code delegate}. - */ + + /** Create a new instance, wrapping {@code delegate}. */ public DisableFsyncFS(FileSystem delegate) { super("disablefsync://", delegate); } @Override - public FileChannel newFileChannel(Path path, Set options, FileAttribute... attrs) throws IOException { + public FileChannel newFileChannel( + Path path, Set options, FileAttribute... attrs) throws IOException { return new FilterFileChannel(super.newFileChannel(path, options, attrs)) { @Override public void force(boolean metaData) throws IOException {} @@ -49,8 +48,14 @@ public class DisableFsyncFS extends FilterFileSystemProvider { } @Override - public AsynchronousFileChannel newAsynchronousFileChannel(Path path, Set options, ExecutorService executor, FileAttribute... attrs) throws IOException { - return new FilterAsynchronousFileChannel(super.newAsynchronousFileChannel(path, options, executor, attrs)) { + public AsynchronousFileChannel newAsynchronousFileChannel( + Path path, + Set options, + ExecutorService executor, + FileAttribute... attrs) + throws IOException { + return new FilterAsynchronousFileChannel( + super.newAsynchronousFileChannel(path, options, executor, attrs)) { @Override public void force(boolean metaData) throws IOException {} }; diff --git a/lucene/test-framework/src/java/org/apache/lucene/mockfile/ExtrasFS.java b/lucene/test-framework/src/java/org/apache/lucene/mockfile/ExtrasFS.java index f8bba9c3594..5016473849c 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/mockfile/ExtrasFS.java +++ b/lucene/test-framework/src/java/org/apache/lucene/mockfile/ExtrasFS.java @@ -22,30 +22,29 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.attribute.FileAttribute; -/** +/** * Adds extra files/subdirectories when directories are created. - *
<p>
    - * Lucene shouldn't care about these, but sometimes operating systems - * create special files themselves (.DS_Store, thumbs.db, .nfsXXX, ...), - * so we add them and see what breaks. - *
<p>
    - * When a directory is created, sometimes an "extra" file or directory - * will be included with it (use {@link #isExtra(String)} to check if it's one - * of those files). - * <p> - * All other filesystem operations are delegated as normal. + *
<p>
    Lucene shouldn't care about these, but sometimes operating systems create special files + * themselves (.DS_Store, thumbs.db, .nfsXXX, ...), so we add them and see what breaks. + * + *
<p>
    When a directory is created, sometimes an "extra" file or directory will be included with it + * (use {@link #isExtra(String)} to check if it's one of those files). + * + *
<p>
    All other filesystem operations are delegated as normal. */ public class ExtrasFS extends FilterFileSystemProvider { - private final static String EXTRA_FILE_NAME = "extra0"; + private static final String EXTRA_FILE_NAME = "extra0"; final boolean active; final boolean createDirectory; - - /** + + /** * Create a new instance, wrapping {@code delegate}. + * * @param active {@code true} if we should create extra files - * @param createDirectory {@code true} if we should create directories instead of files. - * Ignored if {@code active} is {@code false}. + * @param createDirectory {@code true} if we should create directories instead of files. Ignored + * if {@code active} is {@code false}. */ public ExtrasFS(FileSystem delegate, boolean active, boolean createDirectory) { super("extras://", delegate); @@ -55,9 +54,9 @@ public class ExtrasFS extends FilterFileSystemProvider { @Override public void createDirectory(Path dir, FileAttribute... attrs) throws IOException { - super.createDirectory(dir, attrs); + super.createDirectory(dir, attrs); // ok, we created the directory successfully. - + if (active) { // lets add a bogus file... if this fails, we don't care, its best effort. try { @@ -67,19 +66,17 @@ public class ExtrasFS extends FilterFileSystemProvider { } else { Files.createFile(target); } - } catch (Exception ignored) { + } catch (Exception ignored) { // best effort } } } - + // TODO: would be great if we overrode attributes, so file size was always zero for - // our fake files. But this is tricky because its hooked into several places. + // our fake files. But this is tricky because its hooked into several places. // Currently MDW has a hack so we don't break disk full tests. - /** - * @return Return true if {@code fileName} is one of the extra files added by this class. - */ + /** @return Return true if {@code fileName} is one of the extra files added by this class. */ public static boolean isExtra(String fileName) { return fileName.equals(EXTRA_FILE_NAME); } diff --git a/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterAsynchronousFileChannel.java b/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterAsynchronousFileChannel.java index 080b6e6f7da..950e17d5dbf 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterAsynchronousFileChannel.java +++ b/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterAsynchronousFileChannel.java @@ -24,30 +24,27 @@ import java.nio.channels.FileLock; import java.util.Objects; import java.util.concurrent.Future; -/** - * A {@code FilterAsynchronousFileChannel} contains another - * {@code AsynchronousFileChannel}, which it uses as its basic - * source of data, possibly transforming the data along the - * way or providing additional functionality. +/** + * A {@code FilterAsynchronousFileChannel} contains another {@code AsynchronousFileChannel}, which + * it uses as its basic source of data, possibly transforming the data along the way or providing + * additional functionality. */ public class FilterAsynchronousFileChannel extends AsynchronousFileChannel { - - /** - * The underlying {@code AsynchronousFileChannel} instance. - */ + + /** The underlying {@code AsynchronousFileChannel} instance. */ protected final AsynchronousFileChannel delegate; - + /** - * Construct a {@code FilterAsynchronousFileChannel} based on - * the specified base channel. - *

    - * Note that base channel is closed if this channel is closed. + * Construct a {@code FilterAsynchronousFileChannel} based on the specified base channel. + * + *

    Note that base channel is closed if this channel is closed. + * * @param delegate specified base channel. */ public FilterAsynchronousFileChannel(AsynchronousFileChannel delegate) { this.delegate = Objects.requireNonNull(delegate); } - + @Override public void close() throws IOException { delegate.close(); @@ -75,7 +72,12 @@ public class FilterAsynchronousFileChannel extends AsynchronousFileChannel { } @Override - public void lock(long position, long size, boolean shared, A attachment, CompletionHandler handler) { + public void lock( + long position, + long size, + boolean shared, + A attachment, + CompletionHandler handler) { delegate.lock(position, size, shared, attachment, handler); } @@ -90,7 +92,8 @@ public class FilterAsynchronousFileChannel extends AsynchronousFileChannel { } @Override - public void read(ByteBuffer dst, long position, A attachment, CompletionHandler handler) { + public void read( + ByteBuffer dst, long position, A attachment, CompletionHandler handler) { delegate.read(dst, position, attachment, handler); } @@ -100,7 +103,8 @@ public class FilterAsynchronousFileChannel extends AsynchronousFileChannel { } @Override - public void write(ByteBuffer src, long position, A attachment, CompletionHandler handler) { + public void write( + ByteBuffer src, long position, A attachment, CompletionHandler handler) { delegate.write(src, position, attachment, handler); } diff --git a/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterDirectoryStream.java b/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterDirectoryStream.java index 8caf4fc2305..95639741507 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterDirectoryStream.java +++ b/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterDirectoryStream.java @@ -23,29 +23,24 @@ import java.nio.file.Path; import java.util.Iterator; import java.util.Objects; -/** - * A {@code FilterDirectoryStream} contains another - * {@code DirectoryStream}, which it uses as its basic - * source of data, possibly transforming the data along the - * way or providing additional functionality. +/** + * A {@code FilterDirectoryStream} contains another {@code DirectoryStream}, which it uses as its + * basic source of data, possibly transforming the data along the way or providing additional + * functionality. */ public class FilterDirectoryStream implements DirectoryStream { - - /** - * The underlying {@code DirectoryStream} instance. - */ + + /** The underlying {@code DirectoryStream} instance. */ protected final DirectoryStream delegate; - - /** - * The underlying {@code FileSystem} instance. - */ + + /** The underlying {@code FileSystem} instance. */ protected final FileSystem fileSystem; /** - * Construct a {@code FilterDirectoryStream} based on - * the specified base stream. - *

    - * Note that base stream is closed if this stream is closed. + * Construct a {@code FilterDirectoryStream} based on the specified base stream. + * + *

    Note that base stream is closed if this stream is closed. + * * @param delegate specified base stream. */ public FilterDirectoryStream(DirectoryStream delegate, FileSystem fileSystem) { @@ -66,10 +61,12 @@ public class FilterDirectoryStream implements DirectoryStream { public boolean hasNext() { return delegateIterator.hasNext(); } + @Override public Path next() { return new FilterPath(delegateIterator.next(), fileSystem); } + @Override public void remove() { delegateIterator.remove(); diff --git a/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterFileChannel.java b/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterFileChannel.java index ccc6e7abc9e..df2f1d4d83c 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterFileChannel.java +++ b/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterFileChannel.java @@ -25,24 +25,21 @@ import java.nio.channels.ReadableByteChannel; import java.nio.channels.WritableByteChannel; import java.util.Objects; -/** - * A {@code FilterFileChannel} contains another - * {@code FileChannel}, which it uses as its basic - * source of data, possibly transforming the data along the - * way or providing additional functionality. +/** + * A {@code FilterFileChannel} contains another {@code FileChannel}, which it uses as its basic + * source of data, possibly transforming the data along the way or providing additional + * functionality. */ public abstract class FilterFileChannel extends FileChannel { - - /** - * The underlying {@code FileChannel} instance. - */ + + /** The underlying {@code FileChannel} instance. */ protected final FileChannel delegate; - + /** - * Construct a {@code FilterFileChannel} based on - * the specified base channel. - *

    - * Note that base channel is closed if this channel is closed. + * Construct a {@code FilterFileChannel} based on the specified base channel. + * + *

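All of these Filter* classes share one decorator shape: keep a delegate, forward everything, and override only what matters. A hedged sketch — CountingFileChannel is hypothetical, and it assumes FilterFileChannel forwards read(ByteBuffer) to the delegate just like its other operations:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import org.apache.lucene.mockfile.FilterFileChannel;

// Hypothetical subclass: counts bytes flowing through read(); every other
// FileChannel operation keeps falling through to the delegate unchanged.
class CountingFileChannel extends FilterFileChannel {
  long bytesRead;

  CountingFileChannel(FileChannel delegate) {
    super(delegate);
  }

  @Override
  public int read(ByteBuffer dst) throws IOException {
    int n = super.read(dst);
    if (n > 0) {
      bytesRead += n;
    }
    return n;
  }
}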
    Note that base channel is closed if this channel is closed. + * * @param delegate specified base channel. */ public FilterFileChannel(FileChannel delegate) { diff --git a/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterFileStore.java b/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterFileStore.java index 423b32d75ba..dd936435797 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterFileStore.java +++ b/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterFileStore.java @@ -22,27 +22,21 @@ import java.nio.file.attribute.FileAttributeView; import java.nio.file.attribute.FileStoreAttributeView; import java.util.Objects; -/** - * A {@code FilterFileStore} contains another - * {@code FileStore}, which it uses as its basic - * source of data, possibly transforming the data along the - * way or providing additional functionality. +/** + * A {@code FilterFileStore} contains another {@code FileStore}, which it uses as its basic source + * of data, possibly transforming the data along the way or providing additional functionality. */ public abstract class FilterFileStore extends FileStore { - - /** - * The underlying {@code FileStore} instance. - */ + + /** The underlying {@code FileStore} instance. */ protected final FileStore delegate; - - /** - * URI scheme used for this instance. - */ + + /** URI scheme used for this instance. */ protected final String scheme; - + /** - * Construct a {@code FilterFileStore} based on - * the specified base store. + * Construct a {@code FilterFileStore} based on the specified base store. + * * @param delegate specified base store. * @param scheme URI scheme identifying this instance. */ diff --git a/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterFileSystem.java b/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterFileSystem.java index e24506d7cd5..b3737ad52fc 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterFileSystem.java +++ b/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterFileSystem.java @@ -29,31 +29,24 @@ import java.util.Iterator; import java.util.Objects; import java.util.Set; -/** - * A {@code FilterFileSystem} contains another - * {@code FileSystem}, which it uses as its basic - * source of data, possibly transforming the data along the - * way or providing additional functionality. +/** + * A {@code FilterFileSystem} contains another {@code FileSystem}, which it uses as its basic source + * of data, possibly transforming the data along the way or providing additional functionality. */ public class FilterFileSystem extends FileSystem { - - /** - * FileSystemProvider that created this FilterFileSystem - */ + + /** FileSystemProvider that created this FilterFileSystem */ protected final FilterFileSystemProvider parent; - - /** - * The underlying {@code FileSystem} instance. - */ + + /** The underlying {@code FileSystem} instance. */ protected final FileSystem delegate; - + /** - * Construct a {@code FilterFileSystem} based on - * the specified base filesystem. - *

    - * Note that base filesystem is closed if this filesystem is closed, - * however the default filesystem provider will never be closed, it doesn't - * support that. + * Construct a {@code FilterFileSystem} based on the specified base filesystem. + * + *

    Note that base filesystem is closed if this filesystem is closed, however the default + * filesystem provider will never be closed, it doesn't support that. + * * @param delegate specified base channel. */ public FilterFileSystem(FilterFileSystemProvider parent, FileSystem delegate) { @@ -109,7 +102,7 @@ public class FilterFileSystem extends FileSystem { public Path next() { return new FilterPath(iterator.next(), FilterFileSystem.this); } - + @Override public void remove() { iterator.remove(); @@ -133,7 +126,7 @@ public class FilterFileSystem extends FileSystem { public FileStore next() { return new FilterFileStore(iterator.next(), parent.getScheme()) {}; } - + @Override public void remove() { iterator.remove(); @@ -157,7 +150,7 @@ public class FilterFileSystem extends FileSystem { final PathMatcher matcher = delegate.getPathMatcher(syntaxAndPattern); return path -> { if (path instanceof FilterPath) { - return matcher.matches(((FilterPath)path).delegate); + return matcher.matches(((FilterPath) path).delegate); } return false; }; diff --git a/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterFileSystemProvider.java b/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterFileSystemProvider.java index 8a7ff754e01..b9f771a952e 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterFileSystemProvider.java +++ b/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterFileSystemProvider.java @@ -42,31 +42,24 @@ import java.util.Objects; import java.util.Set; import java.util.concurrent.ExecutorService; -/** - * A {@code FilterFileSystemProvider} contains another - * {@code FileSystemProvider}, which it uses as its basic - * source of data, possibly transforming the data along the - * way or providing additional functionality. +/** + * A {@code FilterFileSystemProvider} contains another {@code FileSystemProvider}, which it uses as + * its basic source of data, possibly transforming the data along the way or providing additional + * functionality. */ public abstract class FilterFileSystemProvider extends FileSystemProvider { - - /** - * The underlying {@code FileSystemProvider}. - */ + + /** The underlying {@code FileSystemProvider}. */ protected final FileSystemProvider delegate; - /** - * The underlying {@code FileSystem} instance. - */ + /** The underlying {@code FileSystem} instance. */ protected FileSystem fileSystem; - /** - * The URI scheme for this provider. - */ + /** The URI scheme for this provider. */ protected final String scheme; - + /** - * Construct a {@code FilterFileSystemProvider} indicated by - * the specified {@code scheme} and wrapping functionality of the - * provider of the specified base filesystem. + * Construct a {@code FilterFileSystemProvider} indicated by the specified {@code scheme} and + * wrapping functionality of the provider of the specified base filesystem. + * * @param scheme URI scheme * @param delegateInstance specified base filesystem. */ @@ -76,11 +69,11 @@ public abstract class FilterFileSystemProvider extends FileSystemProvider { this.delegate = delegateInstance.provider(); this.fileSystem = new FilterFileSystem(this, delegateInstance); } - + /** - * Construct a {@code FilterFileSystemProvider} indicated by - * the specified {@code scheme} and wrapping functionality of the - * provider. You must set the singleton {@code filesystem} yourself. + * Construct a {@code FilterFileSystemProvider} indicated by the specified {@code scheme} and + * wrapping functionality of the provider. 
You must set the singleton {@code filesystem} yourself. + * * @param scheme URI scheme * @param delegate specified base provider. */ @@ -95,15 +88,15 @@ public abstract class FilterFileSystemProvider extends FileSystemProvider { } @Override - public FileSystem newFileSystem(URI uri, Map env) throws IOException { + public FileSystem newFileSystem(URI uri, Map env) throws IOException { if (fileSystem == null) { throw new IllegalStateException("subclass did not initialize singleton filesystem"); } return fileSystem; } - + @Override - public FileSystem newFileSystem(Path path, Map env) throws IOException { + public FileSystem newFileSystem(Path path, Map env) throws IOException { if (fileSystem == null) { throw new IllegalStateException("subclass did not initialize singleton filesystem"); } @@ -168,22 +161,26 @@ public abstract class FilterFileSystemProvider extends FileSystemProvider { } @Override - public V getFileAttributeView(Path path, Class type, LinkOption... options) { + public V getFileAttributeView( + Path path, Class type, LinkOption... options) { return delegate.getFileAttributeView(toDelegate(path), type, options); } @Override - public A readAttributes(Path path, Class type, LinkOption... options) throws IOException { + public A readAttributes( + Path path, Class type, LinkOption... options) throws IOException { return delegate.readAttributes(toDelegate(path), type, options); } @Override - public Map readAttributes(Path path, String attributes, LinkOption... options) throws IOException { + public Map readAttributes(Path path, String attributes, LinkOption... options) + throws IOException { return delegate.readAttributes(toDelegate(path), attributes, options); } @Override - public void setAttribute(Path path, String attribute, Object value, LinkOption... options) throws IOException { + public void setAttribute(Path path, String attribute, Object value, LinkOption... options) + throws IOException { delegate.setAttribute(toDelegate(path), attribute, value, options); } @@ -198,33 +195,44 @@ public abstract class FilterFileSystemProvider extends FileSystemProvider { } @Override - public FileChannel newFileChannel(Path path, Set options, FileAttribute... attrs) throws IOException { + public FileChannel newFileChannel( + Path path, Set options, FileAttribute... attrs) throws IOException { return delegate.newFileChannel(toDelegate(path), options, attrs); } @Override - public AsynchronousFileChannel newAsynchronousFileChannel(Path path, Set options, ExecutorService executor, FileAttribute... attrs) throws IOException { + public AsynchronousFileChannel newAsynchronousFileChannel( + Path path, + Set options, + ExecutorService executor, + FileAttribute... attrs) + throws IOException { return delegate.newAsynchronousFileChannel(toDelegate(path), options, executor, attrs); } - + @Override - public SeekableByteChannel newByteChannel(Path path, Set options, FileAttribute... attrs) throws IOException { + public SeekableByteChannel newByteChannel( + Path path, Set options, FileAttribute... 
attrs) throws IOException { return delegate.newByteChannel(toDelegate(path), options, attrs); } @Override - public DirectoryStream newDirectoryStream(Path dir, final Filter filter) throws IOException { - Filter wrappedFilter = new Filter() { - @Override - public boolean accept(Path entry) throws IOException { - return filter.accept(new FilterPath(entry, fileSystem)); - } - }; - return new FilterDirectoryStream(delegate.newDirectoryStream(toDelegate(dir), wrappedFilter), fileSystem); + public DirectoryStream newDirectoryStream(Path dir, final Filter filter) + throws IOException { + Filter wrappedFilter = + new Filter() { + @Override + public boolean accept(Path entry) throws IOException { + return filter.accept(new FilterPath(entry, fileSystem)); + } + }; + return new FilterDirectoryStream( + delegate.newDirectoryStream(toDelegate(dir), wrappedFilter), fileSystem); } @Override - public void createSymbolicLink(Path link, Path target, FileAttribute... attrs) throws IOException { + public void createSymbolicLink(Path link, Path target, FileAttribute... attrs) + throws IOException { delegate.createSymbolicLink(toDelegate(link), toDelegate(target), attrs); } @@ -247,21 +255,25 @@ public abstract class FilterFileSystemProvider extends FileSystemProvider { if (path instanceof FilterPath) { FilterPath fp = (FilterPath) path; if (fp.fileSystem != fileSystem) { - throw new ProviderMismatchException("mismatch, expected: " + fileSystem.provider().getClass() + ", got: " + fp.fileSystem.provider().getClass()); + throw new ProviderMismatchException( + "mismatch, expected: " + + fileSystem.provider().getClass() + + ", got: " + + fp.fileSystem.provider().getClass()); } return fp.delegate; } else { - throw new ProviderMismatchException("mismatch, expected: FilterPath, got: " + path.getClass()); + throw new ProviderMismatchException( + "mismatch, expected: FilterPath, got: " + path.getClass()); } } - - /** + + /** * Override to trigger some behavior when the filesystem is closed. - *

    - * This is always called for each FilterFileSystemProvider in the chain. + * + *

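A sketch of the hook in use; CloseLoggingFS and its "closelog://" scheme are hypothetical names, and all I/O still flows to the delegate filesystem:

import java.nio.file.FileSystem;
import org.apache.lucene.mockfile.FilterFileSystemProvider;

// Hypothetical provider that only wants a close notification. When the
// outermost filesystem in a chain is closed, each wrapped provider gets
// its onClose() callback once.
class CloseLoggingFS extends FilterFileSystemProvider {
  CloseLoggingFS(FileSystem delegate) {
    super("closelog://", delegate); // the scheme string is arbitrary
  }

  @Override
  protected void onClose() {
    System.out.println("closelog: filesystem was closed");
  }
}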
    This is always called for each FilterFileSystemProvider in the chain. */ - protected void onClose() { - } + protected void onClose() {} @Override public String toString() { diff --git a/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterInputStream2.java b/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterInputStream2.java index aed5660c66d..5c4ff32f591 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterInputStream2.java +++ b/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterInputStream2.java @@ -21,31 +21,26 @@ import java.io.IOException; import java.io.InputStream; import java.util.Objects; -/** - * A {@code FilterInputStream2} contains another - * {@code InputStream}, which it uses as its basic - * source of data, possibly transforming the data along the - * way or providing additional functionality. - *

    - * Note: unlike {@link FilterInputStream} this class - * delegates every method by default. This means to transform - * {@code read} calls, you need to override multiple methods. - * On the other hand, it is less trappy: a simple implementation - * that just overrides {@code close} will not force bytes to be - * read one-at-a-time. +/** + * A {@code FilterInputStream2} contains another {@code InputStream}, which it uses as its basic + * source of data, possibly transforming the data along the way or providing additional + * functionality. + * + *

    Note: unlike {@link FilterInputStream} this class delegates every method by default. This + * means to transform {@code read} calls, you need to override multiple methods. On the other hand, + * it is less trappy: a simple implementation that just overrides {@code close} will not force bytes + * to be read one-at-a-time. */ public class FilterInputStream2 extends InputStream { - - /** - * The underlying {@code InputStream} instance. - */ + + /** The underlying {@code InputStream} instance. */ protected final InputStream delegate; - + /** - * Construct a {@code FilterInputStream2} based on - * the specified base stream. - *

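A sketch of the design point above: override close() alone and the bulk read(byte[], int, int) path still goes straight to the delegate, so no per-byte penalty sneaks in. CloseAwareInputStream is a hypothetical name:

import java.io.IOException;
import java.io.InputStream;
import org.apache.lucene.mockfile.FilterInputStream2;

// Hypothetical subclass: only close() is intercepted; all read() variants
// keep delegating directly.
class CloseAwareInputStream extends FilterInputStream2 {
  CloseAwareInputStream(InputStream delegate) {
    super(delegate);
  }

  @Override
  public void close() throws IOException {
    System.out.println("stream closed"); // e.g. record the event for leak tracking
    super.close();
  }
}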
    - * Note that base stream is closed if this stream is closed. + * Construct a {@code FilterInputStream2} based on the specified base stream. + * + *

    Note that base stream is closed if this stream is closed. + * * @param delegate specified base stream. */ public FilterInputStream2(InputStream delegate) { diff --git a/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterOutputStream2.java b/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterOutputStream2.java index 5413c87dd71..9abfd12968e 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterOutputStream2.java +++ b/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterOutputStream2.java @@ -21,37 +21,32 @@ import java.io.IOException; import java.io.OutputStream; import java.util.Objects; -/** - * A {@code FilterOutputStream2} contains another - * {@code OutputStream}, which it uses as its basic - * source of data, possibly transforming the data along the - * way or providing additional functionality. - *

    - * Note: unlike {@link FilterOutputStream} this class - * delegates every method by default. This means to transform - * {@code write} calls, you need to override multiple methods. - * On the other hand, it is less trappy: a simple implementation - * that just overrides {@code close} will not force bytes to be - * written one-at-a-time. +/** + * A {@code FilterOutputStream2} contains another {@code OutputStream}, which it uses as its basic + * source of data, possibly transforming the data along the way or providing additional + * functionality. + * + *

    Note: unlike {@link FilterOutputStream} this class delegates every method by default. This + * means to transform {@code write} calls, you need to override multiple methods. On the other hand, + * it is less trappy: a simple implementation that just overrides {@code close} will not force bytes + * to be written one-at-a-time. */ public abstract class FilterOutputStream2 extends OutputStream { - - /** - * The underlying {@code OutputStream} instance. - */ + + /** The underlying {@code OutputStream} instance. */ protected final OutputStream delegate; - + /** - * Construct a {@code FilterOutputStream2} based on - * the specified base stream. - *

    - * Note that base stream is closed if this stream is closed. + * Construct a {@code FilterOutputStream2} based on the specified base stream. + * + *

    Note that base stream is closed if this stream is closed. + * * @param delegate specified base stream. */ public FilterOutputStream2(OutputStream delegate) { this.delegate = Objects.requireNonNull(delegate); } - + @Override public void write(byte[] b) throws IOException { delegate.write(b); diff --git a/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterPath.java b/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterPath.java index 8e6a8fb1da5..9323536cf62 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterPath.java +++ b/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterPath.java @@ -28,30 +28,24 @@ import java.nio.file.WatchEvent.Modifier; import java.nio.file.WatchKey; import java.nio.file.WatchService; import java.util.Iterator; - import org.apache.lucene.util.SuppressForbidden; -/** - * A {@code FilterPath} contains another - * {@code Path}, which it uses as its basic - * source of data, possibly transforming the data along the - * way or providing additional functionality. +/** + * A {@code FilterPath} contains another {@code Path}, which it uses as its basic source of data, + * possibly transforming the data along the way or providing additional functionality. */ public class FilterPath implements Path { - - /** - * The underlying {@code Path} instance. - */ + + /** The underlying {@code Path} instance. */ protected final Path delegate; - - /** - * The parent {@code FileSystem} for this path. - */ + + /** The parent {@code FileSystem} for this path. */ protected final FileSystem fileSystem; - + /** - * Construct a {@code FilterPath} with parent - * {@code fileSystem}, based on the specified base path. + * Construct a {@code FilterPath} with parent {@code fileSystem}, based on the specified base + * path. + * * @param delegate specified base path. * @param fileSystem parent fileSystem. */ @@ -59,9 +53,10 @@ public class FilterPath implements Path { this.delegate = delegate; this.fileSystem = fileSystem; } - - /** + + /** * Get the underlying wrapped path. + * * @return wrapped path. */ public Path getDelegate() { @@ -171,13 +166,13 @@ public class FilterPath implements Path { } // TODO: should these methods not expose delegate result directly? - // it could allow code to "escape" the sandbox... + // it could allow code to "escape" the sandbox... @Override public URI toUri() { return delegate.toUri(); } - + @Override public String toString() { return delegate.toString(); @@ -201,7 +196,8 @@ public class FilterPath implements Path { } @Override - public WatchKey register(WatchService watcher, Kind[] events, Modifier... modifiers) throws IOException { + public WatchKey register(WatchService watcher, Kind[] events, Modifier... modifiers) + throws IOException { return delegate.register(watcher, events, modifiers); } @@ -235,7 +231,7 @@ public class FilterPath implements Path { public int compareTo(Path other) { return delegate.compareTo(toDelegate(other)); } - + @Override public int hashCode() { return delegate.hashCode(); @@ -257,38 +253,40 @@ public class FilterPath implements Path { } /** - * Unwraps all {@code FilterPath}s, returning - * the innermost {@code Path}. - *

    - * WARNING: this is exposed for testing only! + * Unwraps all {@code FilterPath}s, returning the innermost {@code Path}. + * + *

    WARNING: this is exposed for testing only! + * * @param path specified path. * @return innermost Path instance */ public static Path unwrap(Path path) { while (path instanceof FilterPath) { - path = ((FilterPath)path).delegate; + path = ((FilterPath) path).delegate; } return path; } - - /** Override this to customize the return wrapped - * path from various operations */ + + /** Override this to customize the return wrapped path from various operations */ protected Path wrap(Path other) { return new FilterPath(other, fileSystem); } - - /** Override this to customize the unboxing of Path - * from various operations - */ + + /** Override this to customize the unboxing of Path from various operations */ protected Path toDelegate(Path path) { if (path instanceof FilterPath) { FilterPath fp = (FilterPath) path; if (fp.fileSystem != fileSystem) { - throw new ProviderMismatchException("mismatch, expected: " + fileSystem.provider().getClass() + ", got: " + fp.fileSystem.provider().getClass()); + throw new ProviderMismatchException( + "mismatch, expected: " + + fileSystem.provider().getClass() + + ", got: " + + fp.fileSystem.provider().getClass()); } return fp.delegate; } else { - throw new ProviderMismatchException("mismatch, expected: FilterPath, got: " + path.getClass()); + throw new ProviderMismatchException( + "mismatch, expected: FilterPath, got: " + path.getClass()); } } } diff --git a/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterSeekableByteChannel.java b/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterSeekableByteChannel.java index d627e363167..d5b587909b5 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterSeekableByteChannel.java +++ b/lucene/test-framework/src/java/org/apache/lucene/mockfile/FilterSeekableByteChannel.java @@ -20,24 +20,21 @@ import java.io.IOException; import java.nio.ByteBuffer; import java.nio.channels.SeekableByteChannel; -/** - * A {@code FilterSeekableByteChannel} contains another - * {@code SeekableByteChannel}, which it uses as its basic - * source of data, possibly transforming the data along the - * way or providing additional functionality. +/** + * A {@code FilterSeekableByteChannel} contains another {@code SeekableByteChannel}, which it uses + * as its basic source of data, possibly transforming the data along the way or providing additional + * functionality. */ public class FilterSeekableByteChannel implements SeekableByteChannel { - - /** - * The underlying {@code SeekableByteChannel} instance. - */ + + /** The underlying {@code SeekableByteChannel} instance. */ protected final SeekableByteChannel delegate; - + /** - * Construct a {@code FilterSeekableByteChannel} based on - * the specified base channel. - *

    - * Note that base channel is closed if this channel is closed. + * Construct a {@code FilterSeekableByteChannel} based on the specified base channel. + * + *

    Note that base channel is closed if this channel is closed. + * * @param delegate specified base channel. */ public FilterSeekableByteChannel(SeekableByteChannel delegate) { diff --git a/lucene/test-framework/src/java/org/apache/lucene/mockfile/HandleLimitFS.java b/lucene/test-framework/src/java/org/apache/lucene/mockfile/HandleLimitFS.java index 0cdbf0ddc30..393c1de94f9 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/mockfile/HandleLimitFS.java +++ b/lucene/test-framework/src/java/org/apache/lucene/mockfile/HandleLimitFS.java @@ -22,17 +22,14 @@ import java.nio.file.FileSystemException; import java.nio.file.Path; import java.util.concurrent.atomic.AtomicInteger; -/** - * FileSystem that throws exception if file handles - * in use exceeds a specified limit - */ +/** FileSystem that throws exception if file handles in use exceeds a specified limit */ public class HandleLimitFS extends HandleTrackingFS { final int limit; final AtomicInteger count = new AtomicInteger(); - + /** - * Create a new instance, limiting the maximum number - * of open files to {@code limit} + * Create a new instance, limiting the maximum number of open files to {@code limit} + * * @param delegate delegate filesystem to wrap. * @param limit maximum number of open files. */ diff --git a/lucene/test-framework/src/java/org/apache/lucene/mockfile/HandleTrackingFS.java b/lucene/test-framework/src/java/org/apache/lucene/mockfile/HandleTrackingFS.java index ee6ceb710fa..056e0882240 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/mockfile/HandleTrackingFS.java +++ b/lucene/test-framework/src/java/org/apache/lucene/mockfile/HandleTrackingFS.java @@ -24,57 +24,58 @@ import java.nio.channels.AsynchronousFileChannel; import java.nio.channels.FileChannel; import java.nio.channels.SeekableByteChannel; import java.nio.file.DirectoryStream; +import java.nio.file.DirectoryStream.Filter; import java.nio.file.FileSystem; import java.nio.file.OpenOption; import java.nio.file.Path; -import java.nio.file.DirectoryStream.Filter; import java.nio.file.attribute.FileAttribute; import java.util.Set; import java.util.concurrent.ExecutorService; - import org.apache.lucene.util.IOUtils; -/** +/** * Base class for tracking file handles. - *

    - * This class adds tracking to all streams/channels and - * provides two hooks to handle file management: + * + *

    This class adds tracking to all streams/channels and provides two hooks to handle file + * management: + * *
+ * <ul>
+ *   <li>{@link #onOpen(Path, Object)}
+ *   <li>{@link #onClose(Path, Object)}
+ * </ul>
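A sketch of the two hooks cooperating, in the spirit of HandleLimitFS above; TinyHandleLimitFS and its "tinylimit://" scheme are hypothetical:

import java.io.IOException;
import java.nio.file.FileSystem;
import java.nio.file.FileSystemException;
import java.nio.file.Path;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.lucene.mockfile.HandleTrackingFS;

// Hypothetical subclass: refuse to open more than `limit` streams/channels
// at once. onOpen runs for every tracked open, onClose for every close.
class TinyHandleLimitFS extends HandleTrackingFS {
  private final int limit;
  private final AtomicInteger open = new AtomicInteger();

  TinyHandleLimitFS(FileSystem delegate, int limit) {
    super("tinylimit://", delegate);
    this.limit = limit;
  }

  @Override
  protected void onOpen(Path path, Object stream) throws IOException {
    if (open.incrementAndGet() > limit) {
      open.decrementAndGet();
      throw new FileSystemException(path.toString(), null, "too many open files: " + limit);
    }
  }

  @Override
  protected void onClose(Path path, Object stream) throws IOException {
    open.decrementAndGet();
  }
}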
    */ public abstract class HandleTrackingFS extends FilterFileSystemProvider { - + /** - * Create a new instance, identified by {@code scheme} and passing - * through operations to {@code delegate}. + * Create a new instance, identified by {@code scheme} and passing through operations to {@code + * delegate}. + * * @param scheme URI scheme for this provider * @param delegate delegate filesystem to wrap. */ public HandleTrackingFS(String scheme, FileSystem delegate) { super(scheme, delegate); } - + /** - * Called when {@code path} is opened via {@code stream}. + * Called when {@code path} is opened via {@code stream}. + * * @param path Path that was opened * @param stream Stream or Channel opened against the path. * @throws IOException if an I/O error occurs. */ protected abstract void onOpen(Path path, Object stream) throws IOException; - + /** - * Called when {@code path} is closed via {@code stream}. + * Called when {@code path} is closed via {@code stream}. + * * @param path Path that was closed * @param stream Stream or Channel closed against the path. * @throws IOException if an I/O error occurs. */ protected abstract void onClose(Path path, Object stream) throws IOException; - /** - * Helper method, to deal with onOpen() throwing exception - */ + /** Helper method, to deal with onOpen() throwing exception */ final void callOpenHook(Path path, Closeable stream) throws IOException { boolean success = false; try { @@ -86,232 +87,248 @@ public abstract class HandleTrackingFS extends FilterFileSystemProvider { } } } - + @Override public InputStream newInputStream(Path path, OpenOption... options) throws IOException { - InputStream stream = new FilterInputStream2(super.newInputStream(path, options)) { - - boolean closed; - - @Override - public void close() throws IOException { - try { - if (!closed) { - closed = true; - onClose(path, this); + InputStream stream = + new FilterInputStream2(super.newInputStream(path, options)) { + + boolean closed; + + @Override + public void close() throws IOException { + try { + if (!closed) { + closed = true; + onClose(path, this); + } + } finally { + super.close(); + } } - } finally { - super.close(); - } - } - @Override - public String toString() { - return "InputStream(" + path.toString() + ")"; - } + @Override + public String toString() { + return "InputStream(" + path.toString() + ")"; + } - @Override - public int hashCode() { - return System.identityHashCode(this); - } + @Override + public int hashCode() { + return System.identityHashCode(this); + } - @Override - public boolean equals(Object obj) { - return this == obj; - } - }; + @Override + public boolean equals(Object obj) { + return this == obj; + } + }; callOpenHook(path, stream); return stream; } @Override public OutputStream newOutputStream(final Path path, OpenOption... 
options) throws IOException { - OutputStream stream = new FilterOutputStream2(delegate.newOutputStream(toDelegate(path), options)) { - - boolean closed; + OutputStream stream = + new FilterOutputStream2(delegate.newOutputStream(toDelegate(path), options)) { - @Override - public void close() throws IOException { - try { - if (!closed) { - closed = true; - onClose(path, this); + boolean closed; + + @Override + public void close() throws IOException { + try { + if (!closed) { + closed = true; + onClose(path, this); + } + } finally { + super.close(); + } } - } finally { - super.close(); - } - } - - @Override - public String toString() { - return "OutputStream(" + path.toString() + ")"; - } - - @Override - public int hashCode() { - return System.identityHashCode(this); - } - @Override - public boolean equals(Object obj) { - return this == obj; - } - }; + @Override + public String toString() { + return "OutputStream(" + path.toString() + ")"; + } + + @Override + public int hashCode() { + return System.identityHashCode(this); + } + + @Override + public boolean equals(Object obj) { + return this == obj; + } + }; callOpenHook(path, stream); return stream; } - + @Override - public FileChannel newFileChannel(Path path, Set options, FileAttribute... attrs) throws IOException { - FileChannel channel = new FilterFileChannel(delegate.newFileChannel(toDelegate(path), options, attrs)) { - - boolean closed; - - @Override - protected void implCloseChannel() throws IOException { - if (!closed) { - closed = true; - try { - onClose(path, this); - } finally { - super.implCloseChannel(); + public FileChannel newFileChannel( + Path path, Set options, FileAttribute... attrs) throws IOException { + FileChannel channel = + new FilterFileChannel(delegate.newFileChannel(toDelegate(path), options, attrs)) { + + boolean closed; + + @Override + protected void implCloseChannel() throws IOException { + if (!closed) { + closed = true; + try { + onClose(path, this); + } finally { + super.implCloseChannel(); + } + } } - } - } - @Override - public String toString() { - return "FileChannel(" + path.toString() + ")"; - } - - @Override - public int hashCode() { - return System.identityHashCode(this); - } + @Override + public String toString() { + return "FileChannel(" + path.toString() + ")"; + } - @Override - public boolean equals(Object obj) { - return this == obj; - } - }; + @Override + public int hashCode() { + return System.identityHashCode(this); + } + + @Override + public boolean equals(Object obj) { + return this == obj; + } + }; callOpenHook(path, channel); return channel; } @Override - public AsynchronousFileChannel newAsynchronousFileChannel(Path path, Set options, ExecutorService executor, FileAttribute... attrs) throws IOException { - AsynchronousFileChannel channel = new FilterAsynchronousFileChannel(super.newAsynchronousFileChannel(path, options, executor, attrs)) { - - boolean closed; - - @Override - public void close() throws IOException { - try { - if (!closed) { - closed = true; - onClose(path, this); + public AsynchronousFileChannel newAsynchronousFileChannel( + Path path, + Set options, + ExecutorService executor, + FileAttribute... 
attrs) + throws IOException { + AsynchronousFileChannel channel = + new FilterAsynchronousFileChannel( + super.newAsynchronousFileChannel(path, options, executor, attrs)) { + + boolean closed; + + @Override + public void close() throws IOException { + try { + if (!closed) { + closed = true; + onClose(path, this); + } + } finally { + super.close(); + } } - } finally { - super.close(); - } - } - @Override - public String toString() { - return "AsynchronousFileChannel(" + path.toString() + ")"; - } - - @Override - public int hashCode() { - return System.identityHashCode(this); - } + @Override + public String toString() { + return "AsynchronousFileChannel(" + path.toString() + ")"; + } - @Override - public boolean equals(Object obj) { - return this == obj; - } - }; + @Override + public int hashCode() { + return System.identityHashCode(this); + } + + @Override + public boolean equals(Object obj) { + return this == obj; + } + }; callOpenHook(path, channel); return channel; } @Override - public SeekableByteChannel newByteChannel(Path path, Set options, FileAttribute... attrs) throws IOException { - SeekableByteChannel channel = new FilterSeekableByteChannel(super.newByteChannel(path, options, attrs)) { - - boolean closed; - - @Override - public void close() throws IOException { - try { - if (!closed) { - closed = true; - onClose(path, this); + public SeekableByteChannel newByteChannel( + Path path, Set options, FileAttribute... attrs) throws IOException { + SeekableByteChannel channel = + new FilterSeekableByteChannel(super.newByteChannel(path, options, attrs)) { + + boolean closed; + + @Override + public void close() throws IOException { + try { + if (!closed) { + closed = true; + onClose(path, this); + } + } finally { + super.close(); + } } - } finally { - super.close(); - } - } - @Override - public String toString() { - return "SeekableByteChannel(" + path.toString() + ")"; - } - - @Override - public int hashCode() { - return System.identityHashCode(this); - } + @Override + public String toString() { + return "SeekableByteChannel(" + path.toString() + ")"; + } - @Override - public boolean equals(Object obj) { - return this == obj; - } - }; + @Override + public int hashCode() { + return System.identityHashCode(this); + } + + @Override + public boolean equals(Object obj) { + return this == obj; + } + }; callOpenHook(path, channel); return channel; } @Override - public DirectoryStream newDirectoryStream(Path dir, Filter filter) throws IOException { - Filter wrappedFilter = new Filter() { - @Override - public boolean accept(Path entry) throws IOException { - return filter.accept(new FilterPath(entry, fileSystem)); - } - }; + public DirectoryStream newDirectoryStream(Path dir, Filter filter) + throws IOException { + Filter wrappedFilter = + new Filter() { + @Override + public boolean accept(Path entry) throws IOException { + return filter.accept(new FilterPath(entry, fileSystem)); + } + }; DirectoryStream stream = delegate.newDirectoryStream(toDelegate(dir), wrappedFilter); - stream = new FilterDirectoryStream(stream, fileSystem) { - - boolean closed; - - @Override - public void close() throws IOException { - try { - if (!closed) { - closed = true; - onClose(dir, this); + stream = + new FilterDirectoryStream(stream, fileSystem) { + + boolean closed; + + @Override + public void close() throws IOException { + try { + if (!closed) { + closed = true; + onClose(dir, this); + } + } finally { + super.close(); + } } - } finally { - super.close(); - } - } - - @Override - public String toString() { - return 
"DirectoryStream(" + dir + ")"; - } - - @Override - public int hashCode() { - return System.identityHashCode(this); - } - - @Override - public boolean equals(Object obj) { - return this == obj; - } - }; + + @Override + public String toString() { + return "DirectoryStream(" + dir + ")"; + } + + @Override + public int hashCode() { + return System.identityHashCode(this); + } + + @Override + public boolean equals(Object obj) { + return this == obj; + } + }; callOpenHook(dir, stream); return stream; } diff --git a/lucene/test-framework/src/java/org/apache/lucene/mockfile/LeakFS.java b/lucene/test-framework/src/java/org/apache/lucene/mockfile/LeakFS.java index f76565a0a5f..296c4a889e2 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/mockfile/LeakFS.java +++ b/lucene/test-framework/src/java/org/apache/lucene/mockfile/LeakFS.java @@ -22,19 +22,19 @@ import java.util.Iterator; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; -/** +/** * FileSystem that tracks open handles. - *

    - * When {@link FileSystem#close()} is called, this class will throw - * an exception if any file handles are still open. + * + *

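A sketch of that contract; LeakFSExample and "leaky.txt" are made-up names, and the exact exception type raised on close is deliberately left loose here:

import java.io.InputStream;
import java.nio.file.FileSystem;
import java.nio.file.FileSystems;
import java.nio.file.Files;
import java.nio.file.Path;
import org.apache.lucene.mockfile.LeakFS;

public class LeakFSExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = new LeakFS(FileSystems.getDefault()).getFileSystem(null);
    Path file = fs.getPath("leaky.txt"); // hypothetical file name
    Files.write(file, new byte[] {42});
    InputStream in = Files.newInputStream(file); // opened, then "forgotten"
    try {
      fs.close(); // LeakFS fails here, pointing at where the leaked stream was opened
    } catch (Exception leak) {
      System.out.println("leak detected: " + leak.getMessage());
    } finally {
      in.close();
      Files.delete(file);
    }
  }
}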
    When {@link FileSystem#close()} is called, this class will throw an exception if any file + * handles are still open. */ public class LeakFS extends HandleTrackingFS { // we explicitly use reference hashcode/equality in our keys - private final Map openHandles = new ConcurrentHashMap<>(); - + private final Map openHandles = new ConcurrentHashMap<>(); + /** - * Create a new instance, tracking file handle leaks for the - * specified delegate filesystem. + * Create a new instance, tracking file handle leaks for the specified delegate filesystem. + * * @param delegate delegate filesystem to wrap. */ public LeakFS(FileSystem delegate) { diff --git a/lucene/test-framework/src/java/org/apache/lucene/mockfile/MockFileSystemTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/mockfile/MockFileSystemTestCase.java index 3121ae1a5ed..5ba6a675dd4 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/mockfile/MockFileSystemTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/mockfile/MockFileSystemTestCase.java @@ -23,19 +23,17 @@ import java.nio.file.DirectoryStream; import java.nio.file.Files; import java.nio.file.InvalidPathException; import java.nio.file.Path; - import org.apache.lucene.util.Constants; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.LuceneTestCase.SuppressFileSystems; -/** - * Base class for testing mockfilesystems. This tests things - * that really need to work: Path equals()/hashcode(), directory listing - * glob and filtering, URI conversion, etc. +/** + * Base class for testing mockfilesystems. This tests things that really need to work: Path + * equals()/hashcode(), directory listing glob and filtering, URI conversion, etc. */ @SuppressFileSystems("*") // we suppress random filesystems and do tests explicitly. 
public abstract class MockFileSystemTestCase extends LuceneTestCase { - + /** wraps Path with custom behavior */ protected abstract Path wrap(Path path); @@ -46,7 +44,7 @@ public abstract class MockFileSystemTestCase extends LuceneTestCase { Path f1 = dir.resolve("file1"); Path f1Again = dir.resolve("file1"); Path f2 = dir.resolve("file2"); - + assertEquals(f1, f1); assertFalse(f1.equals(null)); assertEquals(f1, f1Again); @@ -54,7 +52,7 @@ public abstract class MockFileSystemTestCase extends LuceneTestCase { assertFalse(f1.equals(f2)); dir.getFileSystem().close(); } - + /** Test that URIs are not corrumpted */ public void testURI() throws IOException { implTestURI("file1"); // plain ASCII @@ -69,14 +67,16 @@ public abstract class MockFileSystemTestCase extends LuceneTestCase { } private void implTestURI(String fileName) throws IOException { - assumeFalse("broken on J9: see https://issues.apache.org/jira/browse/LUCENE-6517", Constants.JAVA_VENDOR.startsWith("IBM")); + assumeFalse( + "broken on J9: see https://issues.apache.org/jira/browse/LUCENE-6517", + Constants.JAVA_VENDOR.startsWith("IBM")); Path dir = wrap(createTempDir()); Path f1 = null; try { f1 = dir.resolve(fileName); } catch (InvalidPathException ipe) { - assumeNoException("couldn't resolve '"+fileName+"'", ipe); + assumeNoException("couldn't resolve '" + fileName + "'", ipe); } URI uri = f1.toUri(); @@ -85,7 +85,7 @@ public abstract class MockFileSystemTestCase extends LuceneTestCase { dir.getFileSystem().close(); } - + /** Tests that newDirectoryStream with a filter works correctly */ public void testDirectoryStreamFiltered() throws IOException { Path dir = wrap(createTempDir()); diff --git a/lucene/test-framework/src/java/org/apache/lucene/mockfile/ShuffleFS.java b/lucene/test-framework/src/java/org/apache/lucene/mockfile/ShuffleFS.java index 01fc2f5f423..212760252e1 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/mockfile/ShuffleFS.java +++ b/lucene/test-framework/src/java/org/apache/lucene/mockfile/ShuffleFS.java @@ -29,23 +29,22 @@ import java.util.Random; /** * Gives an unpredictable, but deterministic order to directory listings. - *

    - * This can be useful if for instance, you have build servers on - * linux but developers are using macs. + * + *

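A sketch of the determinism guarantee: entries are first sorted by filename (normalizing away host-OS listing order) and then shuffled with a Random seeded per instance, so a fixed seed reproduces the same permutation on every machine. ShuffleFSExample is a hypothetical name:

import java.nio.file.DirectoryStream;
import java.nio.file.FileSystem;
import java.nio.file.FileSystems;
import java.nio.file.Files;
import java.nio.file.Path;
import org.apache.lucene.mockfile.ShuffleFS;

public class ShuffleFSExample {
  public static void main(String[] args) throws Exception {
    long seed = 42L; // in the test framework this derives from the random test seed
    FileSystem fs = new ShuffleFS(FileSystems.getDefault(), seed).getFileSystem(null);
    Path dir = fs.getPath("."); // list the working directory through the wrapper
    for (int round = 0; round < 2; round++) {
      try (DirectoryStream<Path> stream = Files.newDirectoryStream(dir)) {
        // Same seed, same order on both rounds -- and on every OS.
        for (Path p : stream) {
          System.out.println(round + ": " + p.getFileName());
        }
      }
    }
  }
}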
    This can be useful if for instance, you have build servers on linux but developers are using + * macs. */ public class ShuffleFS extends FilterFileSystemProvider { final long seed; - - /** - * Create a new instance, wrapping {@code delegate}. - */ + + /** Create a new instance, wrapping {@code delegate}. */ public ShuffleFS(FileSystem delegate, long seed) { super("shuffle://", delegate); this.seed = seed; } @Override - public DirectoryStream newDirectoryStream(Path dir, Filter filter) throws IOException { + public DirectoryStream newDirectoryStream(Path dir, Filter filter) + throws IOException { try (DirectoryStream stream = super.newDirectoryStream(dir, filter)) { // read complete directory listing List contents = new ArrayList<>(); @@ -53,7 +52,10 @@ public class ShuffleFS extends FilterFileSystemProvider { contents.add(path); } // sort first based only on filename - Collections.sort(contents, (path1, path2) -> path1.getFileName().toString().compareTo(path2.getFileName().toString())); + Collections.sort( + contents, + (path1, path2) -> + path1.getFileName().toString().compareTo(path2.getFileName().toString())); // sort based on current class seed Collections.shuffle(contents, new Random(seed)); return new DirectoryStream() { @@ -61,8 +63,9 @@ public class ShuffleFS extends FilterFileSystemProvider { public Iterator iterator() { return contents.iterator(); } + @Override - public void close() throws IOException {} + public void close() throws IOException {} }; } } diff --git a/lucene/test-framework/src/java/org/apache/lucene/mockfile/VerboseFS.java b/lucene/test-framework/src/java/org/apache/lucene/mockfile/VerboseFS.java index 7056277b2a5..166f1a14dea 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/mockfile/VerboseFS.java +++ b/lucene/test-framework/src/java/org/apache/lucene/mockfile/VerboseFS.java @@ -31,30 +31,27 @@ import java.nio.file.attribute.FileAttribute; import java.util.Arrays; import java.util.Set; import java.util.concurrent.ExecutorService; - import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.InfoStream; -/** - * FileSystem that records all major destructive filesystem activities. - */ +/** FileSystem that records all major destructive filesystem activities. */ public class VerboseFS extends FilterFileSystemProvider { final InfoStream infoStream; final Path root; - + /** - * Create a new instance, recording major filesystem write activities - * (create, delete, etc) to the specified {@code InfoStream}. + * Create a new instance, recording major filesystem write activities (create, delete, etc) to the + * specified {@code InfoStream}. + * * @param delegate delegate filesystem to wrap. - * @param infoStream infoStream to send messages to. The component for - * messages is named "FS". + * @param infoStream infoStream to send messages to. The component for messages is named "FS". 
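A sketch of wiring this up; PrintStreamInfoStream is the stock InfoStream that prints to a PrintStream, while VerboseFSExample, "scratch.bin", and the exact message text are illustrative assumptions:

import java.nio.file.FileSystem;
import java.nio.file.FileSystems;
import java.nio.file.Files;
import java.nio.file.Path;
import org.apache.lucene.mockfile.VerboseFS;
import org.apache.lucene.util.PrintStreamInfoStream;

public class VerboseFSExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs =
        new VerboseFS(FileSystems.getDefault(), new PrintStreamInfoStream(System.out))
            .getFileSystem(null);
    Path file = fs.getPath("scratch.bin"); // hypothetical file name
    Files.write(file, new byte[] {1, 2, 3}); // a destructive (write) open: recorded
    Files.delete(file); // recorded under the "FS" component, e.g. "delete: scratch.bin"
  }
}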
*/ public VerboseFS(FileSystem delegate, InfoStream infoStream) { super("verbose://", delegate); this.infoStream = infoStream; this.root = this.getFileSystem(null).getPath(".").toAbsolutePath().normalize(); } - + /** Records message, and rethrows exception if not null */ private void sop(String text, Throwable exception) throws IOException { if (exception == null) { @@ -68,7 +65,7 @@ public class VerboseFS extends FilterFileSystemProvider { throw IOUtils.rethrowAlways(exception); } } - + private String path(Path path) { path = root.relativize(path.toAbsolutePath().normalize()); return path.toString(); @@ -106,7 +103,9 @@ public class VerboseFS extends FilterFileSystemProvider { } catch (Throwable t) { exception = t; } finally { - sop("copy" + Arrays.toString(options) + ": " + path(source) + " -> " + path(target), exception); + sop( + "copy" + Arrays.toString(options) + ": " + path(source) + " -> " + path(target), + exception); } } @@ -118,12 +117,15 @@ public class VerboseFS extends FilterFileSystemProvider { } catch (Throwable t) { exception = t; } finally { - sop("move" + Arrays.toString(options) + ": " + path(source) + " -> " + path(target), exception); + sop( + "move" + Arrays.toString(options) + ": " + path(source) + " -> " + path(target), + exception); } } @Override - public void setAttribute(Path path, String attribute, Object value, LinkOption... options) throws IOException { + public void setAttribute(Path path, String attribute, Object value, LinkOption... options) + throws IOException { Throwable exception = null; try { super.setAttribute(path, attribute, value, options); @@ -146,15 +148,16 @@ public class VerboseFS extends FilterFileSystemProvider { } throw new AssertionError(); } - + private boolean containsDestructive(Set options) { - return (options.contains(StandardOpenOption.APPEND) || - options.contains(StandardOpenOption.WRITE) || - options.contains(StandardOpenOption.DELETE_ON_CLOSE)); + return (options.contains(StandardOpenOption.APPEND) + || options.contains(StandardOpenOption.WRITE) + || options.contains(StandardOpenOption.DELETE_ON_CLOSE)); } @Override - public FileChannel newFileChannel(Path path, Set options, FileAttribute... attrs) throws IOException { + public FileChannel newFileChannel( + Path path, Set options, FileAttribute... attrs) throws IOException { Throwable exception = null; try { return super.newFileChannel(path, options, attrs); @@ -173,7 +176,12 @@ public class VerboseFS extends FilterFileSystemProvider { } @Override - public AsynchronousFileChannel newAsynchronousFileChannel(Path path, Set options, ExecutorService executor, FileAttribute... attrs) throws IOException { + public AsynchronousFileChannel newAsynchronousFileChannel( + Path path, + Set options, + ExecutorService executor, + FileAttribute... attrs) + throws IOException { Throwable exception = null; try { return super.newAsynchronousFileChannel(path, options, executor, attrs); @@ -192,7 +200,8 @@ public class VerboseFS extends FilterFileSystemProvider { } @Override - public SeekableByteChannel newByteChannel(Path path, Set options, FileAttribute... attrs) throws IOException { + public SeekableByteChannel newByteChannel( + Path path, Set options, FileAttribute... attrs) throws IOException { Throwable exception = null; try { return super.newByteChannel(path, options, attrs); @@ -211,7 +220,8 @@ public class VerboseFS extends FilterFileSystemProvider { } @Override - public void createSymbolicLink(Path link, Path target, FileAttribute... 
attrs) throws IOException { + public void createSymbolicLink(Path link, Path target, FileAttribute... attrs) + throws IOException { Throwable exception = null; try { super.createSymbolicLink(link, target, attrs); diff --git a/lucene/test-framework/src/java/org/apache/lucene/mockfile/VirusCheckingFS.java b/lucene/test-framework/src/java/org/apache/lucene/mockfile/VirusCheckingFS.java index eff62b13967..e1b54563e98 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/mockfile/VirusCheckingFS.java +++ b/lucene/test-framework/src/java/org/apache/lucene/mockfile/VirusCheckingFS.java @@ -23,14 +23,14 @@ import java.nio.file.FileSystem; import java.nio.file.Files; import java.nio.file.Path; import java.util.concurrent.atomic.AtomicLong; - import org.apache.lucene.index.IndexWriter; import org.apache.lucene.util.LuceneTestCase; -/** - * Acts like a virus checker on Windows, where random programs may open the files you just wrote in an unfriendly - * way preventing deletion (e.g. not passing FILE_SHARE_DELETE) or renaming or overwriting etc. This is more evil - * than WindowsFS which just prevents deletion of files you still old open. +/** + * Acts like a virus checker on Windows, where random programs may open the files you just wrote in + * an unfriendly way preventing deletion (e.g. not passing FILE_SHARE_DELETE) or renaming or + * overwriting etc. This is more evil than WindowsFS which just prevents deletion of files you still + * old open. */ public class VirusCheckingFS extends FilterFileSystemProvider { @@ -38,9 +38,7 @@ public class VirusCheckingFS extends FilterFileSystemProvider { private final AtomicLong state; - /** - * Create a new instance, wrapping {@code delegate}. - */ + /** Create a new instance, wrapping {@code delegate}. */ public VirusCheckingFS(FileSystem delegate, long salt) { super("viruschecking://", delegate); this.state = new AtomicLong(salt); @@ -63,15 +61,18 @@ public class VirusCheckingFS extends FilterFileSystemProvider { // Fake but deterministic and hopefully portable like-randomness: long hash = state.incrementAndGet() * path.getFileName().hashCode(); - - if (enabled // test infra disables when it's "really" time to delete after test is done, so it can reclaim temp dirs + + if (enabled // test infra disables when it's "really" time to delete after test is done, so it + // can reclaim temp dirs && Files.exists(path) // important that we NOT delay a NoSuchFileException until later - && path.getFileName().toString().equals(IndexWriter.WRITE_LOCK_NAME) == false // life is particularly difficult if the virus checker hits our lock file + && path.getFileName().toString().equals(IndexWriter.WRITE_LOCK_NAME) + == false // life is particularly difficult if the virus checker hits our lock file && (hash % 5) == 1) { if (LuceneTestCase.VERBOSE) { System.out.println("NOTE: VirusCheckingFS now refusing to delete " + path); } - throw new AccessDeniedException("VirusCheckingFS is randomly refusing to delete file \"" + path + "\""); + throw new AccessDeniedException( + "VirusCheckingFS is randomly refusing to delete file \"" + path + "\""); } super.delete(path); } diff --git a/lucene/test-framework/src/java/org/apache/lucene/mockfile/WindowsFS.java b/lucene/test-framework/src/java/org/apache/lucene/mockfile/WindowsFS.java index 28ca3686c06..615ee2192f9 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/mockfile/WindowsFS.java +++ b/lucene/test-framework/src/java/org/apache/lucene/mockfile/WindowsFS.java @@ -26,36 +26,36 @@ import 
java.nio.file.attribute.BasicFileAttributes; import java.util.HashMap; import java.util.Map; -/** - * FileSystem that (imperfectly) acts like windows. - *

    - * Currently this filesystem only prevents deletion of open files. +/** + * FileSystem that (imperfectly) acts like windows. + * + *

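A sketch of the advertised behavior; WindowsFSExample and "held-open.txt" are made-up names:

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.FileSystem;
import java.nio.file.FileSystems;
import java.nio.file.Files;
import java.nio.file.Path;
import org.apache.lucene.mockfile.WindowsFS;

public class WindowsFSExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = new WindowsFS(FileSystems.getDefault()).getFileSystem(null);
    Path file = fs.getPath("held-open.txt"); // hypothetical file name
    Files.write(file, new byte[] {7});
    try (InputStream in = Files.newInputStream(file)) {
      try {
        Files.delete(file); // refused: the stream above still holds the file open
      } catch (IOException expected) {
        System.out.println(expected.getMessage()); // "access denied: held-open.txt"
      }
    }
    Files.delete(file); // the handle is closed now, so deletion succeeds
  }
}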
    Currently this filesystem only prevents deletion of open files. */ public class WindowsFS extends HandleTrackingFS { // This map also supports fileKey -> Path -> counts // which is important to effectively support renames etc. - // in the rename case we have to transfer ownership but need to make sure we only transfer ownership for + // in the rename case we have to transfer ownership but need to make sure we only transfer + // ownership for // the path we rename ie. hardlinks will still resolve to the same key - final Map> openFiles = new HashMap<>(); + final Map> openFiles = new HashMap<>(); // TODO: try to make this as realistic as possible... it depends e.g. how you // open files, if you map them, etc, if you can delete them (Uwe knows the rules) - + // TODO: add case-insensitivity - + /** - * Create a new instance, wrapping the delegate filesystem to - * act like Windows. + * Create a new instance, wrapping the delegate filesystem to act like Windows. + * * @param delegate delegate filesystem to wrap. */ public WindowsFS(FileSystem delegate) { super("windows://", delegate); } - - /** - * Returns file "key" (e.g. inode) for the specified path - */ + + /** Returns file "key" (e.g. inode) for the specified path */ private Object getKey(Path existing) throws IOException { - BasicFileAttributeView view = Files.getFileAttributeView(existing, BasicFileAttributeView.class); + BasicFileAttributeView view = + Files.getFileAttributeView(existing, BasicFileAttributeView.class); BasicFileAttributes attributes = view.readAttributes(); return attributes.fileKey(); } @@ -67,7 +67,7 @@ public class WindowsFS extends HandleTrackingFS { // we have to read the key under the lock otherwise me might leak the openFile handle // if we concurrently delete or move this file. Map pathMap = openFiles.computeIfAbsent(key, k -> new HashMap<>()); - pathMap.put(path, pathMap.computeIfAbsent(path, p -> 0).intValue() +1); + pathMap.put(path, pathMap.computeIfAbsent(path, p -> 0).intValue() + 1); } } @@ -83,7 +83,7 @@ public class WindowsFS extends HandleTrackingFS { if (v.intValue() == 1) { pathMap.remove(path); } else { - v = Integer.valueOf(v.intValue()-1); + v = Integer.valueOf(v.intValue() - 1); pathMap.put(path, v); } } @@ -101,15 +101,15 @@ public class WindowsFS extends HandleTrackingFS { } return null; } - - /** - * Checks that it's ok to delete {@code Path}. If the file - * is still open, it throws IOException("access denied"). + + /** + * Checks that it's ok to delete {@code Path}. If the file is still open, it throws + * IOException("access denied"). 
*/ private void checkDeleteAccess(Path path) throws IOException { Object key = getKeyOrNull(path); if (key != null) { - synchronized(openFiles) { + synchronized (openFiles) { if (openFiles.containsKey(key)) { throw new IOException("access denied: " + path); } @@ -134,14 +134,17 @@ public class WindowsFS extends HandleTrackingFS { if (key != null) { Object newKey = getKey(target); if (newKey.equals(key) == false) { - // we need to transfer ownership here if we have open files on this file since the getKey() method will - // return a different i-node next time we call it with the target path and our onClose method will + // we need to transfer ownership here if we have open files on this file since the + // getKey() method will + // return a different i-node next time we call it with the target path and our onClose + // method will // trip an assert Map map = openFiles.get(key); if (map != null) { Integer v = map.remove(target); if (v != null) { - Map pathIntegerMap = openFiles.computeIfAbsent(newKey, k -> new HashMap<>()); + Map pathIntegerMap = + openFiles.computeIfAbsent(newKey, k -> new HashMap<>()); Integer existingValue = pathIntegerMap.getOrDefault(target, 0); pathIntegerMap.put(target, existingValue + v); } diff --git a/lucene/test-framework/src/java/org/apache/lucene/mockfile/package-info.java b/lucene/test-framework/src/java/org/apache/lucene/mockfile/package-info.java index a7bdcaea974..543d0d64b33 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/mockfile/package-info.java +++ b/lucene/test-framework/src/java/org/apache/lucene/mockfile/package-info.java @@ -16,16 +16,19 @@ */ /** - * Support for testing/debugging with virtual filesystems - *

<p> - * The primary classes are: + * Support for testing/debugging with virtual filesystems + * + * <p>The primary classes are: + * * <ul> * <li>{@link org.apache.lucene.mockfile.LeakFS}: Fails tests if they leave open filehandles. - * <li>{@link org.apache.lucene.mockfile.VerboseFS}: Prints destructive filesystem operations to infostream. + * <li>{@link org.apache.lucene.mockfile.VerboseFS}: Prints destructive filesystem operations to + * infostream. * <li>{@link org.apache.lucene.mockfile.WindowsFS}: Acts like windows. * <li>{@link org.apache.lucene.mockfile.DisableFsyncFS}: Makes actual fsync calls a no-op. * <li>{@link org.apache.lucene.mockfile.ExtrasFS}: Adds 'bonus' files to directories. - * <li>{@link org.apache.lucene.mockfile.ShuffleFS}: Directory listings in an unpredictable but deterministic order. + * <li>{@link org.apache.lucene.mockfile.ShuffleFS}: Directory listings in an unpredictable but + * deterministic order. * </ul>
    */ package org.apache.lucene.mockfile; diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingBulkScorer.java b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingBulkScorer.java index 4445f3de8ea..c81bdeaa838 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingBulkScorer.java +++ b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingBulkScorer.java @@ -16,14 +16,12 @@ */ package org.apache.lucene.search; +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import java.io.IOException; import java.util.Random; - import org.apache.lucene.index.PostingsEnum; import org.apache.lucene.util.Bits; -import com.carrotsearch.randomizedtesting.generators.RandomNumbers; - /** Wraps a Scorer with additional checks */ final class AssertingBulkScorer extends BulkScorer { @@ -73,8 +71,10 @@ final class AssertingBulkScorer extends BulkScorer { } @Override - public int score(LeafCollector collector, Bits acceptDocs, int min, final int max) throws IOException { - assert min >= this.max: "Scoring backward: min=" + min + " while previous max was max=" + this.max; + public int score(LeafCollector collector, Bits acceptDocs, int min, final int max) + throws IOException { + assert min >= this.max + : "Scoring backward: min=" + min + " while previous max was max=" + this.max; assert min <= max : "max must be greater than min, got min=" + min + ", and max=" + max; this.max = max; collector = new AssertingLeafCollector(collector, min, max); @@ -92,5 +92,4 @@ final class AssertingBulkScorer extends BulkScorer { public String toString() { return "AssertingBulkScorer(" + in + ")"; } - } diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingCollector.java b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingCollector.java index bf842cefb68..4e6aef7b0af 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingCollector.java +++ b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingCollector.java @@ -17,12 +17,9 @@ package org.apache.lucene.search; import java.io.IOException; - import org.apache.lucene.index.LeafReaderContext; -/** - * A collector that asserts that it is used correctly. - */ +/** A collector that asserts that it is used correctly. 
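
For context, the invariant AssertingBulkScorer checks above in score(collector, acceptDocs, min, max) is that successive scoring windows move strictly forward and are well-formed. A self-contained sketch of just that contract (names invented, not Lucene's API):

// Successive score(..., min, max) calls must cover ascending, non-overlapping ranges.
class WindowOrderChecker {
  private int previousMax = 0;

  void onScoreWindow(int min, int max) {
    if (min < previousMax) {
      throw new AssertionError("Scoring backward: min=" + min + " previous max=" + previousMax);
    }
    if (min > max) {
      throw new AssertionError("max must be >= min, got min=" + min + ", max=" + max);
    }
    previousMax = max; // next window must start at or after this
  }
}
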
*/ class AssertingCollector extends FilterCollector { private int maxDoc = -1; @@ -52,13 +49,16 @@ class AssertingCollector extends FilterCollector { public void collect(int doc) throws IOException { // check that documents are scored in order globally, // not only per segment - assert docBase + doc >= maxDoc : "collection is not in order: current doc=" - + (docBase + doc) + " while " + maxDoc + " has already been collected"; + assert docBase + doc >= maxDoc + : "collection is not in order: current doc=" + + (docBase + doc) + + " while " + + maxDoc + + " has already been collected"; super.collect(doc); maxDoc = docBase + doc; } }; } - } diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingIndexSearcher.java b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingIndexSearcher.java index 7ab11c5895e..626139625bc 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingIndexSearcher.java +++ b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingIndexSearcher.java @@ -20,33 +20,33 @@ import java.io.IOException; import java.util.List; import java.util.Random; import java.util.concurrent.ExecutorService; - import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.LeafReaderContext; /** - * Helper class that adds some extra checks to ensure correct - * usage of {@code IndexSearcher} and {@code Weight}. + * Helper class that adds some extra checks to ensure correct usage of {@code IndexSearcher} and + * {@code Weight}. */ public class AssertingIndexSearcher extends IndexSearcher { final Random random; - public AssertingIndexSearcher(Random random, IndexReader r) { + + public AssertingIndexSearcher(Random random, IndexReader r) { super(r); this.random = new Random(random.nextLong()); } - - public AssertingIndexSearcher(Random random, IndexReaderContext context) { + + public AssertingIndexSearcher(Random random, IndexReaderContext context) { super(context); this.random = new Random(random.nextLong()); } - - public AssertingIndexSearcher(Random random, IndexReader r, ExecutorService ex) { + + public AssertingIndexSearcher(Random random, IndexReader r, ExecutorService ex) { super(r, ex); this.random = new Random(random.nextLong()); } - - public AssertingIndexSearcher(Random random, IndexReaderContext context, ExecutorService ex) { + + public AssertingIndexSearcher(Random random, IndexReaderContext context, ExecutorService ex) { super(context, ex); this.random = new Random(random.nextLong()); } @@ -67,7 +67,8 @@ public class AssertingIndexSearcher extends IndexSearcher { } @Override - protected void search(List leaves, Weight weight, Collector collector) throws IOException { + protected void search(List leaves, Weight weight, Collector collector) + throws IOException { assert weight instanceof AssertingWeight; super.search(leaves, weight, AssertingCollector.wrap(collector)); } @@ -76,5 +77,4 @@ public class AssertingIndexSearcher extends IndexSearcher { public String toString() { return "AssertingIndexSearcher(" + super.toString() + ")"; } - } diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingLeafCollector.java b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingLeafCollector.java index c43d4cd9ec2..e61b3b9bf4c 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingLeafCollector.java +++ b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingLeafCollector.java @@ -18,8 +18,7 @@ package 
org.apache.lucene.search; import java.io.IOException; -/** Wraps another Collector and checks that - * order is respected. */ +/** Wraps another Collector and checks that order is respected. */ class AssertingLeafCollector extends FilterLeafCollector { private final int min; @@ -54,5 +53,4 @@ class AssertingLeafCollector extends FilterLeafCollector { public DocIdSetIterator competitiveIterator() throws IOException { return in.competitiveIterator(); } - } diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingMatches.java b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingMatches.java index f57a83b7929..53827783473 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingMatches.java +++ b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingMatches.java @@ -33,8 +33,7 @@ class AssertingMatches implements Matches { @Override public MatchesIterator getMatches(String field) throws IOException { MatchesIterator mi = in.getMatches(field); - if (mi == null) - return null; + if (mi == null) return null; return new AssertingMatchesIterator(mi); } @@ -50,7 +49,7 @@ class AssertingMatches implements Matches { public static Matches unWrap(Matches m) { while (m instanceof AssertingMatches) { - m = (((AssertingMatches)m).in); + m = (((AssertingMatches) m).in); } return m; } diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingMatchesIterator.java b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingMatchesIterator.java index 36a56338a19..40239590dee 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingMatchesIterator.java +++ b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingMatchesIterator.java @@ -24,7 +24,11 @@ class AssertingMatchesIterator implements MatchesIterator { private final MatchesIterator in; private State state = State.UNPOSITIONED; - private enum State { UNPOSITIONED, ITERATING, EXHAUSTED } + private enum State { + UNPOSITIONED, + ITERATING, + EXHAUSTED + } AssertingMatchesIterator(MatchesIterator in) { this.in = in; @@ -36,8 +40,7 @@ class AssertingMatchesIterator implements MatchesIterator { boolean more = in.next(); if (more == false) { state = State.EXHAUSTED; - } - else { + } else { state = State.ITERATING; } return more; diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingQuery.java b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingQuery.java index 8989106b956..2bea8003838 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingQuery.java +++ b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingQuery.java @@ -18,7 +18,6 @@ package org.apache.lucene.search; import java.io.IOException; import java.util.Random; - import org.apache.lucene.index.IndexReader; /** Assertion-enabled query. 
*/ @@ -39,9 +38,11 @@ public final class AssertingQuery extends Query { } @Override - public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) + throws IOException { assert boost >= 0; - return new AssertingWeight(new Random(random.nextLong()), in.createWeight(searcher, scoreMode, boost), scoreMode); + return new AssertingWeight( + new Random(random.nextLong()), in.createWeight(searcher, scoreMode, boost), scoreMode); } @Override @@ -51,8 +52,7 @@ public final class AssertingQuery extends Query { @Override public boolean equals(Object other) { - return sameClassAs(other) && - in.equals(((AssertingQuery) other).in); + return sameClassAs(other) && in.equals(((AssertingQuery) other).in); } @Override @@ -82,5 +82,4 @@ public final class AssertingQuery extends Query { public void visit(QueryVisitor visitor) { in.visit(visitor); } - } diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingScorable.java b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingScorable.java index 208eb4b02c3..1534b0e95e5 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingScorable.java +++ b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingScorable.java @@ -19,10 +19,7 @@ package org.apache.lucene.search; import java.io.IOException; -/** - * Wraps another Scorable and asserts that scores are reasonable - * and only called when positioned - */ +/** Wraps another Scorable and asserts that scores are reasonable and only called when positioned */ public class AssertingScorable extends FilterScorable { public AssertingScorable(Scorable in) { @@ -32,9 +29,10 @@ public class AssertingScorable extends FilterScorable { @Override public float score() throws IOException { int docId = docID(); - assert docId != -1 && docId != DocIdSetIterator.NO_MORE_DOCS : "score() called on unpositioned Scorable docid=" + docID(); + assert docId != -1 && docId != DocIdSetIterator.NO_MORE_DOCS + : "score() called on unpositioned Scorable docid=" + docID(); final float score = in.score(); - assert !Float.isNaN(score) : "NaN score for in="+in; + assert !Float.isNaN(score) : "NaN score for in=" + in; return score; } @@ -52,13 +50,9 @@ public class AssertingScorable extends FilterScorable { public static Scorable unwrap(Scorable in) { while (true) { - if (in instanceof AssertingScorable) - in = ((AssertingScorable)in).in; - else if (in instanceof AssertingScorer) - in = ((AssertingScorer)in).in; - else - return in; + if (in instanceof AssertingScorable) in = ((AssertingScorable) in).in; + else if (in instanceof AssertingScorer) in = ((AssertingScorer) in).in; + else return in; } } - } diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingScorer.java b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingScorer.java index 01477f39640..9c231df102e 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingScorer.java +++ b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingScorer.java @@ -24,7 +24,12 @@ import java.util.Random; /** Wraps a Scorer with additional checks */ public class AssertingScorer extends Scorer { - static enum IteratorState { APPROXIMATING, ITERATING, SHALLOW_ADVANCING, FINISHED }; + static enum IteratorState { + APPROXIMATING, + ITERATING, + SHALLOW_ADVANCING, + FINISHED + }; public static Scorer wrap(Random random, Scorer other, ScoreMode scoreMode) 
{ if (other == null) { @@ -57,11 +62,11 @@ public class AssertingScorer extends Scorer { boolean iterating() { // we cannot assert that state == ITERATING because of CachingScorerWrapper switch (docID()) { - case -1: - case DocIdSetIterator.NO_MORE_DOCS: - return false; - default: - return state == IteratorState.ITERATING; + case -1: + case DocIdSetIterator.NO_MORE_DOCS: + return false; + default: + return state == IteratorState.ITERATING; } } @@ -77,7 +82,11 @@ public class AssertingScorer extends Scorer { @Override public int advanceShallow(int target) throws IOException { assert scoreMode.needsScores(); - assert target >= lastShallowTarget : "called on decreasing targets: target = " + target + " < last target = " + lastShallowTarget; + assert target >= lastShallowTarget + : "called on decreasing targets: target = " + + target + + " < last target = " + + lastShallowTarget; assert target >= docID() : "target = " + target + " < docID = " + docID(); int upTo = in.advanceShallow(target); assert upTo >= target : "upTo = " + upTo + " < target = " + target; @@ -90,7 +99,8 @@ public class AssertingScorer extends Scorer { public float getMaxScore(int upTo) throws IOException { assert scoreMode.needsScores(); assert upTo >= lastShallowTarget : "uTo = " + upTo + " < last target = " + lastShallowTarget; - assert docID() >= 0 || lastShallowTarget >= 0 : "Cannot get max scores until the iterator is positioned or advanceShallow has been called"; + assert docID() >= 0 || lastShallowTarget >= 0 + : "Cannot get max scores until the iterator is positioned or advanceShallow has been called"; float maxScore = in.getMaxScore(upTo); return maxScore; } @@ -100,7 +110,7 @@ public class AssertingScorer extends Scorer { assert scoreMode.needsScores(); assert iterating() : state; final float score = in.score(); - assert !Float.isNaN(score) : "NaN score for in="+in; + assert !Float.isNaN(score) : "NaN score for in=" + in; assert lastShallowTarget == -1 || score <= getMaxScore(docID()); assert Float.compare(score, 0f) >= 0 : score; return score; @@ -130,7 +140,7 @@ public class AssertingScorer extends Scorer { final DocIdSetIterator in = this.in.iterator(); assert in != null; return new DocIdSetIterator() { - + @Override public int docID() { assert AssertingScorer.this.in.docID() == in.docID(); @@ -185,57 +195,58 @@ public class AssertingScorer extends Scorer { } final DocIdSetIterator inApproximation = in.approximation(); assert inApproximation.docID() == doc; - final DocIdSetIterator assertingApproximation = new DocIdSetIterator() { + final DocIdSetIterator assertingApproximation = + new DocIdSetIterator() { - @Override - public int docID() { - return inApproximation.docID(); - } + @Override + public int docID() { + return inApproximation.docID(); + } - @Override - public int nextDoc() throws IOException { - assert state != IteratorState.FINISHED : "advance() called after NO_MORE_DOCS"; - assert docID() + 1 >= lastShallowTarget; - final int nextDoc = inApproximation.nextDoc(); - assert nextDoc > doc : "backwards advance from: " + doc + " to: " + nextDoc; - if (nextDoc == NO_MORE_DOCS) { - state = IteratorState.FINISHED; - } else { - state = IteratorState.APPROXIMATING; - } - assert inApproximation.docID() == nextDoc; - return doc = nextDoc; - } + @Override + public int nextDoc() throws IOException { + assert state != IteratorState.FINISHED : "advance() called after NO_MORE_DOCS"; + assert docID() + 1 >= lastShallowTarget; + final int nextDoc = inApproximation.nextDoc(); + assert nextDoc > doc : "backwards advance 
from: " + doc + " to: " + nextDoc; + if (nextDoc == NO_MORE_DOCS) { + state = IteratorState.FINISHED; + } else { + state = IteratorState.APPROXIMATING; + } + assert inApproximation.docID() == nextDoc; + return doc = nextDoc; + } - @Override - public int advance(int target) throws IOException { - assert state != IteratorState.FINISHED : "advance() called after NO_MORE_DOCS"; - assert target > doc : "target must be > docID(), got " + target + " <= " + doc; - assert target >= lastShallowTarget; - final int advanced = inApproximation.advance(target); - assert advanced >= target : "backwards advance from: " + target + " to: " + advanced; - if (advanced == NO_MORE_DOCS) { - state = IteratorState.FINISHED; - } else { - state = IteratorState.APPROXIMATING; - } - assert inApproximation.docID() == advanced; - return doc = advanced; - } + @Override + public int advance(int target) throws IOException { + assert state != IteratorState.FINISHED : "advance() called after NO_MORE_DOCS"; + assert target > doc : "target must be > docID(), got " + target + " <= " + doc; + assert target >= lastShallowTarget; + final int advanced = inApproximation.advance(target); + assert advanced >= target : "backwards advance from: " + target + " to: " + advanced; + if (advanced == NO_MORE_DOCS) { + state = IteratorState.FINISHED; + } else { + state = IteratorState.APPROXIMATING; + } + assert inApproximation.docID() == advanced; + return doc = advanced; + } - @Override - public long cost() { - return inApproximation.cost(); - } - - }; + @Override + public long cost() { + return inApproximation.cost(); + } + }; return new TwoPhaseIterator(assertingApproximation) { @Override public boolean matches() throws IOException { assert state == IteratorState.APPROXIMATING : state; final boolean matches = in.matches(); if (matches) { - assert AssertingScorer.this.in.iterator().docID() == inApproximation.docID() : "Approximation and scorer don't advance synchronously"; + assert AssertingScorer.this.in.iterator().docID() == inApproximation.docID() + : "Approximation and scorer don't advance synchronously"; doc = inApproximation.docID(); state = IteratorState.ITERATING; } @@ -245,7 +256,7 @@ public class AssertingScorer extends Scorer { @Override public float matchCost() { float matchCost = in.matchCost(); - assert ! 
Float.isNaN(matchCost); + assert !Float.isNaN(matchCost); assert matchCost >= 0; return matchCost; } @@ -257,4 +268,3 @@ public class AssertingScorer extends Scorer { }; } } - diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingWeight.java b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingWeight.java index 88b9499e537..e61224cf717 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingWeight.java +++ b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingWeight.java @@ -16,12 +16,12 @@ */ package org.apache.lucene.search; +import static org.apache.lucene.util.LuceneTestCase.usually; + import java.io.IOException; import java.util.Random; import org.apache.lucene.index.LeafReaderContext; -import static org.apache.lucene.util.LuceneTestCase.usually; - class AssertingWeight extends FilterWeight { final Random random; @@ -36,8 +36,7 @@ class AssertingWeight extends FilterWeight { @Override public Matches matches(LeafReaderContext context, int doc) throws IOException { Matches matches = in.matches(context, doc); - if (matches == null) - return null; + if (matches == null) return null; return new AssertingMatches(matches); } @@ -68,12 +67,14 @@ class AssertingWeight extends FilterWeight { } return new ScorerSupplier() { private boolean getCalled = false; + @Override public Scorer get(long leadCost) throws IOException { assert getCalled == false; getCalled = true; assert leadCost >= 0 : leadCost; - return AssertingScorer.wrap(new Random(random.nextLong()), inScorerSupplier.get(leadCost), scoreMode); + return AssertingScorer.wrap( + new Random(random.nextLong()), inScorerSupplier.get(leadCost), scoreMode); } @Override @@ -99,6 +100,7 @@ class AssertingWeight extends FilterWeight { if (inScorer == null) { return null; } - return AssertingBulkScorer.wrap(new Random(random.nextLong()), inScorer, context.reader().maxDoc(), scoreMode); + return AssertingBulkScorer.wrap( + new Random(random.nextLong()), inScorer, context.reader().maxDoc(), scoreMode); } } diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/BaseExplanationTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/search/BaseExplanationTestCase.java index 24e86e12230..76444c552b4 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/search/BaseExplanationTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/search/BaseExplanationTestCase.java @@ -16,6 +16,8 @@ */ package org.apache.lucene.search; +import static org.apache.lucene.search.spans.SpanTestUtil.*; + import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; @@ -31,31 +33,25 @@ import org.apache.lucene.util.LuceneTestCase; import org.junit.AfterClass; import org.junit.BeforeClass; -import static org.apache.lucene.search.spans.SpanTestUtil.*; - /** - * Tests primitive queries (ie: that rewrite to themselves) to - * insure they match the expected set of docs, and that the score of each - * match is equal to the value of the scores explanation. - * - *
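
The ScorerSupplier returned by AssertingWeight above enforces a small protocol: get(leadCost) may be called at most once, with a non-negative lead cost. A minimal standalone sketch of that discipline (names invented; run with -ea so the asserts fire, as Lucene tests do):

import java.io.IOException;

// Toy supplier mirroring the "get() at most once" contract checked above.
abstract class OneShotSupplier<T> {
  private boolean getCalled = false;

  final T get(long leadCost) throws IOException {
    assert getCalled == false : "get() must be called at most once";
    assert leadCost >= 0 : leadCost;
    getCalled = true;
    return doGet(leadCost);
  }

  protected abstract T doGet(long leadCost) throws IOException;
}
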

<p> - * The assumption is that if all of the "primitive" queries work well, - * then anything that rewrites to a primitive will work well also. - * <p>

+ * Tests primitive queries (i.e. that rewrite to themselves) to ensure they match the expected set of + * docs, and that the score of each match is equal to the value of the score's explanation. * + * <p>

    The assumption is that if all of the "primitive" queries work well, then anything that + * rewrites to a primitive will work well also. */ public abstract class BaseExplanationTestCase extends LuceneTestCase { protected static IndexSearcher searcher; protected static IndexReader reader; protected static Directory directory; protected static Analyzer analyzer; - + public static final String KEY = "KEY"; // boost on this field is the same as the iterator for the doc public static final String FIELD = "field"; // same contents, but no field boost public static final String ALTFIELD = "alt"; - + @AfterClass public static void afterClassTestExplanations() throws Exception { searcher = null; @@ -66,12 +62,16 @@ public abstract class BaseExplanationTestCase extends LuceneTestCase { analyzer.close(); analyzer = null; } - + @BeforeClass public static void beforeClassTestExplanations() throws Exception { directory = newDirectory(); analyzer = new MockAnalyzer(random()); - try (RandomIndexWriter writer = new RandomIndexWriter(random(), directory, newIndexWriterConfig(analyzer).setMergePolicy(newLogMergePolicy()))) { + try (RandomIndexWriter writer = + new RandomIndexWriter( + random(), + directory, + newIndexWriterConfig(analyzer).setMergePolicy(newLogMergePolicy()))) { for (int i = 0; i < docFields.length; i++) { writer.addDocument(createDoc(i)); } @@ -82,30 +82,27 @@ public abstract class BaseExplanationTestCase extends LuceneTestCase { public static Document createDoc(int index) { Document doc = new Document(); - doc.add(newStringField(KEY, ""+index, Field.Store.NO)); - doc.add(new SortedDocValuesField(KEY, new BytesRef(""+index))); + doc.add(newStringField(KEY, "" + index, Field.Store.NO)); + doc.add(new SortedDocValuesField(KEY, new BytesRef("" + index))); Field f = newTextField(FIELD, docFields[index], Field.Store.NO); doc.add(f); doc.add(newTextField(ALTFIELD, docFields[index], Field.Store.NO)); return doc; } - + protected static final String[] docFields = { - "w1 w2 w3 w4 w5", - "w1 w3 w2 w3 zz", - "w1 xx w2 yy w3", - "w1 w3 xx w2 yy w3 zz" + "w1 w2 w3 w4 w5", "w1 w3 w2 w3 zz", "w1 xx w2 yy w3", "w1 w3 xx w2 yy w3 zz" }; - - /** - * check the expDocNrs match and have scores that match the explanations. - * Query may be randomly wrapped in a BooleanQuery with a term that matches no documents. + + /** + * check the expDocNrs match and have scores that match the explanations. Query may be randomly + * wrapped in a BooleanQuery with a term that matches no documents. 
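
The "randomly wrapped" behavior referred to here is visible in qtest below; in isolation it looks roughly like this (the wrapper class is invented for illustration, but the Lucene types and the never-matching term are exactly those used in this file):

import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

// Adding a SHOULD clause that matches nothing must not change the hit set.
final class QTestWrap {
  static Query wrapWithNoOpShould(Query q) {
    BooleanQuery.Builder bq = new BooleanQuery.Builder();
    bq.add(q, BooleanClause.Occur.SHOULD);
    bq.add(new TermQuery(new Term("NEVER", "MATCH")), BooleanClause.Occur.SHOULD);
    return bq.build();
  }
}
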
*/ public void qtest(Query q, int[] expDocNrs) throws Exception { if (random().nextBoolean()) { BooleanQuery.Builder bq = new BooleanQuery.Builder(); bq.add(q, BooleanClause.Occur.SHOULD); - bq.add(new TermQuery(new Term("NEVER","MATCH")), BooleanClause.Occur.SHOULD); + bq.add(new TermQuery(new Term("NEVER", "MATCH")), BooleanClause.Occur.SHOULD); q = bq.build(); } CheckHits.checkHitCollector(random(), q, FIELD, searcher, expDocNrs); @@ -113,6 +110,7 @@ public abstract class BaseExplanationTestCase extends LuceneTestCase { /** * Tests a query using qtest after wrapping it with both optB and reqB + * * @see #qtest * @see #reqB * @see #optB @@ -121,14 +119,13 @@ public abstract class BaseExplanationTestCase extends LuceneTestCase { qtest(reqB(q), expDocNrs); qtest(optB(q), expDocNrs); } - - /** - * Convenience subclass of TermsQuery - */ + + /** Convenience subclass of TermsQuery */ protected Query matchTheseItems(int[] terms) { BooleanQuery.Builder query = new BooleanQuery.Builder(); - for(int term : terms) { - query.add(new BooleanClause(new TermQuery(new Term(KEY, ""+term)), BooleanClause.Occur.SHOULD)); + for (int term : terms) { + query.add( + new BooleanClause(new TermQuery(new Term(KEY, "" + term)), BooleanClause.Occur.SHOULD)); } return query.build(); } @@ -146,7 +143,7 @@ public abstract class BaseExplanationTestCase extends LuceneTestCase { public SpanQuery st(String s) { return spanTermQuery(FIELD, s); } - + /** MACRO for SpanNotQuery */ public SpanQuery snot(SpanQuery i, SpanQuery e) { return spanNotQuery(i, e); @@ -156,12 +153,12 @@ public abstract class BaseExplanationTestCase extends LuceneTestCase { public SpanQuery sor(String s, String e) { return spanOrQuery(FIELD, s, e); } - + /** MACRO for SpanOrQuery containing two SpanQueries */ public SpanQuery sor(SpanQuery s, SpanQuery e) { return spanOrQuery(s, e); } - + /** MACRO for SpanOrQuery containing three SpanTerm queries */ public SpanQuery sor(String s, String m, String e) { return spanOrQuery(FIELD, s, m, e); @@ -170,12 +167,12 @@ public abstract class BaseExplanationTestCase extends LuceneTestCase { public SpanQuery sor(SpanQuery s, SpanQuery m, SpanQuery e) { return spanOrQuery(s, m, e); } - + /** MACRO for SpanNearQuery containing two SpanTerm queries */ public SpanQuery snear(String s, String e, int slop, boolean inOrder) { return snear(st(s), st(e), slop, inOrder); } - + /** MACRO for SpanNearQuery containing two SpanQueries */ public SpanQuery snear(SpanQuery s, SpanQuery e, int slop, boolean inOrder) { if (inOrder) { @@ -184,11 +181,9 @@ public abstract class BaseExplanationTestCase extends LuceneTestCase { return spanNearUnorderedQuery(slop, s, e); } } - - + /** MACRO for SpanNearQuery containing three SpanTerm queries */ - public SpanQuery snear(String s, String m, String e, - int slop, boolean inOrder) { + public SpanQuery snear(String s, String m, String e, int slop, boolean inOrder) { return snear(st(s), st(m), st(e), slop, inOrder); } /** MACRO for SpanNearQuery containing three SpanQueries */ @@ -199,31 +194,31 @@ public abstract class BaseExplanationTestCase extends LuceneTestCase { return spanNearUnorderedQuery(slop, s, m, e); } } - + /** MACRO for SpanFirst(SpanTermQuery) */ public SpanQuery sf(String s, int b) { return spanFirstQuery(st(s), b); } /** - * MACRO: Wraps a Query in a BooleanQuery so that it is optional, along - * with a second prohibited clause which will never match anything + * MACRO: Wraps a Query in a BooleanQuery so that it is optional, along with a second prohibited + * clause which 
will never match anything */ public Query optB(Query q) throws Exception { BooleanQuery.Builder bq = new BooleanQuery.Builder(); bq.add(q, BooleanClause.Occur.SHOULD); - bq.add(new TermQuery(new Term("NEVER","MATCH")), BooleanClause.Occur.MUST_NOT); + bq.add(new TermQuery(new Term("NEVER", "MATCH")), BooleanClause.Occur.MUST_NOT); return bq.build(); } /** - * MACRO: Wraps a Query in a BooleanQuery so that it is required, along - * with a second optional clause which will match everything + * MACRO: Wraps a Query in a BooleanQuery so that it is required, along with a second optional + * clause which will match everything */ public Query reqB(Query q) throws Exception { BooleanQuery.Builder bq = new BooleanQuery.Builder(); bq.add(q, BooleanClause.Occur.MUST); - bq.add(new TermQuery(new Term(FIELD,"w1")), BooleanClause.Occur.SHOULD); + bq.add(new TermQuery(new Term(FIELD, "w1")), BooleanClause.Occur.SHOULD); return bq.build(); } } diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/BaseRangeFieldQueryTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/search/BaseRangeFieldQueryTestCase.java index 864254dcba8..6e1f705a356 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/search/BaseRangeFieldQueryTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/search/BaseRangeFieldQueryTestCase.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.Arrays; import java.util.HashSet; import java.util.Set; - import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.NumericDocValuesField; @@ -42,7 +41,8 @@ import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; /** - * Abstract class to do basic tests for a RangeField query. Testing rigor inspired by {@code BaseGeoPointTestCase} + * Abstract class to do basic tests for a RangeField query. 
Testing rigor inspired by {@code + * BaseGeoPointTestCase} */ public abstract class BaseRangeFieldQueryTestCase extends LuceneTestCase { protected abstract Field newRangeField(Range box); @@ -85,7 +85,7 @@ public abstract class BaseRangeFieldQueryTestCase extends LuceneTestCase { int numDocs = atLeast(1000); int dimensions = dimension(); Range[][] ranges = new Range[numDocs][]; - Range[] theRange = new Range[] {nextRange(dimensions)}; + Range[] theRange = new Range[] {nextRange(dimensions)}; Arrays.fill(ranges, theRange); verify(ranges); } @@ -96,14 +96,14 @@ public abstract class BaseRangeFieldQueryTestCase extends LuceneTestCase { int dimensions = dimension(); int cardinality = TestUtil.nextInt(random(), 2, 20); - Range[][] diffRanges = new Range[cardinality][]; + Range[][] diffRanges = new Range[cardinality][]; for (int i = 0; i < cardinality; i++) { - diffRanges[i] = new Range[] {nextRange(dimensions)}; + diffRanges[i] = new Range[] {nextRange(dimensions)}; } Range[][] ranges = new Range[numDocs][]; for (int i = 0; i < numDocs; i++) { - ranges[i] = diffRanges[random().nextInt(cardinality)]; + ranges[i] = diffRanges[random().nextInt(cardinality)]; } verify(ranges); } @@ -120,7 +120,8 @@ public abstract class BaseRangeFieldQueryTestCase extends LuceneTestCase { boolean haveRealDoc = true; - nextdoc: for (int id=0; id 0 && x < 9 && haveRealDoc) { int oldID; - int i=0; + int i = 0; // don't step on missing ranges: while (true) { oldID = random().nextInt(id); @@ -156,29 +157,54 @@ public abstract class BaseRangeFieldQueryTestCase extends LuceneTestCase { } } - if (x == dimensions*2) { + if (x == dimensions * 2) { // Fully identical box (use first box in case current is multivalued but old is not) - for (int d=0; d 50000) { @@ -207,19 +233,19 @@ public abstract class BaseRangeFieldQueryTestCase extends LuceneTestCase { Set deleted = new HashSet<>(); IndexWriter w = new IndexWriter(dir, iwc); - for (int id=0; id < ranges.length; ++id) { + for (int id = 0; id < ranges.length; ++id) { Document doc = new Document(); - doc.add(newStringField("id", ""+id, Field.Store.NO)); + doc.add(newStringField("id", "" + id, Field.Store.NO)); doc.add(new NumericDocValuesField("id", id)); if (ranges[id][0].isMissing == false) { - for (int n=0; n 0 && random().nextInt(100) == 1) { int idToDelete = random().nextInt(id); - w.deleteDocuments(new Term("id", ""+idToDelete)); + w.deleteDocuments(new Term("id", "" + idToDelete)); deleted.add(idToDelete); if (VERBOSE) { System.out.println(" delete id=" + idToDelete); @@ -239,7 +265,7 @@ public abstract class BaseRangeFieldQueryTestCase extends LuceneTestCase { Bits liveDocs = MultiBits.getLiveDocs(s.getIndexReader()); int maxDoc = s.getIndexReader().maxDoc(); - for (int iter=0; iter 1) ? 
"es=" : "=").append(ranges[id][0]); - for (int n=1; n ignore = new TreeSet<>(); for (int i = 0; i < results.length; i++) { ignore.add(Integer.valueOf(results[i])); } - + int maxDoc = searcher.getIndexReader().maxDoc(); for (int doc = 0; doc < maxDoc; doc++) { if (ignore.contains(Integer.valueOf(doc))) continue; Explanation exp = searcher.explain(q, doc); - assertNotNull("Explanation of [["+d+"]] for #"+doc+" is null", - exp); - assertFalse("Explanation of [["+d+"]] for #"+doc+ - " doesn't indicate non-match: " + exp.toString(), - exp.isMatch()); + assertNotNull("Explanation of [[" + d + "]] for #" + doc + " is null", exp); + assertFalse( + "Explanation of [[" + + d + + "]] for #" + + doc + + " doesn't indicate non-match: " + + exp.toString(), + exp.isMatch()); } - } - + /** - * Tests that a query matches the an expected set of documents using a - * HitCollector. + * Tests that a query matches the an expected set of documents using a HitCollector. + * + *

<p>Note that when using the HitCollector API, documents will be collected if they "match" + * regardless of what their score is. * - * <p>

- * Note that when using the HitCollector API, documents will be collected - * if they "match" regardless of what their score is. - * <p>

    * @param query the query to test * @param searcher the searcher to test the query against * @param defaultFieldName used for displaying the query in assertion messages * @param results a list of documentIds that must match the query * @see #checkHits */ - public static void checkHitCollector(Random random, Query query, String defaultFieldName, - IndexSearcher searcher, int[] results) - throws IOException { + public static void checkHitCollector( + Random random, Query query, String defaultFieldName, IndexSearcher searcher, int[] results) + throws IOException { + + QueryUtils.check(random, query, searcher); - QueryUtils.check(random,query,searcher); - Set correct = new TreeSet<>(); for (int i = 0; i < results.length; i++) { correct.add(Integer.valueOf(results[i])); @@ -96,40 +87,39 @@ public class CheckHits { final Collector c = new SetCollector(actual); searcher.search(query, c); - assertEquals("Simple: " + query.toString(defaultFieldName), - correct, actual); + assertEquals("Simple: " + query.toString(defaultFieldName), correct, actual); for (int i = -1; i < 2; i++) { actual.clear(); - IndexSearcher s = QueryUtils.wrapUnderlyingReader - (random, searcher, i); + IndexSearcher s = QueryUtils.wrapUnderlyingReader(random, searcher, i); s.search(query, c); - assertEquals("Wrap Reader " + i + ": " + - query.toString(defaultFieldName), - correct, actual); + assertEquals("Wrap Reader " + i + ": " + query.toString(defaultFieldName), correct, actual); } } - /** - * Just collects document ids into a set. - */ + /** Just collects document ids into a set. */ public static class SetCollector extends SimpleCollector { final Set bag; + public SetCollector(Set bag) { this.bag = bag; } + private int base = 0; + @Override public void setScorer(Scorable scorer) throws IOException {} + @Override public void collect(int doc) { bag.add(Integer.valueOf(doc + base)); } + @Override protected void doSetNextReader(LeafReaderContext context) throws IOException { base = context.docBase; } - + @Override public ScoreMode scoreMode() { return ScoreMode.COMPLETE_NO_SCORES; @@ -139,10 +129,9 @@ public class CheckHits { /** * Tests that a query matches the an expected set of documents using Hits. * - *

<p> - * Note that when using the Hits API, documents will only be returned - * if they have a positive normalized score. - * <p>

+ * <p>

    Note that when using the Hits API, documents will only be returned if they have a positive + * normalized score. + * * @param query the query to test * @param searcher the searcher to test the query against * @param defaultFieldName used for displaing the query in assertion messages @@ -150,12 +139,8 @@ public class CheckHits { * @see #checkHitCollector */ public static void checkHits( - Random random, - Query query, - String defaultFieldName, - IndexSearcher searcher, - int[] results) - throws IOException { + Random random, Query query, String defaultFieldName, IndexSearcher searcher, int[] results) + throws IOException { ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs; @@ -171,7 +156,7 @@ public class CheckHits { assertEquals(query.toString(defaultFieldName), correct, actual); - QueryUtils.check(random, query,searcher, LuceneTestCase.rarely(random)); + QueryUtils.check(random, query, searcher, LuceneTestCase.rarely(random)); } /** Tests that a Hits has an expected order of documents */ @@ -182,14 +167,12 @@ public class CheckHits { } } - /** Tests that two queries have an expected order of documents, - * and that the two queries have the same score values. + /** + * Tests that two queries have an expected order of documents, and that the two queries have the + * same score values. */ public static void checkHitsQuery( - Query query, - ScoreDoc[] hits1, - ScoreDoc[] hits2, - int[] results) { + Query query, ScoreDoc[] hits1, ScoreDoc[] hits2, int[] results) { checkDocIds("hits1", results, hits1); checkDocIds("hits2", results, hits2); @@ -197,49 +180,62 @@ public class CheckHits { } public static void checkEqual(Query query, ScoreDoc[] hits1, ScoreDoc[] hits2) { - final float scoreTolerance = 1.0e-6f; - if (hits1.length != hits2.length) { - fail("Unequal lengths: hits1="+hits1.length+",hits2="+hits2.length); - } + final float scoreTolerance = 1.0e-6f; + if (hits1.length != hits2.length) { + fail("Unequal lengths: hits1=" + hits1.length + ",hits2=" + hits2.length); + } for (int i = 0; i < hits1.length; i++) { if (hits1[i].doc != hits2[i].doc) { - fail("Hit " + i + " docnumbers don't match\n" - + hits2str(hits1, hits2,0,0) - + "for query:" + query.toString()); + fail( + ("Hit " + i) + + (" docnumbers don't match\n" + hits2str(hits1, hits2, 0, 0)) + + ("for query:" + query.toString())); } if ((hits1[i].doc != hits2[i].doc) - || Math.abs(hits1[i].score - hits2[i].score) > scoreTolerance) - { - fail("Hit " + i + ", doc nrs " + hits1[i].doc + " and " + hits2[i].doc - + "\nunequal : " + hits1[i].score - + "\n and: " + hits2[i].score - + "\nfor query:" + query.toString()); + || Math.abs(hits1[i].score - hits2[i].score) > scoreTolerance) { + fail( + ("Hit " + i) + + (", doc nrs " + hits1[i].doc) + + (" and " + hits2[i].doc) + + ("\nunequal : " + hits1[i].score) + + ("\n and: " + hits2[i].score) + + ("\nfor query:" + query.toString())); } } } public static String hits2str(ScoreDoc[] hits1, ScoreDoc[] hits2, int start, int end) { StringBuilder sb = new StringBuilder(); - int len1=hits1==null ? 0 : hits1.length; - int len2=hits2==null ? 0 : hits2.length; - if (end<=0) { - end = Math.max(len1,len2); + int len1 = hits1 == null ? 0 : hits1.length; + int len2 = hits2 == null ? 
0 : hits2.length; + if (end <= 0) { + end = Math.max(len1, len2); } - sb.append("Hits length1=").append(len1).append("\tlength2=").append(len2); + sb.append("Hits length1=").append(len1).append("\tlength2=").append(len2); sb.append('\n'); - for (int i=start; i 0); } if (detail.length > 0) { - if (detail.length==1 && COMPUTED_FROM_PATTERN.matcher(descr).matches() == false) { - // simple containment, unless it's a freq of: (which lets a query explain how the freq is calculated), + if (detail.length == 1 && COMPUTED_FROM_PATTERN.matcher(descr).matches() == false) { + // simple containment, unless it's a freq of: (which lets a query explain how the freq is + // calculated), // just verify contained expl has same score if (expl.getDescription().endsWith("with freq of:") == false // with dismax, even if there is a single sub explanation, its // score might be different if the score is negative && (score >= 0 || expl.getDescription().endsWith("times others of:") == false)) { - verifyExplanation(q,doc,score,deep,detail[0]); + verifyExplanation(q, doc, score, deep, detail[0]); } } else { // explanation must either: @@ -370,16 +370,17 @@ public class CheckHits { boolean productOf = descr.endsWith("product of:"); boolean sumOf = descr.endsWith("sum of:"); boolean maxOf = descr.endsWith("max of:"); - boolean computedOf = descr.indexOf("computed as") > 0 && COMPUTED_FROM_PATTERN.matcher(descr).matches(); + boolean computedOf = + descr.indexOf("computed as") > 0 && COMPUTED_FROM_PATTERN.matcher(descr).matches(); boolean maxTimesOthers = false; if (!(productOf || sumOf || maxOf || computedOf)) { // maybe 'max plus x times others' int k1 = descr.indexOf("max plus "); - if (k1>=0) { + if (k1 >= 0) { k1 += "max plus ".length(); - int k2 = descr.indexOf(" ",k1); + int k2 = descr.indexOf(" ", k1); try { - x = Float.parseFloat(descr.substring(k1,k2).trim()); + x = Float.parseFloat(descr.substring(k1, k2).trim()); if (descr.substring(k2).trim().equals("times others of:")) { maxTimesOthers = true; } @@ -390,20 +391,23 @@ public class CheckHits { // TODO: this is a TERRIBLE assertion!!!! if (false == (productOf || sumOf || maxOf || computedOf || maxTimesOthers)) { fail( - q+": multi valued explanation description=\""+descr - +"\" must be 'max of plus x times others', 'computed as x from:' or end with 'product of'" - +" or 'sum of:' or 'max of:' - "+expl); + q + + ": multi valued explanation description=\"" + + descr + + "\" must be 'max of plus x times others', 'computed as x from:' or end with 'product of'" + + " or 'sum of:' or 'max of:' - " + + expl); } double sum = 0; float product = 1; float max = Float.NEGATIVE_INFINITY; double maxError = 0; - for (int i=0; iNOTE: this HitCollector should only be used with the Query and Searcher specified at when it + * is constructed. 
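
For intuition, the recursive invariant verifyExplanation asserts above can be reduced to checks of this shape. This standalone sketch handles only the "sum of:" case with an assumed flat tolerance; the real method also covers "product of:", "max of:", "computed as ... from:" and "max plus x times others", and accumulates the error bound per child:

// Sketch: a composite explanation's value must equal its children combined
// under the operator named in its description.
final class ExplanationSumCheck {
  static void assertSumOf(float parentValue, float[] childValues) {
    double sum = 0;
    for (float v : childValues) sum += v;
    // assumed tolerance, scaled to the magnitude of the sum
    if (Math.abs(sum - parentValue) > 1e-5 * Math.max(1.0, Math.abs(sum))) {
      throw new AssertionError("children sum to " + sum + " but parent says " + parentValue);
    }
  }
}
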
* * @see CheckHits#verifyExplanation */ @@ -495,26 +500,27 @@ public class CheckHits { IndexSearcher s; String d; boolean deep; - + Scorable scorer; private int base = 0; /** Constructs an instance which does shallow tests on the Explanation */ public ExplanationAsserter(Query q, String defaultFieldName, IndexSearcher s) { - this(q,defaultFieldName,s,false); - } + this(q, defaultFieldName, s, false); + } + public ExplanationAsserter(Query q, String defaultFieldName, IndexSearcher s, boolean deep) { - this.q=q; - this.s=s; + this.q = q; + this.s = s; this.d = q.toString(defaultFieldName); - this.deep=deep; - } - + this.deep = deep; + } + @Override public void setScorer(Scorable scorer) throws IOException { - this.scorer = scorer; + this.scorer = scorer; } - + @Override public void collect(int doc) throws IOException { Explanation exp = null; @@ -522,21 +528,26 @@ public class CheckHits { try { exp = s.explain(q, doc); } catch (IOException e) { - throw new RuntimeException - ("exception in hitcollector of [["+d+"]] for #"+doc, e); + throw new RuntimeException("exception in hitcollector of [[" + d + "]] for #" + doc, e); } - - assertNotNull("Explanation of [["+d+"]] for #"+doc+" is null", exp); - verifyExplanation(d,doc,scorer.score(),deep,exp); - assertTrue("Explanation of [["+d+"]] for #"+ doc + - " does not indicate match: " + exp.toString(), - exp.isMatch()); + + assertNotNull("Explanation of [[" + d + "]] for #" + doc + " is null", exp); + verifyExplanation(d, doc, scorer.score(), deep, exp); + assertTrue( + "Explanation of [[" + + d + + "]] for #" + + doc + + " does not indicate match: " + + exp.toString(), + exp.isMatch()); } + @Override protected void doSetNextReader(LeafReaderContext context) throws IOException { base = context.docBase; } - + @Override public ScoreMode scoreMode() { return ScoreMode.COMPLETE; @@ -544,10 +555,10 @@ public class CheckHits { } /** - * Asserts that the {@link Matches} from a query is non-null whenever - * the document its created for is a hit. + * Asserts that the {@link Matches} from a query is non-null whenever the document its created for + * is a hit. * - * Also checks that the previous non-matching document has a {@code null} {@link Matches} + *
<p>
    Also checks that the previous non-matching document has a {@code null} {@link Matches} */ public static class MatchesAsserter extends SimpleCollector { @@ -568,9 +579,15 @@ public class CheckHits { @Override public void collect(int doc) throws IOException { Matches matches = this.weight.matches(context, doc); - assertNotNull("Unexpected null Matches object in doc" + doc + " for query " + this.weight.getQuery(), matches); + assertNotNull( + "Unexpected null Matches object in doc" + doc + " for query " + this.weight.getQuery(), + matches); if (lastCheckedDoc != doc - 1) { - assertNull("Unexpected non-null Matches object in non-matching doc" + doc + " for query " + this.weight.getQuery(), + assertNull( + "Unexpected non-null Matches object in non-matching doc" + + doc + + " for query " + + this.weight.getQuery(), this.weight.matches(context, doc - 1)); } lastCheckedDoc = doc; @@ -582,7 +599,8 @@ public class CheckHits { } } - public static void checkTopScores(Random random, Query query, IndexSearcher searcher) throws IOException { + public static void checkTopScores(Random random, Query query, IndexSearcher searcher) + throws IOException { // Check it computed the top hits correctly doCheckTopScores(query, searcher, 1); doCheckTopScores(query, searcher, 10); @@ -591,15 +609,18 @@ public class CheckHits { doCheckMaxScores(random, query, searcher); } - private static void doCheckTopScores(Query query, IndexSearcher searcher, int numHits) throws IOException { - TopScoreDocCollector collector1 = TopScoreDocCollector.create(numHits, null, Integer.MAX_VALUE); // COMPLETE + private static void doCheckTopScores(Query query, IndexSearcher searcher, int numHits) + throws IOException { + TopScoreDocCollector collector1 = + TopScoreDocCollector.create(numHits, null, Integer.MAX_VALUE); // COMPLETE TopScoreDocCollector collector2 = TopScoreDocCollector.create(numHits, null, 1); // TOP_SCORES searcher.search(query, collector1); searcher.search(query, collector2); checkEqual(query, collector1.topDocs().scoreDocs, collector2.topDocs().scoreDocs); } - private static void doCheckMaxScores(Random random, Query query, IndexSearcher searcher) throws IOException { + private static void doCheckMaxScores(Random random, Query query, IndexSearcher searcher) + throws IOException { query = searcher.rewrite(query); Weight w1 = searcher.createWeight(query, ScoreMode.COMPLETE, 1); Weight w2 = searcher.createWeight(query, ScoreMode.TOP_SCORES, 1); diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java b/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java index e4168f3a817..f308f470ac1 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java +++ b/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java @@ -16,10 +16,13 @@ */ package org.apache.lucene.search; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.List; import java.util.Random; - import org.apache.lucene.index.BinaryDocValues; import org.apache.lucene.index.FieldInfos; import org.apache.lucene.index.Fields; @@ -41,13 +44,7 @@ import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.Version; import org.junit.Assert; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -/** - * Utility class for sanity-checking queries. 
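
Usage-wise, the doCheckTopScores comparison above boils down to the following (the wrapper class is invented; TopScoreDocCollector.create and CheckHits.checkEqual are the real APIs this file uses):

import java.io.IOException;
import org.apache.lucene.search.CheckHits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TopScoreDocCollector;

// A collector with totalHitsThreshold = Integer.MAX_VALUE scores every hit
// (COMPLETE); threshold = 1 lets the scorer skip non-competitive docs
// (TOP_SCORES). Both must yield identical top hits.
final class TopScoresCheck {
  static void compare(Query query, IndexSearcher searcher, int numHits) throws IOException {
    TopScoreDocCollector complete = TopScoreDocCollector.create(numHits, null, Integer.MAX_VALUE);
    TopScoreDocCollector topScores = TopScoreDocCollector.create(numHits, null, 1);
    searcher.search(query, complete);
    searcher.search(query, topScores);
    // compares doc ids and scores within a small tolerance
    CheckHits.checkEqual(query, complete.topDocs().scoreDocs, topScores.topDocs().scoreDocs);
  }
}
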
- */ +/** Utility class for sanity-checking queries. */ public class QueryUtils { /** Check the types of things query objects should be able to do. */ @@ -57,30 +54,30 @@ public class QueryUtils { /** check very basic hashCode and equals */ public static void checkHashEquals(Query q) { - checkEqual(q,q); + checkEqual(q, q); // test that a class check is done so that no exception is thrown // in the implementation of equals() - Query whacky = new Query() { - @Override - public String toString(String field) { - return "My Whacky Query"; - } + Query whacky = + new Query() { + @Override + public String toString(String field) { + return "My Whacky Query"; + } - @Override - public void visit(QueryVisitor visitor) { } + @Override + public void visit(QueryVisitor visitor) {} - @Override - public boolean equals(Object o) { - return o == this; - } + @Override + public boolean equals(Object o) { + return o == this; + } - @Override - public int hashCode() { - return System.identityHashCode(this); - } - - }; + @Override + public int hashCode() { + return System.identityHashCode(this); + } + }; checkUnequal(q, whacky); // null test @@ -98,13 +95,13 @@ public class QueryUtils { } /** deep check that explanations of a query 'score' correctly */ - public static void checkExplanations (final Query q, final IndexSearcher s) throws IOException { + public static void checkExplanations(final Query q, final IndexSearcher s) throws IOException { CheckHits.checkExplanations(q, null, s, true); } /** - * Various query sanity checks on a searcher, some checks are only done for - * instanceof IndexSearcher. + * Various query sanity checks on a searcher, some checks are only done for instanceof + * IndexSearcher. * * @see #check(Query) * @see #checkFirstSkipTo @@ -116,19 +113,20 @@ public class QueryUtils { public static void check(Random random, Query q1, IndexSearcher s) { check(random, q1, s, true); } + public static void check(Random random, Query q1, IndexSearcher s, boolean wrap) { try { check(q1); - if (s!=null) { - checkFirstSkipTo(q1,s); - checkSkipTo(q1,s); + if (s != null) { + checkFirstSkipTo(q1, s); + checkSkipTo(q1, s); checkBulkScorerSkipTo(random, q1, s); if (wrap) { check(random, q1, wrapUnderlyingReader(random, s, -1), false); - check(random, q1, wrapUnderlyingReader(random, s, 0), false); + check(random, q1, wrapUnderlyingReader(random, s, 0), false); check(random, q1, wrapUnderlyingReader(random, s, +1), false); } - checkExplanations(q1,s); + checkExplanations(q1, s); CheckHits.checkMatches(q1, s); } } catch (IOException e) { @@ -137,33 +135,37 @@ public class QueryUtils { } /** - * Given an IndexSearcher, returns a new IndexSearcher whose IndexReader - * is a MultiReader containing the Reader of the original IndexSearcher, - * as well as several "empty" IndexReaders -- some of which will have - * deleted documents in them. This new IndexSearcher should - * behave exactly the same as the original IndexSearcher. + * Given an IndexSearcher, returns a new IndexSearcher whose IndexReader is a MultiReader + * containing the Reader of the original IndexSearcher, as well as several "empty" IndexReaders -- + * some of which will have deleted documents in them. This new IndexSearcher should behave exactly + * the same as the original IndexSearcher. 
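
The wrapping described here is built just below. Its essence, leaving aside the edge handling and the "empty" readers that contain only deleted documents, is padding with zero-document readers so absolute docIDs are preserved (sketch; names invented):

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiReader;

// Surround the real reader r with empty composite readers: the leaf geometry
// changes but r's documents keep the same absolute docIDs.
final class PaddedReader {
  static IndexReader padMiddle(IndexReader r) throws java.io.IOException {
    return new MultiReader(new MultiReader(), r, new MultiReader());
  }
}
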
+ * * @param s the searcher to wrap - * @param edge if negative, s will be the first sub; if 0, s will be in the middle, if positive s will be the last sub + * @param edge if negative, s will be the first sub; if 0, s will be in the middle, if positive s + * will be the last sub */ - public static IndexSearcher wrapUnderlyingReader(Random random, final IndexSearcher s, final int edge) - throws IOException { + public static IndexSearcher wrapUnderlyingReader( + Random random, final IndexSearcher s, final int edge) throws IOException { IndexReader r = s.getIndexReader(); // we can't put deleted docs before the nested reader, because // it will throw off the docIds - IndexReader[] readers = new IndexReader[] { - edge < 0 ? r : new MultiReader(), - new MultiReader(), - new MultiReader(edge < 0 ? emptyReader(4) : new MultiReader(), + IndexReader[] readers = + new IndexReader[] { + edge < 0 ? r : new MultiReader(), new MultiReader(), - 0 == edge ? r : new MultiReader()), - 0 < edge ? new MultiReader() : emptyReader(7), - new MultiReader(), - new MultiReader(0 < edge ? new MultiReader() : emptyReader(5), + new MultiReader( + edge < 0 ? emptyReader(4) : new MultiReader(), + new MultiReader(), + 0 == edge ? r : new MultiReader()), + 0 < edge ? new MultiReader() : emptyReader(7), new MultiReader(), - 0 < edge ? r : new MultiReader()) - }; + new MultiReader( + 0 < edge ? new MultiReader() : emptyReader(5), + new MultiReader(), + 0 < edge ? r : new MultiReader()) + }; IndexSearcher out = LuceneTestCase.newSearcher(new MultiReader(readers)); out.setSimilarity(s.getSimilarity()); @@ -219,6 +221,7 @@ public class QueryUtils { } final Bits liveDocs = new Bits.MatchNoBits(maxDoc); + @Override public Bits getLiveDocs() { return liveDocs; @@ -270,103 +273,285 @@ public class QueryUtils { }; } - /** alternate scorer advance(),advance(),next(),next(),advance(),advance(), etc - * and ensure a hitcollector receives same docs and scores + /** + * alternate scorer advance(),advance(),next(),next(),advance(),advance(), etc and ensure a + * hitcollector receives same docs and scores */ public static void checkSkipTo(final Query q, final IndexSearcher s) throws IOException { - //System.out.println("Checking "+q); + // System.out.println("Checking "+q); final List readerContextArray = s.getTopReaderContext().leaves(); final int skip_op = 0; final int next_op = 1; - final int orders [][] = { - {next_op}, - {skip_op}, - {skip_op, next_op}, - {next_op, skip_op}, - {skip_op, skip_op, next_op, next_op}, - {next_op, next_op, skip_op, skip_op}, - {skip_op, skip_op, skip_op, next_op, next_op}, + final int orders[][] = { + {next_op}, + {skip_op}, + {skip_op, next_op}, + {next_op, skip_op}, + {skip_op, skip_op, next_op, next_op}, + {next_op, next_op, skip_op, skip_op}, + {skip_op, skip_op, skip_op, next_op, next_op}, }; for (int k = 0; k < orders.length; k++) { - final int order[] = orders[k]; - // System.out.print("Order:");for (int i = 0; i < order.length; i++) - // System.out.print(order[i]==skip_op ? " skip()":" next()"); - // System.out.println(); - final int opidx[] = { 0 }; - final int lastDoc[] = {-1}; + final int order[] = orders[k]; + // System.out.print("Order:");for (int i = 0; i < order.length; i++) + // System.out.print(order[i]==skip_op ? 
" skip()":" next()"); + // System.out.println(); + final int opidx[] = {0}; + final int lastDoc[] = {-1}; - // FUTURE: ensure scorer.doc()==-1 + // FUTURE: ensure scorer.doc()==-1 - final float maxDiff = 1e-5f; - final LeafReader lastReader[] = {null}; + final float maxDiff = 1e-5f; + final LeafReader lastReader[] = {null}; - s.search(q, new SimpleCollector() { - private Scorable sc; - private Scorer scorer; - private DocIdSetIterator iterator; + s.search( + q, + new SimpleCollector() { + private Scorable sc; + private Scorer scorer; + private DocIdSetIterator iterator; + private int leafPtr; + + @Override + public void setScorer(Scorable scorer) { + this.sc = scorer; + } + + @Override + public void collect(int doc) throws IOException { + float score = sc.score(); + lastDoc[0] = doc; + try { + if (scorer == null) { + Query rewritten = s.rewrite(q); + Weight w = s.createWeight(rewritten, ScoreMode.COMPLETE, 1); + LeafReaderContext context = readerContextArray.get(leafPtr); + scorer = w.scorer(context); + iterator = scorer.iterator(); + } + + int op = order[(opidx[0]++) % order.length]; + // System.out.println(op==skip_op ? + // "skip("+(sdoc[0]+1)+")":"next()"); + boolean more = + op == skip_op + ? iterator.advance(scorer.docID() + 1) != DocIdSetIterator.NO_MORE_DOCS + : iterator.nextDoc() != DocIdSetIterator.NO_MORE_DOCS; + int scorerDoc = scorer.docID(); + float scorerScore = scorer.score(); + float scorerScore2 = scorer.score(); + float scoreDiff = Math.abs(score - scorerScore); + float scorerDiff = Math.abs(scorerScore2 - scorerScore); + + boolean success = false; + try { + assertTrue(more); + assertEquals("scorerDoc=" + scorerDoc + ",doc=" + doc, scorerDoc, doc); + assertTrue( + "score=" + score + ", scorerScore=" + scorerScore, scoreDiff <= maxDiff); + assertTrue( + "scorerScorer=" + scorerScore + ", scorerScore2=" + scorerScore2, + scorerDiff <= maxDiff); + success = true; + } finally { + if (!success) { + if (LuceneTestCase.VERBOSE) { + StringBuilder sbord = new StringBuilder(); + for (int i = 0; i < order.length; i++) { + sbord.append(order[i] == skip_op ? " skip()" : " next()"); + } + System.out.println( + "ERROR matching docs:" + + "\n\t" + + (doc != scorerDoc ? "--> " : "") + + "doc=" + + doc + + ", scorerDoc=" + + scorerDoc + + "\n\t" + + (!more ? "--> " : "") + + "tscorer.more=" + + more + + "\n\t" + + (scoreDiff > maxDiff ? "--> " : "") + + "scorerScore=" + + scorerScore + + " scoreDiff=" + + scoreDiff + + " maxDiff=" + + maxDiff + + "\n\t" + + (scorerDiff > maxDiff ? "--> " : "") + + "scorerScore2=" + + scorerScore2 + + " scorerDiff=" + + scorerDiff + + "\n\thitCollector.doc=" + + doc + + " score=" + + score + + "\n\t Scorer=" + + scorer + + "\n\t Query=" + + q + + " " + + q.getClass().getName() + + "\n\t Searcher=" + + s + + "\n\t Order=" + + sbord + + "\n\t Op=" + + (op == skip_op ? 
" skip()" : " next()")); + } + } + } + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Override + public ScoreMode scoreMode() { + return ScoreMode.COMPLETE; + } + + @Override + protected void doSetNextReader(LeafReaderContext context) throws IOException { + // confirm that skipping beyond the last doc, on the + // previous reader, hits NO_MORE_DOCS + if (lastReader[0] != null) { + final LeafReader previousReader = lastReader[0]; + IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader, false); + indexSearcher.setSimilarity(s.getSimilarity()); + Query rewritten = indexSearcher.rewrite(q); + Weight w = indexSearcher.createWeight(rewritten, ScoreMode.COMPLETE, 1); + LeafReaderContext ctx = (LeafReaderContext) indexSearcher.getTopReaderContext(); + Scorer scorer = w.scorer(ctx); + if (scorer != null) { + DocIdSetIterator iterator = scorer.iterator(); + boolean more = false; + final Bits liveDocs = context.reader().getLiveDocs(); + for (int d = iterator.advance(lastDoc[0] + 1); + d != DocIdSetIterator.NO_MORE_DOCS; + d = iterator.nextDoc()) { + if (liveDocs == null || liveDocs.get(d)) { + more = true; + break; + } + } + Assert.assertFalse( + "query's last doc was " + + lastDoc[0] + + " but advance(" + + (lastDoc[0] + 1) + + ") got to " + + scorer.docID(), + more); + } + leafPtr++; + } + lastReader[0] = context.reader(); + assert readerContextArray.get(leafPtr).reader() == context.reader(); + this.scorer = null; + lastDoc[0] = -1; + } + }); + + if (lastReader[0] != null) { + // confirm that skipping beyond the last doc, on the + // previous reader, hits NO_MORE_DOCS + final LeafReader previousReader = lastReader[0]; + IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader, false); + indexSearcher.setSimilarity(s.getSimilarity()); + Query rewritten = indexSearcher.rewrite(q); + Weight w = indexSearcher.createWeight(rewritten, ScoreMode.COMPLETE, 1); + LeafReaderContext ctx = previousReader.getContext(); + Scorer scorer = w.scorer(ctx); + if (scorer != null) { + DocIdSetIterator iterator = scorer.iterator(); + boolean more = false; + final Bits liveDocs = lastReader[0].getLiveDocs(); + for (int d = iterator.advance(lastDoc[0] + 1); + d != DocIdSetIterator.NO_MORE_DOCS; + d = iterator.nextDoc()) { + if (liveDocs == null || liveDocs.get(d)) { + more = true; + break; + } + } + assertFalse( + "query's last doc was " + + lastDoc[0] + + " but advance(" + + (lastDoc[0] + 1) + + ") got to " + + scorer.docID(), + more); + } + } + } + } + + /** check that first skip on just created scorers always goes to the right doc */ + public static void checkFirstSkipTo(final Query q, final IndexSearcher s) throws IOException { + // System.out.println("checkFirstSkipTo: "+q); + final float maxDiff = 1e-3f; + final int lastDoc[] = {-1}; + final LeafReader lastReader[] = {null}; + final List context = s.getTopReaderContext().leaves(); + Query rewritten = s.rewrite(q); + s.search( + q, + new SimpleCollector() { + private Scorable scorer; private int leafPtr; @Override public void setScorer(Scorable scorer) { - this.sc = scorer; + this.scorer = scorer; } @Override public void collect(int doc) throws IOException { - float score = sc.score(); - lastDoc[0] = doc; + float score = scorer.score(); try { - if (scorer == null) { - Query rewritten = s.rewrite(q); + long startMS = System.currentTimeMillis(); + for (int i = lastDoc[0] + 1; i <= doc; i++) { Weight w = s.createWeight(rewritten, ScoreMode.COMPLETE, 1); - LeafReaderContext context = 
readerContextArray.get(leafPtr); - scorer = w.scorer(context); - iterator = scorer.iterator(); - } + Scorer scorer = w.scorer(context.get(leafPtr)); + assertTrue( + "query collected " + doc + " but advance(" + i + ") says no more docs!", + scorer.iterator().advance(i) != DocIdSetIterator.NO_MORE_DOCS); + assertEquals( + "query collected " + doc + " but advance(" + i + ") got to " + scorer.docID(), + doc, + scorer.docID()); + float advanceScore = scorer.score(); + assertEquals( + "unstable advance(" + i + ") score!", advanceScore, scorer.score(), maxDiff); + assertEquals( + "query assigned doc " + + doc + + " a score of <" + + score + + "> but advance(" + + i + + ") has <" + + advanceScore + + ">!", + score, + advanceScore, + maxDiff); - int op = order[(opidx[0]++) % order.length]; - // System.out.println(op==skip_op ? - // "skip("+(sdoc[0]+1)+")":"next()"); - boolean more = op == skip_op ? iterator.advance(scorer.docID() + 1) != DocIdSetIterator.NO_MORE_DOCS - : iterator.nextDoc() != DocIdSetIterator.NO_MORE_DOCS; - int scorerDoc = scorer.docID(); - float scorerScore = scorer.score(); - float scorerScore2 = scorer.score(); - float scoreDiff = Math.abs(score - scorerScore); - float scorerDiff = Math.abs(scorerScore2 - scorerScore); - - boolean success = false; - try { - assertTrue(more); - assertEquals("scorerDoc=" + scorerDoc + ",doc=" + doc, scorerDoc, doc); - assertTrue("score=" + score + ", scorerScore=" + scorerScore, scoreDiff <= maxDiff); - assertTrue("scorerScorer=" + scorerScore + ", scorerScore2=" + scorerScore2, scorerDiff <= maxDiff); - success = true; - } finally { - if (!success) { - if (LuceneTestCase.VERBOSE) { - StringBuilder sbord = new StringBuilder(); - for (int i = 0; i < order.length; i++) { - sbord.append(order[i] == skip_op ? " skip()" : " next()"); - } - System.out.println("ERROR matching docs:" + "\n\t" - + (doc != scorerDoc ? "--> " : "") + "doc=" + doc + ", scorerDoc=" + scorerDoc - + "\n\t" + (!more ? "--> " : "") + "tscorer.more=" + more - + "\n\t" + (scoreDiff > maxDiff ? "--> " : "") - + "scorerScore=" + scorerScore + " scoreDiff=" + scoreDiff - + " maxDiff=" + maxDiff + "\n\t" - + (scorerDiff > maxDiff ? "--> " : "") + "scorerScore2=" - + scorerScore2 + " scorerDiff=" + scorerDiff - + "\n\thitCollector.doc=" + doc + " score=" + score - + "\n\t Scorer=" + scorer + "\n\t Query=" + q + " " - + q.getClass().getName() + "\n\t Searcher=" + s - + "\n\t Order=" + sbord + "\n\t Op=" - + (op == skip_op ? 
" skip()" : " next()")); - } + // Hurry things along if they are going slow (eg + // if you got SimpleText codec this will kick in): + if (i < doc && System.currentTimeMillis() - startMS > 5) { + i = doc - 1; } } + lastDoc[0] = doc; } catch (IOException e) { throw new RuntimeException(e); } @@ -385,133 +570,37 @@ public class QueryUtils { final LeafReader previousReader = lastReader[0]; IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader, false); indexSearcher.setSimilarity(s.getSimilarity()); - Query rewritten = indexSearcher.rewrite(q); Weight w = indexSearcher.createWeight(rewritten, ScoreMode.COMPLETE, 1); - LeafReaderContext ctx = (LeafReaderContext)indexSearcher.getTopReaderContext(); - Scorer scorer = w.scorer(ctx); + Scorer scorer = w.scorer((LeafReaderContext) indexSearcher.getTopReaderContext()); if (scorer != null) { DocIdSetIterator iterator = scorer.iterator(); boolean more = false; final Bits liveDocs = context.reader().getLiveDocs(); - for (int d = iterator.advance(lastDoc[0] + 1); d != DocIdSetIterator.NO_MORE_DOCS; d = iterator.nextDoc()) { + for (int d = iterator.advance(lastDoc[0] + 1); + d != DocIdSetIterator.NO_MORE_DOCS; + d = iterator.nextDoc()) { if (liveDocs == null || liveDocs.get(d)) { more = true; break; } } - Assert.assertFalse("query's last doc was "+ lastDoc[0] +" but advance("+(lastDoc[0]+1)+") got to "+scorer.docID(),more); + assertFalse( + "query's last doc was " + + lastDoc[0] + + " but advance(" + + (lastDoc[0] + 1) + + ") got to " + + scorer.docID(), + more); } leafPtr++; } + lastReader[0] = context.reader(); - assert readerContextArray.get(leafPtr).reader() == context.reader(); - this.scorer = null; lastDoc[0] = -1; } }); - if (lastReader[0] != null) { - // confirm that skipping beyond the last doc, on the - // previous reader, hits NO_MORE_DOCS - final LeafReader previousReader = lastReader[0]; - IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader, false); - indexSearcher.setSimilarity(s.getSimilarity()); - Query rewritten = indexSearcher.rewrite(q); - Weight w = indexSearcher.createWeight(rewritten, ScoreMode.COMPLETE, 1); - LeafReaderContext ctx = previousReader.getContext(); - Scorer scorer = w.scorer(ctx); - if (scorer != null) { - DocIdSetIterator iterator = scorer.iterator(); - boolean more = false; - final Bits liveDocs = lastReader[0].getLiveDocs(); - for (int d = iterator.advance(lastDoc[0] + 1); d != DocIdSetIterator.NO_MORE_DOCS; d = iterator.nextDoc()) { - if (liveDocs == null || liveDocs.get(d)) { - more = true; - break; - } - } - assertFalse("query's last doc was "+ lastDoc[0] +" but advance("+(lastDoc[0]+1)+") got to "+scorer.docID(),more); - } - } - } - } - - /** check that first skip on just created scorers always goes to the right doc */ - public static void checkFirstSkipTo(final Query q, final IndexSearcher s) throws IOException { - //System.out.println("checkFirstSkipTo: "+q); - final float maxDiff = 1e-3f; - final int lastDoc[] = {-1}; - final LeafReader lastReader[] = {null}; - final List context = s.getTopReaderContext().leaves(); - Query rewritten = s.rewrite(q); - s.search(q,new SimpleCollector() { - private Scorable scorer; - private int leafPtr; - @Override - public void setScorer(Scorable scorer) { - this.scorer = scorer; - } - @Override - public void collect(int doc) throws IOException { - float score = scorer.score(); - try { - long startMS = System.currentTimeMillis(); - for (int i=lastDoc[0]+1; i<=doc; i++) { - Weight w = s.createWeight(rewritten, ScoreMode.COMPLETE, 1); - Scorer 
scorer = w.scorer(context.get(leafPtr)); - assertTrue("query collected "+doc+" but advance("+i+") says no more docs!",scorer.iterator().advance(i) != DocIdSetIterator.NO_MORE_DOCS); - assertEquals("query collected "+doc+" but advance("+i+") got to "+scorer.docID(),doc,scorer.docID()); - float advanceScore = scorer.score(); - assertEquals("unstable advance("+i+") score!",advanceScore,scorer.score(),maxDiff); - assertEquals("query assigned doc "+doc+" a score of <"+score+"> but advance("+i+") has <"+advanceScore+">!",score,advanceScore,maxDiff); - - // Hurry things along if they are going slow (eg - // if you got SimpleText codec this will kick in): - if (i < doc && System.currentTimeMillis() - startMS > 5) { - i = doc-1; - } - } - lastDoc[0] = doc; - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - @Override - public ScoreMode scoreMode() { - return ScoreMode.COMPLETE; - } - - @Override - protected void doSetNextReader(LeafReaderContext context) throws IOException { - // confirm that skipping beyond the last doc, on the - // previous reader, hits NO_MORE_DOCS - if (lastReader[0] != null) { - final LeafReader previousReader = lastReader[0]; - IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader, false); - indexSearcher.setSimilarity(s.getSimilarity()); - Weight w = indexSearcher.createWeight(rewritten, ScoreMode.COMPLETE, 1); - Scorer scorer = w.scorer((LeafReaderContext)indexSearcher.getTopReaderContext()); - if (scorer != null) { - DocIdSetIterator iterator = scorer.iterator(); - boolean more = false; - final Bits liveDocs = context.reader().getLiveDocs(); - for (int d = iterator.advance(lastDoc[0] + 1); d != DocIdSetIterator.NO_MORE_DOCS; d = iterator.nextDoc()) { - if (liveDocs == null || liveDocs.get(d)) { - more = true; - break; - } - } - assertFalse("query's last doc was "+ lastDoc[0] +" but advance("+(lastDoc[0]+1)+") got to "+scorer.docID(),more); - } - leafPtr++; - } - - lastReader[0] = context.reader(); - lastDoc[0] = -1; - } - }); - if (lastReader[0] != null) { // confirm that skipping beyond the last doc, on the // previous reader, hits NO_MORE_DOCS @@ -519,24 +608,34 @@ public class QueryUtils { IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader, false); indexSearcher.setSimilarity(s.getSimilarity()); Weight w = indexSearcher.createWeight(rewritten, ScoreMode.COMPLETE, 1); - Scorer scorer = w.scorer((LeafReaderContext)indexSearcher.getTopReaderContext()); + Scorer scorer = w.scorer((LeafReaderContext) indexSearcher.getTopReaderContext()); if (scorer != null) { DocIdSetIterator iterator = scorer.iterator(); boolean more = false; final Bits liveDocs = lastReader[0].getLiveDocs(); - for (int d = iterator.advance(lastDoc[0] + 1); d != DocIdSetIterator.NO_MORE_DOCS; d = iterator.nextDoc()) { + for (int d = iterator.advance(lastDoc[0] + 1); + d != DocIdSetIterator.NO_MORE_DOCS; + d = iterator.nextDoc()) { if (liveDocs == null || liveDocs.get(d)) { more = true; break; } } - assertFalse("query's last doc was "+ lastDoc[0] +" but advance("+(lastDoc[0]+1)+") got to "+scorer.docID(),more); + assertFalse( + "query's last doc was " + + lastDoc[0] + + " but advance(" + + (lastDoc[0] + 1) + + ") got to " + + scorer.docID(), + more); } } } /** Check that the scorer and bulk scorer advance consistently. 
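// Sketch, not from the patch itself, of the advance()/nextDoc() equivalence
// that checkSkipTo and checkBulkScorerSkipTo rely on: advancing to a target
// must land on the same doc as stepping with nextDoc() until docID() >= target.
// 'it', 'it2' (two identically positioned DocIdSetIterators) and 'target' are
// illustrative.
int viaNext = it.nextDoc();
while (viaNext < target) {
  viaNext = it.nextDoc(); // step one doc at a time
}
int viaAdvance = it2.advance(target); // jump directly to the first doc >= target
assert viaNext == viaAdvance;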
*/ - public static void checkBulkScorerSkipTo(Random r, Query query, IndexSearcher searcher) throws IOException { + public static void checkBulkScorerSkipTo(Random r, Query query, IndexSearcher searcher) + throws IOException { query = searcher.rewrite(query); Weight weight = searcher.createWeight(query, ScoreMode.COMPLETE, 1); for (LeafReaderContext context : searcher.getIndexReader().leaves()) { @@ -557,36 +656,47 @@ public class QueryUtils { if (scorer.docID() < min) { iterator.advance(min); } - final int next = bulkScorer.score(new LeafCollector() { - Scorable scorer2; - @Override - public void setScorer(Scorable scorer) throws IOException { - this.scorer2 = scorer; - } - @Override - public void collect(int doc) throws IOException { - assert doc >= min; - assert doc < max; - assertEquals(scorer.docID(), doc); - assertEquals(scorer.score(), scorer2.score(), 0.01f); - iterator.nextDoc(); - } - }, null, min, max); + final int next = + bulkScorer.score( + new LeafCollector() { + Scorable scorer2; + + @Override + public void setScorer(Scorable scorer) throws IOException { + this.scorer2 = scorer; + } + + @Override + public void collect(int doc) throws IOException { + assert doc >= min; + assert doc < max; + assertEquals(scorer.docID(), doc); + assertEquals(scorer.score(), scorer2.score(), 0.01f); + iterator.nextDoc(); + } + }, + null, + min, + max); assert max <= next; assert next <= scorer.docID(); upTo = max; if (scorer.docID() == DocIdSetIterator.NO_MORE_DOCS) { - bulkScorer.score(new LeafCollector() { - @Override - public void setScorer(Scorable scorer) throws IOException {} + bulkScorer.score( + new LeafCollector() { + @Override + public void setScorer(Scorable scorer) throws IOException {} - @Override - public void collect(int doc) throws IOException { - // no more matches - assert false; - } - }, null, upTo, DocIdSetIterator.NO_MORE_DOCS); + @Override + public void collect(int doc) throws IOException { + // no more matches + assert false; + } + }, + null, + upTo, + DocIdSetIterator.NO_MORE_DOCS); break; } } diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/RandomApproximationQuery.java b/lucene/test-framework/src/java/org/apache/lucene/search/RandomApproximationQuery.java index 445a39a1b2c..48318a7625c 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/search/RandomApproximationQuery.java +++ b/lucene/test-framework/src/java/org/apache/lucene/search/RandomApproximationQuery.java @@ -16,16 +16,13 @@ */ package org.apache.lucene.search; +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import java.io.IOException; import java.util.Random; - -import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; -/** - * A {@link Query} that adds random approximations to its scorers. - */ +/** A {@link Query} that adds random approximations to its scorers. 
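// Background sketch, not from the patch itself: the two-phase iteration
// contract that RandomApproximationQuery stresses. The approximation may visit
// a superset of the matching docs; matches() confirms each candidate and, per
// the asserts below, may be called at most once per positioned doc and never
// on -1 or NO_MORE_DOCS. The 'scorer' variable is assumed for illustration.
TwoPhaseIterator twoPhase = scorer.twoPhaseIterator();
if (twoPhase != null) {
  DocIdSetIterator approximation = twoPhase.approximation();
  for (int doc = approximation.nextDoc();
      doc != DocIdSetIterator.NO_MORE_DOCS;
      doc = approximation.nextDoc()) {
    if (twoPhase.matches()) {
      // doc is a verified match; safe to score/collect it here
    }
  }
}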
*/ public class RandomApproximationQuery extends Query { private final Query query; @@ -52,8 +49,7 @@ public class RandomApproximationQuery extends Query { @Override public boolean equals(Object other) { - return sameClassAs(other) && - query.equals(((RandomApproximationQuery) other).query); + return sameClassAs(other) && query.equals(((RandomApproximationQuery) other).query); } @Override @@ -67,7 +63,8 @@ public class RandomApproximationQuery extends Query { } @Override - public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) + throws IOException { final Weight weight = query.createWeight(searcher, scoreMode, boost); return new RandomApproximationWeight(weight, new Random(random.nextLong())); } @@ -89,7 +86,6 @@ public class RandomApproximationQuery extends Query { } return new RandomApproximationScorer(scorer, new Random(random.nextLong())); } - } private static class RandomApproximationScorer extends Scorer { @@ -136,9 +132,8 @@ public class RandomApproximationQuery extends Query { @Override public DocIdSetIterator iterator() { - return TwoPhaseIterator.asDocIdSetIterator(twoPhaseView); + return TwoPhaseIterator.asDocIdSetIterator(twoPhaseView); } - } private static class RandomTwoPhaseView extends TwoPhaseIterator { @@ -156,10 +151,12 @@ public class RandomApproximationQuery extends Query { @Override public boolean matches() throws IOException { if (approximation.docID() == -1 || approximation.docID() == DocIdSetIterator.NO_MORE_DOCS) { - throw new AssertionError("matches() should not be called on doc ID " + approximation.docID()); + throw new AssertionError( + "matches() should not be called on doc ID " + approximation.docID()); } if (lastDoc == approximation.docID()) { - throw new AssertionError("matches() has been called twice on doc ID " + approximation.docID()); + throw new AssertionError( + "matches() has been called twice on doc ID " + approximation.docID()); } lastDoc = approximation.docID(); return approximation.docID() == disi.docID(); @@ -209,5 +206,4 @@ public class RandomApproximationQuery extends Query { return disi.cost(); } } - } diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/ScorerIndexSearcher.java b/lucene/test-framework/src/java/org/apache/lucene/search/ScorerIndexSearcher.java index 46f31be76d4..5a7d0f3db1c 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/search/ScorerIndexSearcher.java +++ b/lucene/test-framework/src/java/org/apache/lucene/search/ScorerIndexSearcher.java @@ -19,31 +19,35 @@ package org.apache.lucene.search; import java.io.IOException; import java.util.List; import java.util.concurrent.Executor; - import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.util.Bits; -/** - * An {@link IndexSearcher} that always uses the {@link Scorer} API, never {@link BulkScorer}. - */ +/** An {@link IndexSearcher} that always uses the {@link Scorer} API, never {@link BulkScorer}. */ public class ScorerIndexSearcher extends IndexSearcher { - /** Creates a searcher searching the provided index. Search on individual - * segments will be run in the provided {@link Executor}. - * @see IndexSearcher#IndexSearcher(IndexReader, Executor) */ + /** + * Creates a searcher searching the provided index. Search on individual segments will be run in + * the provided {@link Executor}. 
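// Usage sketch, not from the patch itself: wrapping a reader in
// ScorerIndexSearcher forces every leaf to be scored through the
// Scorer/DocIdSetIterator path, exercising setScorer()/score() interactions
// that a BulkScorer shortcut could hide. 'reader' and the query are
// illustrative.
IndexSearcher s = new ScorerIndexSearcher(reader);
TopDocs hits = s.search(new TermQuery(new Term("body", "example")), 10);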
+ * + * @see IndexSearcher#IndexSearcher(IndexReader, Executor) + */ public ScorerIndexSearcher(IndexReader r, Executor executor) { super(r, executor); } - /** Creates a searcher searching the provided index. - * @see IndexSearcher#IndexSearcher(IndexReader) */ + /** + * Creates a searcher searching the provided index. + * + * @see IndexSearcher#IndexSearcher(IndexReader) + */ public ScorerIndexSearcher(IndexReader r) { super(r); } @Override - protected void search(List leaves, Weight weight, Collector collector) throws IOException { + protected void search(List leaves, Weight weight, Collector collector) + throws IOException { for (LeafReaderContext ctx : leaves) { // search each subreader // we force the use of Scorer (not BulkScorer) to make sure // that the scorer passed to LeafCollector.setScorer supports @@ -54,7 +58,9 @@ public class ScorerIndexSearcher extends IndexSearcher { final LeafCollector leafCollector = collector.getLeafCollector(ctx); leafCollector.setScorer(scorer); final Bits liveDocs = ctx.reader().getLiveDocs(); - for (int doc = iterator.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = iterator.nextDoc()) { + for (int doc = iterator.nextDoc(); + doc != DocIdSetIterator.NO_MORE_DOCS; + doc = iterator.nextDoc()) { if (liveDocs == null || liveDocs.get(doc)) { leafCollector.collect(doc); } @@ -62,5 +68,4 @@ public class ScorerIndexSearcher extends IndexSearcher { } } } - -} \ No newline at end of file +} diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/SearchEquivalenceTestBase.java b/lucene/test-framework/src/java/org/apache/lucene/search/SearchEquivalenceTestBase.java index 63d97882e83..23c1ee07c0a 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/search/SearchEquivalenceTestBase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/search/SearchEquivalenceTestBase.java @@ -18,7 +18,6 @@ package org.apache.lucene.search; import java.util.BitSet; import java.util.Random; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; @@ -40,11 +39,9 @@ import org.junit.AfterClass; import org.junit.BeforeClass; /** - * Simple base class for checking search equivalence. - * Extend it, and write tests that create {@link #randomTerm()}s - * (all terms are single characters a-z), and use - * {@link #assertSameSet(Query, Query)} and - * {@link #assertSubsetOf(Query, Query)} + * Simple base class for checking search equivalence. Extend it, and write tests that create {@link + * #randomTerm()}s (all terms are single characters a-z), and use {@link #assertSameSet(Query, + * Query)} and {@link #assertSubsetOf(Query, Query)} */ @SuppressCodecs("SimpleText") public abstract class SearchEquivalenceTestBase extends LuceneTestCase { @@ -53,7 +50,7 @@ public abstract class SearchEquivalenceTestBase extends LuceneTestCase { protected static IndexReader reader; protected static Analyzer analyzer; protected static String stopword; // we always pick a character as a stopword - + @BeforeClass public static void beforeClass() throws Exception { Random random = random(); @@ -67,7 +64,7 @@ public abstract class SearchEquivalenceTestBase extends LuceneTestCase { Field field = new TextField("field", "", Field.Store.NO); doc.add(id); doc.add(field); - + // index some docs int numDocs = TEST_NIGHTLY ? 
atLeast(1000) : atLeast(100); for (int i = 0; i < numDocs; i++) { @@ -75,9 +72,9 @@ public abstract class SearchEquivalenceTestBase extends LuceneTestCase { field.setStringValue(randomFieldContents()); iw.addDocument(doc); } - + // delete some docs - int numDeletes = numDocs/20; + int numDeletes = numDocs / 20; for (int i = 0; i < numDeletes; i++) { Term toDelete = new Term("id", Integer.toString(random.nextInt(numDocs))); if (random.nextBoolean()) { @@ -86,13 +83,13 @@ public abstract class SearchEquivalenceTestBase extends LuceneTestCase { iw.deleteDocuments(new TermQuery(toDelete)); } } - + reader = iw.getReader(); s1 = newSearcher(reader); s2 = newSearcher(reader); iw.close(); } - + @AfterClass public static void afterClass() throws Exception { reader.close(); @@ -103,10 +100,9 @@ public abstract class SearchEquivalenceTestBase extends LuceneTestCase { analyzer = null; s1 = s2 = null; } - + /** - * populate a field with random contents. - * terms should be single characters in lowercase (a-z) + * populate a field with random contents. terms should be single characters in lowercase (a-z) * tokenization can be assumed to be on whitespace. */ static String randomFieldContents() { @@ -122,24 +118,17 @@ public abstract class SearchEquivalenceTestBase extends LuceneTestCase { return sb.toString(); } - /** - * returns random character (a-z) - */ + /** returns random character (a-z) */ static char randomChar() { return (char) TestUtil.nextInt(random(), 'a', 'z'); } - /** - * returns a term suitable for searching. - * terms are single characters in lowercase (a-z) - */ + /** returns a term suitable for searching. terms are single characters in lowercase (a-z) */ protected Term randomTerm() { return new Term("field", "" + randomChar()); } - - /** - * Returns a random filter over the document set - */ + + /** Returns a random filter over the document set */ protected Query randomFilter() { final Query query; if (random().nextBoolean()) { @@ -153,22 +142,22 @@ public abstract class SearchEquivalenceTestBase extends LuceneTestCase { } /** - * Asserts that the documents returned by q1 - * are the same as of those returned by q2 + * Asserts that the documents returned by q1 are the same as of those returned by + * q2 */ public void assertSameSet(Query q1, Query q2) throws Exception { assertSubsetOf(q1, q2); assertSubsetOf(q2, q1); } - + /** - * Asserts that the documents returned by q1 - * are a subset of those returned by q2 + * Asserts that the documents returned by q1 are a subset of those returned by + * q2 */ - public void assertSubsetOf(Query q1, Query q2) throws Exception { + public void assertSubsetOf(Query q1, Query q2) throws Exception { // test without a filter assertSubsetOf(q1, q2, null); - + // test with some filters (this will sometimes cause advance'ing enough to test it) int numFilters = TEST_NIGHTLY ? atLeast(10) : atLeast(3); for (int i = 0; i < numFilters; i++) { @@ -178,40 +167,36 @@ public abstract class SearchEquivalenceTestBase extends LuceneTestCase { assertSubsetOf(filteredQuery(q1, filter), filteredQuery(q2, filter), null); } } - + /** - * Asserts that the documents returned by q1 - * are a subset of those returned by q2. - * - * Both queries will be filtered by filter + * Asserts that the documents returned by q1 are a subset of those returned by + * q2. + * + *
<p>
    Both queries will be filtered by filter */ protected void assertSubsetOf(Query q1, Query q2, Query filter) throws Exception { QueryUtils.check(q1); QueryUtils.check(q2); if (filter != null) { - q1 = new BooleanQuery.Builder() - .add(q1, Occur.MUST) - .add(filter, Occur.FILTER) - .build(); - q2 = new BooleanQuery.Builder() - .add(q2, Occur.MUST) - .add(filter, Occur.FILTER) - .build(); + q1 = new BooleanQuery.Builder().add(q1, Occur.MUST).add(filter, Occur.FILTER).build(); + q2 = new BooleanQuery.Builder().add(q2, Occur.MUST).add(filter, Occur.FILTER).build(); } // we test both INDEXORDER and RELEVANCE because we want to test needsScores=true/false - for (Sort sort : new Sort[] { Sort.INDEXORDER, Sort.RELEVANCE }) { + for (Sort sort : new Sort[] {Sort.INDEXORDER, Sort.RELEVANCE}) { // not efficient, but simple! TopDocs td1 = s1.search(q1, reader.maxDoc(), sort); TopDocs td2 = s2.search(q2, reader.maxDoc(), sort); - assertTrue("too many hits: " + td1.totalHits.value + " > " + td2.totalHits.value, td1.totalHits.value <= td2.totalHits.value); - + assertTrue( + "too many hits: " + td1.totalHits.value + " > " + td2.totalHits.value, + td1.totalHits.value <= td2.totalHits.value); + // fill the superset into a bitset BitSet bitset = new BitSet(); for (int i = 0; i < td2.scoreDocs.length; i++) { bitset.set(td2.scoreDocs[i].doc); } - + // check in the subset, that every bit was set by the super for (int i = 0; i < td1.scoreDocs.length; i++) { assertTrue(bitset.get(td1.scoreDocs[i].doc)); @@ -219,9 +204,7 @@ public abstract class SearchEquivalenceTestBase extends LuceneTestCase { } } - /** - * Assert that two queries return the same documents and with the same scores. - */ + /** Assert that two queries return the same documents and with the same scores. */ protected void assertSameScores(Query q1, Query q2) throws Exception { assertSameSet(q1, q2); @@ -239,14 +222,8 @@ public abstract class SearchEquivalenceTestBase extends LuceneTestCase { protected void assertSameScores(Query q1, Query q2, Query filter) throws Exception { // not efficient, but simple! 
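// Aside, not from the patch itself: the wrapping below relies on
// BooleanClause.Occur.FILTER, which constrains matching without contributing
// to the score, so filtered and unfiltered variants stay score-comparable.
// A minimal standalone sketch, with 'query' and 'filter' assumed in scope:
Query filtered = new BooleanQuery.Builder()
    .add(query, Occur.MUST)     // scoring clause
    .add(filter, Occur.FILTER)  // match-only clause; no score contribution
    .build();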
if (filter != null) { - q1 = new BooleanQuery.Builder() - .add(q1, Occur.MUST) - .add(filter, Occur.FILTER) - .build(); - q2 = new BooleanQuery.Builder() - .add(q2, Occur.MUST) - .add(filter, Occur.FILTER) - .build(); + q1 = new BooleanQuery.Builder().add(q1, Occur.MUST).add(filter, Occur.FILTER).build(); + q2 = new BooleanQuery.Builder().add(q2, Occur.MUST).add(filter, Occur.FILTER).build(); } TopDocs td1 = s1.search(q1, reader.maxDoc()); TopDocs td2 = s2.search(q2, reader.maxDoc()); @@ -256,11 +233,8 @@ public abstract class SearchEquivalenceTestBase extends LuceneTestCase { assertEquals(td1.scoreDocs[i].score, td2.scoreDocs[i].score, 10e-5); } } - + protected Query filteredQuery(Query query, Query filter) { - return new BooleanQuery.Builder() - .add(query, Occur.MUST) - .add(filter, Occur.FILTER) - .build(); + return new BooleanQuery.Builder().add(query, Occur.MUST).add(filter, Occur.FILTER).build(); } } diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java b/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java index cd83f4416f3..7f62db17a22 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java @@ -24,7 +24,6 @@ import java.util.Map; import java.util.Random; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; - import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; @@ -41,15 +40,11 @@ import org.apache.lucene.util.TestUtil; // - doc blocks? so we can test joins/grouping... // - controlled consistency (NRTMgr) -/** - * Base test class for simulating distributed search across multiple shards. - */ +/** Base test class for simulating distributed search across multiple shards. */ public abstract class ShardSearchingTestBase extends LuceneTestCase { // TODO: maybe SLM should throw this instead of returning null... - /** - * Thrown when the lease for a searcher has expired. - */ + /** Thrown when the lease for a searcher has expired. 
*/ public static class SearcherExpiredException extends RuntimeException { public SearcherExpiredException(String message) { super(message); @@ -85,7 +80,13 @@ public abstract class ShardSearchingTestBase extends LuceneTestCase { @Override public String toString() { - return "FieldAndShardVersion(field=" + field + " nodeID=" + nodeID + " version=" + version+ ")"; + return "FieldAndShardVersion(field=" + + field + + " nodeID=" + + nodeID + + " version=" + + version + + ")"; } } @@ -129,12 +130,18 @@ public abstract class ShardSearchingTestBase extends LuceneTestCase { void broadcastNodeReopen(int nodeID, long version, IndexSearcher newSearcher) throws IOException { if (VERBOSE) { - System.out.println("REOPEN: nodeID=" + nodeID + " version=" + version + " maxDoc=" + newSearcher.getIndexReader().maxDoc()); + System.out.println( + "REOPEN: nodeID=" + + nodeID + + " version=" + + version + + " maxDoc=" + + newSearcher.getIndexReader().maxDoc()); } // Broadcast new collection stats for this node to all // other nodes: - for(String field : fieldsToShare) { + for (String field : fieldsToShare) { final CollectionStatistics stats = newSearcher.collectionStatistics(field); if (stats != null) { for (NodeState node : nodes) { @@ -157,7 +164,9 @@ public abstract class ShardSearchingTestBase extends LuceneTestCase { // MOCK: in a real env you have to hit the wire // (send this query to all remote nodes // concurrently): - TopDocs searchNode(int nodeID, long[] nodeVersions, Query q, Sort sort, int numHits, ScoreDoc searchAfter) throws IOException { + TopDocs searchNode( + int nodeID, long[] nodeVersions, Query q, Sort sort, int numHits, ScoreDoc searchAfter) + throws IOException { final NodeState.ShardIndexSearcher s = nodes[nodeID].acquire(nodeVersions); try { if (sort == null) { @@ -167,7 +176,7 @@ public abstract class ShardSearchingTestBase extends LuceneTestCase { return s.localSearch(q, numHits); } } else { - assert searchAfter == null; // not supported yet + assert searchAfter == null; // not supported yet return s.localSearch(q, numHits, sort); } } finally { @@ -177,15 +186,16 @@ public abstract class ShardSearchingTestBase extends LuceneTestCase { // Mock: in a real env, this would hit the wire and get // term stats from remote node - Map getNodeTermStats(Set terms, int nodeID, long version) throws IOException { + Map getNodeTermStats(Set terms, int nodeID, long version) + throws IOException { final NodeState node = nodes[nodeID]; - final Map stats = new HashMap<>(); + final Map stats = new HashMap<>(); final IndexSearcher s = node.searchers.acquire(version); if (s == null) { throw new SearcherExpiredException("node=" + nodeID + " version=" + version); } try { - for(Term term : terms) { + for (Term term : terms) { final TermStates ts = TermStates.build(s.getIndexReader().getContext(), term, true); if (ts.docFreq() > 0) { stats.put(term, s.termStatistics(term, ts.docFreq(), ts.totalTermFreq())); @@ -211,13 +221,15 @@ public abstract class ShardSearchingTestBase extends LuceneTestCase { // local cache...? And still LRU otherwise (for the // still-live searchers). - private final Map collectionStatsCache = new ConcurrentHashMap<>(); - private final Map termStatsCache = new ConcurrentHashMap<>(); - - /** Matches docs in the local shard but scores based on - * aggregated stats ("mock distributed scoring") from all - * nodes. 
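// Sketch, not from the patch itself, of the "mock distributed scoring" idea
// implemented below: each shard scores against statistics summed over all
// nodes rather than its local ones. 'term' and 'perShardStats' are
// illustrative stand-ins for the cached per-node values.
long docFreq = 0;
long totalTermFreq = 0;
for (TermStatistics stats : perShardStats) { // one entry per node
  docFreq += stats.docFreq();
  totalTermFreq += stats.totalTermFreq();
}
TermStatistics global = new TermStatistics(term.bytes(), docFreq, totalTermFreq);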
*/ + private final Map collectionStatsCache = + new ConcurrentHashMap<>(); + private final Map termStatsCache = + new ConcurrentHashMap<>(); + /** + * Matches docs in the local shard but scores based on aggregated stats ("mock distributed + * scoring") from all nodes. + */ public class ShardIndexSearcher extends IndexSearcher { // Version for the node searchers we search: public final long[] nodeVersions; @@ -227,7 +239,8 @@ public abstract class ShardSearchingTestBase extends LuceneTestCase { super(localReader); this.nodeVersions = nodeVersions; myNodeID = nodeID; - assert myNodeID == NodeState.this.myNodeID: "myNodeID=" + nodeID + " NodeState.this.myNodeID=" + NodeState.this.myNodeID; + assert myNodeID == NodeState.this.myNodeID + : "myNodeID=" + nodeID + " NodeState.this.myNodeID=" + NodeState.this.myNodeID; } @Override @@ -239,22 +252,25 @@ public abstract class ShardSearchingTestBase extends LuceneTestCase { // Make a single request to remote nodes for term // stats: - for(int nodeID=0;nodeID missing = new HashSet<>(); - for(Term term : terms) { - final TermAndShardVersion key = new TermAndShardVersion(nodeID, nodeVersions[nodeID], term); + for (Term term : terms) { + final TermAndShardVersion key = + new TermAndShardVersion(nodeID, nodeVersions[nodeID], term); if (!termStatsCache.containsKey(key)) { missing.add(term); } } if (missing.size() != 0) { - for(Map.Entry ent : getNodeTermStats(missing, nodeID, nodeVersions[nodeID]).entrySet()) { + for (Map.Entry ent : + getNodeTermStats(missing, nodeID, nodeVersions[nodeID]).entrySet()) { if (ent.getValue() != null) { - final TermAndShardVersion key = new TermAndShardVersion(nodeID, nodeVersions[nodeID], ent.getKey()); + final TermAndShardVersion key = + new TermAndShardVersion(nodeID, nodeVersions[nodeID], ent.getKey()); termStatsCache.put(key, ent.getValue()); } } @@ -265,17 +281,19 @@ public abstract class ShardSearchingTestBase extends LuceneTestCase { } @Override - public TermStatistics termStatistics(Term term, int docFreq, long totalTermFreq) throws IOException { + public TermStatistics termStatistics(Term term, int docFreq, long totalTermFreq) + throws IOException { assert term != null; long distributedDocFreq = 0; long distributedTotalTermFreq = 0; - for(int nodeID=0;nodeID= 0; maxDoc += nodeStats.maxDoc(); } @@ -337,7 +356,7 @@ public abstract class ShardSearchingTestBase extends LuceneTestCase { @Override public TopDocs search(Query query, int numHits) throws IOException { final TopDocs[] shardHits = new TopDocs[nodeVersions.length]; - for(int nodeID=0;nodeID 0; assert state.getPosition() >= 0; assert state.getOffset() >= 0; - assert state.getMaxTermFrequency() >= 0; // TODO: seems to be 0 for omitTFAP? + assert state.getMaxTermFrequency() >= 0; // TODO: seems to be 0 for omitTFAP? assert state.getMaxTermFrequency() <= state.getLength(); assert state.getNumOverlap() >= 0; assert state.getNumOverlap() < state.getLength(); @@ -47,7 +47,8 @@ public class AssertingSimilarity extends Similarity { } @Override - public SimScorer scorer(float boost, CollectionStatistics collectionStats, TermStatistics... termStats) { + public SimScorer scorer( + float boost, CollectionStatistics collectionStats, TermStatistics... 
termStats) { assert boost >= 0; assert collectionStats != null; assert termStats.length > 0; @@ -59,11 +60,11 @@ public class AssertingSimilarity extends Similarity { assert scorer != null; return new AssertingSimScorer(scorer, boost); } - + static class AssertingSimScorer extends SimScorer { final SimScorer delegate; final float boost; - + AssertingSimScorer(SimScorer delegate, float boost) { super(); this.delegate = delegate; @@ -85,7 +86,7 @@ public class AssertingSimilarity extends Similarity { @Override public Explanation explain(Explanation freq, long norm) { - // freq in bounds + // freq in bounds assert freq != null; assert Float.isFinite(freq.getValue().floatValue()); // result in bounds @@ -93,7 +94,8 @@ public class AssertingSimilarity extends Similarity { assert explanation != null; assert Float.isFinite(explanation.getValue().floatValue()); // result matches score exactly - assert explanation.getValue().floatValue() == delegate.score(freq.getValue().floatValue(), norm); + assert explanation.getValue().floatValue() + == delegate.score(freq.getValue().floatValue(), norm); return explanation; } } @@ -102,5 +104,4 @@ public class AssertingSimilarity extends Similarity { public String toString() { return "Asserting(" + delegate + ")"; } - } diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/similarities/BaseSimilarityTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/search/similarities/BaseSimilarityTestCase.java index 94cc04c8b94..23943062dfa 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/search/similarities/BaseSimilarityTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/search/similarities/BaseSimilarityTestCase.java @@ -18,7 +18,6 @@ package org.apache.lucene.search.similarities; import java.io.IOException; import java.util.Random; - import org.apache.lucene.document.Document; import org.apache.lucene.document.FieldType; import org.apache.lucene.document.TextField; @@ -39,18 +38,16 @@ import org.junit.AfterClass; import org.junit.BeforeClass; /** - * Abstract class to do basic tests for a similarity. - * NOTE: This test focuses on the similarity impl, nothing else. - * The [stretch] goal is for this test to be - * so thorough in testing a new Similarity that if this - * test passes, then all Lucene/Solr tests should also pass. Ie, - * if there is some bug in a given Similarity that this - * test fails to catch then this test needs to be improved! */ + * Abstract class to do basic tests for a similarity. NOTE: This test focuses on the similarity + * impl, nothing else. The [stretch] goal is for this test to be so thorough in testing a new + * Similarity that if this test passes, then all Lucene/Solr tests should also pass. Ie, if there is + * some bug in a given Similarity that this test fails to catch then this test needs to be improved! + */ public abstract class BaseSimilarityTestCase extends LuceneTestCase { static LeafReader READER; static Directory DIR; - + @BeforeClass public static void beforeClass() throws Exception { // with norms @@ -64,7 +61,7 @@ public abstract class BaseSimilarityTestCase extends LuceneTestCase { READER = getOnlyLeafReader(writer.getReader()); writer.close(); } - + @AfterClass public static void afterClass() throws Exception { IOUtils.close(READER, DIR); @@ -72,25 +69,22 @@ public abstract class BaseSimilarityTestCase extends LuceneTestCase { DIR = null; } - /** - * Return a new similarity with all parameters randomized within valid ranges. 
- */ + /** Return a new similarity with all parameters randomized within valid ranges. */ protected abstract Similarity getSimilarity(Random random); - + static final long MAXDOC_FORTESTING = 1L << 48; // must be at least MAXDOC_FORTESTING + Integer.MAX_VALUE static final long MAXTOKENS_FORTESTING = 1L << 49; /** - * returns a random corpus that is at least possible given - * the norm value for a single document. + * returns a random corpus that is at least possible given the norm value for a single document. */ static CollectionStatistics newCorpus(Random random, int norm) { // lower bound of tokens in the collection (you produced this norm somehow) final int lowerBound; if (norm == 0) { // norms are omitted, but there must have been at least one token to produce that norm - lowerBound = 1; + lowerBound = 1; } else { // minimum value that would decode to such a norm lowerBound = SmallFloat.byte4ToInt((byte) norm); @@ -181,12 +175,10 @@ public abstract class BaseSimilarityTestCase extends LuceneTestCase { } return new CollectionStatistics("field", maxDoc, docCount, sumTotalTermFreq, sumDocFreq); } - + private static final BytesRef TERM = new BytesRef("term"); - /** - * returns new random term, that fits within the bounds of the corpus - */ + /** returns new random term, that fits within the bounds of the corpus */ static TermStatistics newTerm(Random random, CollectionStatistics corpus) { final long docFreq; switch (random.nextInt(3)) { @@ -207,7 +199,8 @@ public abstract class BaseSimilarityTestCase extends LuceneTestCase { // can't require docs to have > 2B tokens long upperBound; try { - upperBound = Math.min(corpus.sumTotalTermFreq(), Math.multiplyExact(docFreq, Integer.MAX_VALUE)); + upperBound = + Math.min(corpus.sumTotalTermFreq(), Math.multiplyExact(docFreq, Integer.MAX_VALUE)); } catch (ArithmeticException overflow) { upperBound = corpus.sumTotalTermFreq(); } @@ -234,19 +227,26 @@ public abstract class BaseSimilarityTestCase extends LuceneTestCase { } /** - * Tests scoring across a bunch of random terms/corpora/frequencies for each possible document length. - * It does the following checks: + * Tests scoring across a bunch of random terms/corpora/frequencies for each possible document + * length. It does the following checks: + * *

   * <ul>
   *   <li>scores are non-negative and finite.
   *   <li>score matches the explanation exactly.
-  *   <li>internal explanations calculations are sane (e.g. sum of: and so on actually compute sums)
-  *   <li>scores don't decrease as term frequencies increase: e.g. score(freq=N + 1) >= score(freq=N)
+  *   <li>internal explanations calculations are sane (e.g. sum of: and so on actually compute
+  *       sums)
+  *   <li>scores don't decrease as term frequencies increase: e.g. score(freq=N + 1) >=
+  *       score(freq=N)
   *   <li>scores don't decrease as documents get shorter, e.g. score(len=M) >= score(len=M+1)
   *   <li>scores don't decrease as terms get rarer, e.g. score(term=N) >= score(term=N+1)
-  *   <li>scoring works for floating point frequencies (e.g. sloppy phrase and span queries will work)
-  *   <li>scoring works for reasonably large 64-bit statistic values (e.g. distributed search will work)
-  *   <li>scoring works for reasonably large boost values (0 .. Integer.MAX_VALUE, e.g. query boosts will work)
-  *   <li>scoring works for parameters randomized within valid ranges (see {@link #getSimilarity(Random)})
+  *   <li>scoring works for floating point frequencies (e.g. sloppy phrase and span queries will
+  *       work)
+  *   <li>scoring works for reasonably large 64-bit statistic values (e.g. distributed search will
+  *       work)
+  *   <li>scoring works for reasonably large boost values (0 .. Integer.MAX_VALUE, e.g. query
+  *       boosts will work)
+  *   <li>scoring works for parameters randomized within valid ranges (see {@link
+  *       #getSimilarity(Random)})
   * </ul>
    */ public void testRandomScoring() throws Exception { @@ -270,7 +270,9 @@ public abstract class BaseSimilarityTestCase extends LuceneTestCase { freq = Math.toIntExact(term.totalTermFreq()); } else { // there is at least one other document, and those must have at least 1 instance each. - int upperBound = Math.toIntExact(Math.min(term.totalTermFreq() - term.docFreq() + 1, Integer.MAX_VALUE)); + int upperBound = + Math.toIntExact( + Math.min(term.totalTermFreq() - term.docFreq() + 1, Integer.MAX_VALUE)); if (random.nextBoolean()) { // integer freq switch (random.nextInt(3)) { @@ -300,8 +302,10 @@ public abstract class BaseSimilarityTestCase extends LuceneTestCase { freqCandidate = upperBound * random.nextFloat(); break; } - // we need to be 2nd float value at a minimum, the pairwise test will check MIN_VALUE in this case. - // this avoids testing frequencies of 0 which seem wrong to allow (we should enforce computeSlopFactor etc) + // we need to be 2nd float value at a minimum, the pairwise test will check + // MIN_VALUE in this case. + // this avoids testing frequencies of 0 which seem wrong to allow (we should enforce + // computeSlopFactor etc) if (freqCandidate <= Float.MIN_VALUE) { freqCandidate = Math.nextUp(Float.MIN_VALUE); } @@ -309,8 +313,10 @@ public abstract class BaseSimilarityTestCase extends LuceneTestCase { } } // we just limit the test to "reasonable" boost values but don't enforce this anywhere. - // too big, and you are asking for overflow. that's hard for a sim to enforce (but definitely possible) - // for now, we just want to detect overflow where its a real bug/hazard in the computation with reasonable inputs. + // too big, and you are asking for overflow. that's hard for a sim to enforce (but + // definitely possible) + // for now, we just want to detect overflow where its a real bug/hazard in the + // computation with reasonable inputs. 
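// Condensed sketch, not from the patch itself, of the core properties that
// doTestScoring verifies further below for every SimScorer; 'scorer', 'freq'
// and 'norm' are illustrative.
float base = scorer.score(freq, norm);
assert Float.isFinite(base) && base >= 0;      // finite and non-negative
assert scorer.score(freq + 1, norm) >= base;   // more occurrences never score lower
if (norm > 1) {
  assert scorer.score(freq, norm - 1) >= base; // shorter docs never score lower
}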
final float boost; switch (random.nextInt(5)) { case 0: @@ -340,9 +346,19 @@ public abstract class BaseSimilarityTestCase extends LuceneTestCase { } } } - - /** runs for a single test case, so that if you hit a test failure you can write a reproducer just for that scenario */ - private static void doTestScoring(Similarity similarity, CollectionStatistics corpus, TermStatistics term, float boost, float freq, int norm) throws IOException { + + /** + * runs for a single test case, so that if you hit a test failure you can write a reproducer just + * for that scenario + */ + private static void doTestScoring( + Similarity similarity, + CollectionStatistics corpus, + TermStatistics term, + float boost, + float freq, + int norm) + throws IOException { boolean success = false; SimScorer scorer = similarity.scorer(boost, corpus, term); try { @@ -355,30 +371,35 @@ public abstract class BaseSimilarityTestCase extends LuceneTestCase { assertTrue("negative score: " + score, score >= 0); assertTrue("greater than maxScore: " + score + ">" + maxScore, score <= maxScore); // check explanation matches - Explanation explanation = scorer.explain(Explanation.match(freq, "freq, occurrences of term within document"), norm); + Explanation explanation = + scorer.explain( + Explanation.match(freq, "freq, occurrences of term within document"), norm); if (score != explanation.getValue().doubleValue()) { fail("expected: " + score + ", got: " + explanation); } if (rarely()) { CheckHits.verifyExplanation("", 0, score, true, explanation); } - - // check score(freq-1), given the same norm it should be <= score(freq) [scores non-decreasing for more term occurrences] + + // check score(freq-1), given the same norm it should be <= score(freq) [scores non-decreasing + // for more term occurrences] final float prevFreq; - if (random().nextBoolean() && freq == (int)freq && freq > 1 && term.docFreq() > 1) { + if (random().nextBoolean() && freq == (int) freq && freq > 1 && term.docFreq() > 1) { // previous in integer space prevFreq = freq - 1; } else { // previous in float space (e.g. 
for sloppyPhrase) prevFreq = Math.nextDown(freq); } - + float prevScore = scorer.score(prevFreq, norm); // check that score isn't infinite or negative assertTrue(Float.isFinite(prevScore)); assertTrue(prevScore >= 0); // check explanation matches - Explanation prevExplanation = scorer.explain(Explanation.match(prevFreq, "freq, occurrences of term within document"), norm); + Explanation prevExplanation = + scorer.explain( + Explanation.match(prevFreq, "freq, occurrences of term within document"), norm); if (prevScore != prevExplanation.getValue().doubleValue()) { fail("expected: " + prevScore + ", got: " + prevExplanation); } @@ -391,52 +412,85 @@ public abstract class BaseSimilarityTestCase extends LuceneTestCase { System.out.println(explanation); fail("score(" + prevFreq + ")=" + prevScore + " > score(" + freq + ")=" + score); } - - // check score(norm-1), given the same freq it should be >= score(norm) [scores non-decreasing as docs get shorter] + + // check score(norm-1), given the same freq it should be >= score(norm) [scores non-decreasing + // as docs get shorter] if (norm > 1) { float prevNormScore = scorer.score(freq, norm - 1); // check that score isn't infinite or negative assertTrue(Float.isFinite(prevNormScore)); assertTrue(prevNormScore >= 0); // check explanation matches - Explanation prevNormExplanation = scorer.explain(Explanation.match(freq, "freq, occurrences of term within document"), norm - 1); + Explanation prevNormExplanation = + scorer.explain( + Explanation.match(freq, "freq, occurrences of term within document"), norm - 1); if (prevNormScore != prevNormExplanation.getValue().doubleValue()) { fail("expected: " + prevNormScore + ", got: " + prevNormExplanation); } if (rarely()) { - CheckHits.verifyExplanation("test query (prevNorm)", 0, prevNormScore, true, prevNormExplanation); + CheckHits.verifyExplanation( + "test query (prevNorm)", 0, prevNormScore, true, prevNormExplanation); } if (prevNormScore < score) { System.out.println(prevNormExplanation); System.out.println(explanation); - fail("score(" + freq + "," + (norm-1) + ")=" + prevNormScore + " < score(" + freq + "," + norm + ")=" + score); + fail( + "score(" + + freq + + "," + + (norm - 1) + + ")=" + + prevNormScore + + " < score(" + + freq + + "," + + norm + + ")=" + + score); } } - - // check score(term-1), given the same freq/norm it should be >= score(term) [scores non-decreasing as terms get rarer] + + // check score(term-1), given the same freq/norm it should be >= score(term) [scores + // non-decreasing as terms get rarer] if (term.docFreq() > 1 && freq < term.totalTermFreq()) { - TermStatistics prevTerm = new TermStatistics(term.term(), term.docFreq() - 1, term.totalTermFreq() - 1); + TermStatistics prevTerm = + new TermStatistics(term.term(), term.docFreq() - 1, term.totalTermFreq() - 1); SimScorer prevTermScorer = similarity.scorer(boost, corpus, term); float prevTermScore = prevTermScorer.score(freq, norm); // check that score isn't infinite or negative assertTrue(Float.isFinite(prevTermScore)); assertTrue(prevTermScore >= 0); // check explanation matches - Explanation prevTermExplanation = prevTermScorer.explain(Explanation.match(freq, "freq, occurrences of term within document"), norm); + Explanation prevTermExplanation = + prevTermScorer.explain( + Explanation.match(freq, "freq, occurrences of term within document"), norm); if (prevTermScore != prevTermExplanation.getValue().doubleValue()) { fail("expected: " + prevTermScore + ", got: " + prevTermExplanation); } if (rarely()) { - 
CheckHits.verifyExplanation("test query (prevTerm)", 0, prevTermScore, true, prevTermExplanation); + CheckHits.verifyExplanation( + "test query (prevTerm)", 0, prevTermScore, true, prevTermExplanation); } if (prevTermScore < score) { System.out.println(prevTermExplanation); System.out.println(explanation); - fail("score(" + freq + "," + (prevTerm) + ")=" + prevTermScore + " < score(" + freq + "," + term + ")=" + score); + fail( + "score(" + + freq + + "," + + (prevTerm) + + ")=" + + prevTermScore + + " < score(" + + freq + + "," + + term + + ")=" + + score); } } - + success = true; } finally { if (!success) { @@ -446,10 +500,11 @@ public abstract class BaseSimilarityTestCase extends LuceneTestCase { if (norm == 0) { System.out.println("norms=omitted"); } else { - System.out.println("norm=" + norm + " (doc length ~ " + SmallFloat.byte4ToInt((byte) norm) + ")"); + System.out.println( + "norm=" + norm + " (doc length ~ " + SmallFloat.byte4ToInt((byte) norm) + ")"); } System.out.println("freq=" + freq); } } - } + } } diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/similarities/RandomSimilarity.java b/lucene/test-framework/src/java/org/apache/lucene/search/similarities/RandomSimilarity.java index 0925aee3ac6..08f917f2a98 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/search/similarities/RandomSimilarity.java +++ b/lucene/test-framework/src/java/org/apache/lucene/search/similarities/RandomSimilarity.java @@ -24,46 +24,43 @@ import java.util.Map; import java.util.Random; /** - * Similarity implementation that randomizes Similarity implementations - * per-field. - *

- * <p>
- * The choices are 'sticky', so the selected algorithm is always used
- * for the same field.
+ * Similarity implementation that randomizes Similarity implementations per-field.
+ *
+ * <p>
    The choices are 'sticky', so the selected algorithm is always used for the same field. */ public class RandomSimilarity extends PerFieldSimilarityWrapper { final BM25Similarity defaultSim = new BM25Similarity(); final List knownSims; - Map previousMappings = new HashMap<>(); + Map previousMappings = new HashMap<>(); final int perFieldSeed; final boolean shouldQueryNorm; - + public RandomSimilarity(Random random) { perFieldSeed = random.nextInt(); shouldQueryNorm = random.nextBoolean(); knownSims = new ArrayList<>(allSims); Collections.shuffle(knownSims, random); } - + @Override public synchronized Similarity get(String field) { assert field != null; Similarity sim = previousMappings.get(field); if (sim == null) { - sim = knownSims.get(Math.max(0, Math.abs(perFieldSeed ^ field.hashCode())) % knownSims.size()); + sim = + knownSims.get(Math.max(0, Math.abs(perFieldSeed ^ field.hashCode())) % knownSims.size()); previousMappings.put(field, sim); } return sim; } - + // all the similarities that we rotate through /** The DFR basic models to test. */ static BasicModel[] BASIC_MODELS = { new BasicModelG(), new BasicModelIF(), new BasicModelIn(), new BasicModelIne(), }; /** The DFR aftereffects to test. */ - static AfterEffect[] AFTER_EFFECTS = { - new AfterEffectB(), new AfterEffectL() - }; + static AfterEffect[] AFTER_EFFECTS = {new AfterEffectB(), new AfterEffectL()}; /** The DFR normalizations to test. */ static Normalization[] NORMALIZATIONS = { new NormalizationH1(), new NormalizationH2(), @@ -73,18 +70,16 @@ public class RandomSimilarity extends PerFieldSimilarityWrapper { // new Normalization.NoNormalization() }; /** The distributions for IB. */ - static Distribution[] DISTRIBUTIONS = { - new DistributionLL(), new DistributionSPL() - }; + static Distribution[] DISTRIBUTIONS = {new DistributionLL(), new DistributionSPL()}; /** Lambdas for IB. 
*/ - static Lambda[] LAMBDAS = { - new LambdaDF(), new LambdaTTF() - }; + static Lambda[] LAMBDAS = {new LambdaDF(), new LambdaTTF()}; /** Independence measures for DFI */ static Independence[] INDEPENDENCE_MEASURES = { - new IndependenceStandardized(), new IndependenceSaturated(), new IndependenceChiSquared() + new IndependenceStandardized(), new IndependenceSaturated(), new IndependenceChiSquared() }; + static List allSims; + static { allSims = new ArrayList<>(); allSims.add(new ClassicSimilarity()); @@ -116,7 +111,7 @@ public class RandomSimilarity extends PerFieldSimilarityWrapper { allSims.add(new DFISimilarity(independence)); } } - + @Override public synchronized String toString() { return "RandomSimilarity(queryNorm=" + shouldQueryNorm + "): " + previousMappings.toString(); diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/spans/AssertingSpanQuery.java b/lucene/test-framework/src/java/org/apache/lucene/search/spans/AssertingSpanQuery.java index 1040a3a7678..c797fcc9d62 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/search/spans/AssertingSpanQuery.java +++ b/lucene/test-framework/src/java/org/apache/lucene/search/spans/AssertingSpanQuery.java @@ -18,7 +18,6 @@ package org.apache.lucene.search.spans; import java.io.IOException; import java.util.Objects; - import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; @@ -28,7 +27,7 @@ import org.apache.lucene.search.ScoreMode; /** Wraps a span query with asserts */ public class AssertingSpanQuery extends SpanQuery { private final SpanQuery in; - + public AssertingSpanQuery(SpanQuery in) { this.in = in; } @@ -44,7 +43,8 @@ public class AssertingSpanQuery extends SpanQuery { } @Override - public SpanWeight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + public SpanWeight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) + throws IOException { SpanWeight weight = in.createWeight(searcher, scoreMode, boost); return new AssertingSpanWeight(searcher, weight); } @@ -73,8 +73,7 @@ public class AssertingSpanQuery extends SpanQuery { @Override public boolean equals(Object o) { - return sameClassAs(o) && - equalsTo(getClass().cast(o)); + return sameClassAs(o) && equalsTo(getClass().cast(o)); } private boolean equalsTo(AssertingSpanQuery other) { diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/spans/AssertingSpanWeight.java b/lucene/test-framework/src/java/org/apache/lucene/search/spans/AssertingSpanWeight.java index 9c73a4396ac..ab616846d71 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/search/spans/AssertingSpanWeight.java +++ b/lucene/test-framework/src/java/org/apache/lucene/search/spans/AssertingSpanWeight.java @@ -18,7 +18,6 @@ package org.apache.lucene.search.spans; import java.io.IOException; import java.util.Map; - import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermStates; @@ -26,15 +25,14 @@ import org.apache.lucene.search.Explanation; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.LeafSimScorer; -/** - * Wraps a SpanWeight with additional asserts - */ +/** Wraps a SpanWeight with additional asserts */ public class AssertingSpanWeight extends SpanWeight { final SpanWeight in; /** * Create an AssertingSpanWeight + * * @param in the SpanWeight to wrap * @throws IOException on error */ @@ -51,8 +49,7 @@ public class AssertingSpanWeight 
extends SpanWeight { @Override public Spans getSpans(LeafReaderContext context, Postings requiredPostings) throws IOException { Spans spans = in.getSpans(context, requiredPostings); - if (spans == null) - return null; + if (spans == null) return null; return new AssertingSpans(spans); } @@ -60,6 +57,7 @@ public class AssertingSpanWeight extends SpanWeight { public LeafSimScorer getSimScorer(LeafReaderContext context) throws IOException { return in.getSimScorer(context); } + @Override public SpanScorer scorer(LeafReaderContext context) throws IOException { return in.scorer(context); diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/spans/AssertingSpans.java b/lucene/test-framework/src/java/org/apache/lucene/search/spans/AssertingSpans.java index 9dba2c78b30..ec5a2edad36 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/search/spans/AssertingSpans.java +++ b/lucene/test-framework/src/java/org/apache/lucene/search/spans/AssertingSpans.java @@ -17,106 +17,99 @@ package org.apache.lucene.search.spans; import java.io.IOException; - import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.TwoPhaseIterator; -/** - * Wraps a Spans with additional asserts - */ +/** Wraps a Spans with additional asserts */ class AssertingSpans extends Spans { final Spans in; int doc = -1; - - /** - * tracks current state of this spans - */ - static enum State { - /** - * document iteration has not yet begun ({@link #docID()} = -1) - */ + + /** tracks current state of this spans */ + static enum State { + /** document iteration has not yet begun ({@link #docID()} = -1) */ DOC_START, - + /** - * two-phase iterator has moved to a new docid, but {@link TwoPhaseIterator#matches()} has - * not been called or it returned false (so you should not do things with the enum) + * two-phase iterator has moved to a new docid, but {@link TwoPhaseIterator#matches()} has not + * been called or it returned false (so you should not do things with the enum) */ DOC_UNVERIFIED, - + /** - * iterator set to a valid docID, but position iteration has not yet begun ({@link #startPosition() == -1}) + * iterator set to a valid docID, but position iteration has not yet begun ({@link + * #startPosition() == -1}) */ POS_START, - + /** - * iterator set to a valid docID, and positioned (-1 < {@link #startPosition()} < {@link #NO_MORE_POSITIONS}) + * iterator set to a valid docID, and positioned (-1 < {@link #startPosition()} < {@link + * #NO_MORE_POSITIONS}) */ ITERATING, - - /** - * positions exhausted ({@link #startPosition()} = {@link #NO_MORE_POSITIONS}) - */ + + /** positions exhausted ({@link #startPosition()} = {@link #NO_MORE_POSITIONS}) */ POS_FINISHED, - - /** - * documents exhausted ({@link #docID()} = {@link #NO_MORE_DOCS}) - */ - DOC_FINISHED + + /** documents exhausted ({@link #docID()} = {@link #NO_MORE_DOCS}) */ + DOC_FINISHED }; - + State state = State.DOC_START; - + AssertingSpans(Spans in) { this.in = in; } - + @Override public int nextStartPosition() throws IOException { assert state != State.DOC_START : "invalid position access, state=" + state + ": " + in; assert state != State.DOC_FINISHED : "invalid position access, state=" + state + ": " + in; assert state != State.DOC_UNVERIFIED : "invalid position access, state=" + state + ": " + in; - + checkCurrentPositions(); - + // move to next position int prev = in.startPosition(); int start = in.nextStartPosition(); - assert start >= prev : "invalid startPosition (positions went backwards, previous=" + prev + "): " + in; - + 
assert start >= prev + : "invalid startPosition (positions went backwards, previous=" + prev + "): " + in; + // transition state if necessary if (start == NO_MORE_POSITIONS) { state = State.POS_FINISHED; } else { state = State.ITERATING; } - + // check new positions checkCurrentPositions(); return start; } - - private void checkCurrentPositions() { + + private void checkCurrentPositions() { int start = in.startPosition(); int end = in.endPosition(); - + if (state == State.DOC_START || state == State.DOC_UNVERIFIED || state == State.POS_START) { assert start == -1 : "invalid startPosition (should be -1): " + in; assert end == -1 : "invalid endPosition (should be -1): " + in; } else if (state == State.POS_FINISHED) { - assert start == NO_MORE_POSITIONS : "invalid startPosition (should be NO_MORE_POSITIONS): " + in; + assert start == NO_MORE_POSITIONS + : "invalid startPosition (should be NO_MORE_POSITIONS): " + in; assert end == NO_MORE_POSITIONS : "invalid endPosition (should be NO_MORE_POSITIONS): " + in; } else { assert start >= 0 : "invalid startPosition (negative): " + in; assert start <= end : "invalid startPosition (> endPosition): " + in; - } + } } - + @Override public int startPosition() { checkCurrentPositions(); return in.startPosition(); } - + @Override public int endPosition() { checkCurrentPositions(); @@ -140,10 +133,16 @@ class AssertingSpans extends Spans { @Override public int docID() { int doc = in.docID(); - assert doc == this.doc : "broken docID() impl: docID() = " + doc + ", but next/advance last returned: " + this.doc + ": " + in; + assert doc == this.doc + : "broken docID() impl: docID() = " + + doc + + ", but next/advance last returned: " + + this.doc + + ": " + + in; return doc; } - + @Override public int nextDoc() throws IOException { assert state != State.DOC_FINISHED : "nextDoc() called after NO_MORE_DOCS: " + in; @@ -159,13 +158,14 @@ class AssertingSpans extends Spans { doc = nextDoc; return docID(); } - + @Override public int advance(int target) throws IOException { assert state != State.DOC_FINISHED : "advance() called after NO_MORE_DOCS: " + in; assert target > doc : "target must be > docID(), got " + target + " <= " + doc + ": " + in; int advanced = in.advance(target); - assert advanced >= target : "backwards advance from: " + target + " to: " + advanced + ": " + in; + assert advanced >= target + : "backwards advance from: " + target + " to: " + advanced + ": " + in; if (advanced == DocIdSetIterator.NO_MORE_DOCS) { state = State.DOC_FINISHED; } else { @@ -176,7 +176,7 @@ class AssertingSpans extends Spans { doc = advanced; return docID(); } - + @Override public String toString() { return "Asserting(" + in + ")"; @@ -190,7 +190,7 @@ class AssertingSpans extends Spans { @Override public float positionsCost() { float cost = in.positionsCost(); - assert ! 
Float.isNaN(cost) : "positionsCost() should not be NaN"; + assert !Float.isNaN(cost) : "positionsCost() should not be NaN"; assert cost > 0 : "positionsCost() must be positive"; return cost; } @@ -207,19 +207,21 @@ class AssertingSpans extends Spans { class AssertingTwoPhaseView extends TwoPhaseIterator { final TwoPhaseIterator in; int lastDoc = -1; - + AssertingTwoPhaseView(TwoPhaseIterator iterator) { super(new AssertingDISI(iterator.approximation())); this.in = iterator; } - + @Override public boolean matches() throws IOException { if (approximation.docID() == -1 || approximation.docID() == DocIdSetIterator.NO_MORE_DOCS) { - throw new AssertionError("matches() should not be called on doc ID " + approximation.docID()); + throw new AssertionError( + "matches() should not be called on doc ID " + approximation.docID()); } if (lastDoc == approximation.docID()) { - throw new AssertionError("matches() has been called twice on doc ID " + approximation.docID()); + throw new AssertionError( + "matches() has been called twice on doc ID " + approximation.docID()); } lastDoc = approximation.docID(); boolean v = in.matches(); @@ -233,28 +235,30 @@ class AssertingSpans extends Spans { public float matchCost() { float cost = in.matchCost(); if (Float.isNaN(cost)) { - throw new AssertionError("matchCost()=" + cost + " should not be NaN on doc ID " + approximation.docID()); + throw new AssertionError( + "matchCost()=" + cost + " should not be NaN on doc ID " + approximation.docID()); } if (cost < 0) { - throw new AssertionError("matchCost()=" + cost + " should be non negative on doc ID " + approximation.docID()); + throw new AssertionError( + "matchCost()=" + cost + " should be non negative on doc ID " + approximation.docID()); } return cost; } } - + class AssertingDISI extends DocIdSetIterator { final DocIdSetIterator in; - + AssertingDISI(DocIdSetIterator in) { this.in = in; } - + @Override public int docID() { assert in.docID() == AssertingSpans.this.docID(); return in.docID(); } - + @Override public int nextDoc() throws IOException { assert state != State.DOC_FINISHED : "nextDoc() called after NO_MORE_DOCS: " + in; @@ -268,13 +272,14 @@ class AssertingSpans extends Spans { doc = nextDoc; return docID(); } - + @Override public int advance(int target) throws IOException { assert state != State.DOC_FINISHED : "advance() called after NO_MORE_DOCS: " + in; assert target > doc : "target must be > docID(), got " + target + " <= " + doc + ": " + in; int advanced = in.advance(target); - assert advanced >= target : "backwards advance from: " + target + " to: " + advanced + ": " + in; + assert advanced >= target + : "backwards advance from: " + target + " to: " + advanced + ": " + in; if (advanced == DocIdSetIterator.NO_MORE_DOCS) { state = State.DOC_FINISHED; } else { @@ -283,7 +288,7 @@ class AssertingSpans extends Spans { doc = advanced; return docID(); } - + @Override public long cost() { return in.cost(); diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/spans/SpanTestUtil.java b/lucene/test-framework/src/java/org/apache/lucene/search/spans/SpanTestUtil.java index 138804366d5..ffed4d03e92 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/search/spans/SpanTestUtil.java +++ b/lucene/test-framework/src/java/org/apache/lucene/search/spans/SpanTestUtil.java @@ -16,35 +16,30 @@ */ package org.apache.lucene.search.spans; -import java.io.IOException; +import static org.junit.Assert.*; +import java.io.IOException; import org.apache.lucene.index.Term; import 
org.apache.lucene.search.QueryUtils; -import static org.junit.Assert.*; - /** Some utility methods used for testing span queries */ public class SpanTestUtil { - - /** - * Adds additional asserts to a spanquery. Highly recommended - * if you want tests to actually be debuggable. + + /** + * Adds additional asserts to a spanquery. Highly recommended if you want tests to actually be + * debuggable. */ public static SpanQuery spanQuery(SpanQuery query) { QueryUtils.check(query); return new AssertingSpanQuery(query); } - - /** - * Makes a new SpanTermQuery (with additional asserts). - */ + + /** Makes a new SpanTermQuery (with additional asserts). */ public static SpanQuery spanTermQuery(String field, String term) { return spanQuery(new SpanTermQuery(new Term(field, term))); } - - /** - * Makes a new SpanOrQuery (with additional asserts) from the provided {@code terms}. - */ + + /** Makes a new SpanOrQuery (with additional asserts) from the provided {@code terms}. */ public static SpanQuery spanOrQuery(String field, String... terms) { SpanQuery[] subqueries = new SpanQuery[terms.length]; for (int i = 0; i < terms.length; i++) { @@ -52,59 +47,43 @@ public class SpanTestUtil { } return spanOrQuery(subqueries); } - - /** - * Makes a new SpanOrQuery (with additional asserts). - */ + + /** Makes a new SpanOrQuery (with additional asserts). */ public static SpanQuery spanOrQuery(SpanQuery... subqueries) { return spanQuery(new SpanOrQuery(subqueries)); } - - /** - * Makes a new SpanNotQuery (with additional asserts). - */ + + /** Makes a new SpanNotQuery (with additional asserts). */ public static SpanQuery spanNotQuery(SpanQuery include, SpanQuery exclude) { return spanQuery(new SpanNotQuery(include, exclude)); } - - /** - * Makes a new SpanNotQuery (with additional asserts). - */ + + /** Makes a new SpanNotQuery (with additional asserts). */ public static SpanQuery spanNotQuery(SpanQuery include, SpanQuery exclude, int pre, int post) { return spanQuery(new SpanNotQuery(include, exclude, pre, post)); } - - /** - * Makes a new SpanFirstQuery (with additional asserts). - */ + + /** Makes a new SpanFirstQuery (with additional asserts). */ public static SpanQuery spanFirstQuery(SpanQuery query, int end) { return spanQuery(new SpanFirstQuery(query, end)); } - - /** - * Makes a new SpanPositionRangeQuery (with additional asserts). - */ + + /** Makes a new SpanPositionRangeQuery (with additional asserts). */ public static SpanQuery spanPositionRangeQuery(SpanQuery query, int start, int end) { return spanQuery(new SpanPositionRangeQuery(query, start, end)); } - - /** - * Makes a new SpanContainingQuery (with additional asserts). - */ + + /** Makes a new SpanContainingQuery (with additional asserts). */ public static SpanQuery spanContainingQuery(SpanQuery big, SpanQuery little) { return spanQuery(new SpanContainingQuery(big, little)); } - - /** - * Makes a new SpanWithinQuery (with additional asserts). - */ + + /** Makes a new SpanWithinQuery (with additional asserts). */ public static SpanQuery spanWithinQuery(SpanQuery big, SpanQuery little) { return spanQuery(new SpanWithinQuery(big, little)); } - - /** - * Makes a new ordered SpanNearQuery (with additional asserts) from the provided {@code terms} - */ + + /** Makes a new ordered SpanNearQuery (with additional asserts) from the provided {@code terms} */ public static SpanQuery spanNearOrderedQuery(String field, int slop, String... 
terms) { SpanQuery[] subqueries = new SpanQuery[terms.length]; for (int i = 0; i < terms.length; i++) { @@ -112,14 +91,12 @@ public class SpanTestUtil { } return spanNearOrderedQuery(slop, subqueries); } - - /** - * Makes a new ordered SpanNearQuery (with additional asserts) - */ + + /** Makes a new ordered SpanNearQuery (with additional asserts) */ public static SpanQuery spanNearOrderedQuery(int slop, SpanQuery... subqueries) { return spanQuery(new SpanNearQuery(subqueries, slop, true)); } - + /** * Makes a new unordered SpanNearQuery (with additional asserts) from the provided {@code terms} */ @@ -131,24 +108,25 @@ public class SpanTestUtil { } return spanQuery(builder.build()); } - - /** - * Makes a new unordered SpanNearQuery (with additional asserts) - */ + + /** Makes a new unordered SpanNearQuery (with additional asserts) */ public static SpanQuery spanNearUnorderedQuery(int slop, SpanQuery... subqueries) { return spanQuery(new SpanNearQuery(subqueries, slop, false)); } - - /** - * Assert the next iteration from {@code spans} is a match - * from {@code start} to {@code end} in {@code doc}. + + /** + * Assert the next iteration from {@code spans} is a match from {@code start} to {@code end} in + * {@code doc}. */ public static void assertNext(Spans spans, int doc, int start, int end) throws IOException { if (spans.docID() >= doc) { assertEquals("docId", doc, spans.docID()); } else { // nextDoc needed before testing start/end if (spans.docID() >= 0) { - assertEquals("nextStartPosition of previous doc", Spans.NO_MORE_POSITIONS, spans.nextStartPosition()); + assertEquals( + "nextStartPosition of previous doc", + Spans.NO_MORE_POSITIONS, + spans.nextStartPosition()); assertEquals("endPosition of previous doc", Spans.NO_MORE_POSITIONS, spans.endPosition()); } assertEquals("nextDoc", doc, spans.nextDoc()); @@ -163,10 +141,8 @@ public class SpanTestUtil { assertEquals("endPosition", end, spans.endPosition()); } } - - /** - * Assert that {@code spans} is exhausted. - */ + + /** Assert that {@code spans} is exhausted. 
*/ public static void assertFinished(Spans spans) throws Exception { if (spans != null) { // null Spans is empty assertNext(spans, Spans.NO_MORE_DOCS, -2, -2); // start and end positions will be ignored diff --git a/lucene/test-framework/src/java/org/apache/lucene/store/BaseDirectoryTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/store/BaseDirectoryTestCase.java index 212f53b1854..3e42751f656 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/store/BaseDirectoryTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/store/BaseDirectoryTestCase.java @@ -16,6 +16,9 @@ */ package org.apache.lucene.store; +import com.carrotsearch.randomizedtesting.RandomizedTest; +import com.carrotsearch.randomizedtesting.generators.RandomBytes; +import com.carrotsearch.randomizedtesting.generators.RandomPicks; import java.io.EOFException; import java.io.FileNotFoundException; import java.io.IOException; @@ -39,7 +42,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; import java.util.stream.IntStream; import java.util.zip.CRC32; - import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.index.IndexNotFoundException; @@ -49,28 +51,23 @@ import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; import org.junit.Assert; -import com.carrotsearch.randomizedtesting.RandomizedTest; -import com.carrotsearch.randomizedtesting.generators.RandomBytes; -import com.carrotsearch.randomizedtesting.generators.RandomPicks; - -/** - * Base class for {@link Directory} implementations. - */ +/** Base class for {@link Directory} implementations. */ public abstract class BaseDirectoryTestCase extends LuceneTestCase { - /** A subclass returns the Directory to be tested; if it's - * an FS-based directory it should point to the specified - * path, else it can ignore it. */ + /** + * A subclass returns the Directory to be tested; if it's an FS-based directory it should point to + * the specified path, else it can ignore it. 
+ */ protected abstract Directory getDirectory(Path path) throws IOException; public void testCopyFrom() throws Exception { try (Directory source = getDirectory(createTempDir("testCopy")); - Directory dest = newDirectory()) { + Directory dest = newDirectory()) { runCopyFrom(source, dest); } try (Directory source = newDirectory(); - Directory dest = getDirectory(createTempDir("testCopyDestination"))) { + Directory dest = getDirectory(createTempDir("testCopyDestination"))) { runCopyFrom(source, dest); } } @@ -125,9 +122,11 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { dir.deleteFile("foo.txt"); Assert.assertFalse(Arrays.asList(dir.listAll()).contains(file)); - expectThrowsAnyOf(Arrays.asList(NoSuchFileException.class, FileNotFoundException.class), () -> { - dir.deleteFile("foo.txt"); - }); + expectThrowsAnyOf( + Arrays.asList(NoSuchFileException.class, FileNotFoundException.class), + () -> { + dir.deleteFile("foo.txt"); + }); } } @@ -224,14 +223,14 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { final int offset = random().nextInt(8); final int length = TestUtil.nextInt(random(), 1, 16); try (IndexOutput out = dir.createOutput("littleEndianLongs", newIOContext(random()))) { - byte[] b = new byte[offset + length * Long.BYTES - TestUtil.nextInt(random(), 1, Long.BYTES)]; + byte[] b = + new byte[offset + length * Long.BYTES - TestUtil.nextInt(random(), 1, Long.BYTES)]; random().nextBytes(b); out.writeBytes(b, b.length); } try (IndexInput input = dir.openInput("littleEndianLongs", newIOContext(random()))) { input.seek(offset); - expectThrows(EOFException.class, - () -> input.readLELongs(new long[length], 0, length)); + expectThrows(EOFException.class, () -> input.readLELongs(new long[length], 0, length)); } } } @@ -356,23 +355,29 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { Set set1 = input.readSetOfStrings(); assertEquals(asSet("test1", "test2"), set1); // set should be immutable - expectThrows(UnsupportedOperationException.class, () -> { - set1.add("bogus"); - }); + expectThrows( + UnsupportedOperationException.class, + () -> { + set1.add("bogus"); + }); Set set2 = input.readSetOfStrings(); assertEquals(Collections.emptySet(), set2); // set should be immutable - expectThrows(UnsupportedOperationException.class, () -> { - set2.add("bogus"); - }); + expectThrows( + UnsupportedOperationException.class, + () -> { + set2.add("bogus"); + }); Set set3 = input.readSetOfStrings(); assertEquals(Collections.singleton("test3"), set3); // set should be immutable - expectThrows(UnsupportedOperationException.class, () -> { - set3.add("bogus"); - }); + expectThrows( + UnsupportedOperationException.class, + () -> { + set3.add("bogus"); + }); assertEquals(input.length(), input.getFilePointer()); input.close(); @@ -380,7 +385,7 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { } public void testMapOfStrings() throws Exception { - Map m = new HashMap<>(); + Map m = new HashMap<>(); m.put("test1", "value1"); m.put("test2", "value2"); @@ -395,23 +400,29 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { Map map1 = input.readMapOfStrings(); assertEquals(m, map1); // map should be immutable - expectThrows(UnsupportedOperationException.class, () -> { - map1.put("bogus1", "bogus2"); - }); + expectThrows( + UnsupportedOperationException.class, + () -> { + map1.put("bogus1", "bogus2"); + }); Map map2 = input.readMapOfStrings(); assertEquals(Collections.emptyMap(), map2); // map should be immutable - 
expectThrows(UnsupportedOperationException.class, () -> { - map2.put("bogus1", "bogus2"); - }); + expectThrows( + UnsupportedOperationException.class, + () -> { + map2.put("bogus1", "bogus2"); + }); Map map3 = input.readMapOfStrings(); assertEquals(Collections.singletonMap("key", "value"), map3); // map should be immutable - expectThrows(UnsupportedOperationException.class, () -> { - map3.put("bogus1", "bogus2"); - }); + expectThrows( + UnsupportedOperationException.class, + () -> { + map3.put("bogus1", "bogus2"); + }); assertEquals(input.length(), input.getFilePointer()); input.close(); @@ -439,15 +450,16 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { } } - /** Make sure directory throws AlreadyClosedException if - * you try to createOutput after closing. */ + /** Make sure directory throws AlreadyClosedException if you try to createOutput after closing. */ public void testDetectClose() throws Throwable { Directory dir = getDirectory(createTempDir("testDetectClose")); dir.close(); - expectThrows(AlreadyClosedException.class, () -> { - dir.createOutput("test", newIOContext(random())); - }); + expectThrows( + AlreadyClosedException.class, + () -> { + dir.createOutput("test", newIOContext(random())); + }); } public void testThreadSafetyInListAll() throws Exception { @@ -462,51 +474,61 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { } AtomicBoolean stop = new AtomicBoolean(); - Thread writer = new Thread(() -> { - try { - for (int i = 0, max = RandomizedTest.randomIntBetween(500, 1000); i < max; i++) { - String fileName = "file-" + i; - try (IndexOutput output = dir.createOutput(fileName, newIOContext(random()))) { - assert output != null; - // Add some lags so that the other thread can read the content of the directory. - Thread.yield(); - } - assertTrue(slowFileExists(dir, fileName)); - } - } catch (IOException e) { - throw new UncheckedIOException(e); - } finally { - stop.set(true); - } - }); - - Thread reader = new Thread(() -> { - try { - Random rnd = new Random(RandomizedTest.randomLong()); - while (!stop.get()) { - String [] files = Arrays.stream(dir.listAll()) - .filter(name -> !ExtrasFS.isExtra(name)) // Ignore anything from ExtraFS. - .toArray(String[]::new); - - if (files.length > 0) { - do { - String file = RandomPicks.randomFrom(rnd, files); - try (IndexInput input = dir.openInput(file, newIOContext(random()))) { - // Just open, nothing else. - assert input != null; - } catch (AccessDeniedException e) { - // Access denied is allowed for files for which the output is still open (MockDirectoryWriter enforces - // this, for example). Since we don't synchronize with the writer thread, just ignore it. + Thread writer = + new Thread( + () -> { + try { + for (int i = 0, max = RandomizedTest.randomIntBetween(500, 1000); i < max; i++) { + String fileName = "file-" + i; + try (IndexOutput output = dir.createOutput(fileName, newIOContext(random()))) { + assert output != null; + // Add some lags so that the other thread can read the content of the + // directory. + Thread.yield(); + } + assertTrue(slowFileExists(dir, fileName)); + } } catch (IOException e) { - throw new UncheckedIOException("Something went wrong when opening: " + file, e); + throw new UncheckedIOException(e); + } finally { + stop.set(true); } - } while (rnd.nextInt(3) != 0); // Sometimes break and list files again. 
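The set-of-strings and map-of-strings round trips reformatted above also assert that the collections handed back by the reader are unmodifiable. A minimal, self-contained sketch of that contract, separate from this patch (class and file names are hypothetical):

import java.util.Map;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.util.LuceneTestCase;

public class TestStoredMapSketch extends LuceneTestCase {
  public void testMapRoundTrip() throws Exception {
    try (Directory dir = newDirectory()) {
      try (IndexOutput out = dir.createOutput("map.bin", IOContext.DEFAULT)) {
        out.writeMapOfStrings(Map.of("key", "value")); // written as a count plus entries
      }
      try (IndexInput in = dir.openInput("map.bin", IOContext.DEFAULT)) {
        Map<String, String> m = in.readMapOfStrings();
        assertEquals("value", m.get("key"));
        // the returned map is read-only, which is exactly what the tests above assert
        expectThrows(UnsupportedOperationException.class, () -> m.put("k2", "v2"));
      }
    }
  }
}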
- } - } - } catch (IOException e) { - throw new UncheckedIOException(e); - } - }); + }); + + Thread reader = + new Thread( + () -> { + try { + Random rnd = new Random(RandomizedTest.randomLong()); + while (!stop.get()) { + String[] files = + Arrays.stream(dir.listAll()) + .filter( + name -> !ExtrasFS.isExtra(name)) // Ignore anything from ExtraFS. + .toArray(String[]::new); + + if (files.length > 0) { + do { + String file = RandomPicks.randomFrom(rnd, files); + try (IndexInput input = dir.openInput(file, newIOContext(random()))) { + // Just open, nothing else. + assert input != null; + } catch (AccessDeniedException e) { + // Access denied is allowed for files for which the output is still open + // (MockDirectoryWriter enforces + // this, for example). Since we don't synchronize with the writer thread, + // just ignore it. + } catch (IOException e) { + throw new UncheckedIOException( + "Something went wrong when opening: " + file, e); + } + } while (rnd.nextInt(3) != 0); // Sometimes break and list files again. + } + } + } catch (IOException e) { + throw new UncheckedIOException(e); + } + }); reader.start(); writer.start(); @@ -516,10 +538,7 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { } } - /** - * LUCENE-1468: once we create an output, we should see - * it in the dir listing. - */ + /** LUCENE-1468: once we create an output, we should see it in the dir listing. */ public void testFileExistsInListAfterCreated() throws IOException { try (Directory dir = getDirectory(createTempDir("testFileExistsInListAfterCreated"))) { String name = "file"; @@ -570,16 +589,19 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { IndexInput i = dir.openInput("out", newIOContext(random())); // Seeking past EOF should always throw EOFException - expectThrows(EOFException.class, () -> i.seek(len + RandomizedTest.randomIntBetween(1, 2048))); + expectThrows( + EOFException.class, () -> i.seek(len + RandomizedTest.randomIntBetween(1, 2048))); // Seeking exactly to EOF should never throw any exception. i.seek(len); // But any read following the seek(len) should throw an EOFException. 
expectThrows(EOFException.class, i::readByte); - expectThrows(EOFException.class, () -> { - i.readBytes(new byte [1], 0, 1); - }); + expectThrows( + EOFException.class, + () -> { + i.readBytes(new byte[1], 0, 1); + }); i.close(); } @@ -593,18 +615,24 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { o.writeBytes(b, 0, len); o.close(); IndexInput i = dir.openInput("out", newIOContext(random())); - expectThrows(IllegalArgumentException.class, () -> { - i.slice("slice1", 0, len + 1); - }); + expectThrows( + IllegalArgumentException.class, + () -> { + i.slice("slice1", 0, len + 1); + }); - expectThrows(IllegalArgumentException.class, () -> { - i.slice("slice2", -1, len); - }); + expectThrows( + IllegalArgumentException.class, + () -> { + i.slice("slice2", -1, len); + }); IndexInput slice = i.slice("slice3", 4, len / 2); - expectThrows(IllegalArgumentException.class, () -> { - slice.slice("slice3sub", 1, len / 2); - }); + expectThrows( + IllegalArgumentException.class, + () -> { + slice.slice("slice3sub", 1, len / 2); + }); i.close(); } @@ -615,9 +643,11 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { Path tempDir = createTempDir("doesnotexist"); IOUtils.rm(tempDir); try (Directory dir = getDirectory(tempDir)) { - expectThrowsAnyOf(Arrays.asList(NoSuchFileException.class, IndexNotFoundException.class), () -> { - DirectoryReader.open(dir); - }); + expectThrowsAnyOf( + Arrays.asList(NoSuchFileException.class, IndexNotFoundException.class), + () -> { + DirectoryReader.open(dir); + }); } } @@ -653,8 +683,7 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { out.writeByte(in.readByte()); upto++; } else { - final int chunk = Math.min( - TestUtil.nextInt(random(), 1, bytes.length), size - upto); + final int chunk = Math.min(TestUtil.nextInt(random(), 1, bytes.length), size - upto); out.copyBytes(in, chunk); upto += chunk; } @@ -672,8 +701,7 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { assertEquals(value(upto), v); upto++; } else { - final int limit = Math.min( - TestUtil.nextInt(random(), 1, bytes.length), size - upto); + final int limit = Math.min(TestUtil.nextInt(random(), 1, bytes.length), size - upto); in2.readBytes(bytes, 0, limit); for (int byteIdx = 0; byteIdx < limit; byteIdx++) { assertEquals(value(upto), bytes[byteIdx]); @@ -711,23 +739,27 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { // now make N copies of the remaining bytes int threads = 10; CyclicBarrier start = new CyclicBarrier(threads); - Thread copies [] = IntStream.range(0, threads) - .mapToObj((i) -> { - IndexInput src = input.clone(); - Thread t = new Thread(() -> { - try { - start.await(); - IndexOutput dst = d.createOutput("copy" + i, IOContext.DEFAULT); - dst.copyBytes(src, src.length() - headerLen); - dst.close(); - } catch (Exception e) { - throw new RuntimeException(e); - } - }); - t.start(); - return t; - }) - .toArray(Thread[]::new); + Thread copies[] = + IntStream.range(0, threads) + .mapToObj( + (i) -> { + IndexInput src = input.clone(); + Thread t = + new Thread( + () -> { + try { + start.await(); + IndexOutput dst = d.createOutput("copy" + i, IOContext.DEFAULT); + dst.copyBytes(src, src.length() - headerLen); + dst.close(); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + t.start(); + return t; + }) + .toArray(Thread[]::new); for (Thread t : copies) { t.join(); @@ -751,7 +783,8 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { Path path = 
createTempDir("nocreate"); try (Directory fsdir = getDirectory(path)) { // this test backdoors the directory via the filesystem. so it must be an FSDir (for now) - // TODO: figure a way to test this better/clean it up. E.g. we should be testing for FileSwitchDir, + // TODO: figure a way to test this better/clean it up. E.g. we should be testing for + // FileSwitchDir, // if it's using two FSdirs and so on if (fsdir instanceof FSDirectory == false) { assumeTrue("test only works for FSDirectory subclasses", false); @@ -769,15 +802,17 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { int fileCount = fsdir.listAll().length; // fsync it - expectThrowsAnyOf(Arrays.asList(FileNotFoundException.class, NoSuchFileException.class), () -> { - fsdir.sync(Collections.singleton("afile")); - }); + expectThrowsAnyOf( + Arrays.asList(FileNotFoundException.class, NoSuchFileException.class), + () -> { + fsdir.sync(Collections.singleton("afile")); + }); // no new files created assertEquals(fileCount, fsdir.listAll().length); } } - + // random access APIs public void testRandomLong() throws Exception { try (Directory dir = getDirectory(createTempDir("testLongs"))) { @@ -827,7 +862,7 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { input.close(); } } - + public void testRandomInt() throws Exception { try (Directory dir = getDirectory(createTempDir("testInts"))) { IndexOutput output = dir.createOutput("ints", newIOContext(random())); @@ -875,7 +910,7 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { input.close(); } } - + public void testRandomShort() throws Exception { try (Directory dir = getDirectory(createTempDir("testShorts"))) { IndexOutput output = dir.createOutput("shorts", newIOContext(random())); @@ -923,7 +958,7 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { input.close(); } } - + public void testRandomByte() throws Exception { try (Directory dir = getDirectory(createTempDir("testBytes"))) { IndexOutput output = dir.createOutput("bytes", newIOContext(random())); @@ -971,7 +1006,7 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { input.close(); } } - + /** try to stress slices of slices */ public void testSliceOfSlice() throws Exception { try (Directory dir = getDirectory(createTempDir("sliceOfSlice"))) { @@ -1023,10 +1058,10 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { input.close(); } } - - /** - * This test that writes larger than the size of the buffer output - * will correctly increment the file pointer. + + /** + * This test that writes larger than the size of the buffer output will correctly increment the + * file pointer. 
*/ public void testLargeWrites() throws IOException { try (Directory dir = getDirectory(createTempDir("largeWrites"))) { @@ -1054,7 +1089,7 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { out.close(); } } - + public void testDoubleCloseOutput() throws Throwable { try (Directory dir = getDirectory(createTempDir())) { IndexOutput out = dir.createOutput("foobar", newIOContext(random())); @@ -1063,7 +1098,7 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { out.close(); // close again } } - + public void testDoubleCloseInput() throws Throwable { try (Directory dir = getDirectory(createTempDir())) { IndexOutput out = dir.createOutput("foobar", newIOContext(random())); @@ -1092,9 +1127,10 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { in.close(); } - Set files = Arrays.stream(dir.listAll()) - .filter(file -> !ExtrasFS.isExtra(file)) // remove any ExtrasFS stuff. - .collect(Collectors.toSet()); + Set files = + Arrays.stream(dir.listAll()) + .filter(file -> !ExtrasFS.isExtra(file)) // remove any ExtrasFS stuff. + .collect(Collectors.toSet()); assertEquals(new HashSet(names), files); } @@ -1108,11 +1144,13 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { } // Try to create an existing file should fail. - expectThrows(FileAlreadyExistsException.class, () -> { - try (IndexOutput out = dir.createOutput(name, IOContext.DEFAULT)) { - assert out != null; - } - }); + expectThrows( + FileAlreadyExistsException.class, + () -> { + try (IndexOutput out = dir.createOutput(name, IOContext.DEFAULT)) { + assert out != null; + } + }); // Delete file and try to recreate it. dir.deleteFile(name); @@ -1146,24 +1184,33 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { try (IndexInput in = dir.openInput("a", IOContext.DEFAULT)) { in.seek(100); assertEquals(100, in.getFilePointer()); - expectThrows(EOFException.class, () -> { - in.seek(1025); - }); + expectThrows( + EOFException.class, + () -> { + in.seek(1025); + }); } } } - // Make sure the FSDirectory impl properly "emulates" deletions on filesystems (Windows) with buggy deleteFile: + // Make sure the FSDirectory impl properly "emulates" deletions on filesystems (Windows) with + // buggy deleteFile: public void testPendingDeletions() throws IOException { try (Directory dir = getDirectory(addVirusChecker(createTempDir()))) { - assumeTrue("we can only install VirusCheckingFS on an FSDirectory", dir instanceof FSDirectory); + assumeTrue( + "we can only install VirusCheckingFS on an FSDirectory", dir instanceof FSDirectory); FSDirectory fsDir = (FSDirectory) dir; // Keep trying until virus checker refuses to delete: final String fileName; while (true) { - // create a random filename (segment file name style), so it cannot hit windows problem with special filenames ("con", "com1",...): - String candidate = IndexFileNames.segmentFileName(TestUtil.randomSimpleString(random(), 1, 6), TestUtil.randomSimpleString(random()), "test"); + // create a random filename (segment file name style), so it cannot hit windows problem with + // special filenames ("con", "com1",...): + String candidate = + IndexFileNames.segmentFileName( + TestUtil.randomSimpleString(random(), 1, 6), + TestUtil.randomSimpleString(random()), + "test"); try (IndexOutput out = dir.createOutput(candidate, IOContext.DEFAULT)) { out.getFilePointer(); // just fake access to prevent compiler warning } @@ -1179,24 +1226,32 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { 
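The hunk that follows belongs to testPendingDeletions, which keeps deleting random segment-style files until the virus checker refuses, then verifies that every follow-up operation reports the file as missing. Where two JDK exception types are plausible, these tests use expectThrowsAnyOf; a stand-alone sketch of that idiom (test class name is hypothetical):

import java.io.FileNotFoundException;
import java.nio.file.NoSuchFileException;
import java.util.Arrays;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;

public class TestMissingFileSketch extends LuceneTestCase {
  public void testDeleteMissingFile() throws Exception {
    try (Directory dir = newDirectory()) {
      // NIO-backed directories surface a missing file as NoSuchFileException,
      // older IO paths as FileNotFoundException; the helper accepts either type.
      expectThrowsAnyOf(
          Arrays.asList(NoSuchFileException.class, FileNotFoundException.class),
          () -> dir.deleteFile("does-not-exist"));
    }
  }
}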
assertFalse(Arrays.asList(fsDir.listAll()).contains(fileName)); // Make sure fileLength claims it's deleted: - expectThrows(NoSuchFileException.class, () -> { - fsDir.fileLength(fileName); - }); + expectThrows( + NoSuchFileException.class, + () -> { + fsDir.fileLength(fileName); + }); // Make sure rename fails: - expectThrows(NoSuchFileException.class, () -> { - fsDir.rename(fileName, "file2"); - }); + expectThrows( + NoSuchFileException.class, + () -> { + fsDir.rename(fileName, "file2"); + }); // Make sure delete fails: - expectThrows(NoSuchFileException.class, () -> { - fsDir.deleteFile(fileName); - }); + expectThrows( + NoSuchFileException.class, + () -> { + fsDir.deleteFile(fileName); + }); // Make sure we cannot open it for reading: - expectThrows(NoSuchFileException.class, () -> { - fsDir.openInput(fileName, IOContext.DEFAULT); - }); + expectThrows( + NoSuchFileException.class, + () -> { + fsDir.openInput(fileName, IOContext.DEFAULT); + }); } } @@ -1205,8 +1260,13 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase { int count = atLeast(20); Set names = new HashSet<>(); while (names.size() < count) { - // create a random filename (segment file name style), so it cannot hit windows problem with special filenames ("con", "com1",...): - String name = IndexFileNames.segmentFileName(TestUtil.randomSimpleString(random(), 1, 6), TestUtil.randomSimpleString(random()), "test"); + // create a random filename (segment file name style), so it cannot hit windows problem with + // special filenames ("con", "com1",...): + String name = + IndexFileNames.segmentFileName( + TestUtil.randomSimpleString(random(), 1, 6), + TestUtil.randomSimpleString(random()), + "test"); if (random().nextInt(5) == 1) { IndexOutput out = dir.createTempOutput(name, "foo", IOContext.DEFAULT); names.add(out.getName()); diff --git a/lucene/test-framework/src/java/org/apache/lucene/store/BaseDirectoryWrapper.java b/lucene/test-framework/src/java/org/apache/lucene/store/BaseDirectoryWrapper.java index e73a57c7fbc..5d4cbb1e0e0 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/store/BaseDirectoryWrapper.java +++ b/lucene/test-framework/src/java/org/apache/lucene/store/BaseDirectoryWrapper.java @@ -17,18 +17,15 @@ package org.apache.lucene.store; import java.io.IOException; - import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.util.TestUtil; -/** - * Calls check index on close. - */ +/** Calls check index on close. */ // do NOT make any methods in this class synchronized, volatile // do NOT import anything from the concurrency package. // no randoms, no nothing. 
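BaseDirectoryWrapper, declared next, is essentially a FilterDirectory with a one-shot close hook. A reduced sketch of that pattern, with hypothetical class and hook names rather than the actual implementation:

import java.io.IOException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FilterDirectory;

public abstract class CheckOnCloseDirectory extends FilterDirectory {
  protected volatile boolean isOpen = true;

  protected CheckOnCloseDirectory(Directory in) {
    super(in);
  }

  /** Hook invoked exactly once, before the delegate is closed. */
  protected abstract void onClose() throws IOException;

  @Override
  public void close() throws IOException {
    if (isOpen) {
      isOpen = false; // mark closed first so a failing hook cannot re-trigger
      onClose();
    }
    super.close();
  }
}

The real class guards the hook with its checkIndexOnClose and doSlowChecksOnClose flags instead of an abstract method, running CheckIndex against the wrapped directory when enabled.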
public abstract class BaseDirectoryWrapper extends FilterDirectory { - + private boolean checkIndexOnClose = true; private boolean doSlowChecksOnClose = true; protected volatile boolean isOpen = true; @@ -47,19 +44,16 @@ public abstract class BaseDirectoryWrapper extends FilterDirectory { } super.close(); } - + public boolean isOpen() { return isOpen; } - - /** - * Set whether or not checkindex should be run - * on close - */ + + /** Set whether or not checkindex should be run on close */ public void setCheckIndexOnClose(boolean value) { this.checkIndexOnClose = value; } - + public boolean getCheckIndexOnClose() { return checkIndexOnClose; } diff --git a/lucene/test-framework/src/java/org/apache/lucene/store/BaseLockFactoryTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/store/BaseLockFactoryTestCase.java index 7a93ca251c8..8e482696133 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/store/BaseLockFactoryTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/store/BaseLockFactoryTestCase.java @@ -26,15 +26,14 @@ import java.util.concurrent.CyclicBarrier; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.ReentrantLock; - import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; @@ -46,43 +45,46 @@ import org.apache.lucene.util.TestUtil; /** Base class for per-LockFactory tests. */ public abstract class BaseLockFactoryTestCase extends LuceneTestCase { - - /** Subclass returns the Directory to be tested; if it's - * an FS-based directory it should point to the specified - * path, else it can ignore it. */ + + /** + * Subclass returns the Directory to be tested; if it's an FS-based directory it should point to + * the specified path, else it can ignore it. 
+ */ protected abstract Directory getDirectory(Path path) throws IOException; - + /** Test obtaining and releasing locks, checking validity */ public void testBasics() throws IOException { Path tempPath = createTempDir(); Directory dir = getDirectory(tempPath); - + Lock l = dir.obtainLock("commit"); // shouldn't be able to get the lock twice - expectThrows(LockObtainFailedException.class, () -> { - dir.obtainLock("commit"); - }); + expectThrows( + LockObtainFailedException.class, + () -> { + dir.obtainLock("commit"); + }); l.close(); - + // Make sure we can obtain first one again: l = dir.obtainLock("commit"); l.close(); - + dir.close(); } - + /** Test closing locks twice */ public void testDoubleClose() throws IOException { Path tempPath = createTempDir(); Directory dir = getDirectory(tempPath); - + Lock l = dir.obtainLock("commit"); l.close(); l.close(); // close again, should be no exception - + dir.close(); } - + /** Test ensureValid returns true after acquire */ public void testValidAfterAcquire() throws IOException { Path tempPath = createTempDir(); @@ -92,22 +94,24 @@ public abstract class BaseLockFactoryTestCase extends LuceneTestCase { l.close(); dir.close(); } - + /** Test ensureValid throws exception after close */ public void testInvalidAfterClose() throws IOException { Path tempPath = createTempDir(); Directory dir = getDirectory(tempPath); - + Lock l = dir.obtainLock("commit"); l.close(); - expectThrows(AlreadyClosedException.class, () -> { - l.ensureValid(); - }); + expectThrows( + AlreadyClosedException.class, + () -> { + l.ensureValid(); + }); dir.close(); } - + public void testObtainConcurrently() throws InterruptedException, IOException { Path tempPath = createTempDir(); final Directory directory = getDirectory(tempPath); @@ -119,41 +123,42 @@ public abstract class BaseLockFactoryTestCase extends LuceneTestCase { CyclicBarrier barrier = new CyclicBarrier(numThreads); Thread[] threads = new Thread[numThreads]; for (int i = 0; i < threads.length; i++) { - threads[i] = new Thread() { - @Override - public void run() { - try { - barrier.await(); - } catch (Exception e) { - throw new RuntimeException(e); - } - while (running.get()) { - try (Lock lock = directory.obtainLock("foo.lock")) { - assertFalse(assertingLock.isLocked()); - if (assertingLock.tryLock()) { - assertingLock.unlock(); - } else { - fail(); + threads[i] = + new Thread() { + @Override + public void run() { + try { + barrier.await(); + } catch (Exception e) { + throw new RuntimeException(e); + } + while (running.get()) { + try (Lock lock = directory.obtainLock("foo.lock")) { + assertFalse(assertingLock.isLocked()); + if (assertingLock.tryLock()) { + assertingLock.unlock(); + } else { + fail(); + } + assert lock != null; // stupid compiler + } catch (IOException ex) { + // + } + if (atomicCounter.incrementAndGet() > runs) { + running.set(false); + } } - assert lock != null; // stupid compiler - } catch (IOException ex) { - // } - if (atomicCounter.incrementAndGet() > runs) { - running.set(false); - } - } - } - }; + }; threads[i].start(); } - + for (int i = 0; i < threads.length; i++) { threads[i].join(); } directory.close(); } - + // Verify: do stress test, by opening IndexReaders and // IndexWriters over & over in 2 threads and making sure // no unexpected exceptions are raised: @@ -164,10 +169,12 @@ public abstract class BaseLockFactoryTestCase extends LuceneTestCase { Directory dir = getDirectory(tempPath); // First create a 1 doc index: - IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new 
MockAnalyzer(random())).setOpenMode(OpenMode.CREATE)); + IndexWriter w = + new IndexWriter( + dir, new IndexWriterConfig(new MockAnalyzer(random())).setOpenMode(OpenMode.CREATE)); addDoc(w); w.close(); - + int numIterations = atLeast(20); WriterThread writer = new WriterThread(numIterations, dir); SearcherThread searcher = new SearcherThread(numIterations, dir); @@ -179,20 +186,21 @@ public abstract class BaseLockFactoryTestCase extends LuceneTestCase { assertTrue("IndexWriter hit unexpected exceptions", !writer.hitException); assertTrue("IndexSearcher hit unexpected exceptions", !searcher.hitException); - + dir.close(); } - + private void addDoc(IndexWriter writer) throws IOException { Document doc = new Document(); doc.add(newTextField("content", "aaa", Field.Store.NO)); writer.addDocument(doc); } - - private class WriterThread extends Thread { + + private class WriterThread extends Thread { private Directory dir; private int numIteration; public boolean hitException = false; + public WriterThread(int numIteration, Directory dir) { this.numIteration = numIteration; this.dir = dir; @@ -206,12 +214,12 @@ public abstract class BaseLockFactoryTestCase extends LuceneTestCase { throw new RuntimeException(uee); } } - + @Override public void run() { IndexWriter writer = null; ByteArrayOutputStream baos = new ByteArrayOutputStream(); - for(int i=0;i= in.length()) { - throw new IllegalArgumentException("byteToCorrupt=" + byteToCorrupt + " but file \"" + out.getName() + "\" is only length=" + in.length()); + throw new IllegalArgumentException( + "byteToCorrupt=" + + byteToCorrupt + + " but file \"" + + out.getName() + + "\" is only length=" + + in.length()); } tmpOut.copyBytes(in, byteToCorrupt); // Flip the 0th bit: tmpOut.writeByte((byte) (in.readByte() ^ 1)); - tmpOut.copyBytes(in, in.length()-byteToCorrupt-1); + tmpOut.copyBytes(in, in.length() - byteToCorrupt - 1); } // Delete original and copy corrupt version back: @@ -94,8 +101,8 @@ public class CorruptingIndexOutput extends IndexOutput { @Override public void writeBytes(byte[] b, int offset, int length) throws IOException { - for(int i=0;i - *

  <li>Instances created by {@link LuceneTestCase#newDirectory()} are tracked - * to ensure they are closed by the test. - *
  <li>When a MockDirectoryWrapper is closed, it will throw an exception if - * it has any open files against it (with a stacktrace indicating where - * they were opened from). - *
  <li>When a MockDirectoryWrapper is closed, it runs CheckIndex to test if - * the index was corrupted. - *
  <li>MockDirectoryWrapper simulates some "features" of Windows, such as - * refusing to write/delete to open files. + *
  <li>Instances created by {@link LuceneTestCase#newDirectory()} are tracked to ensure they are + * closed by the test. + *
  <li>When a MockDirectoryWrapper is closed, it will throw an exception if it has any open files + * against it (with a stacktrace indicating where they were opened from). + *
  <li>When a MockDirectoryWrapper is closed, it runs CheckIndex to test if the index was + * corrupted. + *
  14. MockDirectoryWrapper simulates some "features" of Windows, such as refusing to write/delete + * to open files. * */ public class MockDirectoryWrapper extends BaseDirectoryWrapper { @@ -85,10 +83,11 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { private Set unSyncedFiles; private Set createdFiles; private Set openFilesForWrite = new HashSet<>(); - ConcurrentMap openLocks = new ConcurrentHashMap<>(); + ConcurrentMap openLocks = new ConcurrentHashMap<>(); volatile boolean crashed; private ThrottledIndexOutput throttledOutput; - private Throttling throttling = LuceneTestCase.TEST_NIGHTLY ? Throttling.SOMETIMES : Throttling.NEVER; + private Throttling throttling = + LuceneTestCase.TEST_NIGHTLY ? Throttling.SOMETIMES : Throttling.NEVER; // for testing boolean alwaysCorrupt; @@ -97,28 +96,27 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { // use this for tracking files for crash. // additionally: provides debugging information in case you leave one open - private Map openFileHandles = Collections.synchronizedMap(new IdentityHashMap()); + private Map openFileHandles = + Collections.synchronizedMap(new IdentityHashMap()); // NOTE: we cannot initialize the Map here due to the // order in which our constructor actually does this // member initialization vs when it calls super. It seems // like super is called, then our members are initialized: - private Map openFiles; + private Map openFiles; // Only tracked if noDeleteOpenFile is true: if an attempt // is made to delete an open file, we enroll it here. private Set openFilesDeleted; - + private synchronized void init() { if (openFiles == null) { openFiles = new HashMap<>(); openFilesDeleted = new HashSet<>(); } - if (createdFiles == null) - createdFiles = new HashSet<>(); - if (unSyncedFiles == null) - unSyncedFiles = new HashSet<>(); + if (createdFiles == null) createdFiles = new HashSet<>(); + if (unSyncedFiles == null) unSyncedFiles = new HashSet<>(); } public MockDirectoryWrapper(Random random, Directory delegate) { @@ -127,20 +125,23 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { // called from different threads; else test failures may // not be reproducible from the original seed this.randomState = new Random(random.nextInt()); - this.throttledOutput = new ThrottledIndexOutput(ThrottledIndexOutput - .mBitsToBytes(40 + randomState.nextInt(10)), 1 + randomState.nextInt(5), null); + this.throttledOutput = + new ThrottledIndexOutput( + ThrottledIndexOutput.mBitsToBytes(40 + randomState.nextInt(10)), + 1 + randomState.nextInt(5), + null); init(); } public int getInputCloneCount() { return inputCloneCount.get(); } - + boolean verboseClone; - - /** - * If set to true, we print a fake exception - * with filename and stacktrace on every indexinput clone() + + /** + * If set to true, we print a fake exception with filename and stacktrace on every indexinput + * clone() */ public void setVerboseClone(boolean v) { verboseClone = v; @@ -150,25 +151,24 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { trackDiskUsage = v; } - /** If set to true (the default), when we throw random - * IOException on openInput or createOutput, we may - * sometimes throw FileNotFoundException or - * NoSuchFileException. */ + /** + * If set to true (the default), when we throw random IOException on openInput or createOutput, we + * may sometimes throw FileNotFoundException or NoSuchFileException. 
+ */ public void setAllowRandomFileNotFoundException(boolean value) { allowRandomFileNotFoundException = value; } - - /** If set to true, you can open an inputstream on a file - * that is still open for writes. */ + + /** If set to true, you can open an inputstream on a file that is still open for writes. */ public void setAllowReadingFilesStillOpenForWrite(boolean value) { allowReadingFilesStillOpenForWrite = value; } - + /** - * Enum for controlling hard disk throttling. - * Set via {@link MockDirectoryWrapper #setThrottling(Throttling)} - *
<p>
    - * WARNING: can make tests very slow. + * Enum for controlling hard disk throttling. Set via {@link MockDirectoryWrapper + * #setThrottling(Throttling)} + * + *
<p>
    WARNING: can make tests very slow. */ public static enum Throttling { /** always emulate a slow hard disk. could be very slow! */ @@ -178,15 +178,15 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { /** never throttle output */ NEVER } - + public void setThrottling(Throttling throttling) { this.throttling = throttling; } - - /** + + /** * Add a rare small sleep to catch race conditions in open/close - *
<p>
    - * You can enable this if you need it. + * + *
<p>
    You can enable this if you need it. */ public void setUseSlowOpenClosers(boolean v) { useSlowOpenClosers = v; @@ -217,13 +217,21 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { if (crashed) { throw new IOException("cannot rename after crash"); } - + if (openFiles.containsKey(source) && assertNoDeleteOpenFile) { - throw fillOpenTrace(new AssertionError("MockDirectoryWrapper: source file \"" + source + "\" is still open: cannot rename"), source, true); + throw fillOpenTrace( + new AssertionError( + "MockDirectoryWrapper: source file \"" + source + "\" is still open: cannot rename"), + source, + true); } if (openFiles.containsKey(dest) && assertNoDeleteOpenFile) { - throw fillOpenTrace(new AssertionError("MockDirectoryWrapper: dest file \"" + dest + "\" is still open: cannot rename"), dest, true); + throw fillOpenTrace( + new AssertionError( + "MockDirectoryWrapper: dest file \"" + dest + "\" is still open: cannot rename"), + dest, + true); } boolean success = false; @@ -254,7 +262,7 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { in.syncMetaData(); } - public synchronized final long sizeInBytes() throws IOException { + public final synchronized long sizeInBytes() throws IOException { long size = 0; for (String file : in.listAll()) { // hack 2: see TODO in ExtrasFS (ideally it would always return 0 byte @@ -271,7 +279,7 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { System.out.println("MDW: corrupt unknown files"); } Set knownFiles = new HashSet<>(); - for(String fileName : listAll()) { + for (String fileName : listAll()) { if (fileName.startsWith(IndexFileNames.SEGMENTS)) { if (LuceneTestCase.VERBOSE) { System.out.println("MDW: read " + fileName + " to gather files it references"); @@ -281,7 +289,11 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { infos = SegmentInfos.readCommit(this, fileName); } catch (IOException ioe) { if (LuceneTestCase.VERBOSE) { - System.out.println("MDW: exception reading segment infos " + fileName + "; files: " + Arrays.toString(listAll())); + System.out.println( + "MDW: exception reading segment infos " + + fileName + + "; files: " + + Arrays.toString(listAll())); } throw ioe; } @@ -291,11 +303,11 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { Set toCorrupt = new HashSet<>(); Matcher m = IndexFileNames.CODEC_FILE_PATTERN.matcher(""); - for(String fileName : listAll()) { + for (String fileName : listAll()) { m.reset(fileName); - if (knownFiles.contains(fileName) == false && - fileName.endsWith("write.lock") == false && - (m.matches() || fileName.startsWith(IndexFileNames.PENDING_SEGMENTS))) { + if (knownFiles.contains(fileName) == false + && fileName.endsWith("write.lock") == false + && (m.matches() || fileName.startsWith(IndexFileNames.PENDING_SEGMENTS))) { toCorrupt.add(fileName); } } @@ -313,156 +325,174 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { } } } - + private synchronized void _corruptFiles(Collection files) throws IOException { // TODO: we should also mess with any recent file renames, file deletions, if // syncMetaData was not called!! - + // Must make a copy because we change the incoming unsyncedFiles // when we create temp files, delete, etc., below: final List filesToCorrupt = new ArrayList<>(files); // sort the files otherwise we have reproducibility issues // across JVMs if the incoming collection is a hashSet etc. 
CollectionUtil.timSort(filesToCorrupt); - for(String name : filesToCorrupt) { + for (String name : filesToCorrupt) { int damage = randomState.nextInt(6); if (alwaysCorrupt && damage == 3) { damage = 4; } String action = null; - switch(damage) { + switch (damage) { + case 0: + action = "deleted"; + deleteFile(name); + break; - case 0: - action = "deleted"; - deleteFile(name); - break; - - case 1: - action = "zeroed"; - // Zero out file entirely - long length; - try { - length = fileLength(name); - } catch (IOException ioe) { - throw new RuntimeException("hit unexpected IOException while trying to corrupt file " + name, ioe); - } - - // Delete original and write zeros back: - deleteFile(name); - - byte[] zeroes = new byte[256]; - long upto = 0; - try (IndexOutput out = in.createOutput(name, LuceneTestCase.newIOContext(randomState))) { - while(upto < length) { - final int limit = (int) Math.min(length-upto, zeroes.length); - out.writeBytes(zeroes, 0, limit); - upto += limit; - } - } catch (IOException ioe) { - throw new RuntimeException("hit unexpected IOException while trying to corrupt file " + name, ioe); - } - break; - - case 2: - { - action = "partially truncated"; - // Partially Truncate the file: - - // First, make temp file and copy only half this - // file over: - String tempFileName = null; - try (IndexOutput tempOut = in.createTempOutput("name", "mdw_corrupt", LuceneTestCase.newIOContext(randomState)); - IndexInput ii = in.openInput(name, LuceneTestCase.newIOContext(randomState))) { - tempFileName = tempOut.getName(); - tempOut.copyBytes(ii, ii.length()/2); + case 1: + action = "zeroed"; + // Zero out file entirely + long length; + try { + length = fileLength(name); } catch (IOException ioe) { - throw new RuntimeException("hit unexpected IOException while trying to corrupt file " + name, ioe); + throw new RuntimeException( + "hit unexpected IOException while trying to corrupt file " + name, ioe); } - // Delete original and copy bytes back: + // Delete original and write zeros back: deleteFile(name); - try (IndexOutput out = in.createOutput(name, LuceneTestCase.newIOContext(randomState)); - IndexInput ii = in.openInput(tempFileName, LuceneTestCase.newIOContext(randomState))) { - out.copyBytes(ii, ii.length()); - } catch (IOException ioe) { - throw new RuntimeException("hit unexpected IOException while trying to corrupt file " + name, ioe); - } - deleteFile(tempFileName); - } - break; - - case 3: - // The file survived intact: - action = "didn't change"; - break; - - case 4: - // Corrupt one bit randomly in the file: - - { - - String tempFileName = null; - try (IndexOutput tempOut = in.createTempOutput("name", "mdw_corrupt", LuceneTestCase.newIOContext(randomState)); - IndexInput ii = in.openInput(name, LuceneTestCase.newIOContext(randomState))) { - tempFileName = tempOut.getName(); - if (ii.length() > 0) { - // Copy first part unchanged: - long byteToCorrupt = (long) (randomState.nextDouble() * ii.length()); - if (byteToCorrupt > 0) { - tempOut.copyBytes(ii, byteToCorrupt); - } - - // Randomly flip one bit from this byte: - byte b = ii.readByte(); - int bitToFlip = randomState.nextInt(8); - b = (byte) (b ^ (1 << bitToFlip)); - tempOut.writeByte(b); - - action = "flip bit " + bitToFlip + " of byte " + byteToCorrupt + " out of " + ii.length() + " bytes"; - - // Copy last part unchanged: - long bytesLeft = ii.length() - byteToCorrupt - 1; - if (bytesLeft > 0) { - tempOut.copyBytes(ii, bytesLeft); - } - } else { - action = "didn't change"; + byte[] zeroes = new byte[256]; + long upto = 
0; + try (IndexOutput out = in.createOutput(name, LuceneTestCase.newIOContext(randomState))) { + while (upto < length) { + final int limit = (int) Math.min(length - upto, zeroes.length); + out.writeBytes(zeroes, 0, limit); + upto += limit; } } catch (IOException ioe) { - throw new RuntimeException("hit unexpected IOException while trying to corrupt file " + name, ioe); + throw new RuntimeException( + "hit unexpected IOException while trying to corrupt file " + name, ioe); } + break; - // Delete original and copy bytes back: + case 2: + { + action = "partially truncated"; + // Partially Truncate the file: + + // First, make temp file and copy only half this + // file over: + String tempFileName = null; + try (IndexOutput tempOut = + in.createTempOutput( + "name", "mdw_corrupt", LuceneTestCase.newIOContext(randomState)); + IndexInput ii = in.openInput(name, LuceneTestCase.newIOContext(randomState))) { + tempFileName = tempOut.getName(); + tempOut.copyBytes(ii, ii.length() / 2); + } catch (IOException ioe) { + throw new RuntimeException( + "hit unexpected IOException while trying to corrupt file " + name, ioe); + } + + // Delete original and copy bytes back: + deleteFile(name); + + try (IndexOutput out = in.createOutput(name, LuceneTestCase.newIOContext(randomState)); + IndexInput ii = + in.openInput(tempFileName, LuceneTestCase.newIOContext(randomState))) { + out.copyBytes(ii, ii.length()); + } catch (IOException ioe) { + throw new RuntimeException( + "hit unexpected IOException while trying to corrupt file " + name, ioe); + } + deleteFile(tempFileName); + } + break; + + case 3: + // The file survived intact: + action = "didn't change"; + break; + + case 4: + // Corrupt one bit randomly in the file: + + { + String tempFileName = null; + try (IndexOutput tempOut = + in.createTempOutput( + "name", "mdw_corrupt", LuceneTestCase.newIOContext(randomState)); + IndexInput ii = in.openInput(name, LuceneTestCase.newIOContext(randomState))) { + tempFileName = tempOut.getName(); + if (ii.length() > 0) { + // Copy first part unchanged: + long byteToCorrupt = (long) (randomState.nextDouble() * ii.length()); + if (byteToCorrupt > 0) { + tempOut.copyBytes(ii, byteToCorrupt); + } + + // Randomly flip one bit from this byte: + byte b = ii.readByte(); + int bitToFlip = randomState.nextInt(8); + b = (byte) (b ^ (1 << bitToFlip)); + tempOut.writeByte(b); + + action = + "flip bit " + + bitToFlip + + " of byte " + + byteToCorrupt + + " out of " + + ii.length() + + " bytes"; + + // Copy last part unchanged: + long bytesLeft = ii.length() - byteToCorrupt - 1; + if (bytesLeft > 0) { + tempOut.copyBytes(ii, bytesLeft); + } + } else { + action = "didn't change"; + } + } catch (IOException ioe) { + throw new RuntimeException( + "hit unexpected IOException while trying to corrupt file " + name, ioe); + } + + // Delete original and copy bytes back: + deleteFile(name); + + try (IndexOutput out = in.createOutput(name, LuceneTestCase.newIOContext(randomState)); + IndexInput ii = + in.openInput(tempFileName, LuceneTestCase.newIOContext(randomState))) { + out.copyBytes(ii, ii.length()); + } catch (IOException ioe) { + throw new RuntimeException( + "hit unexpected IOException while trying to corrupt file " + name, ioe); + } + + deleteFile(tempFileName); + } + break; + + case 5: + action = "fully truncated"; + // Totally truncate the file to zero bytes deleteFile(name); - try (IndexOutput out = in.createOutput(name, LuceneTestCase.newIOContext(randomState)); - IndexInput ii = in.openInput(tempFileName, 
LuceneTestCase.newIOContext(randomState))) { - out.copyBytes(ii, ii.length()); + try (IndexOutput out = in.createOutput(name, LuceneTestCase.newIOContext(randomState))) { + out.getFilePointer(); // just fake access to prevent compiler warning } catch (IOException ioe) { - throw new RuntimeException("hit unexpected IOException while trying to corrupt file " + name, ioe); + throw new RuntimeException( + "hit unexpected IOException while trying to corrupt file " + name, ioe); } + break; - deleteFile(tempFileName); - } - break; - - case 5: - action = "fully truncated"; - // Totally truncate the file to zero bytes - deleteFile(name); - - try (IndexOutput out = in.createOutput(name, LuceneTestCase.newIOContext(randomState))) { - out.getFilePointer(); // just fake access to prevent compiler warning - } catch (IOException ioe) { - throw new RuntimeException("hit unexpected IOException while trying to corrupt file " + name, ioe); - } - break; - - default: - throw new AssertionError(); + default: + throw new AssertionError(); } if (LuceneTestCase.VERBOSE) { @@ -471,19 +501,19 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { } } - /** Simulates a crash of OS or machine by overwriting - * unsynced files. */ + /** Simulates a crash of OS or machine by overwriting unsynced files. */ public synchronized void crash() throws IOException { openFiles = new HashMap<>(); openFilesForWrite = new HashSet<>(); openFilesDeleted = new HashSet<>(); // first force-close all files, so we can corrupt on windows etc. // clone the file map, as these guys want to remove themselves on close. - Map m = new IdentityHashMap<>(openFileHandles); + Map m = new IdentityHashMap<>(openFileHandles); for (Closeable f : m.keySet()) { try { f.close(); - } catch (Exception ignored) {} + } catch (Exception ignored) { + } } corruptFiles(unSyncedFiles); crashed = true; @@ -498,67 +528,63 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { public void setMaxSizeInBytes(long maxSize) { this.maxSize = maxSize; } + public long getMaxSizeInBytes() { return this.maxSize; } - /** - * Returns the peek actual storage used (bytes) in this - * directory. - */ + /** Returns the peek actual storage used (bytes) in this directory. */ public long getMaxUsedSizeInBytes() { return this.maxUsedSize; } + public void resetMaxUsedSizeInBytes() throws IOException { this.maxUsedSize = sizeInBytes(); } - /** - * Trip a test assert if there is an attempt - * to delete an open file. - */ + /** Trip a test assert if there is an attempt to delete an open file. */ public void setAssertNoDeleteOpenFile(boolean value) { this.assertNoDeleteOpenFile = value; } - + public boolean getAssertNoDeleteOpenFile() { return assertNoDeleteOpenFile; } /** - * If 0.0, no exceptions will be thrown. Else this should - * be a double 0.0 - 1.0. We will randomly throw an - * IOException on the first write to an OutputStream based - * on this probability. + * If 0.0, no exceptions will be thrown. Else this should be a double 0.0 - 1.0. We will randomly + * throw an IOException on the first write to an OutputStream based on this probability. */ public void setRandomIOExceptionRate(double rate) { randomIOExceptionRate = rate; } - + public double getRandomIOExceptionRate() { return randomIOExceptionRate; } /** - * If 0.0, no exceptions will be thrown during openInput - * and createOutput. 
Else this should - * be a double 0.0 - 1.0 and we will randomly throw an - * IOException in openInput and createOutput with + * If 0.0, no exceptions will be thrown during openInput and createOutput. Else this should be a + * double 0.0 - 1.0 and we will randomly throw an IOException in openInput and createOutput with * this probability. */ public void setRandomIOExceptionRateOnOpen(double rate) { randomIOExceptionRateOnOpen = rate; } - + public double getRandomIOExceptionRateOnOpen() { return randomIOExceptionRateOnOpen; } void maybeThrowIOException(String message) throws IOException { if (randomState.nextDouble() < randomIOExceptionRate) { - IOException ioe = new IOException("a random IOException" + (message == null ? "" : " (" + message + ")")); + IOException ioe = + new IOException("a random IOException" + (message == null ? "" : " (" + message + ")")); if (LuceneTestCase.VERBOSE) { - System.out.println(Thread.currentThread().getName() + ": MockDirectoryWrapper: now throw random exception" + (message == null ? "" : " (" + message + ")")); + System.out.println( + Thread.currentThread().getName() + + ": MockDirectoryWrapper: now throw random exception" + + (message == null ? "" : " (" + message + ")")); ioe.printStackTrace(System.out); } throw ioe; @@ -568,17 +594,22 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { void maybeThrowIOExceptionOnOpen(String name) throws IOException { if (randomState.nextDouble() < randomIOExceptionRateOnOpen) { if (LuceneTestCase.VERBOSE) { - System.out.println(Thread.currentThread().getName() + ": MockDirectoryWrapper: now throw random exception during open file=" + name); + System.out.println( + Thread.currentThread().getName() + + ": MockDirectoryWrapper: now throw random exception during open file=" + + name); new Throwable().printStackTrace(System.out); } if (allowRandomFileNotFoundException == false || randomState.nextBoolean()) { throw new IOException("a random IOException (" + name + ")"); } else { - throw randomState.nextBoolean() ? new FileNotFoundException("a random IOException (" + name + ")") : new NoSuchFileException("a random IOException (" + name + ")"); + throw randomState.nextBoolean() + ? 
new FileNotFoundException("a random IOException (" + name + ")") + : new NoSuchFileException("a random IOException (" + name + ")"); } } } - + /** returns current open file handle count */ public synchronized long getFileHandleCount() { return openFileHandles.size(); @@ -597,7 +628,11 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { if (openFiles.containsKey(name)) { openFilesDeleted.add(name); if (assertNoDeleteOpenFile) { - throw fillOpenTrace(new IOException("MockDirectoryWrapper: file \"" + name + "\" is still open: cannot delete"), name, true); + throw fillOpenTrace( + new IOException( + "MockDirectoryWrapper: file \"" + name + "\" is still open: cannot delete"), + name, + true); } } else { openFilesDeleted.remove(name); @@ -611,11 +646,15 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { // sets the cause of the incoming ioe to be the stack // trace when the offending file name was opened private synchronized T fillOpenTrace(T t, String name, boolean input) { - for(Map.Entry ent : openFileHandles.entrySet()) { - if (input && ent.getKey() instanceof MockIndexInputWrapper && ((MockIndexInputWrapper) ent.getKey()).name.equals(name)) { + for (Map.Entry ent : openFileHandles.entrySet()) { + if (input + && ent.getKey() instanceof MockIndexInputWrapper + && ((MockIndexInputWrapper) ent.getKey()).name.equals(name)) { t.initCause(ent.getValue()); break; - } else if (!input && ent.getKey() instanceof MockIndexOutputWrapper && ((MockIndexOutputWrapper) ent.getKey()).name.equals(name)) { + } else if (!input + && ent.getKey() instanceof MockIndexOutputWrapper + && ((MockIndexOutputWrapper) ent.getKey()).name.equals(name)) { t.initCause(ent.getValue()); break; } @@ -638,7 +677,7 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { public void setFailOnCreateOutput(boolean v) { failOnCreateOutput = v; } - + @Override public synchronized IndexOutput createOutput(String name, IOContext context) throws IOException { maybeThrowDeterministicException(); @@ -657,26 +696,26 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { } if (assertNoDeleteOpenFile && openFiles.containsKey(name)) { - throw new AssertionError("MockDirectoryWrapper: file \"" + name + "\" is still open: cannot overwrite"); + throw new AssertionError( + "MockDirectoryWrapper: file \"" + name + "\" is still open: cannot overwrite"); } - + unSyncedFiles.add(name); createdFiles.add(name); - - //System.out.println(Thread.currentThread().getName() + ": MDW: create " + name); - IndexOutput delegateOutput = in.createOutput(name, LuceneTestCase.newIOContext(randomState, context)); + + // System.out.println(Thread.currentThread().getName() + ": MDW: create " + name); + IndexOutput delegateOutput = + in.createOutput(name, LuceneTestCase.newIOContext(randomState, context)); final IndexOutput io = new MockIndexOutputWrapper(this, delegateOutput, name); addFileHandle(io, name, Handle.Output); openFilesForWrite.add(name); return maybeThrottle(name, io); - - } private IndexOutput maybeThrottle(String name, IndexOutput output) { // throttling REALLY slows down tests, so don't do it very often for SOMETIMES. 
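
From a test author's point of view the throttling policy is plain configuration. A minimal sketch, assuming the usual LuceneTestCase environment (newMockDirectory() is the framework's factory; the test body is hypothetical):

    // Inside a LuceneTestCase subclass.
    public void testWithoutThrottling() throws Exception {
      MockDirectoryWrapper dir = newMockDirectory();
      dir.setThrottling(MockDirectoryWrapper.Throttling.NEVER); // never emulate a slow disk
      // ... index and search against dir ...
      dir.close();
    }
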
- if (throttling == Throttling.ALWAYS || - (throttling == Throttling.SOMETIMES && randomState.nextInt(200) == 0)) { + if (throttling == Throttling.ALWAYS + || (throttling == Throttling.SOMETIMES && randomState.nextInt(200) == 0)) { if (LuceneTestCase.VERBOSE) { System.out.println("MockDirectoryWrapper: throttling indexOutput (" + name + ")"); } @@ -687,7 +726,8 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { } @Override - public synchronized IndexOutput createTempOutput(String prefix, String suffix, IOContext context) throws IOException { + public synchronized IndexOutput createTempOutput(String prefix, String suffix, IOContext context) + throws IOException { maybeThrowDeterministicException(); maybeThrowIOExceptionOnOpen("temp: prefix=" + prefix + " suffix=" + suffix); maybeYield(); @@ -698,11 +738,13 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { throw new IOException("cannot createTempOutput after crash"); } init(); - - IndexOutput delegateOutput = in.createTempOutput(prefix, suffix, LuceneTestCase.newIOContext(randomState, context)); + + IndexOutput delegateOutput = + in.createTempOutput(prefix, suffix, LuceneTestCase.newIOContext(randomState, context)); String name = delegateOutput.getName(); if (name.toLowerCase(Locale.ROOT).endsWith(".tmp") == false) { - throw new IllegalStateException("wrapped directory failed to use .tmp extension: got: " + name); + throw new IllegalStateException( + "wrapped directory failed to use .tmp extension: got: " + name); } unSyncedFiles.add(name); @@ -710,23 +752,25 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { final IndexOutput io = new MockIndexOutputWrapper(this, delegateOutput, name); addFileHandle(io, name, Handle.Output); openFilesForWrite.add(name); - + return maybeThrottle(name, io); } private static enum Handle { - Input, Output, Slice + Input, + Output, + Slice } synchronized void addFileHandle(Closeable c, String name, Handle handle) { Integer v = openFiles.get(name); if (v != null) { - v = Integer.valueOf(v.intValue()+1); + v = Integer.valueOf(v.intValue() + 1); openFiles.put(name, v); } else { openFiles.put(name, Integer.valueOf(1)); } - + openFileHandles.put(c, new RuntimeException("unclosed Index" + handle.name() + ": " + name)); } @@ -745,26 +789,35 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { maybeThrowDeterministicException(); } if (!LuceneTestCase.slowFileExists(in, name)) { - throw randomState.nextBoolean() ? new FileNotFoundException(name + " in dir=" + in) : new NoSuchFileException(name + " in dir=" + in); + throw randomState.nextBoolean() + ? new FileNotFoundException(name + " in dir=" + in) + : new NoSuchFileException(name + " in dir=" + in); } // cannot open a file for input if it's still open for output. 
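
That check is easiest to understand from the caller's side. A hedged sketch of a test that trips it, assuming the wrapper's default settings (the file name and test method are hypothetical; expectThrows comes from LuceneTestCase):

    public void testReadWhileStillWriting() throws Exception {
      MockDirectoryWrapper dir = newMockDirectory();
      try (IndexOutput out = dir.createOutput("test.bin", IOContext.DEFAULT)) {
        out.writeInt(42);
        // "test.bin" is still open for write, so opening it for read must fail:
        expectThrows(
            AccessDeniedException.class,
            () -> dir.openInput("test.bin", IOContext.DEFAULT).close());
      }
      dir.close();
    }
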
if (!allowReadingFilesStillOpenForWrite && openFilesForWrite.contains(name)) { - throw fillOpenTrace(new AccessDeniedException("MockDirectoryWrapper: file \"" + name + "\" is still open for writing"), name, false); + throw fillOpenTrace( + new AccessDeniedException( + "MockDirectoryWrapper: file \"" + name + "\" is still open for writing"), + name, + false); } - IndexInput delegateInput = in.openInput(name, LuceneTestCase.newIOContext(randomState, context)); + IndexInput delegateInput = + in.openInput(name, LuceneTestCase.newIOContext(randomState, context)); final IndexInput ii; int randomInt = randomState.nextInt(500); if (useSlowOpenClosers && randomInt == 0) { if (LuceneTestCase.VERBOSE) { - System.out.println("MockDirectoryWrapper: using SlowClosingMockIndexInputWrapper for file " + name); + System.out.println( + "MockDirectoryWrapper: using SlowClosingMockIndexInputWrapper for file " + name); } ii = new SlowClosingMockIndexInputWrapper(this, name, delegateInput); - } else if (useSlowOpenClosers && randomInt == 1) { + } else if (useSlowOpenClosers && randomInt == 1) { if (LuceneTestCase.VERBOSE) { - System.out.println("MockDirectoryWrapper: using SlowOpeningMockIndexInputWrapper for file " + name); + System.out.println( + "MockDirectoryWrapper: using SlowOpeningMockIndexInputWrapper for file " + name); } ii = new SlowOpeningMockIndexInputWrapper(this, name, delegateInput); } else { @@ -809,7 +862,12 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { } // RuntimeException instead of IOException because // super() does not throw IOException currently: - throw new RuntimeException("MockDirectoryWrapper: cannot close: there are still " + openFiles.size() + " open files: " + openFiles, cause); + throw new RuntimeException( + "MockDirectoryWrapper: cannot close: there are still " + + openFiles.size() + + " open files: " + + openFiles, + cause); } if (openLocks.size() > 0) { Exception cause = null; @@ -817,12 +875,14 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { if (stacktraces.hasNext()) { cause = stacktraces.next(); } - throw new RuntimeException("MockDirectoryWrapper: cannot close: there are still open locks: " + openLocks, cause); + throw new RuntimeException( + "MockDirectoryWrapper: cannot close: there are still open locks: " + openLocks, cause); } randomIOExceptionRate = 0.0; randomIOExceptionRateOnOpen = 0.0; - if ((getCheckIndexOnClose() || assertNoUnreferencedFilesOnClose) && DirectoryReader.indexExists(this)) { + if ((getCheckIndexOnClose() || assertNoUnreferencedFilesOnClose) + && DirectoryReader.indexExists(this)) { if (getCheckIndexOnClose()) { if (LuceneTestCase.VERBOSE) { @@ -831,11 +891,11 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { crash(); // corrupt any unsynced-files if (LuceneTestCase.VERBOSE) { System.out.println("\nNOTE: MockDirectoryWrapper: now run CheckIndex"); - } + } TestUtil.checkIndex(this, getCrossCheckTermVectorsOnClose(), true, null); } - + // TODO: factor this out / share w/ TestIW.assertNoUnreferencedFiles if (assertNoUnreferencedFilesOnClose) { if (LuceneTestCase.VERBOSE) { @@ -847,47 +907,53 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { IndexWriterConfig iwc = new IndexWriterConfig(null); iwc.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE); - // We must do this before opening writer otherwise writer will be angry if there are pending deletions: + // We must do this before opening writer otherwise writer will be angry if there are + // pending deletions: 
TestUtil.disableVirusChecker(in); new IndexWriter(in, iwc).rollback(); String[] endFiles = in.listAll(); - + Set startSet = new TreeSet<>(Arrays.asList(startFiles)); Set endSet = new TreeSet<>(Arrays.asList(endFiles)); - + startFiles = startSet.toArray(new String[0]); endFiles = endSet.toArray(new String[0]); - + if (!Arrays.equals(startFiles, endFiles)) { List removed = new ArrayList<>(); - for(String fileName : startFiles) { + for (String fileName : startFiles) { if (!endSet.contains(fileName)) { removed.add(fileName); } } - + List added = new ArrayList<>(); - for(String fileName : endFiles) { + for (String fileName : endFiles) { if (!startSet.contains(fileName)) { added.add(fileName); } } - + String extras; if (removed.size() != 0) { extras = "\n\nThese files were removed: " + removed; } else { extras = ""; } - + if (added.size() != 0) { extras += "\n\nThese files were added (waaaaaaaaaat!): " + added; } - - throw new RuntimeException("unreferenced files: before delete:\n " + Arrays.toString(startFiles) + "\n after delete:\n " + Arrays.toString(endFiles) + extras); + + throw new RuntimeException( + "unreferenced files: before delete:\n " + + Arrays.toString(startFiles) + + "\n after delete:\n " + + Arrays.toString(endFiles) + + extras); } - + DirectoryReader ir1 = DirectoryReader.open(this); int numDocs1 = ir1.numDocs(); ir1.close(); @@ -895,7 +961,11 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { DirectoryReader ir2 = DirectoryReader.open(this); int numDocs2 = ir2.numDocs(); ir2.close(); - assert numDocs1 == numDocs2 : "numDocs changed after opening/closing IW: before=" + numDocs1 + " after=" + numDocs2; + assert numDocs1 == numDocs2 + : "numDocs changed after opening/closing IW: before=" + + numDocs1 + + " after=" + + numDocs2; } } success = true; @@ -915,49 +985,44 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { if (v.intValue() == 1) { openFiles.remove(name); } else { - v = Integer.valueOf(v.intValue()-1); + v = Integer.valueOf(v.intValue() - 1); openFiles.put(name, v); } } openFileHandles.remove(c); } - + public synchronized void removeIndexOutput(IndexOutput out, String name) { openFilesForWrite.remove(name); removeOpenFile(out, name); } - + public synchronized void removeIndexInput(IndexInput in, String name) { removeOpenFile(in, name); } - + /** - * Objects that represent fail-able conditions. Objects of a derived - * class are created and registered with the mock directory. After - * register, each object will be invoked once for each first write - * of a file, giving the object a chance to throw an IOException. + * Objects that represent fail-able conditions. Objects of a derived class are created and + * registered with the mock directory. After register, each object will be invoked once for each + * first write of a file, giving the object a chance to throw an IOException. */ public static class Failure { - /** - * eval is called on the first write of every new file. - */ - public void eval(MockDirectoryWrapper dir) throws IOException { } + /** eval is called on the first write of every new file. */ + public void eval(MockDirectoryWrapper dir) throws IOException {} /** - * reset should set the state of the failure to its default - * (freshly constructed) state. Reset is convenient for tests - * that want to create one failure object and then reuse it in - * multiple cases. This, combined with the fact that Failure - * subclasses are often anonymous classes makes reset difficult to - * do otherwise. 
+ * reset should set the state of the failure to its default (freshly constructed) state. Reset + * is convenient for tests that want to create one failure object and then reuse it in multiple + * cases. This, combined with the fact that Failure subclasses are often anonymous classes makes + * reset difficult to do otherwise. * - * A typical example of use is - * Failure failure = new Failure() { ... }; - * ... + *
<p>
    A typical example of use is Failure failure = new Failure() { ... }; ... * mock.failOn(failure.reset()) */ - public Failure reset() { return this; } + public Failure reset() { + return this; + } protected boolean doFail; @@ -973,23 +1038,19 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { ArrayList failures; /** - * add a Failure object to the list of objects to be evaluated - * at every potential failure point + * add a Failure object to the list of objects to be evaluated at every potential failure point */ - synchronized public void failOn(Failure fail) { + public synchronized void failOn(Failure fail) { if (failures == null) { failures = new ArrayList<>(); } failures.add(fail); } - /** - * Iterate through the failures list, giving each object a - * chance to throw an IOE - */ + /** Iterate through the failures list, giving each object a chance to throw an IOE */ synchronized void maybeThrowDeterministicException() throws IOException { if (failures != null) { - for(int i = 0; i < failures.size(); i++) { + for (int i = 0; i < failures.size(); i++) { try { failures.get(i).eval(this); } catch (Throwable t) { @@ -1002,7 +1063,7 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { } } } - + @Override public synchronized String[] listAll() throws IOException { maybeYield(); @@ -1021,11 +1082,12 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { return super.obtainLock(name); // TODO: consider mocking locks, but not all the time, can hide bugs } - - /** Use this when throwing fake {@code IOException}, - * e.g. from {@link MockDirectoryWrapper.Failure}. */ - public static class FakeIOException extends IOException { - } + + /** + * Use this when throwing fake {@code IOException}, e.g. from {@link + * MockDirectoryWrapper.Failure}. + */ + public static class FakeIOException extends IOException {} @Override public String toString() { @@ -1036,16 +1098,18 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { } } - // don't override optional methods like copyFrom: we need the default impl for things like disk + // don't override optional methods like copyFrom: we need the default impl for things like disk // full checks. we randomly exercise "raw" directories anyway. We ensure default impls are used: - + @Override - public final ChecksumIndexInput openChecksumInput(String name, IOContext context) throws IOException { + public final ChecksumIndexInput openChecksumInput(String name, IOContext context) + throws IOException { return super.openChecksumInput(name, context); } @Override - public final void copyFrom(Directory from, String src, String dest, IOContext context) throws IOException { + public final void copyFrom(Directory from, String src, String dest, IOContext context) + throws IOException { super.copyFrom(from, src, dest, context); } diff --git a/lucene/test-framework/src/java/org/apache/lucene/store/MockIndexInputWrapper.java b/lucene/test-framework/src/java/org/apache/lucene/store/MockIndexInputWrapper.java index 515c0dc5d31..f349129e7f2 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/store/MockIndexInputWrapper.java +++ b/lucene/test-framework/src/java/org/apache/lucene/store/MockIndexInputWrapper.java @@ -22,10 +22,8 @@ import java.util.Map; import java.util.Set; /** - * Used by MockDirectoryWrapper to create an input stream that - * keeps track of when it's been closed. + * Used by MockDirectoryWrapper to create an input stream that keeps track of when it's been closed. 
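
Taken together, the Failure plumbing above supports fault-injection patterns like the following hedged sketch (the once-only flag is local to the example; FakeIOException is the helper declared further down in MockDirectoryWrapper, and dir is a MockDirectoryWrapper already in scope):

    MockDirectoryWrapper.Failure failOnce =
        new MockDirectoryWrapper.Failure() {
          boolean fired;

          @Override
          public void eval(MockDirectoryWrapper dir) throws IOException {
            if (!fired) {
              fired = true; // throw on the first first-write only
              throw new MockDirectoryWrapper.FakeIOException();
            }
          }
        };
    dir.failOn(failOnce.reset());
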
*/ - public class MockIndexInputWrapper extends IndexInput { private MockDirectoryWrapper dir; final String name; @@ -34,14 +32,15 @@ public class MockIndexInputWrapper extends IndexInput { // Which MockIndexInputWrapper we were cloned from, or null if we are not a clone: private final MockIndexInputWrapper parent; - + /** Sole constructor */ - public MockIndexInputWrapper(MockDirectoryWrapper dir, String name, IndexInput delegate, MockIndexInputWrapper parent) { + public MockIndexInputWrapper( + MockDirectoryWrapper dir, String name, IndexInput delegate, MockIndexInputWrapper parent) { super("MockIndexInputWrapper(name=" + name + " delegate=" + delegate + ")"); // If we are a clone then our parent better not be a clone! assert parent == null || parent.parent == null; - + this.parent = parent; this.name = name; this.dir = dir; @@ -55,7 +54,7 @@ public class MockIndexInputWrapper extends IndexInput { return; } closed = true; - + try (Closeable delegate = this.delegate) { // Pending resolution on LUCENE-686 we may want to // remove the conditional check so we also track that @@ -67,9 +66,10 @@ public class MockIndexInputWrapper extends IndexInput { dir.maybeThrowDeterministicException(); } } - + private void ensureOpen() { - // TODO: not great this is a volatile read (closed) ... we should deploy heavy JVM voodoo like SwitchPoint to avoid this + // TODO: not great this is a volatile read (closed) ... we should deploy heavy JVM voodoo like + // SwitchPoint to avoid this if (closed) { throw new RuntimeException("Abusing closed IndexInput!"); } @@ -86,7 +86,8 @@ public class MockIndexInputWrapper extends IndexInput { } dir.inputCloneCount.incrementAndGet(); IndexInput iiclone = delegate.clone(); - MockIndexInputWrapper clone = new MockIndexInputWrapper(dir, name, iiclone, parent != null ? parent : this); + MockIndexInputWrapper clone = + new MockIndexInputWrapper(dir, name, iiclone, parent != null ? parent : this); // Pending resolution on LUCENE-686 we may want to // uncomment this code so that we also track that all // clones get closed: @@ -112,7 +113,8 @@ public class MockIndexInputWrapper extends IndexInput { } dir.inputCloneCount.incrementAndGet(); IndexInput slice = delegate.slice(sliceDescription, offset, length); - MockIndexInputWrapper clone = new MockIndexInputWrapper(dir, sliceDescription, slice, parent != null ? parent : this); + MockIndexInputWrapper clone = + new MockIndexInputWrapper(dir, sliceDescription, slice, parent != null ? 
parent : this); return clone; } @@ -147,8 +149,7 @@ public class MockIndexInputWrapper extends IndexInput { } @Override - public void readBytes(byte[] b, int offset, int len, boolean useBuffer) - throws IOException { + public void readBytes(byte[] b, int offset, int len, boolean useBuffer) throws IOException { ensureOpen(); delegate.readBytes(b, offset, len, useBuffer); } @@ -208,7 +209,7 @@ public class MockIndexInputWrapper extends IndexInput { } @Override - public Map readMapOfStrings() throws IOException { + public Map readMapOfStrings() throws IOException { ensureOpen(); return delegate.readMapOfStrings(); } @@ -224,4 +225,3 @@ public class MockIndexInputWrapper extends IndexInput { return "MockIndexInputWrapper(" + delegate + ")"; } } - diff --git a/lucene/test-framework/src/java/org/apache/lucene/store/MockIndexOutputWrapper.java b/lucene/test-framework/src/java/org/apache/lucene/store/MockIndexOutputWrapper.java index ef28da61681..939b8184eae 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/store/MockIndexOutputWrapper.java +++ b/lucene/test-framework/src/java/org/apache/lucene/store/MockIndexOutputWrapper.java @@ -18,22 +18,18 @@ package org.apache.lucene.store; import java.io.Closeable; import java.io.IOException; - import org.apache.lucene.util.LuceneTestCase; /** - * Used to create an output stream that - * will throw an IOException on fake disk full, track max - * disk space actually used, and maybe throw random - * IOExceptions. + * Used to create an output stream that will throw an IOException on fake disk full, track max disk + * space actually used, and maybe throw random IOExceptions. */ - public class MockIndexOutputWrapper extends IndexOutput { private MockDirectoryWrapper dir; private final IndexOutput delegate; - private boolean first=true; + private boolean first = true; final String name; - + byte[] singleByte = new byte[1]; /** Construct an empty output buffer. */ @@ -47,10 +43,11 @@ public class MockIndexOutputWrapper extends IndexOutput { private void checkCrashed() throws IOException { // If crashed since we were opened, then don't write anything if (dir.crashed) { - throw new IOException(dir.getClass().getSimpleName() + " has crashed; cannot write to " + name); + throw new IOException( + dir.getClass().getSimpleName() + " has crashed; cannot write to " + name); } } - + private void checkDiskFull(byte[] b, int offset, DataInput in, long len) throws IOException { long freeSpace = dir.maxSize == 0 ? 
0 : dir.maxSize - dir.sizeInBytes(); long realUsage = 0; @@ -75,7 +72,13 @@ public class MockIndexOutputWrapper extends IndexOutput { if (realUsage > dir.maxUsedSize) { dir.maxUsedSize = realUsage; } - String message = "fake disk full at " + dir.sizeInBytes() + " bytes when writing " + name + " (file length=" + delegate.getFilePointer(); + String message = + "fake disk full at " + + dir.sizeInBytes() + + " bytes when writing " + + name + + " (file length=" + + delegate.getFilePointer(); if (freeSpace > 0) { message += "; wrote " + freeSpace + " of " + len + " bytes"; } @@ -87,9 +90,9 @@ public class MockIndexOutputWrapper extends IndexOutput { throw new IOException(message); } } - + private boolean closed; - + @Override public void close() throws IOException { if (closed) { @@ -97,7 +100,7 @@ public class MockIndexOutputWrapper extends IndexOutput { return; } closed = true; - + try (Closeable delegate = this.delegate) { assert delegate != null; dir.maybeThrowDeterministicException(); @@ -113,7 +116,7 @@ public class MockIndexOutputWrapper extends IndexOutput { } } } - + private void ensureOpen() { if (closed) { throw new AlreadyClosedException("Already closed: " + this); @@ -125,18 +128,18 @@ public class MockIndexOutputWrapper extends IndexOutput { singleByte[0] = b; writeBytes(singleByte, 0, 1); } - + @Override public void writeBytes(byte[] b, int offset, int len) throws IOException { ensureOpen(); checkCrashed(); checkDiskFull(b, offset, null, len); - + if (dir.randomState.nextInt(200) == 0) { - final int half = len/2; + final int half = len / 2; delegate.writeBytes(b, offset, half); Thread.yield(); - delegate.writeBytes(b, offset+half, len-half); + delegate.writeBytes(b, offset + half, len - half); } else { delegate.writeBytes(b, offset, len); } @@ -161,7 +164,7 @@ public class MockIndexOutputWrapper extends IndexOutput { ensureOpen(); checkCrashed(); checkDiskFull(null, 0, input, numBytes); - + delegate.copyBytes(input, numBytes); dir.maybeThrowDeterministicException(); } diff --git a/lucene/test-framework/src/java/org/apache/lucene/store/RawDirectoryWrapper.java b/lucene/test-framework/src/java/org/apache/lucene/store/RawDirectoryWrapper.java index 6ceae351d83..8886101472d 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/store/RawDirectoryWrapper.java +++ b/lucene/test-framework/src/java/org/apache/lucene/store/RawDirectoryWrapper.java @@ -20,22 +20,22 @@ import java.io.IOException; /** * Delegates all operations, even optional ones, to the wrapped directory. - *
<p> - * This class is used if you want the most realistic testing, but still - * with a checkindex on close. If you want asserts and evil things, - * use MockDirectoryWrapper instead. + * + * <p>
    This class is used if you want the most realistic testing, but still with a checkindex on + * close. If you want asserts and evil things, use MockDirectoryWrapper instead. */ public final class RawDirectoryWrapper extends BaseDirectoryWrapper { public RawDirectoryWrapper(Directory delegate) { super(delegate); } - + @Override - public void copyFrom(Directory from, String src, String dest, IOContext context) throws IOException { + public void copyFrom(Directory from, String src, String dest, IOContext context) + throws IOException { in.copyFrom(from, src, dest, context); } - + @Override public ChecksumIndexInput openChecksumInput(String name, IOContext context) throws IOException { return in.openChecksumInput(name, context); diff --git a/lucene/test-framework/src/java/org/apache/lucene/store/SlowClosingMockIndexInputWrapper.java b/lucene/test-framework/src/java/org/apache/lucene/store/SlowClosingMockIndexInputWrapper.java index e6c3857164e..d79d0e6ac3b 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/store/SlowClosingMockIndexInputWrapper.java +++ b/lucene/test-framework/src/java/org/apache/lucene/store/SlowClosingMockIndexInputWrapper.java @@ -17,22 +17,20 @@ package org.apache.lucene.store; import java.io.IOException; - import org.apache.lucene.util.ThreadInterruptedException; /** - * hangs onto files a little bit longer (50ms in close). - * MockDirectoryWrapper acts like windows: you can't delete files - * open elsewhere. so the idea is to make race conditions for tiny - * files (like segments) easier to reproduce. + * hangs onto files a little bit longer (50ms in close). MockDirectoryWrapper acts like windows: you + * can't delete files open elsewhere. so the idea is to make race conditions for tiny files (like + * segments) easier to reproduce. 
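
Enabling that slow open/close behavior from a test is a single call; a brief sketch, again assuming a LuceneTestCase-style setup:

    MockDirectoryWrapper dir = newMockDirectory();
    dir.setUseSlowOpenClosers(true); // a small fraction of opens/closes now sleeps ~50ms
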
*/ class SlowClosingMockIndexInputWrapper extends MockIndexInputWrapper { - public SlowClosingMockIndexInputWrapper(MockDirectoryWrapper dir, - String name, IndexInput delegate) { + public SlowClosingMockIndexInputWrapper( + MockDirectoryWrapper dir, String name, IndexInput delegate) { super(dir, name, delegate, null); } - + @Override public void close() throws IOException { try { diff --git a/lucene/test-framework/src/java/org/apache/lucene/store/SlowOpeningMockIndexInputWrapper.java b/lucene/test-framework/src/java/org/apache/lucene/store/SlowOpeningMockIndexInputWrapper.java index 1e95451ec3d..5b0c3abb93a 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/store/SlowOpeningMockIndexInputWrapper.java +++ b/lucene/test-framework/src/java/org/apache/lucene/store/SlowOpeningMockIndexInputWrapper.java @@ -17,24 +17,25 @@ package org.apache.lucene.store; import java.io.IOException; - import org.apache.lucene.util.ThreadInterruptedException; /** - * Takes a while to open files: gives testThreadInterruptDeadlock - * a chance to find file leaks if opening an input throws exception + * Takes a while to open files: gives testThreadInterruptDeadlock a chance to find file leaks if + * opening an input throws exception */ class SlowOpeningMockIndexInputWrapper extends MockIndexInputWrapper { - public SlowOpeningMockIndexInputWrapper(MockDirectoryWrapper dir, - String name, IndexInput delegate) throws IOException { + public SlowOpeningMockIndexInputWrapper( + MockDirectoryWrapper dir, String name, IndexInput delegate) throws IOException { super(dir, name, delegate, null); try { Thread.sleep(50); } catch (InterruptedException ie) { try { super.close(); - } catch (Throwable ignore) {} // we didnt open successfully + } catch (Throwable ignore) { + // we didnt open successfully + } throw new ThreadInterruptedException(ie); } } diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/AbstractBeforeAfterRule.java b/lucene/test-framework/src/java/org/apache/lucene/util/AbstractBeforeAfterRule.java index 1bd4bed5f2d..e0633c54934 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/AbstractBeforeAfterRule.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/AbstractBeforeAfterRule.java @@ -17,7 +17,6 @@ package org.apache.lucene.util; import java.util.ArrayList; - import org.junit.After; import org.junit.AfterClass; import org.junit.rules.RuleChain; @@ -27,10 +26,9 @@ import org.junit.runners.model.MultipleFailureException; import org.junit.runners.model.Statement; /** - * A {@link TestRule} that guarantees the execution of {@link #after} even - * if an exception has been thrown from delegate {@link Statement}. This is much - * like {@link AfterClass} or {@link After} annotations but can be used with - * {@link RuleChain} to guarantee the order of execution. + * A {@link TestRule} that guarantees the execution of {@link #after} even if an exception has been + * thrown from delegate {@link Statement}. This is much like {@link AfterClass} or {@link After} + * annotations but can be used with {@link RuleChain} to guarantee the order of execution. 
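
For orientation, the guarantee such a rule provides is typically consumed via RuleChain. A sketch as seen from within the same package (the class is package-private), guarding a hypothetical resource:

    TestRule guard =
        new AbstractBeforeAfterRule() {
          @Override
          protected void before() throws Exception {
            // acquire the resource
          }

          @Override
          protected void after() throws Exception {
            // release it; runs even if the wrapped Statement threw
          }
        };
    RuleChain chain = RuleChain.outerRule(guard); // composition preserves ordering
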
*/ abstract class AbstractBeforeAfterRule implements TestRule { @Override @@ -46,7 +44,7 @@ abstract class AbstractBeforeAfterRule implements TestRule { } catch (Throwable t) { errors.add(t); } - + try { after(); } catch (Throwable t) { @@ -59,5 +57,6 @@ abstract class AbstractBeforeAfterRule implements TestRule { } protected void before() throws Exception {} + protected void after() throws Exception {} } diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/BaseBitSetTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/util/BaseBitSetTestCase.java index 832009a54c0..a9cc4b6419e 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/BaseBitSetTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/BaseBitSetTestCase.java @@ -20,14 +20,11 @@ import java.io.IOException; import java.util.Collection; import java.util.Collections; import java.util.Random; - import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; import org.junit.Ignore; -/** - * Base test case for BitSets. - */ +/** Base test case for BitSets. */ @Ignore public abstract class BaseBitSetTestCase extends LuceneTestCase { @@ -158,7 +155,9 @@ public abstract class BaseBitSetTestCase extends LuceneTestCas return new BitDocIdSet(copyOf(set, numBits), set.cardinality()); case 2: final RoaringDocIdSet.Builder builder = new RoaringDocIdSet.Builder(numBits); - for (int i = set.nextSetBit(0); i != DocIdSetIterator.NO_MORE_DOCS; i = i + 1 >= numBits ? DocIdSetIterator.NO_MORE_DOCS : set.nextSetBit(i + 1)) { + for (int i = set.nextSetBit(0); + i != DocIdSetIterator.NO_MORE_DOCS; + i = i + 1 >= numBits ? DocIdSetIterator.NO_MORE_DOCS : set.nextSetBit(i + 1)) { builder.add(i); } return builder.build(); @@ -180,10 +179,11 @@ public abstract class BaseBitSetTestCase extends LuceneTestCas final int numBits = 1 + random().nextInt(100000); BitSet set1 = new JavaUtilBitSet(randomSet(numBits, 0), numBits); // empty T set2 = copyOf(set1, numBits); - + final int iterations = atLeast(10); for (int iter = 0; iter < iterations; ++iter) { - DocIdSet otherSet = randomCopy(new JavaUtilBitSet(randomSet(numBits, load), numBits), numBits); + DocIdSet otherSet = + randomCopy(new JavaUtilBitSet(randomSet(numBits, load), numBits), numBits); DocIdSetIterator otherIterator = otherSet.iterator(); if (otherIterator != null) { set1.or(otherIterator); @@ -274,7 +274,5 @@ public abstract class BaseBitSetTestCase extends LuceneTestCas } return next; } - } - } diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/BaseDocIdSetTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/util/BaseDocIdSetTestCase.java index 1798840f425..640b4c4ffbf 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/BaseDocIdSetTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/BaseDocIdSetTestCase.java @@ -21,7 +21,6 @@ import static org.apache.lucene.util.BaseBitSetTestCase.randomSet; import java.io.IOException; import java.util.BitSet; import java.util.Random; - import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; @@ -106,7 +105,8 @@ public abstract class BaseDocIdSetTestCase extends LuceneTes for (int i = 0; i < iters; ++i) { final int pow = random.nextInt(20); final int maxDoc = TestUtil.nextInt(random, 1, 1 << pow); - final int numDocs = TestUtil.nextInt(random, 0, Math.min(maxDoc, 1 << TestUtil.nextInt(random, 0, pow))); + final int numDocs = + TestUtil.nextInt(random, 0, Math.min(maxDoc, 1 << 
TestUtil.nextInt(random, 0, pow))); final BitSet set = randomSet(maxDoc, numDocs); final DocIdSet copy = copyOf(set, maxDoc); final long actualBytes = ramBytesUsed(copy, maxDoc); @@ -115,7 +115,10 @@ public abstract class BaseDocIdSetTestCase extends LuceneTes } } - /** Assert that the content of the {@link DocIdSet} is the same as the content of the {@link BitSet}. */ + /** + * Assert that the content of the {@link DocIdSet} is the same as the content of the {@link + * BitSet}. + */ public void assertEquals(int numBits, BitSet ds1, T ds2) throws IOException { Random random = random(); // nextDoc @@ -137,7 +140,7 @@ public abstract class BaseDocIdSetTestCase extends LuceneTes if (it2 == null) { assertEquals(-1, ds1.nextSetBit(0)); } else { - for (int doc = -1; doc != DocIdSetIterator.NO_MORE_DOCS;) { + for (int doc = -1; doc != DocIdSetIterator.NO_MORE_DOCS; ) { if (random.nextBoolean()) { doc = ds1.nextSetBit(doc + 1); if (doc == -1) { @@ -146,7 +149,8 @@ public abstract class BaseDocIdSetTestCase extends LuceneTes assertEquals(doc, it2.nextDoc()); assertEquals(doc, it2.docID()); } else { - final int target = doc + 1 + random.nextInt(random.nextBoolean() ? 64 : Math.max(numBits / 8, 1)); + final int target = + doc + 1 + random.nextInt(random.nextBoolean() ? 64 : Math.max(numBits / 8, 1)); doc = ds1.nextSetBit(target); if (doc == -1) { doc = DocIdSetIterator.NO_MORE_DOCS; @@ -191,5 +195,4 @@ public abstract class BaseDocIdSetTestCase extends LuceneTes long bytes2 = RamUsageTester.sizeOf(dummy); return bytes1 - bytes2; } - } diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/CloseableDirectory.java b/lucene/test-framework/src/java/org/apache/lucene/util/CloseableDirectory.java index e7542da37f6..541f4a5bf32 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/CloseableDirectory.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/CloseableDirectory.java @@ -17,25 +17,23 @@ package org.apache.lucene.util; import java.io.Closeable; - import org.apache.lucene.store.BaseDirectoryWrapper; import org.junit.Assert; /** * Attempts to close a {@link BaseDirectoryWrapper}. - * + * * @see LuceneTestCase#newDirectory(java.util.Random) */ final class CloseableDirectory implements Closeable { private final BaseDirectoryWrapper dir; private final TestRuleMarkFailure failureMarker; - - public CloseableDirectory(BaseDirectoryWrapper dir, - TestRuleMarkFailure failureMarker) { + + public CloseableDirectory(BaseDirectoryWrapper dir, TestRuleMarkFailure failureMarker) { this.dir = dir; this.failureMarker = failureMarker; } - + @Override public void close() { // We only attempt to check open/closed state if there were no other test diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/English.java b/lucene/test-framework/src/java/org/apache/lucene/util/English.java index 98e4e89c7c6..b245cb7b740 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/English.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/English.java @@ -18,8 +18,9 @@ package org.apache.lucene.util; /** * Converts numbers to english strings for testing. 
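
A quick illustration of the English helper below; the exact spacing and punctuation of the output are approximate, not asserted anywhere in the patch:

    StringBuilder sb = new StringBuilder();
    English.longToEnglish(1_000_042L, sb);
    // sb now holds roughly: "one million, forty-two "
    String s = English.intToEnglish(42); // roughly "forty-two "
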
+ * * @lucene.internal - */ + */ public final class English { private English() {} // no instance @@ -39,42 +40,42 @@ public final class English { result.append("minus "); i = -i; } - if (i >= 1000000000000000000l) { // quadrillion + if (i >= 1000000000000000000l) { // quadrillion longToEnglish(i / 1000000000000000000l, result); result.append("quintillion, "); i = i % 1000000000000000000l; } - if (i >= 1000000000000000l) { // quadrillion + if (i >= 1000000000000000l) { // quadrillion longToEnglish(i / 1000000000000000l, result); result.append("quadrillion, "); i = i % 1000000000000000l; } - if (i >= 1000000000000l) { // trillions + if (i >= 1000000000000l) { // trillions longToEnglish(i / 1000000000000l, result); result.append("trillion, "); i = i % 1000000000000l; } - if (i >= 1000000000) { // billions + if (i >= 1000000000) { // billions longToEnglish(i / 1000000000, result); result.append("billion, "); i = i % 1000000000; } - if (i >= 1000000) { // millions + if (i >= 1000000) { // millions longToEnglish(i / 1000000, result); result.append("million, "); i = i % 1000000; } - if (i >= 1000) { // thousands + if (i >= 1000) { // thousands longToEnglish(i / 1000, result); result.append("thousand, "); i = i % 1000; } - if (i >= 100) { // hundreds + if (i >= 100) { // hundreds longToEnglish(i / 100, result); result.append("hundred "); i = i % 100; } - //we know we are smaller here so we can cast + // we know we are smaller here so we can cast if (i >= 20) { switch (((int) i) / 10) { case 9: @@ -103,10 +104,8 @@ public final class English { break; } i = i % 10; - if (i == 0) - result.append(" "); - else - result.append("-"); + if (i == 0) result.append(" "); + else result.append("-"); } switch ((int) i) { case 19: @@ -172,7 +171,6 @@ public final class English { } } - public static String intToEnglish(int i) { StringBuilder result = new StringBuilder(); longToEnglish(i, result); diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/FailOnNonBulkMergesInfoStream.java b/lucene/test-framework/src/java/org/apache/lucene/util/FailOnNonBulkMergesInfoStream.java index fb5fcf939df..e43a7040d97 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/FailOnNonBulkMergesInfoStream.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/FailOnNonBulkMergesInfoStream.java @@ -23,9 +23,8 @@ import java.io.IOException; public class FailOnNonBulkMergesInfoStream extends InfoStream { @Override - public void close() throws IOException { - } - + public void close() throws IOException {} + @Override public boolean isEnabled(String component) { return true; diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/FailureMarker.java b/lucene/test-framework/src/java/org/apache/lucene/util/FailureMarker.java index 39453621ecc..517057897b9 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/FailureMarker.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/FailureMarker.java @@ -17,13 +17,12 @@ package org.apache.lucene.util; import java.util.concurrent.atomic.AtomicInteger; - import org.junit.runner.notification.Failure; import org.junit.runner.notification.RunListener; /** - * A {@link RunListener} that detects suite/ test failures. We need it because failures - * due to thread leaks happen outside of any rule contexts. + * A {@link RunListener} that detects suite/ test failures. We need it because failures due to + * thread leaks happen outside of any rule contexts. 
*/ public class FailureMarker extends RunListener { static final AtomicInteger failures = new AtomicInteger(); diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/LineFileDocs.java b/lucene/test-framework/src/java/org/apache/lucene/util/LineFileDocs.java index 6d0c4bffdd5..34554be49e4 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/LineFileDocs.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/LineFileDocs.java @@ -34,7 +34,6 @@ import java.util.List; import java.util.Random; import java.util.concurrent.atomic.AtomicInteger; import java.util.zip.GZIPInputStream; - import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; @@ -46,19 +45,19 @@ import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; -/** Minimal port of benchmark's LneDocSource + - * DocMaker, so tests can enum docs from a line file created - * by benchmark's WriteLineDoc task */ +/** + * Minimal port of benchmark's LneDocSource + DocMaker, so tests can enum docs from a line file + * created by benchmark's WriteLineDoc task + */ public class LineFileDocs implements Closeable { private BufferedReader reader; - private final static int BUFFER_SIZE = 1 << 16; // 64K + private static final int BUFFER_SIZE = 1 << 16; // 64K private final AtomicInteger id = new AtomicInteger(); private final String path; private final Random random; - /** If forever is true, we rewind the file at EOF (repeat - * the docs over and over) */ + /** If forever is true, we rewind the file at EOF (repeat the docs over and over) */ public LineFileDocs(Random random, String path) throws IOException { this.path = path; this.random = new Random(random.nextLong()); @@ -74,12 +73,12 @@ public class LineFileDocs implements Closeable { IOUtils.close(reader, threadDocs); reader = null; } - + private long randomSeekPos(Random random, long size) { if (random == null || size <= 3L) { return 0L; } else { - return (random.nextLong()&Long.MAX_VALUE) % (size/3); + return (random.nextLong() & Long.MAX_VALUE) % (size / 3); } } @@ -88,14 +87,15 @@ public class LineFileDocs implements Closeable { // true if the InputStream is not already randomly seek'd after the if/else block below: boolean needSkip; - + long size = 0L, seekTo = 0L; if (is == null) { // if it's not in classpath, we load it as absolute filesystem path (e.g. 
Jenkins' home dir) Path file = Paths.get(path); size = Files.size(file); if (path.endsWith(".gz")) { - // if it is a gzip file, we need to use InputStream and seek to one of the pre-computed skip points: + // if it is a gzip file, we need to use InputStream and seek to one of the pre-computed skip + // points: is = Files.newInputStream(file); needSkip = true; } else { @@ -108,8 +108,10 @@ public class LineFileDocs implements Closeable { channel.position(seekTo); is = Channels.newInputStream(channel); - // read until newline char, otherwise we may hit "java.nio.charset.MalformedInputException: Input length = 1" - // exception in readline() below, because we seeked part way through a multi-byte (in UTF-8) encoded + // read until newline char, otherwise we may hit "java.nio.charset.MalformedInputException: + // Input length = 1" + // exception in readline() below, because we seeked part way through a multi-byte (in UTF-8) + // encoded // unicode character: if (seekTo > 0L) { int b; @@ -128,12 +130,14 @@ public class LineFileDocs implements Closeable { if (needSkip) { - // LUCENE-9191: use the optimized (pre-computed, using dev-tools/scripts/create_line_file_docs.py) + // LUCENE-9191: use the optimized (pre-computed, using + // dev-tools/scripts/create_line_file_docs.py) // seek file, so we can seek in a gzip'd file int index = path.lastIndexOf('.'); if (index == -1) { - throw new IllegalArgumentException("could not determine extension for path \"" + path + "\""); + throw new IllegalArgumentException( + "could not determine extension for path \"" + path + "\""); } // e.g. foo.txt --> foo.seek, foo.txt.gz --> foo.txt.seek @@ -143,13 +147,13 @@ public class LineFileDocs implements Closeable { seekIS = Files.newInputStream(Paths.get(seekFilePath)); } - try (BufferedReader reader = new BufferedReader(new InputStreamReader(seekIS, - StandardCharsets.UTF_8))) { + try (BufferedReader reader = + new BufferedReader(new InputStreamReader(seekIS, StandardCharsets.UTF_8))) { List skipPoints = new ArrayList<>(); // explicitly insert implicit 0 as the first skip point: skipPoints.add(0L); - + while (true) { String line = reader.readLine(); if (line == null) { @@ -170,10 +174,12 @@ public class LineFileDocs implements Closeable { } } } - - CharsetDecoder decoder = StandardCharsets.UTF_8.newDecoder() - .onMalformedInput(CodingErrorAction.REPORT) - .onUnmappableCharacter(CodingErrorAction.REPORT); + + CharsetDecoder decoder = + StandardCharsets.UTF_8 + .newDecoder() + .onMalformedInput(CodingErrorAction.REPORT) + .onUnmappableCharacter(CodingErrorAction.REPORT); reader = new BufferedReader(new InputStreamReader(is, decoder), BUFFER_SIZE); } @@ -184,7 +190,7 @@ public class LineFileDocs implements Closeable { id.set(0); } - private final static char SEP = '\t'; + private static final char SEP = '\t'; private static final class DocState { final Document doc; @@ -199,7 +205,7 @@ public class LineFileDocs implements Closeable { public DocState() { doc = new Document(); - + title = new StringField("title", "", Field.Store.NO); doc.add(title); @@ -208,7 +214,7 @@ public class LineFileDocs implements Closeable { ft.setStoreTermVectors(true); ft.setStoreTermVectorOffsets(true); ft.setStoreTermVectorPositions(true); - + titleTokenized = new Field("titleTokenized", "", ft); doc.add(titleTokenized); @@ -236,7 +242,7 @@ public class LineFileDocs implements Closeable { /** Note: Document instance is re-used per-thread */ public Document nextDoc() throws IOException { String line; - synchronized(this) { + synchronized 
(this) { line = reader.readLine(); if (line == null) { // Always rewind at end: @@ -265,14 +271,14 @@ public class LineFileDocs implements Closeable { throw new RuntimeException("line: [" + line + "] is in an invalid format !"); } - docState.body.setStringValue(line.substring(1+spot2, line.length())); + docState.body.setStringValue(line.substring(1 + spot2, line.length())); final String title = line.substring(0, spot); docState.title.setStringValue(title); if (docState.titleDV != null) { docState.titleDV.setBytesValue(new BytesRef(title)); } docState.titleTokenized.setStringValue(title); - docState.date.setStringValue(line.substring(1+spot, spot2)); + docState.date.setStringValue(line.substring(1 + spot, spot2)); final int i = id.getAndIncrement(); docState.id.setStringValue(Integer.toString(i)); docState.idNum.setIntValue(i); @@ -283,7 +289,7 @@ public class LineFileDocs implements Closeable { if (random.nextInt(5) == 4) { // Make some sparse fields Document doc = new Document(); - for(IndexableField field : docState.doc) { + for (IndexableField field : docState.doc) { doc.add(field); } diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneJUnit3MethodProvider.java b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneJUnit3MethodProvider.java index 23b177fc227..6ff9af53a5b 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneJUnit3MethodProvider.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneJUnit3MethodProvider.java @@ -16,33 +16,30 @@ */ package org.apache.lucene.util; +import com.carrotsearch.randomizedtesting.ClassModel; +import com.carrotsearch.randomizedtesting.ClassModel.MethodModel; +import com.carrotsearch.randomizedtesting.TestMethodProvider; import java.lang.reflect.Method; import java.lang.reflect.Modifier; import java.util.ArrayList; import java.util.Collection; import java.util.Map; -import com.carrotsearch.randomizedtesting.ClassModel; -import com.carrotsearch.randomizedtesting.ClassModel.MethodModel; -import com.carrotsearch.randomizedtesting.TestMethodProvider; - -/** - * Backwards compatible test* method provider (public, non-static). - */ +/** Backwards compatible test* method provider (public, non-static). */ public final class LuceneJUnit3MethodProvider implements TestMethodProvider { @Override public Collection getTestMethods(Class suiteClass, ClassModel classModel) { - Map methods = classModel.getMethods(); + Map methods = classModel.getMethods(); ArrayList result = new ArrayList<>(); for (MethodModel mm : methods.values()) { // Skip any methods that have overrieds/ shadows. 
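// (mm.getDown() returns the subclass method that overrides or shadows this one,
// so any entry with a non-null "down" is skipped and the method is collected
// once, on the most-derived class instead.)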
if (mm.getDown() != null) continue; Method m = mm.element; - if (m.getName().startsWith("test") && - Modifier.isPublic(m.getModifiers()) && - !Modifier.isStatic(m.getModifiers()) && - m.getParameterTypes().length == 0) { + if (m.getName().startsWith("test") + && Modifier.isPublic(m.getModifiers()) + && !Modifier.isStatic(m.getModifiers()) + && m.getParameterTypes().length == 0) { result.add(m); } } diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java index d6b884223f0..8e9ab9f9a41 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java @@ -16,6 +16,35 @@ */ package org.apache.lucene.util; +import static com.carrotsearch.randomizedtesting.RandomizedTest.systemPropertyAsBoolean; +import static com.carrotsearch.randomizedtesting.RandomizedTest.systemPropertyAsInt; +import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS; + +import com.carrotsearch.randomizedtesting.JUnit4MethodProvider; +import com.carrotsearch.randomizedtesting.LifecycleScope; +import com.carrotsearch.randomizedtesting.MixWithSuiteName; +import com.carrotsearch.randomizedtesting.RandomizedContext; +import com.carrotsearch.randomizedtesting.RandomizedRunner; +import com.carrotsearch.randomizedtesting.RandomizedTest; +import com.carrotsearch.randomizedtesting.annotations.Listeners; +import com.carrotsearch.randomizedtesting.annotations.SeedDecorators; +import com.carrotsearch.randomizedtesting.annotations.TestGroup; +import com.carrotsearch.randomizedtesting.annotations.TestMethodProviders; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakAction; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakAction.Action; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakGroup; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakGroup.Group; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies.Consequence; +import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; +import com.carrotsearch.randomizedtesting.generators.RandomPicks; +import com.carrotsearch.randomizedtesting.rules.NoClassHooksShadowingRule; +import com.carrotsearch.randomizedtesting.rules.NoInstanceHooksOverridesRule; +import com.carrotsearch.randomizedtesting.rules.StaticFieldsInvariantRule; import java.io.Closeable; import java.io.FileNotFoundException; import java.io.IOException; @@ -67,12 +96,12 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import java.util.logging.Logger; import java.util.stream.Collectors; - +import junit.framework.AssertionFailedError; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field.Store; import org.apache.lucene.document.Field; +import org.apache.lucene.document.Field.Store; import org.apache.lucene.document.FieldType; import org.apache.lucene.document.StringField; import 
org.apache.lucene.document.TextField; @@ -98,8 +127,8 @@ import org.apache.lucene.store.IOContext; import org.apache.lucene.store.LockFactory; import org.apache.lucene.store.MMapDirectory; import org.apache.lucene.store.MergeInfo; -import org.apache.lucene.store.MockDirectoryWrapper.Throttling; import org.apache.lucene.store.MockDirectoryWrapper; +import org.apache.lucene.store.MockDirectoryWrapper.Throttling; import org.apache.lucene.store.NRTCachingDirectory; import org.apache.lucene.store.RawDirectoryWrapper; import org.apache.lucene.util.automaton.AutomatonTestUtil; @@ -113,108 +142,62 @@ import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; +import org.junit.internal.AssumptionViolatedException; import org.junit.rules.RuleChain; import org.junit.rules.TestRule; import org.junit.runner.RunWith; -import org.junit.internal.AssumptionViolatedException; - -import com.carrotsearch.randomizedtesting.JUnit4MethodProvider; -import com.carrotsearch.randomizedtesting.LifecycleScope; -import com.carrotsearch.randomizedtesting.MixWithSuiteName; -import com.carrotsearch.randomizedtesting.RandomizedContext; -import com.carrotsearch.randomizedtesting.RandomizedRunner; -import com.carrotsearch.randomizedtesting.RandomizedTest; -import com.carrotsearch.randomizedtesting.annotations.Listeners; -import com.carrotsearch.randomizedtesting.annotations.SeedDecorators; -import com.carrotsearch.randomizedtesting.annotations.TestGroup; -import com.carrotsearch.randomizedtesting.annotations.TestMethodProviders; -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakAction.Action; -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakAction; -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakGroup.Group; -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakGroup; -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering; -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope; -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope; -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies.Consequence; -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies; -import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; -import com.carrotsearch.randomizedtesting.generators.RandomPicks; -import com.carrotsearch.randomizedtesting.rules.NoClassHooksShadowingRule; -import com.carrotsearch.randomizedtesting.rules.NoInstanceHooksOverridesRule; -import com.carrotsearch.randomizedtesting.rules.StaticFieldsInvariantRule; - -import junit.framework.AssertionFailedError; - -import static com.carrotsearch.randomizedtesting.RandomizedTest.systemPropertyAsBoolean; -import static com.carrotsearch.randomizedtesting.RandomizedTest.systemPropertyAsInt; -import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS; /** * Base class for all Lucene unit tests, Junit3 or Junit4 variant. - * + * *

 * <h2>Class and instance setup.</h2>
- *
- * <p>
- * The preferred way to specify class (suite-level) setup/cleanup is to use
- * static methods annotated with {@link BeforeClass} and {@link AfterClass}. Any
- * code in these methods is executed within the test framework's control and
- * ensure proper setup has been made. <b>Try not to use static initializers
- * (including complex final field initializers).</b> Static initializers are
- * executed before any setup rules are fired and may cause you (or somebody
- * else) headaches.
- *
- * <p>
- * For instance-level setup, use {@link Before} and {@link After} annotated
- * methods. If you override either {@link #setUp()} or {@link #tearDown()} in
- * your subclass, make sure you call <code>super.setUp()</code> and
- * <code>super.tearDown()</code>. This is detected and enforced.
- *
+ *
+ * <p>The preferred way to specify class (suite-level) setup/cleanup is to use static methods
+ * annotated with {@link BeforeClass} and {@link AfterClass}. Any code in these methods is executed
+ * within the test framework's control and ensures proper setup has been made. <b>Try not to use
+ * static initializers (including complex final field initializers).</b> Static initializers are
+ * executed before any setup rules are fired and may cause you (or somebody else) headaches.
+ *
+ * <p>For instance-level setup, use {@link Before} and {@link After} annotated methods. If you
+ * override either {@link #setUp()} or {@link #tearDown()} in your subclass, make sure you call
+ * <code>super.setUp()</code> and <code>super.tearDown()</code>. This is detected and enforced.
+ *
 * <h2>Specifying test cases</h2>
- *
- * <p>
- * Any test method with a <code>testXXX</code> prefix is considered a test case.
- * Any test method annotated with {@link Test} is considered a test case.
- *
+ *
+ * <p>Any test method with a <code>testXXX</code> prefix is considered a test case. Any test method
+ * annotated with {@link Test} is considered a test case.
+ *
 * <h2>Randomized execution and test facilities</h2>
- *
- * <p>
- * {@link LuceneTestCase} uses {@link RandomizedRunner} to execute test cases.
- * {@link RandomizedRunner} has built-in support for tests randomization
- * including access to a repeatable {@link Random} instance. See
- * {@link #random()} method. Any test using {@link Random} acquired from
- * {@link #random()} should be fully reproducible (assuming no race conditions
- * between threads etc.). The initial seed for a test case is reported in many
- * ways:
+ *
+ * <p>{@link LuceneTestCase} uses {@link RandomizedRunner} to execute test cases. {@link
+ * RandomizedRunner} has built-in support for tests randomization including access to a repeatable
+ * {@link Random} instance. See {@link #random()} method. Any test using {@link Random} acquired
+ * from {@link #random()} should be fully reproducible (assuming no race conditions between threads
+ * etc.). The initial seed for a test case is reported in many ways:
+ *
 * <ul>
- *   <li>as part of any exception thrown from its body (inserted as a dummy stack
- *   trace entry),</li>
- *   <li>as part of the main thread executing the test case (if your test hangs,
- *   just dump the stack trace of all threads and you'll see the seed),</li>
- *   <li>the master seed can also be accessed manually by getting the current
- *   context ({@link RandomizedContext#current()}) and then calling
- *   {@link RandomizedContext#getRunnerSeedAsString()}.</li>
+ *   <li>as part of any exception thrown from its body (inserted as a dummy stack trace entry),
+ *   <li>as part of the main thread executing the test case (if your test hangs, just dump the
+ *       stack trace of all threads and you'll see the seed),
+ *   <li>the master seed can also be accessed manually by getting the current context ({@link
+ *       RandomizedContext#current()}) and then calling {@link
+ *       RandomizedContext#getRunnerSeedAsString()}.
 * </ul>
    */ @RunWith(RandomizedRunner.class) -@TestMethodProviders({ - LuceneJUnit3MethodProvider.class, - JUnit4MethodProvider.class -}) -@Listeners({ - RunListenerPrintReproduceInfo.class, - FailureMarker.class -}) +@TestMethodProviders({LuceneJUnit3MethodProvider.class, JUnit4MethodProvider.class}) +@Listeners({RunListenerPrintReproduceInfo.class, FailureMarker.class}) @SeedDecorators({MixWithSuiteName.class}) // See LUCENE-3995 for rationale. @ThreadLeakScope(Scope.SUITE) @ThreadLeakGroup(Group.MAIN) @ThreadLeakAction({Action.WARN, Action.INTERRUPT}) -@ThreadLeakLingering(linger = 20000) // Wait long for leaked threads to complete before failure. zk needs this. +// Wait long for leaked threads to complete before failure. zk needs this. +@ThreadLeakLingering(linger = 20000) @ThreadLeakZombies(Consequence.IGNORE_REMAINING_TESTS) @TimeoutSuite(millis = 2 * TimeUnits.HOUR) -@ThreadLeakFilters(defaultFilters = true, filters = { - QuickPatchThreadsFilter.class -}) +@ThreadLeakFilters( + defaultFilters = true, + filters = {QuickPatchThreadsFilter.class}) @TestRuleLimitSysouts.Limit( bytes = TestRuleLimitSysouts.DEFAULT_LIMIT, hardLimit = TestRuleLimitSysouts.DEFAULT_HARD_LIMIT) @@ -231,33 +214,27 @@ public abstract class LuceneTestCase extends Assert { public static final String SYSPROP_SLOW = "tests.slow"; public static final String SYSPROP_BADAPPLES = "tests.badapples"; - /** @see #ignoreAfterMaxFailures*/ + /** @see #ignoreAfterMaxFailures */ public static final String SYSPROP_MAXFAILURES = "tests.maxfailures"; - /** @see #ignoreAfterMaxFailures*/ + /** @see #ignoreAfterMaxFailures */ public static final String SYSPROP_FAILFAST = "tests.failfast"; - /** - * Annotation for tests that should only be run during nightly builds. - */ + /** Annotation for tests that should only be run during nightly builds. */ @Documented @Inherited @Retention(RetentionPolicy.RUNTIME) @TestGroup(enabled = false, sysProperty = SYSPROP_NIGHTLY) public @interface Nightly {} - /** - * Annotation for tests that should only be run during weekly builds - */ + /** Annotation for tests that should only be run during weekly builds */ @Documented @Inherited @Retention(RetentionPolicy.RUNTIME) @TestGroup(enabled = false, sysProperty = SYSPROP_WEEKLY) public @interface Weekly {} - - /** - * Annotation for monster tests that require special setup (e.g. use tons of disk and RAM) - */ + + /** Annotation for monster tests that require special setup (e.g. use tons of disk and RAM) */ @Documented @Inherited @Retention(RetentionPolicy.RUNTIME) @@ -266,9 +243,7 @@ public abstract class LuceneTestCase extends Assert { String value(); } - /** - * Annotation for tests which exhibit a known issue and are temporarily disabled. - */ + /** Annotation for tests which exhibit a known issue and are temporarily disabled. */ @Documented @Inherited @Retention(RetentionPolicy.RUNTIME) @@ -279,8 +254,8 @@ public abstract class LuceneTestCase extends Assert { } /** - * Annotation for tests that are slow. Slow tests do run by default but can be - * disabled if a quick run is needed. + * Annotation for tests that are slow. Slow tests do run by default but can be disabled if a quick + * run is needed. */ @Documented @Inherited @@ -289,14 +264,15 @@ public abstract class LuceneTestCase extends Assert { public @interface Slow {} /** - * Annotation for tests that fail frequently and are not executed in Jenkins builds - * to not spam mailing lists with false reports. 
+ * Annotation for tests that fail frequently and are not executed in Jenkins builds to not spam
+ * mailing lists with false reports.
+ *
+ * <p>Tests are turned on for developers by default. If you want to disable them, set:
 *
- * Tests are turned on for developers by default. If you want to disable
- * them, set:
 * <pre>
 * -Dtests.badapples=false
 * </pre>
    + * * (or do this through {@code ~./lucene.build.properties}). */ @Documented @@ -309,8 +285,8 @@ public abstract class LuceneTestCase extends Assert { } /** - * Annotation for test classes that should avoid certain codec types - * (because they are expensive, for example). + * Annotation for test classes that should avoid certain codec types (because they are expensive, + * for example). */ @Documented @Inherited @@ -319,13 +295,13 @@ public abstract class LuceneTestCase extends Assert { public @interface SuppressCodecs { String[] value(); } - + /** - * Annotation for test classes that should avoid mock filesystem types - * (because they test a bug that only happens on linux, for example). - *

<p>
- * You can avoid specific names {@link Class#getSimpleName()} or use
- * the special value {@code *} to disable all mock filesystems.
+ * Annotation for test classes that should avoid mock filesystem types (because they test a bug
+ * that only happens on linux, for example).
+ *
+ * <p>You can avoid specific names {@link Class#getSimpleName()} or use the special value <code>*
+ * </code> to disable all mock filesystems.
 */ @Documented @Inherited @Retention(RetentionPolicy.RUNTIME) @Target(ElementType.TYPE) public @interface SuppressFileSystems { String[] value(); } - + /**
- * Annotation for test classes that should always omit
- * actual fsync calls from reaching the filesystem.
- * <p>
- * This can be useful, e.g. if they make many lucene commits.
+ * Annotation for test classes that should always omit actual fsync calls from reaching the
+ * filesystem.
+ *
+ * <p>
    This can be useful, e.g. if they make many lucene commits. */ @Documented @Inherited @Retention(RetentionPolicy.RUNTIME) @Target(ElementType.TYPE) public @interface SuppressFsync {} - + /** - * Marks any suites which are known not to close all the temporary - * files. This may prevent temp. files and folders from being cleaned - * up after the suite is completed. - * + * Marks any suites which are known not to close all the temporary files. This may prevent temp. + * files and folders from being cleaned up after the suite is completed. + * * @see LuceneTestCase#createTempDir() * @see LuceneTestCase#createTempFile(String, String) */ @@ -365,9 +340,9 @@ public abstract class LuceneTestCase extends Assert { } /** - * Ignore {@link TestRuleLimitSysouts} for any suite which is known to print - * over the default limit of bytes to {@link System#out} or {@link System#err}. - * + * Ignore {@link TestRuleLimitSysouts} for any suite which is known to print over the default + * limit of bytes to {@link System#out} or {@link System#err}. + * * @see TestRuleLimitSysouts */ @Documented @@ -380,34 +355,32 @@ public abstract class LuceneTestCase extends Assert { } /** - * Suppress the default {@code reproduce with: ant test...} - * Your own listener can be added as needed for your build. + * Suppress the default {@code reproduce with: ant test...} Your own listener can be added as + * needed for your build. */ @Documented @Inherited @Retention(RetentionPolicy.RUNTIME) @Target(ElementType.TYPE) public @interface SuppressReproduceLine {} - + // ----------------------------------------------------------------- - // Truly immutable fields and constants, initialized once and valid + // Truly immutable fields and constants, initialized once and valid // for all suites ever since. // ----------------------------------------------------------------- /** - * True if and only if tests are run in verbose mode. If this flag is false - * tests are not expected to print any messages. Enforced with {@link TestRuleLimitSysouts}. + * True if and only if tests are run in verbose mode. If this flag is false tests are not expected + * to print any messages. Enforced with {@link TestRuleLimitSysouts}. */ public static final boolean VERBOSE = systemPropertyAsBoolean("tests.verbose", false); - /** - * Enables or disables dumping of {@link InfoStream} messages. - */ + /** Enables or disables dumping of {@link InfoStream} messages. */ public static final boolean INFOSTREAM = systemPropertyAsBoolean("tests.infostream", VERBOSE); /** - * A random multiplier which you should use when writing random tests: - * multiply it by the number of iterations to scale your tests (for nightly builds). + * A random multiplier which you should use when writing random tests: multiply it by the number + * of iterations to scale your tests (for nightly builds). */ public static final int RANDOM_MULTIPLIER = systemPropertyAsInt("tests.multiplier", 1); @@ -423,132 +396,151 @@ public abstract class LuceneTestCase extends Assert { public static final String TEST_CODEC = System.getProperty("tests.codec", "random"); /** Gets the postingsFormat to run tests with. 
*/ - public static final String TEST_POSTINGSFORMAT = System.getProperty("tests.postingsformat", "random"); - + public static final String TEST_POSTINGSFORMAT = + System.getProperty("tests.postingsformat", "random"); + /** Gets the docValuesFormat to run tests with */ - public static final String TEST_DOCVALUESFORMAT = System.getProperty("tests.docvaluesformat", "random"); + public static final String TEST_DOCVALUESFORMAT = + System.getProperty("tests.docvaluesformat", "random"); /** Gets the directory to run tests with */ public static final String TEST_DIRECTORY = System.getProperty("tests.directory", "random"); /** the line file used by LineFileDocs */ - public static final String TEST_LINE_DOCS_FILE = System.getProperty("tests.linedocsfile", DEFAULT_LINE_DOCS_FILE); + public static final String TEST_LINE_DOCS_FILE = + System.getProperty("tests.linedocsfile", DEFAULT_LINE_DOCS_FILE); /** Whether or not {@link Nightly} tests should run. */ - public static final boolean TEST_NIGHTLY = systemPropertyAsBoolean(SYSPROP_NIGHTLY, Nightly.class.getAnnotation(TestGroup.class).enabled()); + public static final boolean TEST_NIGHTLY = + systemPropertyAsBoolean( + SYSPROP_NIGHTLY, Nightly.class.getAnnotation(TestGroup.class).enabled()); /** Whether or not {@link Weekly} tests should run. */ - public static final boolean TEST_WEEKLY = systemPropertyAsBoolean(SYSPROP_WEEKLY, Weekly.class.getAnnotation(TestGroup.class).enabled()); - + public static final boolean TEST_WEEKLY = + systemPropertyAsBoolean( + SYSPROP_WEEKLY, Weekly.class.getAnnotation(TestGroup.class).enabled()); + /** Whether or not {@link Monster} tests should run. */ - public static final boolean TEST_MONSTER = systemPropertyAsBoolean(SYSPROP_MONSTER, Monster.class.getAnnotation(TestGroup.class).enabled()); + public static final boolean TEST_MONSTER = + systemPropertyAsBoolean( + SYSPROP_MONSTER, Monster.class.getAnnotation(TestGroup.class).enabled()); /** Whether or not {@link AwaitsFix} tests should run. */ - public static final boolean TEST_AWAITSFIX = systemPropertyAsBoolean(SYSPROP_AWAITSFIX, AwaitsFix.class.getAnnotation(TestGroup.class).enabled()); + public static final boolean TEST_AWAITSFIX = + systemPropertyAsBoolean( + SYSPROP_AWAITSFIX, AwaitsFix.class.getAnnotation(TestGroup.class).enabled()); /** Whether or not {@link BadApple} tests should run. */ - public static final boolean TEST_BADAPPLES = systemPropertyAsBoolean(SYSPROP_BADAPPLES, BadApple.class.getAnnotation(TestGroup.class).enabled()); + public static final boolean TEST_BADAPPLES = + systemPropertyAsBoolean( + SYSPROP_BADAPPLES, BadApple.class.getAnnotation(TestGroup.class).enabled()); /** Whether or not {@link Slow} tests should run. */ - public static final boolean TEST_SLOW = systemPropertyAsBoolean(SYSPROP_SLOW, Slow.class.getAnnotation(TestGroup.class).enabled()); + public static final boolean TEST_SLOW = + systemPropertyAsBoolean(SYSPROP_SLOW, Slow.class.getAnnotation(TestGroup.class).enabled()); /** Throttling, see {@link MockDirectoryWrapper#setThrottling(Throttling)}. */ - public static final Throttling TEST_THROTTLING = TEST_NIGHTLY ? Throttling.SOMETIMES : Throttling.NEVER; + public static final Throttling TEST_THROTTLING = + TEST_NIGHTLY ? Throttling.SOMETIMES : Throttling.NEVER; /** Leave temporary files on disk, even on successful runs. */ public static final boolean LEAVE_TEMPORARY; + static { boolean defaultValue = false; - for (String property : Arrays.asList( - "tests.leaveTemporary" /* ANT tasks's (junit4) flag. 
*/, - "tests.leavetemporary" /* lowercase */, - "tests.leavetmpdir" /* default */, - "solr.test.leavetmpdir" /* Solr's legacy */)) { + for (String property : + Arrays.asList( + "tests.leaveTemporary" /* ANT tasks's (junit4) flag. */, + "tests.leavetemporary" /* lowercase */, + "tests.leavetmpdir" /* default */, + "solr.test.leavetmpdir" /* Solr's legacy */)) { defaultValue |= systemPropertyAsBoolean(property, false); } LEAVE_TEMPORARY = defaultValue; } - /** Returns true, if MMapDirectory supports unmapping on this platform (required for Windows), or if we are not on Windows. */ + /** + * Returns true, if MMapDirectory supports unmapping on this platform (required for Windows), or + * if we are not on Windows. + */ public static boolean hasWorkingMMapOnWindows() { return !Constants.WINDOWS || MMapDirectory.UNMAP_SUPPORTED; } - - /** Assumes that the current MMapDirectory implementation supports unmapping, so the test will not fail on Windows. + + /** + * Assumes that the current MMapDirectory implementation supports unmapping, so the test will not + * fail on Windows. + * * @see #hasWorkingMMapOnWindows() - * */ + */ public static void assumeWorkingMMapOnWindows() { assumeTrue(MMapDirectory.UNMAP_NOT_SUPPORTED_REASON, hasWorkingMMapOnWindows()); } /** Filesystem-based {@link Directory} implementations. */ - private static final List FS_DIRECTORIES = Arrays.asList( - "NIOFSDirectory", - // NIOFSDirectory as replacement for MMapDirectory if unmapping is not supported on Windows (to make randomization stable): - hasWorkingMMapOnWindows() ? "MMapDirectory" : "NIOFSDirectory" - ); + private static final List FS_DIRECTORIES = + Arrays.asList( + "NIOFSDirectory", + // NIOFSDirectory as replacement for MMapDirectory if unmapping is not supported on + // Windows (to make randomization stable): + hasWorkingMMapOnWindows() ? "MMapDirectory" : "NIOFSDirectory"); /** All {@link Directory} implementations. */ private static final List CORE_DIRECTORIES; + static { CORE_DIRECTORIES = new ArrayList<>(FS_DIRECTORIES); CORE_DIRECTORIES.add(ByteBuffersDirectory.class.getSimpleName()); } - + /** A {@link org.apache.lucene.search.QueryCachingPolicy} that randomly caches. */ - public static final QueryCachingPolicy MAYBE_CACHE_POLICY = new QueryCachingPolicy() { + public static final QueryCachingPolicy MAYBE_CACHE_POLICY = + new QueryCachingPolicy() { - @Override - public void onUse(Query query) {} + @Override + public void onUse(Query query) {} - @Override - public boolean shouldCache(Query query) throws IOException { - return random().nextBoolean(); - } + @Override + public boolean shouldCache(Query query) throws IOException { + return random().nextBoolean(); + } + }; - }; - // ----------------------------------------------------------------- // Fields initialized in class or instance rules. // ----------------------------------------------------------------- - // ----------------------------------------------------------------- // Class level (suite) rules. // ----------------------------------------------------------------- - - /** - * Stores the currently class under test. - */ - private static final TestRuleStoreClassName classNameRule; - /** - * Class environment setup rule. - */ + /** Stores the currently class under test. */ + private static final TestRuleStoreClassName classNameRule; + + /** Class environment setup rule. */ static final TestRuleSetupAndRestoreClassEnv classEnvRule; - /** - * Suite failure marker (any error in the test or suite scope). 
- */ + /** Suite failure marker (any error in the test or suite scope). */ protected static TestRuleMarkFailure suiteFailureMarker; - - /** - * Temporary files cleanup rule. - */ + + /** Temporary files cleanup rule. */ private static TestRuleTemporaryFilesCleanup tempFilesCleanupRule; /** - * Ignore tests after hitting a designated number of initial failures. This - * is truly a "static" global singleton since it needs to span the lifetime of all - * test classes running inside this JVM (it cannot be part of a class rule). - * - *

<p>This poses some problems for the test framework's tests because these sometimes
- * trigger intentional failures which add up to the global count. This field contains
- * a (possibly) changing reference to {@link TestRuleIgnoreAfterMaxFailures} and we
- * dispatch to its current value from the {@link #classRules} chain using {@link TestRuleDelegate}.
+ * Ignore tests after hitting a designated number of initial failures. This is truly a "static"
+ * global singleton since it needs to span the lifetime of all test classes running inside this
+ * JVM (it cannot be part of a class rule).
+ *
+ * <p>
    This poses some problems for the test framework's tests because these sometimes trigger + * intentional failures which add up to the global count. This field contains a (possibly) + * changing reference to {@link TestRuleIgnoreAfterMaxFailures} and we dispatch to its current + * value from the {@link #classRules} chain using {@link TestRuleDelegate}. */ - private static final AtomicReference ignoreAfterMaxFailuresDelegate; + private static final AtomicReference + ignoreAfterMaxFailuresDelegate; + private static final TestRule ignoreAfterMaxFailures; + static { int maxFailures = systemPropertyAsInt(SYSPROP_MAXFAILURES, Integer.MAX_VALUE); boolean failFast = systemPropertyAsBoolean(SYSPROP_FAILFAST, false); @@ -557,21 +549,25 @@ public abstract class LuceneTestCase extends Assert { if (maxFailures == Integer.MAX_VALUE) { maxFailures = 1; } else { - Logger.getLogger(LuceneTestCase.class.getSimpleName()).warning( - "Property '" + SYSPROP_MAXFAILURES + "'=" + maxFailures + ", 'failfast' is" + - " ignored."); + Logger.getLogger(LuceneTestCase.class.getSimpleName()) + .warning( + "Property '" + + SYSPROP_MAXFAILURES + + "'=" + + maxFailures + + ", 'failfast' is" + + " ignored."); } } - ignoreAfterMaxFailuresDelegate = - new AtomicReference<>( - new TestRuleIgnoreAfterMaxFailures(maxFailures)); + ignoreAfterMaxFailuresDelegate = + new AtomicReference<>(new TestRuleIgnoreAfterMaxFailures(maxFailures)); ignoreAfterMaxFailures = TestRuleDelegate.of(ignoreAfterMaxFailuresDelegate); } - + /** - * Try to capture streams early so that other classes don't have a chance to steal references - * to them (as is the case with ju.logging handlers). + * Try to capture streams early so that other classes don't have a chance to steal references to + * them (as is the case with ju.logging handlers). */ static { TestRuleLimitSysouts.checkCaptureStreams(); @@ -579,83 +575,93 @@ public abstract class LuceneTestCase extends Assert { } /** - * Temporarily substitute the global {@link TestRuleIgnoreAfterMaxFailures}. See - * {@link #ignoreAfterMaxFailuresDelegate} for some explanation why this method - * is needed. + * Temporarily substitute the global {@link TestRuleIgnoreAfterMaxFailures}. See {@link + * #ignoreAfterMaxFailuresDelegate} for some explanation why this method is needed. */ - public static TestRuleIgnoreAfterMaxFailures replaceMaxFailureRule(TestRuleIgnoreAfterMaxFailures newValue) { + public static TestRuleIgnoreAfterMaxFailures replaceMaxFailureRule( + TestRuleIgnoreAfterMaxFailures newValue) { return ignoreAfterMaxFailuresDelegate.getAndSet(newValue); } /** - * Max 10mb of static data stored in a test suite class after the suite is complete. - * Prevents static data structures leaking and causing OOMs in subsequent tests. + * Max 10mb of static data stored in a test suite class after the suite is complete. Prevents + * static data structures leaking and causing OOMs in subsequent tests. */ - private final static long STATIC_LEAK_THRESHOLD = 10 * 1024 * 1024; + private static final long STATIC_LEAK_THRESHOLD = 10 * 1024 * 1024; /** By-name list of ignored types like loggers etc. 
*/ - private final static Set STATIC_LEAK_IGNORED_TYPES = Set.of( - "org.slf4j.Logger", - "org.apache.solr.SolrLogFormatter", - "java.io.File", // Solr sometimes refers to this in a static way, but it has a "java.nio.fs.Path" inside - Path.class.getName(), // causes problems because interface is implemented by hidden classes - Class.class.getName(), - EnumSet.class.getName()); + private static final Set STATIC_LEAK_IGNORED_TYPES = + Set.of( + "org.slf4j.Logger", + "org.apache.solr.SolrLogFormatter", + "java.io.File", // Solr sometimes refers to this in a static way, but it has a + // "java.nio.fs.Path" inside + Path.class + .getName(), // causes problems because interface is implemented by hidden classes + Class.class.getName(), + EnumSet.class.getName()); /** - * This controls how suite-level rules are nested. It is important that _all_ rules declared - * in {@link LuceneTestCase} are executed in proper order if they depend on each - * other. + * This controls how suite-level rules are nested. It is important that _all_ rules declared in + * {@link LuceneTestCase} are executed in proper order if they depend on each other. */ - @ClassRule - public static TestRule classRules; + @ClassRule public static TestRule classRules; + static { - RuleChain r = RuleChain.outerRule(new TestRuleIgnoreTestSuites()) - .around(ignoreAfterMaxFailures) - .around(suiteFailureMarker = new TestRuleMarkFailure()) - .around(new TestRuleAssertionsRequired()) - .around(new TestRuleLimitSysouts(suiteFailureMarker)) - .around(tempFilesCleanupRule = new TestRuleTemporaryFilesCleanup(suiteFailureMarker)); - // TODO LUCENE-7595: Java 9 does not allow to look into runtime classes, so we have to fix the RAM usage checker! + RuleChain r = + RuleChain.outerRule(new TestRuleIgnoreTestSuites()) + .around(ignoreAfterMaxFailures) + .around(suiteFailureMarker = new TestRuleMarkFailure()) + .around(new TestRuleAssertionsRequired()) + .around(new TestRuleLimitSysouts(suiteFailureMarker)) + .around(tempFilesCleanupRule = new TestRuleTemporaryFilesCleanup(suiteFailureMarker)); + // TODO LUCENE-7595: Java 9 does not allow to look into runtime classes, so we have to fix the + // RAM usage checker! if (!Constants.JRE_IS_MINIMUM_JAVA9) { - r = r.around(new StaticFieldsInvariantRule(STATIC_LEAK_THRESHOLD, true) { - @Override - protected boolean accept(java.lang.reflect.Field field) { - // Don't count known classes that consume memory once. - if (STATIC_LEAK_IGNORED_TYPES.contains(field.getType().getName())) { - return false; - } - // Don't count references from ourselves, we're top-level. - if (field.getDeclaringClass() == LuceneTestCase.class) { - return false; - } - return super.accept(field); - } - }); + r = + r.around( + new StaticFieldsInvariantRule(STATIC_LEAK_THRESHOLD, true) { + @Override + protected boolean accept(java.lang.reflect.Field field) { + // Don't count known classes that consume memory once. + if (STATIC_LEAK_IGNORED_TYPES.contains(field.getType().getName())) { + return false; + } + // Don't count references from ourselves, we're top-level. 
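+ // (Static fields on LuceneTestCase itself, such as the class rule chain, are
+ // intentionally long-lived, so counting them would trip the 10 MB
+ // STATIC_LEAK_THRESHOLD on every suite.)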
+ if (field.getDeclaringClass() == LuceneTestCase.class) { + return false; + } + return super.accept(field); + } + }); } - classRules = r.around(new NoClassHooksShadowingRule()) - .around(new NoInstanceHooksOverridesRule() { - @Override - protected boolean verify(Method key) { - String name = key.getName(); - return !(name.equals("setUp") || name.equals("tearDown")); - } - }) - .around(classNameRule = new TestRuleStoreClassName()) - .around(new TestRuleRestoreSystemProperties( - // Enlist all properties to which we have write access (security manager); - // these should be restored to previous state, no matter what the outcome of the test. - - // We reset the default locale and timezone; these properties change as a side-effect - "user.language", - "user.timezone", - - // TODO: these should, ideally, be moved to Solr's base class. - "solr.directoryFactory", - "solr.solr.home", - "solr.data.dir" - )) - .around(classEnvRule = new TestRuleSetupAndRestoreClassEnv()); + classRules = + r.around(new NoClassHooksShadowingRule()) + .around( + new NoInstanceHooksOverridesRule() { + @Override + protected boolean verify(Method key) { + String name = key.getName(); + return !(name.equals("setUp") || name.equals("tearDown")); + } + }) + .around(classNameRule = new TestRuleStoreClassName()) + .around( + new TestRuleRestoreSystemProperties( + // Enlist all properties to which we have write access (security manager); + // these should be restored to previous state, no matter what the outcome of the + // test. + + // We reset the default locale and timezone; these properties change as a + // side-effect + "user.language", + "user.timezone", + + // TODO: these should, ideally, be moved to Solr's base class. + "solr.directoryFactory", + "solr.solr.home", + "solr.data.dir")) + .around(classEnvRule = new TestRuleSetupAndRestoreClassEnv()); } // ----------------------------------------------------------------- @@ -669,24 +675,27 @@ public abstract class LuceneTestCase extends Assert { private TestRuleThreadAndTestName threadAndTestNameRule = new TestRuleThreadAndTestName(); /** Taint suite result with individual test failures. */ - private TestRuleMarkFailure testFailureMarker = new TestRuleMarkFailure(suiteFailureMarker); - + private TestRuleMarkFailure testFailureMarker = new TestRuleMarkFailure(suiteFailureMarker); + /** - * This controls how individual test rules are nested. It is important that - * _all_ rules declared in {@link LuceneTestCase} are executed in proper order - * if they depend on each other. + * This controls how individual test rules are nested. It is important that _all_ rules declared + * in {@link LuceneTestCase} are executed in proper order if they depend on each other. 
*/ @Rule - public final TestRule ruleChain = RuleChain - .outerRule(testFailureMarker) - .around(ignoreAfterMaxFailures) - .around(threadAndTestNameRule) - .around(new TestRuleSetupAndRestoreInstanceEnv()) - .around(parentChainCallRule); + public final TestRule ruleChain = + RuleChain.outerRule(testFailureMarker) + .around(ignoreAfterMaxFailures) + .around(threadAndTestNameRule) + .around(new TestRuleSetupAndRestoreInstanceEnv()) + .around(parentChainCallRule); - private static final Map fieldToType = new HashMap(); + private static final Map fieldToType = new HashMap(); - enum LiveIWCFlushMode {BY_RAM, BY_DOCS, EITHER}; + enum LiveIWCFlushMode { + BY_RAM, + BY_DOCS, + EITHER + }; /** Set by TestRuleSetupAndRestoreClassEnv */ static LiveIWCFlushMode liveIWCFlushMode; @@ -699,17 +708,13 @@ public abstract class LuceneTestCase extends Assert { // Suite and test case setup/ cleanup. // ----------------------------------------------------------------- - /** - * For subclasses to override. Overrides must call {@code super.setUp()}. - */ + /** For subclasses to override. Overrides must call {@code super.setUp()}. */ @Before public void setUp() throws Exception { parentChainCallRule.setupCalled = true; } - /** - * For subclasses to override. Overrides must call {@code super.tearDown()}. - */ + /** For subclasses to override. Overrides must call {@code super.tearDown()}. */ @After public void tearDown() throws Exception { parentChainCallRule.teardownCalled = true; @@ -719,8 +724,10 @@ public abstract class LuceneTestCase extends Assert { restoreIndexWriterMaxDocs(); } - /** Tells {@link IndexWriter} to enforce the specified limit as the maximum number of documents in one index; call - * {@link #restoreIndexWriterMaxDocs} once your test is done. */ + /** + * Tells {@link IndexWriter} to enforce the specified limit as the maximum number of documents in + * one index; call {@link #restoreIndexWriterMaxDocs} once your test is done. + */ public void setIndexWriterMaxDocs(int limit) { IndexWriterMaxDocsChanger.setMaxDocs(limit); } @@ -731,27 +738,28 @@ public abstract class LuceneTestCase extends Assert { } // ----------------------------------------------------------------- - // Test facilities and facades for subclasses. + // Test facilities and facades for subclasses. // ----------------------------------------------------------------- /** - * Access to the current {@link RandomizedContext}'s Random instance. It is safe to use - * this method from multiple threads, etc., but it should be called while within a runner's - * scope (so no static initializers). The returned {@link Random} instance will be - * different when this method is called inside a {@link BeforeClass} hook (static - * suite scope) and within {@link Before}/ {@link After} hooks or test methods. - * - *

<p>The returned instance must not be shared with other threads or cross a single scope's
- * boundary. For example, a {@link Random} acquired within a test method shouldn't be reused
- * for another test case.
- * <p>
- * There is an overhead connected with getting the {@link Random} for a particular context
- * and thread. It is better to cache the {@link Random} locally if tight loops with multiple
- * invocations are present or create a derivative local {@link Random} for millions of calls
- * like this:
+ * Access to the current {@link RandomizedContext}'s Random instance. It is safe to use this
+ * method from multiple threads, etc., but it should be called while within a runner's scope (so
+ * no static initializers). The returned {@link Random} instance will be <b>different</b> when
+ * this method is called inside a {@link BeforeClass} hook (static suite scope) and within {@link
+ * Before}/ {@link After} hooks or test methods.
+ *
+ * <p>The returned instance must not be shared with other threads or cross a single scope's
+ * boundary. For example, a {@link Random} acquired within a test method shouldn't be reused for
+ * another test case.
+ *
+ * <p>There is an overhead connected with getting the {@link Random} for a particular context and
+ * thread. It is better to cache the {@link Random} locally if tight loops with multiple
+ * invocations are present or create a derivative local {@link Random} for millions of calls like
+ * this:
+ *
 * <pre>
 * Random random = new Random(random().nextLong());
-   * // tight loop with many invocations. 
+   * // tight loop with many invocations.
 * </pre>
    */ public static Random random() { @@ -759,9 +767,8 @@ public abstract class LuceneTestCase extends Assert { } /** - * Registers a {@link Closeable} resource that should be closed after the test - * completes. - * + * Registers a {@link Closeable} resource that should be closed after the test completes. + * * @return resource (for call chaining). */ public T closeAfterTest(T resource) { @@ -769,34 +776,29 @@ public abstract class LuceneTestCase extends Assert { } /** - * Registers a {@link Closeable} resource that should be closed after the suite - * completes. - * + * Registers a {@link Closeable} resource that should be closed after the suite completes. + * * @return resource (for call chaining). */ public static T closeAfterSuite(T resource) { return RandomizedContext.current().closeAtEnd(resource, LifecycleScope.SUITE); } - /** - * Return the current class being tested. - */ + /** Return the current class being tested. */ public static Class getTestClass() { return classNameRule.getTestClass(); } - /** - * Return the name of the currently executing test case. - */ + /** Return the name of the currently executing test case. */ public String getTestName() { return threadAndTestNameRule.testMethodName; } /** - * Some tests expect the directory to contain a single segment, and want to - * do tests on that segment's reader. This is an utility method to help them. + * Some tests expect the directory to contain a single segment, and want to do tests on that + * segment's reader. This is an utility method to help them. */ - /* + /* public static SegmentReader getOnlySegmentReader(DirectoryReader reader) { List subReaders = reader.leaves(); if (subReaders.size() != 1) { @@ -809,20 +811,20 @@ public abstract class LuceneTestCase extends Assert { */ /** - * Some tests expect the directory to contain a single segment, and want to - * do tests on that segment's reader. This is an utility method to help them. + * Some tests expect the directory to contain a single segment, and want to do tests on that + * segment's reader. This is an utility method to help them. */ public static LeafReader getOnlyLeafReader(IndexReader reader) { List subReaders = reader.leaves(); if (subReaders.size() != 1) { - throw new IllegalArgumentException(reader + " has " + subReaders.size() + " segments instead of exactly one"); + throw new IllegalArgumentException( + reader + " has " + subReaders.size() + " segments instead of exactly one"); } return subReaders.get(0).reader(); } /** - * Returns true if and only if the calling thread is the primary thread - * executing the test case. + * Returns true if and only if the calling thread is the primary thread executing the test case. */ protected boolean isTestThread() { assertNotNull("Test case thread not set?", threadAndTestNameRule.testCaseThread); @@ -831,25 +833,25 @@ public abstract class LuceneTestCase extends Assert { /** * Returns a number of at least i - *

<p>
- * The actual number returned will be influenced by whether {@link #TEST_NIGHTLY}
- * is active and {@link #RANDOM_MULTIPLIER}, but also with some random fudge.
+ *
+ * <p>
    The actual number returned will be influenced by whether {@link #TEST_NIGHTLY} is active and + * {@link #RANDOM_MULTIPLIER}, but also with some random fudge. */ public static int atLeast(Random random, int i) { - int min = (TEST_NIGHTLY ? 2*i : i) * RANDOM_MULTIPLIER; - int max = min+(min/2); + int min = (TEST_NIGHTLY ? 2 * i : i) * RANDOM_MULTIPLIER; + int max = min + (min / 2); return TestUtil.nextInt(random, min, max); } - + public static int atLeast(int i) { return atLeast(random(), i); } - + /** * Returns true if something should happen rarely, - *

<p>
- * The actual number returned will be influenced by whether {@link #TEST_NIGHTLY}
- * is active and {@link #RANDOM_MULTIPLIER}.
+ *
+ * <p>
    The actual number returned will be influenced by whether {@link #TEST_NIGHTLY} is active and + * {@link #RANDOM_MULTIPLIER}. */ public static boolean rarely(Random random) { int p = TEST_NIGHTLY ? 10 : 1; @@ -857,15 +859,15 @@ public abstract class LuceneTestCase extends Assert { int min = 100 - Math.min(p, 50); // never more than 50 return random.nextInt(100) >= min; } - + public static boolean rarely() { return rarely(random()); } - + public static boolean usually(Random random) { return !rarely(random); } - + public static boolean usually() { return usually(random()); } @@ -881,12 +883,13 @@ public abstract class LuceneTestCase extends Assert { public static void assumeNoException(String msg, Exception e) { RandomizedTest.assumeNoException(msg, e); } - + /** - * Return args as a {@link Set} instance. The order of elements is not - * preserved in iterators. + * Return args as a {@link Set} instance. The order of elements is not preserved in + * iterators. */ - @SafeVarargs @SuppressWarnings("varargs") + @SafeVarargs + @SuppressWarnings("varargs") public static Set asSet(T... args) { return new HashSet<>(Arrays.asList(args)); } @@ -894,12 +897,12 @@ public abstract class LuceneTestCase extends Assert { /** * Convenience method for logging an iterator. * - * @param label String logged before/after the items in the iterator - * @param iter Each next() is toString()ed and logged on its own line. If iter is null this is logged differently then an empty iterator. + * @param label String logged before/after the items in the iterator + * @param iter Each next() is toString()ed and logged on its own line. If iter is null this is + * logged differently then an empty iterator. * @param stream Stream to log messages to. */ - public static void dumpIterator(String label, Iterator iter, - PrintStream stream) { + public static void dumpIterator(String label, Iterator iter, PrintStream stream) { stream.println("*** BEGIN " + label + " ***"); if (null == iter) { stream.println(" ... NULL ..."); @@ -912,12 +915,11 @@ public abstract class LuceneTestCase extends Assert { } /** - * Convenience method for logging an array. Wraps the array in an iterator and delegates + * Convenience method for logging an array. Wraps the array in an iterator and delegates * * @see #dumpIterator(String,Iterator,PrintStream) */ - public static void dumpArray(String label, Object[] objs, - PrintStream stream) { + public static void dumpArray(String label, Object[] objs, PrintStream stream) { Iterator iter = (null == objs) ? null : Arrays.asList(objs).iterator(); dumpIterator(label, iter, stream); } @@ -931,7 +933,7 @@ public abstract class LuceneTestCase extends Assert { public static IndexWriterConfig newIndexWriterConfig(Analyzer a) { return newIndexWriterConfig(random(), a); } - + /** create a new index writer config with random defaults using the specified random */ public static IndexWriterConfig newIndexWriterConfig(Random r, Analyzer a) { IndexWriterConfig c = new IndexWriterConfig(a); @@ -943,7 +945,8 @@ public abstract class LuceneTestCase extends Assert { // that when there are separate instances of // IndexWriter created we see "IW 0", "IW 1", "IW 2", // ... 
instead of just always "IW 0": - c.setInfoStream(new TestRuleSetupAndRestoreClassEnv.ThreadNameFixingPrintStreamInfoStream(System.out)); + c.setInfoStream( + new TestRuleSetupAndRestoreClassEnv.ThreadNameFixingPrintStreamInfoStream(System.out)); } if (rarely(r)) { @@ -953,12 +956,13 @@ public abstract class LuceneTestCase extends Assert { if (r.nextBoolean()) { cms = new ConcurrentMergeScheduler(); } else { - cms = new ConcurrentMergeScheduler() { - @Override - protected synchronized boolean maybeStall(MergeSource mergeSource) { - return true; - } - }; + cms = + new ConcurrentMergeScheduler() { + @Override + protected synchronized boolean maybeStall(MergeSource mergeSource) { + return true; + } + }; } int maxThreadCount = TestUtil.nextInt(r, 1, 4); int maxMergeCount = TestUtil.nextInt(r, maxThreadCount, maxThreadCount + 4); @@ -967,7 +971,7 @@ public abstract class LuceneTestCase extends Assert { cms.disableAutoIOThrottle(); assertFalse(cms.getAutoIOThrottle()); } - cms.setForceMergeMBPerSec(10 + 10*random().nextDouble()); + cms.setForceMergeMBPerSec(10 + 10 * random().nextDouble()); c.setMergeScheduler(cms); } else { // Always use consistent settings, else CMS's dynamic (SSD or not) @@ -1003,7 +1007,7 @@ public abstract class LuceneTestCase extends Assert { if (rarely(r)) { c.setCheckPendingFlushUpdate(false); } - c.setMaxFullFlushMergeWaitMillis(rarely() ? atLeast(r, 1000) : atLeast(r, 200)); + c.setMaxFullFlushMergeWaitMillis(rarely() ? atLeast(r, 1000) : atLeast(r, 200)); return c; } @@ -1016,7 +1020,8 @@ public abstract class LuceneTestCase extends Assert { estFlushSizeBytes = Math.min(estFlushSizeBytes, iwc.getMaxBufferedDocs() * 1024); } if (iwc.getRAMBufferSizeMB() != IndexWriterConfig.DISABLE_AUTO_FLUSH) { - estFlushSizeBytes = Math.min(estFlushSizeBytes, (long) (iwc.getRAMBufferSizeMB() * 1024 * 1024)); + estFlushSizeBytes = + Math.min(estFlushSizeBytes, (long) (iwc.getRAMBufferSizeMB() * 1024 * 1024)); } assert estFlushSizeBytes > 0; @@ -1027,27 +1032,42 @@ public abstract class LuceneTestCase extends Assert { if (floorSegBytes / estFlushSizeBytes > 10) { double newValue = estFlushSizeBytes * 10.0 / 1024 / 1024; if (VERBOSE) { - System.out.println("NOTE: LuceneTestCase: changing TieredMergePolicy.floorSegmentMB from " + tmp.getFloorSegmentMB() + " to " + newValue + " to avoid pathological merging"); + System.out.println( + "NOTE: LuceneTestCase: changing TieredMergePolicy.floorSegmentMB from " + + tmp.getFloorSegmentMB() + + " to " + + newValue + + " to avoid pathological merging"); } tmp.setFloorSegmentMB(newValue); } } else if (mp instanceof LogByteSizeMergePolicy) { LogByteSizeMergePolicy lmp = (LogByteSizeMergePolicy) mp; - if ((lmp.getMinMergeMB()*1024*1024) / estFlushSizeBytes > 10) { + if ((lmp.getMinMergeMB() * 1024 * 1024) / estFlushSizeBytes > 10) { double newValue = estFlushSizeBytes * 10.0 / 1024 / 1024; if (VERBOSE) { - System.out.println("NOTE: LuceneTestCase: changing LogByteSizeMergePolicy.minMergeMB from " + lmp.getMinMergeMB() + " to " + newValue + " to avoid pathological merging"); + System.out.println( + "NOTE: LuceneTestCase: changing LogByteSizeMergePolicy.minMergeMB from " + + lmp.getMinMergeMB() + + " to " + + newValue + + " to avoid pathological merging"); } lmp.setMinMergeMB(newValue); } } else if (mp instanceof LogDocMergePolicy) { LogDocMergePolicy lmp = (LogDocMergePolicy) mp; - assert estFlushSizeBytes / 1024 < Integer.MAX_VALUE/10; + assert estFlushSizeBytes / 1024 < Integer.MAX_VALUE / 10; int estFlushDocs = Math.max(1, (int) (estFlushSizeBytes / 
1024)); if (lmp.getMinMergeDocs() / estFlushDocs > 10) { int newValue = estFlushDocs * 10; if (VERBOSE) { - System.out.println("NOTE: LuceneTestCase: changing LogDocMergePolicy.minMergeDocs from " + lmp.getMinMergeDocs() + " to " + newValue + " to avoid pathological merging"); + System.out.println( + "NOTE: LuceneTestCase: changing LogDocMergePolicy.minMergeDocs from " + + lmp.getMinMergeDocs() + + " to " + + newValue + + " to avoid pathological merging"); } lmp.setMinMergeDocs(newValue); } @@ -1063,7 +1083,7 @@ public abstract class LuceneTestCase extends Assert { return new MockRandomMergePolicy(r); } else if (r.nextBoolean()) { return newTieredMergePolicy(r); - } else if (rarely(r) ) { + } else if (rarely(r)) { return newAlcoholicMergePolicy(r, classEnvRule.timeZone); } return newLogMergePolicy(r); @@ -1084,7 +1104,7 @@ public abstract class LuceneTestCase extends Assert { public static AlcoholicMergePolicy newAlcoholicMergePolicy() { return newAlcoholicMergePolicy(random(), classEnvRule.timeZone); } - + public static AlcoholicMergePolicy newAlcoholicMergePolicy(Random r, TimeZone tz) { return new AlcoholicMergePolicy(tz, new Random(r.nextLong())); } @@ -1100,14 +1120,14 @@ public abstract class LuceneTestCase extends Assert { configureRandom(r, logmp); return logmp; } - + private static void configureRandom(Random r, MergePolicy mergePolicy) { if (r.nextBoolean()) { - mergePolicy.setNoCFSRatio(0.1 + r.nextDouble()*0.8); + mergePolicy.setNoCFSRatio(0.1 + r.nextDouble() * 0.8); } else { mergePolicy.setNoCFSRatio(r.nextBoolean() ? 1.0 : 0.0); } - + if (rarely(r)) { mergePolicy.setMaxCFSSegmentSizeMB(0.2 + r.nextDouble() * 2.0); } else { @@ -1159,13 +1179,13 @@ public abstract class LuceneTestCase extends Assert { logmp.setMergeFactor(mergeFactor); return logmp; } - + // if you want it in LiveIndexWriterConfig: it must and will be tested here. public static void maybeChangeLiveIndexWriterConfig(Random r, LiveIndexWriterConfig c) { boolean didChange = false; String previous = c.toString(); - + if (rarely(r)) { // change flush parameters: // this is complicated because the api requires you "invoke setters in a magical order!" 
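
The "invoke setters in a magical order" note refers to an invariant enforced by IndexWriterConfig: at least one flush trigger (RAM size or document count) must stay enabled at all times, so the new trigger has to be switched on before the old one is switched off. A minimal sketch of the two legal orderings (setRAMBufferSizeMB, setMaxBufferedDocs and DISABLE_AUTO_FLUSH are the real Lucene API; the helper class itself is hypothetical):

import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LiveIndexWriterConfig;

final class FlushModeSwitcher {
  // Enable the new trigger first; disabling both triggers at once is rejected
  // by the config's setters with an IllegalArgumentException.
  static void flushByRam(LiveIndexWriterConfig c, double ramMB) {
    c.setRAMBufferSizeMB(ramMB); // RAM trigger on...
    c.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH); // ...then doc trigger off
  }

  static void flushByDocs(LiveIndexWriterConfig c, int maxDocs) {
    c.setMaxBufferedDocs(maxDocs); // doc-count trigger on...
    c.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH); // ...then RAM trigger off
  }
}

This is the same sequence the randomized switch in the following hunk applies for the BY_RAM and BY_DOCS flush modes.
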
@@ -1173,19 +1193,19 @@ public abstract class LuceneTestCase extends Assert { synchronized (c) { boolean flushByRAM; switch (liveIWCFlushMode) { - case BY_RAM: - flushByRAM = true; - break; - case BY_DOCS: - flushByRAM = false; - break; - case EITHER: - flushByRAM = r.nextBoolean(); - break; - default: - throw new AssertionError(); + case BY_RAM: + flushByRAM = true; + break; + case BY_DOCS: + flushByRAM = false; + break; + case EITHER: + flushByRAM = r.nextBoolean(); + break; + default: + throw new AssertionError(); } - if (flushByRAM) { + if (flushByRAM) { c.setRAMBufferSizeMB(TestUtil.nextInt(r, 1, 10)); c.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH); } else { @@ -1201,7 +1221,7 @@ public abstract class LuceneTestCase extends Assert { } didChange = true; } - + if (rarely(r)) { IndexWriter.IndexReaderWarmer curWarmer = c.getMergedSegmentWarmer(); if (curWarmer == null || curWarmer instanceof SimpleMergedSegmentWarmer) { @@ -1214,13 +1234,13 @@ public abstract class LuceneTestCase extends Assert { } didChange = true; } - + if (rarely(r)) { // change CFS flush parameters c.setUseCompoundFile(r.nextBoolean()); didChange = true; } - + if (rarely(r)) { // change CMS merge parameters MergeScheduler ms = c.getMergeScheduler(); @@ -1238,7 +1258,7 @@ public abstract class LuceneTestCase extends Assert { didChange = true; } } - + if (rarely(r)) { MergePolicy mp = c.getMergePolicy(); configureRandom(r, mp); @@ -1294,7 +1314,7 @@ public abstract class LuceneTestCase extends Assert { // but just in case of something ridiculous... diff.append(current.toString()); } - + // its possible to be empty, if we "change" a value to what it had before. if (diff.length() > 0) { System.out.println("NOTE: LuceneTestCase: randomly changed IWC's live settings:"); @@ -1304,14 +1324,12 @@ public abstract class LuceneTestCase extends Assert { } /** - * Returns a new Directory instance. Use this when the test does not - * care about the specific Directory implementation (most tests). - *
<p>
    - * The Directory is wrapped with {@link BaseDirectoryWrapper}. - * this means usually it will be picky, such as ensuring that you - * properly close it and all open files in your test. It will emulate - * some features of Windows, such as not allowing open files to be - * overwritten. + * Returns a new Directory instance. Use this when the test does not care about the specific + * Directory implementation (most tests). + * + *
<p>
    The Directory is wrapped with {@link BaseDirectoryWrapper}. this means usually it will be + * picky, such as ensuring that you properly close it and all open files in your test. It will + * emulate some features of Windows, such as not allowing open files to be overwritten. */ public static BaseDirectoryWrapper newDirectory() { return newDirectory(random()); @@ -1326,18 +1344,18 @@ public abstract class LuceneTestCase extends Assert { return newDirectory(random()); } } - + /** - * Returns a new Directory instance, using the specified random. - * See {@link #newDirectory()} for more information. + * Returns a new Directory instance, using the specified random. See {@link #newDirectory()} for + * more information. */ public static BaseDirectoryWrapper newDirectory(Random r) { return wrapDirectory(r, newDirectoryImpl(r, TEST_DIRECTORY), rarely(r), false); } /** - * Returns a new Directory instance, using the specified random. - * See {@link #newDirectory()} for more information. + * Returns a new Directory instance, using the specified random. See {@link #newDirectory()} for + * more information. */ public static BaseDirectoryWrapper newDirectory(Random r, LockFactory lf) { return wrapDirectory(r, newDirectoryImpl(r, TEST_DIRECTORY, lf), rarely(r), false); @@ -1348,11 +1366,13 @@ public abstract class LuceneTestCase extends Assert { } public static MockDirectoryWrapper newMockDirectory(Random r) { - return (MockDirectoryWrapper) wrapDirectory(r, newDirectoryImpl(r, TEST_DIRECTORY), false, false); + return (MockDirectoryWrapper) + wrapDirectory(r, newDirectoryImpl(r, TEST_DIRECTORY), false, false); } public static MockDirectoryWrapper newMockDirectory(Random r, LockFactory lf) { - return (MockDirectoryWrapper) wrapDirectory(r, newDirectoryImpl(r, TEST_DIRECTORY, lf), false, false); + return (MockDirectoryWrapper) + wrapDirectory(r, newDirectoryImpl(r, TEST_DIRECTORY, lf), false, false); } public static MockDirectoryWrapper newMockFSDirectory(Path f) { @@ -1373,9 +1393,8 @@ public abstract class LuceneTestCase extends Assert { } /** - * Returns a new Directory instance, with contents copied from the - * provided directory. See {@link #newDirectory()} for more - * information. + * Returns a new Directory instance, with contents copied from the provided directory. See {@link + * #newDirectory()} for more information. 
*/ public static BaseDirectoryWrapper newDirectory(Directory d) throws IOException { return newDirectory(random(), d); @@ -1402,7 +1421,7 @@ public abstract class LuceneTestCase extends Assert { private static BaseDirectoryWrapper newFSDirectory(Path f, LockFactory lf, boolean bare) { String fsdirClass = TEST_DIRECTORY; if (fsdirClass.equals("random")) { - fsdirClass = RandomPicks.randomFrom(random(), FS_DIRECTORIES); + fsdirClass = RandomPicks.randomFrom(random(), FS_DIRECTORIES); } Class clazz; @@ -1426,28 +1445,31 @@ public abstract class LuceneTestCase extends Assert { private static Directory newFileSwitchDirectory(Random random, Directory dir1, Directory dir2) { List fileExtensions = - Arrays.asList("fdt", "fdx", "tim", "tip", "si", "fnm", "pos", "dii", "dim", "nvm", "nvd", "dvm", "dvd"); + Arrays.asList( + "fdt", "fdx", "tim", "tip", "si", "fnm", "pos", "dii", "dim", "nvm", "nvd", "dvm", + "dvd"); Collections.shuffle(fileExtensions, random); fileExtensions = fileExtensions.subList(0, 1 + random.nextInt(fileExtensions.size())); return new FileSwitchDirectory(new HashSet<>(fileExtensions), dir1, dir2, true); } /** - * Returns a new Directory instance, using the specified random - * with contents copied from the provided directory. See - * {@link #newDirectory()} for more information. + * Returns a new Directory instance, using the specified random with contents copied from the + * provided directory. See {@link #newDirectory()} for more information. */ public static BaseDirectoryWrapper newDirectory(Random r, Directory d) throws IOException { Directory impl = newDirectoryImpl(r, TEST_DIRECTORY); for (String file : d.listAll()) { - if (file.startsWith(IndexFileNames.SEGMENTS) || IndexFileNames.CODEC_FILE_PATTERN.matcher(file).matches()) { + if (file.startsWith(IndexFileNames.SEGMENTS) + || IndexFileNames.CODEC_FILE_PATTERN.matcher(file).matches()) { impl.copyFrom(d, file, file, newIOContext(r)); } } return wrapDirectory(r, impl, rarely(r), false); } - - private static BaseDirectoryWrapper wrapDirectory(Random random, Directory directory, boolean bare, boolean filesystem) { + + private static BaseDirectoryWrapper wrapDirectory( + Random random, Directory directory, boolean bare, boolean filesystem) { // IOContext randomization might make NRTCachingDirectory make bad decisions, so avoid // using it if the user requested a filesystem directory. if (rarely(random) && !bare && filesystem == false) { @@ -1460,45 +1482,70 @@ public abstract class LuceneTestCase extends Assert { return base; } else { MockDirectoryWrapper mock = new MockDirectoryWrapper(random, directory); - + mock.setThrottling(TEST_THROTTLING); closeAfterSuite(new CloseableDirectory(mock, suiteFailureMarker)); return mock; } } - + public static Field newStringField(String name, String value, Store stored) { - return newField(random(), name, value, stored == Store.YES ? StringField.TYPE_STORED : StringField.TYPE_NOT_STORED); + return newField( + random(), + name, + value, + stored == Store.YES ? StringField.TYPE_STORED : StringField.TYPE_NOT_STORED); } public static Field newStringField(String name, BytesRef value, Store stored) { - return newField(random(), name, value, stored == Store.YES ? StringField.TYPE_STORED : StringField.TYPE_NOT_STORED); + return newField( + random(), + name, + value, + stored == Store.YES ? StringField.TYPE_STORED : StringField.TYPE_NOT_STORED); } public static Field newTextField(String name, String value, Store stored) { - return newField(random(), name, value, stored == Store.YES ? 
TextField.TYPE_STORED : TextField.TYPE_NOT_STORED); + return newField( + random(), + name, + value, + stored == Store.YES ? TextField.TYPE_STORED : TextField.TYPE_NOT_STORED); } - + public static Field newStringField(Random random, String name, String value, Store stored) { - return newField(random, name, value, stored == Store.YES ? StringField.TYPE_STORED : StringField.TYPE_NOT_STORED); + return newField( + random, + name, + value, + stored == Store.YES ? StringField.TYPE_STORED : StringField.TYPE_NOT_STORED); } public static Field newStringField(Random random, String name, BytesRef value, Store stored) { - return newField(random, name, value, stored == Store.YES ? StringField.TYPE_STORED : StringField.TYPE_NOT_STORED); + return newField( + random, + name, + value, + stored == Store.YES ? StringField.TYPE_STORED : StringField.TYPE_NOT_STORED); } - + public static Field newTextField(Random random, String name, String value, Store stored) { - return newField(random, name, value, stored == Store.YES ? TextField.TYPE_STORED : TextField.TYPE_NOT_STORED); + return newField( + random, + name, + value, + stored == Store.YES ? TextField.TYPE_STORED : TextField.TYPE_NOT_STORED); } - + public static Field newField(String name, String value, FieldType type) { return newField(random(), name, value, type); } - /** Returns a FieldType derived from newType but whose - * term vector options match the old type */ + /** Returns a FieldType derived from newType but whose term vector options match the old type */ private static FieldType mergeTermVectorOptions(FieldType newType, FieldType oldType) { - if (newType.indexOptions() != IndexOptions.NONE && oldType.storeTermVectors() == true && newType.storeTermVectors() == false) { + if (newType.indexOptions() != IndexOptions.NONE + && oldType.storeTermVectors() == true + && newType.storeTermVectors() == false) { newType = new FieldType(newType); newType.setStoreTermVectors(oldType.storeTermVectors()); newType.setStoreTermVectorPositions(oldType.storeTermVectorPositions()); @@ -1515,7 +1562,8 @@ public abstract class LuceneTestCase extends Assert { // write-once schema sort of helper class then we can // remove the sync here. 
We can also fold the random // "enable norms" (now commented out, below) into that: - public synchronized static Field newField(Random random, String name, Object value, FieldType type) { + public static synchronized Field newField( + Random random, String name, Object value, FieldType type) { // Defeat any consumers that illegally rely on intern'd // strings (we removed this from Lucene a while back): @@ -1549,14 +1597,14 @@ public abstract class LuceneTestCase extends Assert { newType.setStoreTermVectors(true); if (!newType.storeTermVectorPositions()) { newType.setStoreTermVectorPositions(random.nextBoolean()); - + if (newType.storeTermVectorPositions()) { if (!newType.storeTermVectorPayloads()) { newType.setStoreTermVectorPayloads(random.nextBoolean()); } } } - + if (!newType.storeTermVectorOffsets()) { newType.setStoreTermVectorOffsets(random.nextBoolean()); } @@ -1576,7 +1624,7 @@ public abstract class LuceneTestCase extends Assert { newType.setOmitNorms(random.nextBoolean()); } */ - + return createField(name, value, newType); } @@ -1589,23 +1637,27 @@ public abstract class LuceneTestCase extends Assert { throw new IllegalArgumentException("value must be String or BytesRef"); } } - - private static final String[] availableLanguageTags = Arrays.stream(Locale.getAvailableLocales()) - .map(Locale::toLanguageTag) - .sorted() - .distinct() - .toArray(String[]::new); - /** + private static final String[] availableLanguageTags = + Arrays.stream(Locale.getAvailableLocales()) + .map(Locale::toLanguageTag) + .sorted() + .distinct() + .toArray(String[]::new); + + /** * Return a random Locale from the available locales on the system. + * * @see LUCENE-4020 */ public static Locale randomLocale(Random random) { - return localeForLanguageTag(availableLanguageTags[random.nextInt(availableLanguageTags.length)]); + return localeForLanguageTag( + availableLanguageTags[random.nextInt(availableLanguageTags.length)]); } - /** + /** * Return a random TimeZone from the available timezones on the system + * * @see LUCENE-4020 */ public static TimeZone randomTimeZone(Random random) { @@ -1618,7 +1670,8 @@ public abstract class LuceneTestCase extends Assert { return new Locale.Builder().setLanguageTag(languageTag).build(); } - private static Directory newFSDirectoryImpl(Class clazz, Path path, LockFactory lf) throws IOException { + private static Directory newFSDirectoryImpl( + Class clazz, Path path, LockFactory lf) throws IOException { FSDirectory d = null; try { d = CommandLineUtil.newFSDirectory(clazz, path, lf); @@ -1631,19 +1684,22 @@ public abstract class LuceneTestCase extends Assert { static Directory newDirectoryImpl(Random random, String clazzName) { return newDirectoryImpl(random, clazzName, FSLockFactory.getDefault()); } - + static Directory newDirectoryImpl(Random random, String clazzName, LockFactory lf) { if (clazzName.equals("random")) { if (rarely(random)) { clazzName = RandomPicks.randomFrom(random, CORE_DIRECTORIES); } else if (rarely(random)) { - String clazzName1 = rarely(random) - ? RandomPicks.randomFrom(random, CORE_DIRECTORIES) - : ByteBuffersDirectory.class.getName(); - String clazzName2 = rarely(random) - ? RandomPicks.randomFrom(random, CORE_DIRECTORIES) - : ByteBuffersDirectory.class.getName(); - return newFileSwitchDirectory(random, + String clazzName1 = + rarely(random) + ? RandomPicks.randomFrom(random, CORE_DIRECTORIES) + : ByteBuffersDirectory.class.getName(); + String clazzName2 = + rarely(random) + ? 
RandomPicks.randomFrom(random, CORE_DIRECTORIES) + : ByteBuffersDirectory.class.getName(); + return newFileSwitchDirectory( + random, newDirectoryImpl(random, clazzName1, lf), newDirectoryImpl(random, clazzName2, lf)); } else { @@ -1662,14 +1718,16 @@ public abstract class LuceneTestCase extends Assert { // See if it has a Path/LockFactory ctor even though it's not an // FSDir subclass: try { - Constructor pathCtor = clazz.getConstructor(Path.class, LockFactory.class); + Constructor pathCtor = + clazz.getConstructor(Path.class, LockFactory.class); final Path dir = createTempDir("index"); return pathCtor.newInstance(dir, lf); } catch (NoSuchMethodException nsme) { // Ignore } - - // the remaining dirs are no longer filesystem based, so we must check that the passedLockFactory is not file based: + + // the remaining dirs are no longer filesystem based, so we must check that the + // passedLockFactory is not file based: if (!(lf instanceof FSLockFactory)) { // try ctor with only LockFactory try { @@ -1689,96 +1747,109 @@ public abstract class LuceneTestCase extends Assert { public static IndexReader wrapReader(IndexReader r) throws IOException { Random random = random(); - - for (int i = 0, c = random.nextInt(6)+1; i < c; i++) { - switch(random.nextInt(5)) { - case 0: - // will create no FC insanity in atomic case, as ParallelLeafReader has own cache key: - if (VERBOSE) { - System.out.println("NOTE: LuceneTestCase.wrapReader: wrapping previous reader=" + r + " with ParallelLeaf/CompositeReader"); - } - r = (r instanceof LeafReader) ? - new ParallelLeafReader((LeafReader) r) : - new ParallelCompositeReader((CompositeReader) r); - break; - case 1: - if (r instanceof LeafReader) { - final LeafReader ar = (LeafReader) r; - final List allFields = new ArrayList<>(); - for (FieldInfo fi : ar.getFieldInfos()) { - allFields.add(fi.name); - } - Collections.shuffle(allFields, random); - final int end = allFields.isEmpty() ? 
0 : random.nextInt(allFields.size()); - final Set fields = new HashSet<>(allFields.subList(0, end)); - // will create no FC insanity as ParallelLeafReader has own cache key: + + for (int i = 0, c = random.nextInt(6) + 1; i < c; i++) { + switch (random.nextInt(5)) { + case 0: + // will create no FC insanity in atomic case, as ParallelLeafReader has own cache key: if (VERBOSE) { - System.out.println("NOTE: LuceneTestCase.wrapReader: wrapping previous reader=" + r + " with ParallelLeafReader"); + System.out.println( + "NOTE: LuceneTestCase.wrapReader: wrapping previous reader=" + + r + + " with ParallelLeaf/CompositeReader"); } - r = new ParallelLeafReader( - new FieldFilterLeafReader(ar, fields, false), - new FieldFilterLeafReader(ar, fields, true) - ); - } - break; - case 2: - // Häckidy-Hick-Hack: a standard Reader will cause FC insanity, so we use - // QueryUtils' reader with a fake cache key, so insanity checker cannot walk - // along our reader: - if (VERBOSE) { - System.out.println("NOTE: LuceneTestCase.wrapReader: wrapping previous reader=" + r + " with AssertingLeaf/DirectoryReader"); - } - if (r instanceof LeafReader) { - r = new AssertingLeafReader((LeafReader)r); - } else if (r instanceof DirectoryReader) { - r = new AssertingDirectoryReader((DirectoryReader)r); - } - break; - case 3: - if (VERBOSE) { - System.out.println("NOTE: LuceneTestCase.wrapReader: wrapping previous reader=" + r + " with MismatchedLeaf/DirectoryReader"); - } - if (r instanceof LeafReader) { - r = new MismatchedLeafReader((LeafReader)r, random); - } else if (r instanceof DirectoryReader) { - r = new MismatchedDirectoryReader((DirectoryReader)r, random); - } - break; - case 4: - if (VERBOSE) { - System.out.println("NOTE: LuceneTestCase.wrapReader: wrapping previous reader=" + r + " with MergingCodecReader"); - } - if (r instanceof CodecReader) { - r = new MergingCodecReader((CodecReader) r); - } else if (r instanceof DirectoryReader) { - boolean allLeavesAreCodecReaders = true; - for (LeafReaderContext ctx : r.leaves()) { - if (ctx.reader() instanceof CodecReader == false) { - allLeavesAreCodecReaders = false; - break; + r = + (r instanceof LeafReader) + ? new ParallelLeafReader((LeafReader) r) + : new ParallelCompositeReader((CompositeReader) r); + break; + case 1: + if (r instanceof LeafReader) { + final LeafReader ar = (LeafReader) r; + final List allFields = new ArrayList<>(); + for (FieldInfo fi : ar.getFieldInfos()) { + allFields.add(fi.name); + } + Collections.shuffle(allFields, random); + final int end = allFields.isEmpty() ? 
0 : random.nextInt(allFields.size()); + final Set fields = new HashSet<>(allFields.subList(0, end)); + // will create no FC insanity as ParallelLeafReader has own cache key: + if (VERBOSE) { + System.out.println( + "NOTE: LuceneTestCase.wrapReader: wrapping previous reader=" + + r + + " with ParallelLeafReader"); + } + r = + new ParallelLeafReader( + new FieldFilterLeafReader(ar, fields, false), + new FieldFilterLeafReader(ar, fields, true)); + } + break; + case 2: + // Häckidy-Hick-Hack: a standard Reader will cause FC insanity, so we use + // QueryUtils' reader with a fake cache key, so insanity checker cannot walk + // along our reader: + if (VERBOSE) { + System.out.println( + "NOTE: LuceneTestCase.wrapReader: wrapping previous reader=" + + r + + " with AssertingLeaf/DirectoryReader"); + } + if (r instanceof LeafReader) { + r = new AssertingLeafReader((LeafReader) r); + } else if (r instanceof DirectoryReader) { + r = new AssertingDirectoryReader((DirectoryReader) r); + } + break; + case 3: + if (VERBOSE) { + System.out.println( + "NOTE: LuceneTestCase.wrapReader: wrapping previous reader=" + + r + + " with MismatchedLeaf/DirectoryReader"); + } + if (r instanceof LeafReader) { + r = new MismatchedLeafReader((LeafReader) r, random); + } else if (r instanceof DirectoryReader) { + r = new MismatchedDirectoryReader((DirectoryReader) r, random); + } + break; + case 4: + if (VERBOSE) { + System.out.println( + "NOTE: LuceneTestCase.wrapReader: wrapping previous reader=" + + r + + " with MergingCodecReader"); + } + if (r instanceof CodecReader) { + r = new MergingCodecReader((CodecReader) r); + } else if (r instanceof DirectoryReader) { + boolean allLeavesAreCodecReaders = true; + for (LeafReaderContext ctx : r.leaves()) { + if (ctx.reader() instanceof CodecReader == false) { + allLeavesAreCodecReaders = false; + break; + } + } + if (allLeavesAreCodecReaders) { + r = new MergingDirectoryReaderWrapper((DirectoryReader) r); } } - if (allLeavesAreCodecReaders) { - r = new MergingDirectoryReaderWrapper((DirectoryReader) r); - } - } - break; - default: - fail("should not get here"); + break; + default: + fail("should not get here"); } } if (VERBOSE) { - System.out.println("wrapReader wrapped: " +r); + System.out.println("wrapReader wrapped: " + r); } return r; } - - /** - * Sometimes wrap the IndexReader as slow, parallel or filter reader (or - * combinations of that) - */ + + /** Sometimes wrap the IndexReader as slow, parallel or filter reader (or combinations of that) */ public static IndexReader maybeWrapReader(IndexReader r) throws IOException { if (rarely()) { r = wrapReader(r); @@ -1798,39 +1869,46 @@ public abstract class LuceneTestCase extends Assert { if (oldContext.flushInfo != null) { // Always return at least the estimatedSegmentSize of // the incoming IOContext: - return new IOContext(new FlushInfo(randomNumDocs, Math.max(oldContext.flushInfo.estimatedSegmentSize, size))); + return new IOContext( + new FlushInfo(randomNumDocs, Math.max(oldContext.flushInfo.estimatedSegmentSize, size))); } else if (oldContext.mergeInfo != null) { // Always return at least the estimatedMergeBytes of // the incoming IOContext: - return new IOContext(new MergeInfo(randomNumDocs, Math.max(oldContext.mergeInfo.estimatedMergeBytes, size), random.nextBoolean(), TestUtil.nextInt(random, 1, 100))); + return new IOContext( + new MergeInfo( + randomNumDocs, + Math.max(oldContext.mergeInfo.estimatedMergeBytes, size), + random.nextBoolean(), + TestUtil.nextInt(random, 1, 100))); } else { // Make a totally random 
IOContext: final IOContext context; switch (random.nextInt(5)) { - case 0: - context = IOContext.DEFAULT; - break; - case 1: - context = IOContext.READ; - break; - case 2: - context = IOContext.READONCE; - break; - case 3: - context = new IOContext(new MergeInfo(randomNumDocs, size, true, -1)); - break; - case 4: - context = new IOContext(new FlushInfo(randomNumDocs, size)); - break; - default: - context = IOContext.DEFAULT; + case 0: + context = IOContext.DEFAULT; + break; + case 1: + context = IOContext.READ; + break; + case 2: + context = IOContext.READONCE; + break; + case 3: + context = new IOContext(new MergeInfo(randomNumDocs, size, true, -1)); + break; + case 4: + context = new IOContext(new FlushInfo(randomNumDocs, size)); + break; + default: + context = IOContext.DEFAULT; } return context; } } private static final QueryCache DEFAULT_QUERY_CACHE = IndexSearcher.getDefaultQueryCache(); - private static final QueryCachingPolicy DEFAULT_CACHING_POLICY = IndexSearcher.getDefaultQueryCachingPolicy(); + private static final QueryCachingPolicy DEFAULT_CACHING_POLICY = + IndexSearcher.getDefaultQueryCachingPolicy(); @Before public void overrideTestDefaultQueryCache() { @@ -1842,7 +1920,8 @@ public abstract class LuceneTestCase extends Assert { public static void overrideDefaultQueryCache() { // we need to reset the query cache in an @BeforeClass so that tests that // instantiate an IndexSearcher in an @BeforeClass method use a fresh new cache - IndexSearcher.setDefaultQueryCache(new LRUQueryCache(10000, 1 << 25, context -> true, Float.POSITIVE_INFINITY)); + IndexSearcher.setDefaultQueryCache( + new LRUQueryCache(10000, 1 << 25, context -> true, Float.POSITIVE_INFINITY)); IndexSearcher.setDefaultQueryCachingPolicy(MAYBE_CACHE_POLICY); } @@ -1857,7 +1936,8 @@ public abstract class LuceneTestCase extends Assert { // Randomize core count so CMS varies its dynamic defaults, and this also "fixes" core // count from the master seed so it will always be the same on reproduce: int numCores = TestUtil.nextInt(random(), 1, 4); - System.setProperty(ConcurrentMergeScheduler.DEFAULT_CPU_CORE_COUNT_PROPERTY, Integer.toString(numCores)); + System.setProperty( + ConcurrentMergeScheduler.DEFAULT_CPU_CORE_COUNT_PROPERTY, Integer.toString(numCores)); } @AfterClass @@ -1865,30 +1945,24 @@ public abstract class LuceneTestCase extends Assert { System.clearProperty(ConcurrentMergeScheduler.DEFAULT_CPU_CORE_COUNT_PROPERTY); } - /** - * Create a new searcher over the reader. This searcher might randomly use - * threads. - */ + /** Create a new searcher over the reader. This searcher might randomly use threads. */ public static IndexSearcher newSearcher(IndexReader r) { return newSearcher(r, true); } - /** - * Create a new searcher over the reader. This searcher might randomly use - * threads. - */ + /** Create a new searcher over the reader. This searcher might randomly use threads. */ public static IndexSearcher newSearcher(IndexReader r, boolean maybeWrap) { return newSearcher(r, maybeWrap, true); } /** - * Create a new searcher over the reader. This searcher might randomly use - * threads. if maybeWrap is true, this searcher might wrap the - * reader with one that returns null for getSequentialSubReaders. If - * wrapWithAssertions is true, this searcher might be an + * Create a new searcher over the reader. This searcher might randomly use threads. if + * maybeWrap is true, this searcher might wrap the reader with one that returns null for + * getSequentialSubReaders. 
If wrapWithAssertions is true, this searcher might be an * {@link AssertingIndexSearcher} instance. */ - public static IndexSearcher newSearcher(IndexReader r, boolean maybeWrap, boolean wrapWithAssertions) { + public static IndexSearcher newSearcher( + IndexReader r, boolean maybeWrap, boolean wrapWithAssertions) { Random random = random(); if (usually()) { if (maybeWrap) { @@ -1898,8 +1972,9 @@ public abstract class LuceneTestCase extends Assert { Rethrow.rethrow(e); } } - // TODO: this whole check is a coverage hack, we should move it to tests for various filterreaders. - // ultimately whatever you do will be checkIndex'd at the end anyway. + // TODO: this whole check is a coverage hack, we should move it to tests for various + // filterreaders. + // ultimately whatever you do will be checkIndex'd at the end anyway. if (random.nextInt(500) == 0 && r instanceof LeafReader) { // TODO: not useful to check DirectoryReader (redundant with checkindex) // but maybe sometimes run this on the other crazy readers maybeWrapReader creates? @@ -1911,7 +1986,10 @@ public abstract class LuceneTestCase extends Assert { } final IndexSearcher ret; if (wrapWithAssertions) { - ret = random.nextBoolean() ? new AssertingIndexSearcher(random, r) : new AssertingIndexSearcher(random, r.getContext()); + ret = + random.nextBoolean() + ? new AssertingIndexSearcher(random, r) + : new AssertingIndexSearcher(random, r.getContext()); } else { ret = random.nextBoolean() ? new IndexSearcher(r) : new IndexSearcher(r.getContext()); } @@ -1924,36 +2002,44 @@ public abstract class LuceneTestCase extends Assert { ex = null; } else { threads = TestUtil.nextInt(random, 1, 8); - ex = new ThreadPoolExecutor(threads, threads, 0L, TimeUnit.MILLISECONDS, - new LinkedBlockingQueue(), - new NamedThreadFactory("LuceneTestCase")); + ex = + new ThreadPoolExecutor( + threads, + threads, + 0L, + TimeUnit.MILLISECONDS, + new LinkedBlockingQueue(), + new NamedThreadFactory("LuceneTestCase")); // uncomment to intensify LUCENE-3840 // ex.prestartAllCoreThreads(); } if (ex != null) { - if (VERBOSE) { - System.out.println("NOTE: newSearcher using ExecutorService with " + threads + " threads"); - } - r.getReaderCacheHelper().addClosedListener(cacheKey -> TestUtil.shutdownExecutorService(ex)); + if (VERBOSE) { + System.out.println( + "NOTE: newSearcher using ExecutorService with " + threads + " threads"); + } + r.getReaderCacheHelper() + .addClosedListener(cacheKey -> TestUtil.shutdownExecutorService(ex)); } IndexSearcher ret; if (wrapWithAssertions) { - ret = random.nextBoolean() - ? new AssertingIndexSearcher(random, r, ex) - : new AssertingIndexSearcher(random, r.getContext(), ex); + ret = + random.nextBoolean() + ? new AssertingIndexSearcher(random, r, ex) + : new AssertingIndexSearcher(random, r.getContext(), ex); } else if (random.nextBoolean()) { int maxDocPerSlice = 1 + random.nextInt(100000); int maxSegmentsPerSlice = 1 + random.nextInt(20); - ret = new IndexSearcher(r, ex) { - @Override - protected LeafSlice[] slices(List leaves) { - return slices(leaves, maxDocPerSlice, maxSegmentsPerSlice); - } - }; + ret = + new IndexSearcher(r, ex) { + @Override + protected LeafSlice[] slices(List leaves) { + return slices(leaves, maxDocPerSlice, maxSegmentsPerSlice); + } + }; } else { - ret = random.nextBoolean() - ? new IndexSearcher(r, ex) - : new IndexSearcher(r.getContext(), ex); + ret = + random.nextBoolean() ? 
new IndexSearcher(r, ex) : new IndexSearcher(r.getContext(), ex); } ret.setSimilarity(classEnvRule.similarity); ret.setQueryCachingPolicy(MAYBE_CACHE_POLICY); @@ -1962,9 +2048,8 @@ public abstract class LuceneTestCase extends Assert { } /** - * Gets a resource from the test's classpath as {@link Path}. This method should only - * be used, if a real file is needed. To get a stream, code should prefer - * {@link #getDataInputStream(String)}. + * Gets a resource from the test's classpath as {@link Path}. This method should only be used, if + * a real file is needed. To get a stream, code should prefer {@link #getDataInputStream(String)}. */ protected Path getDataPath(String name) throws IOException { try { @@ -1974,9 +2059,7 @@ public abstract class LuceneTestCase extends Assert { } } - /** - * Gets a resource from the test's classpath as {@link InputStream}. - */ + /** Gets a resource from the test's classpath as {@link InputStream}. */ protected InputStream getDataInputStream(String name) throws IOException { InputStream in = this.getClass().getResourceAsStream(name); if (in == null) { @@ -1985,7 +2068,8 @@ public abstract class LuceneTestCase extends Assert { return in; } - public void assertReaderEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException { + public void assertReaderEquals(String info, IndexReader leftReader, IndexReader rightReader) + throws IOException { assertReaderStatisticsEquals(info, leftReader, rightReader); assertTermsEquals(info, leftReader, rightReader, true); assertNormsEquals(info, leftReader, rightReader); @@ -1997,10 +2081,9 @@ public abstract class LuceneTestCase extends Assert { assertPointsEquals(info, leftReader, rightReader); } - /** - * checks that reader-level statistics are the same - */ - public void assertReaderStatisticsEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException { + /** checks that reader-level statistics are the same */ + public void assertReaderStatisticsEquals( + String info, IndexReader leftReader, IndexReader rightReader) throws IOException { // Somewhat redundant: we never delete docs assertEquals(info, leftReader.maxDoc(), rightReader.maxDoc()); assertEquals(info, leftReader.numDocs(), rightReader.numDocs()); @@ -2008,23 +2091,28 @@ public abstract class LuceneTestCase extends Assert { assertEquals(info, leftReader.hasDeletions(), rightReader.hasDeletions()); } - /** - * Fields api equivalency - */ - public void assertTermsEquals(String info, IndexReader leftReader, IndexReader rightReader, boolean deep) throws IOException { + /** Fields api equivalency */ + public void assertTermsEquals( + String info, IndexReader leftReader, IndexReader rightReader, boolean deep) + throws IOException { Set leftFields = new HashSet<>(FieldInfos.getIndexedFields(leftReader)); Set rightFields = new HashSet<>(FieldInfos.getIndexedFields(rightReader)); assertEquals(info, leftFields, rightFields); for (String field : leftFields) { - assertTermsEquals(info, leftReader, MultiTerms.getTerms(leftReader, field), MultiTerms.getTerms(rightReader, field), deep); + assertTermsEquals( + info, + leftReader, + MultiTerms.getTerms(leftReader, field), + MultiTerms.getTerms(rightReader, field), + deep); } } - /** - * Terms api equivalency - */ - public void assertTermsEquals(String info, IndexReader leftReader, Terms leftTerms, Terms rightTerms, boolean deep) throws IOException { + /** Terms api equivalency */ + public void assertTermsEquals( + String info, IndexReader leftReader, Terms leftTerms, Terms 
rightTerms, boolean deep) + throws IOException { if (leftTerms == null || rightTerms == null) { assertNull(info, leftTerms); assertNull(info, rightTerms); @@ -2038,14 +2126,15 @@ public abstract class LuceneTestCase extends Assert { TermsEnum leftTermsEnum = leftTerms.iterator(); TermsEnum rightTermsEnum = rightTerms.iterator(); assertTermsEnumEquals(info, leftReader, leftTermsEnum, rightTermsEnum, true); - + assertTermsSeekingEquals(info, leftTerms, rightTerms); - + if (deep) { int numIntersections = atLeast(3); for (int i = 0; i < numIntersections; i++) { String re = AutomatonTestUtil.randomRegexp(random()); - CompiledAutomaton automaton = new CompiledAutomaton(new RegExp(re, RegExp.NONE).toAutomaton()); + CompiledAutomaton automaton = + new CompiledAutomaton(new RegExp(re, RegExp.NONE).toAutomaton()); if (automaton.type == CompiledAutomaton.AUTOMATON_TYPE.NORMAL) { // TODO: test start term too TermsEnum leftIntersection = leftTerms.intersect(automaton, null); @@ -2056,10 +2145,9 @@ public abstract class LuceneTestCase extends Assert { } } - /** - * checks collection-level statistics on Terms - */ - public void assertTermsStatisticsEquals(String info, Terms leftTerms, Terms rightTerms) throws IOException { + /** checks collection-level statistics on Terms */ + public void assertTermsStatisticsEquals(String info, Terms leftTerms, Terms rightTerms) + throws IOException { assertEquals(info, leftTerms.getDocCount(), rightTerms.getDocCount()); assertEquals(info, leftTerms.getSumDocFreq(), rightTerms.getSumDocFreq()); assertEquals(info, leftTerms.getSumTotalTermFreq(), rightTerms.getSumTotalTermFreq()); @@ -2070,16 +2158,16 @@ public abstract class LuceneTestCase extends Assert { private static class RandomBits implements Bits { FixedBitSet bits; - + RandomBits(int maxDoc, double pctLive, Random random) { bits = new FixedBitSet(maxDoc); for (int i = 0; i < maxDoc; i++) { - if (random.nextDouble() <= pctLive) { + if (random.nextDouble() <= pctLive) { bits.set(i); } } } - + @Override public boolean get(int index) { return bits.get(index); @@ -2091,49 +2179,67 @@ public abstract class LuceneTestCase extends Assert { } } - /** - * checks the terms enum sequentially - * if deep is false, it does a 'shallow' test that doesnt go down to the docsenums + /** + * checks the terms enum sequentially if deep is false, it does a 'shallow' test that doesnt go + * down to the docsenums */ - public void assertTermsEnumEquals(String info, IndexReader leftReader, TermsEnum leftTermsEnum, TermsEnum rightTermsEnum, boolean deep) throws IOException { + public void assertTermsEnumEquals( + String info, + IndexReader leftReader, + TermsEnum leftTermsEnum, + TermsEnum rightTermsEnum, + boolean deep) + throws IOException { BytesRef term; PostingsEnum leftPositions = null; PostingsEnum rightPositions = null; PostingsEnum leftDocs = null; PostingsEnum rightDocs = null; - + while ((term = leftTermsEnum.next()) != null) { assertEquals(info, term, rightTermsEnum.next()); assertTermStatsEquals(info, leftTermsEnum, rightTermsEnum); if (deep) { - assertDocsAndPositionsEnumEquals(info, leftPositions = leftTermsEnum.postings(leftPositions, PostingsEnum.ALL), - rightPositions = rightTermsEnum.postings(rightPositions, PostingsEnum.ALL)); - - assertPositionsSkippingEquals(info, leftReader, leftTermsEnum.docFreq(), - leftPositions = leftTermsEnum.postings(leftPositions, PostingsEnum.ALL), - rightPositions = rightTermsEnum.postings(rightPositions, PostingsEnum.ALL)); + assertDocsAndPositionsEnumEquals( + info, + leftPositions = 
leftTermsEnum.postings(leftPositions, PostingsEnum.ALL), + rightPositions = rightTermsEnum.postings(rightPositions, PostingsEnum.ALL)); + assertPositionsSkippingEquals( + info, + leftReader, + leftTermsEnum.docFreq(), + leftPositions = leftTermsEnum.postings(leftPositions, PostingsEnum.ALL), + rightPositions = rightTermsEnum.postings(rightPositions, PostingsEnum.ALL)); // with freqs: - assertDocsEnumEquals(info, leftDocs = leftTermsEnum.postings(leftDocs), - rightDocs = rightTermsEnum.postings(rightDocs), - true); - - - // w/o freqs: - assertDocsEnumEquals(info, leftDocs = leftTermsEnum.postings(leftDocs, PostingsEnum.NONE), - rightDocs = rightTermsEnum.postings(rightDocs, PostingsEnum.NONE), - false); - - - // with freqs: - assertDocsSkippingEquals(info, leftReader, leftTermsEnum.docFreq(), + assertDocsEnumEquals( + info, leftDocs = leftTermsEnum.postings(leftDocs), rightDocs = rightTermsEnum.postings(rightDocs), true); // w/o freqs: - assertDocsSkippingEquals(info, leftReader, leftTermsEnum.docFreq(), + assertDocsEnumEquals( + info, + leftDocs = leftTermsEnum.postings(leftDocs, PostingsEnum.NONE), + rightDocs = rightTermsEnum.postings(rightDocs, PostingsEnum.NONE), + false); + + // with freqs: + assertDocsSkippingEquals( + info, + leftReader, + leftTermsEnum.docFreq(), + leftDocs = leftTermsEnum.postings(leftDocs), + rightDocs = rightTermsEnum.postings(rightDocs), + true); + + // w/o freqs: + assertDocsSkippingEquals( + info, + leftReader, + leftTermsEnum.docFreq(), leftDocs = leftTermsEnum.postings(leftDocs, PostingsEnum.NONE), rightDocs = rightTermsEnum.postings(rightDocs, PostingsEnum.NONE), false); @@ -2142,11 +2248,9 @@ public abstract class LuceneTestCase extends Assert { assertNull(info, rightTermsEnum.next()); } - - /** - * checks docs + freqs + positions + payloads, sequentially - */ - public void assertDocsAndPositionsEnumEquals(String info, PostingsEnum leftDocs, PostingsEnum rightDocs) throws IOException { + /** checks docs + freqs + positions + payloads, sequentially */ + public void assertDocsAndPositionsEnumEquals( + String info, PostingsEnum leftDocs, PostingsEnum rightDocs) throws IOException { assertNotNull(leftDocs); assertNotNull(rightDocs); assertEquals(info, -1, leftDocs.docID()); @@ -2165,11 +2269,11 @@ public abstract class LuceneTestCase extends Assert { } assertEquals(info, DocIdSetIterator.NO_MORE_DOCS, rightDocs.nextDoc()); } - - /** - * checks docs + freqs, sequentially - */ - public void assertDocsEnumEquals(String info, PostingsEnum leftDocs, PostingsEnum rightDocs, boolean hasFreqs) throws IOException { + + /** checks docs + freqs, sequentially */ + public void assertDocsEnumEquals( + String info, PostingsEnum leftDocs, PostingsEnum rightDocs, boolean hasFreqs) + throws IOException { if (leftDocs == null) { assertNull(rightDocs); return; @@ -2185,17 +2289,22 @@ public abstract class LuceneTestCase extends Assert { } assertEquals(info, DocIdSetIterator.NO_MORE_DOCS, rightDocs.nextDoc()); } - - /** - * checks advancing docs - */ - public void assertDocsSkippingEquals(String info, IndexReader leftReader, int docFreq, PostingsEnum leftDocs, PostingsEnum rightDocs, boolean hasFreqs) throws IOException { + + /** checks advancing docs */ + public void assertDocsSkippingEquals( + String info, + IndexReader leftReader, + int docFreq, + PostingsEnum leftDocs, + PostingsEnum rightDocs, + boolean hasFreqs) + throws IOException { if (leftDocs == null) { assertNull(rightDocs); return; } int docid = -1; - int averageGap = leftReader.maxDoc() / (1+docFreq); + int 
averageGap = leftReader.maxDoc() / (1 + docFreq); int skipInterval = 16; while (true) { @@ -2205,11 +2314,12 @@ public abstract class LuceneTestCase extends Assert { assertEquals(info, docid, rightDocs.nextDoc()); } else { // advance() - int skip = docid + (int) Math.ceil(Math.abs(skipInterval + random().nextGaussian() * averageGap)); + int skip = + docid + (int) Math.ceil(Math.abs(skipInterval + random().nextGaussian() * averageGap)); docid = leftDocs.advance(skip); assertEquals(info, docid, rightDocs.advance(skip)); } - + if (docid == DocIdSetIterator.NO_MORE_DOCS) { return; } @@ -2218,19 +2328,23 @@ public abstract class LuceneTestCase extends Assert { } } } - - /** - * checks advancing docs + positions - */ - public void assertPositionsSkippingEquals(String info, IndexReader leftReader, int docFreq, PostingsEnum leftDocs, PostingsEnum rightDocs) throws IOException { + + /** checks advancing docs + positions */ + public void assertPositionsSkippingEquals( + String info, + IndexReader leftReader, + int docFreq, + PostingsEnum leftDocs, + PostingsEnum rightDocs) + throws IOException { if (leftDocs == null || rightDocs == null) { assertNull(leftDocs); assertNull(rightDocs); return; } - + int docid = -1; - int averageGap = leftReader.maxDoc() / (1+docFreq); + int averageGap = leftReader.maxDoc() / (1 + docFreq); int skipInterval = 16; while (true) { @@ -2240,11 +2354,12 @@ public abstract class LuceneTestCase extends Assert { assertEquals(info, docid, rightDocs.nextDoc()); } else { // advance() - int skip = docid + (int) Math.ceil(Math.abs(skipInterval + random().nextGaussian() * averageGap)); + int skip = + docid + (int) Math.ceil(Math.abs(skipInterval + random().nextGaussian() * averageGap)); docid = leftDocs.advance(skip); assertEquals(info, docid, rightDocs.advance(skip)); } - + if (docid == DocIdSetIterator.NO_MORE_DOCS) { return; } @@ -2257,8 +2372,8 @@ public abstract class LuceneTestCase extends Assert { } } - - private void assertTermsSeekingEquals(String info, Terms leftTerms, Terms rightTerms) throws IOException { + private void assertTermsSeekingEquals(String info, Terms leftTerms, Terms rightTerms) + throws IOException { // just an upper bound int numTests = atLeast(20); @@ -2286,7 +2401,7 @@ public abstract class LuceneTestCase extends Assert { } } else if (code == 2) { // term, but ensure a non-zero offset - byte newbytes[] = new byte[term.length+5]; + byte newbytes[] = new byte[term.length + 5]; System.arraycopy(term.bytes, term.offset, newbytes, 5, term.length); tests.add(new BytesRef(newbytes, 5, term.length)); } else if (code == 3) { @@ -2335,23 +2450,21 @@ public abstract class LuceneTestCase extends Assert { } } } - - /** - * checks term-level statistics - */ - public void assertTermStatsEquals(String info, TermsEnum leftTermsEnum, TermsEnum rightTermsEnum) throws IOException { + + /** checks term-level statistics */ + public void assertTermStatsEquals(String info, TermsEnum leftTermsEnum, TermsEnum rightTermsEnum) + throws IOException { assertEquals(info, leftTermsEnum.docFreq(), rightTermsEnum.docFreq()); assertEquals(info, leftTermsEnum.totalTermFreq(), rightTermsEnum.totalTermFreq()); } - - /** - * checks that norms are the same across all fields - */ - public void assertNormsEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException { + + /** checks that norms are the same across all fields */ + public void assertNormsEquals(String info, IndexReader leftReader, IndexReader rightReader) + throws IOException { Set leftFields = new 
HashSet<>(FieldInfos.getIndexedFields(leftReader)); Set rightFields = new HashSet<>(FieldInfos.getIndexedFields(rightReader)); assertEquals(info, leftFields, rightFields); - + for (String field : leftFields) { NumericDocValues leftNorms = MultiDocValues.getNormValues(leftReader, field); NumericDocValues rightNorms = MultiDocValues.getNormValues(rightReader, field); @@ -2363,27 +2476,27 @@ public abstract class LuceneTestCase extends Assert { } } } - - /** - * checks that stored fields of all documents are the same - */ - public void assertStoredFieldsEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException { + + /** checks that stored fields of all documents are the same */ + public void assertStoredFieldsEquals(String info, IndexReader leftReader, IndexReader rightReader) + throws IOException { assert leftReader.maxDoc() == rightReader.maxDoc(); for (int i = 0; i < leftReader.maxDoc(); i++) { Document leftDoc = leftReader.document(i); Document rightDoc = rightReader.document(i); - + // TODO: I think this is bogus because we don't document what the order should be // from these iterators, etc. I think the codec/IndexReader should be free to order this stuff // in whatever way it wants (e.g. maybe it packs related fields together or something) // To fix this, we sort the fields in both documents by name, but // we still assume that all instances with same name are in order: - Comparator comp = new Comparator() { - @Override - public int compare(IndexableField arg0, IndexableField arg1) { - return arg0.name().compareTo(arg1.name()); - } - }; + Comparator comp = + new Comparator() { + @Override + public int compare(IndexableField arg0, IndexableField arg1) { + return arg0.name().compareTo(arg1.name()); + } + }; List leftFields = new ArrayList<>(leftDoc.getFields()); List rightFields = new ArrayList<>(rightDoc.getFields()); Collections.sort(leftFields, comp); @@ -2398,22 +2511,20 @@ public abstract class LuceneTestCase extends Assert { assertFalse(info, rightIterator.hasNext()); } } - - /** - * checks that two stored fields are equivalent - */ - public void assertStoredFieldEquals(String info, IndexableField leftField, IndexableField rightField) { + + /** checks that two stored fields are equivalent */ + public void assertStoredFieldEquals( + String info, IndexableField leftField, IndexableField rightField) { assertEquals(info, leftField.name(), rightField.name()); assertEquals(info, leftField.binaryValue(), rightField.binaryValue()); assertEquals(info, leftField.stringValue(), rightField.stringValue()); assertEquals(info, leftField.numericValue(), rightField.numericValue()); // TODO: should we check the FT at all? 
} - - /** - * checks that term vectors across all fields are equivalent - */ - public void assertTermVectorsEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException { + + /** checks that term vectors across all fields are equivalent */ + public void assertTermVectorsEquals(String info, IndexReader leftReader, IndexReader rightReader) + throws IOException { assert leftReader.maxDoc() == rightReader.maxDoc(); for (int i = 0; i < leftReader.maxDoc(); i++) { Fields leftFields = leftReader.getTermVectors(i); @@ -2435,7 +2546,8 @@ public abstract class LuceneTestCase extends Assert { while (leftEnum.hasNext()) { String field = leftEnum.next(); assertEquals(info, field, rightEnum.next()); - assertTermsEquals(info, leftReader, leftFields.terms(field), rightFields.terms(field), rarely()); + assertTermsEquals( + info, leftReader, leftFields.terms(field), rightFields.terms(field), rarely()); } assertFalse(rightEnum.hasNext()); } @@ -2443,7 +2555,7 @@ public abstract class LuceneTestCase extends Assert { private static Set getDVFields(IndexReader reader) { Set fields = new HashSet<>(); - for(FieldInfo fi : FieldInfos.getMergedFieldInfos(reader)) { + for (FieldInfo fi : FieldInfos.getMergedFieldInfos(reader)) { if (fi.getDocValuesType() != DocValuesType.NONE) { fields.add(fi.name); } @@ -2451,11 +2563,10 @@ public abstract class LuceneTestCase extends Assert { return fields; } - - /** - * checks that docvalues across all fields are equivalent - */ - public void assertDocValuesEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException { + + /** checks that docvalues across all fields are equivalent */ + public void assertDocValuesEquals(String info, IndexReader leftReader, IndexReader rightReader) + throws IOException { Set leftFields = getDVFields(leftReader); Set rightFields = getDVFields(rightReader); assertEquals(info, leftFields, rightFields); @@ -2468,8 +2579,12 @@ public abstract class LuceneTestCase extends Assert { if (leftValues != null && rightValues != null) { assertDocValuesEquals(info, leftReader.maxDoc(), leftValues, rightValues); } else { - assertTrue(info + ": left numeric doc values for field=\"" + field + "\" are not null", leftValues == null || leftValues.nextDoc() == NO_MORE_DOCS); - assertTrue(info + ": right numeric doc values for field=\"" + field + "\" are not null", rightValues == null || rightValues.nextDoc() == NO_MORE_DOCS); + assertTrue( + info + ": left numeric doc values for field=\"" + field + "\" are not null", + leftValues == null || leftValues.nextDoc() == NO_MORE_DOCS); + assertTrue( + info + ": right numeric doc values for field=\"" + field + "\" are not null", + rightValues == null || rightValues.nextDoc() == NO_MORE_DOCS); } } @@ -2490,7 +2605,7 @@ public abstract class LuceneTestCase extends Assert { assertTrue(info, rightValues == null || rightValues.nextDoc() == NO_MORE_DOCS); } } - + { SortedDocValues leftValues = MultiDocValues.getSortedValues(leftReader, field); SortedDocValues rightValues = MultiDocValues.getSortedValues(rightReader, field); @@ -2504,7 +2619,7 @@ public abstract class LuceneTestCase extends Assert { assertEquals(info, left, right); } // bytes - for(int docID=0;docID left = new TreeSet<>(); TreeSet right = new TreeSet<>(); - + for (FieldInfo fi : leftInfos) { left.add(fi.name); } - + for (FieldInfo fi : rightInfos) { right.add(fi.name); } - + assertEquals(info, left, right); } - // naive silly memory heavy uninversion!! 
maps docID -> packed values (a Set because a given doc can be multi-valued) - private Map> uninvert(String fieldName, IndexReader reader) throws IOException { - final Map> docValues = new HashMap<>(); - for(LeafReaderContext ctx : reader.leaves()) { + // naive silly memory heavy uninversion!! maps docID -> packed values (a Set because a given doc + // can be multi-valued) + private Map> uninvert(String fieldName, IndexReader reader) + throws IOException { + final Map> docValues = new HashMap<>(); + for (LeafReaderContext ctx : reader.leaves()) { PointValues points = ctx.reader().getPointValues(fieldName); if (points == null) { @@ -2634,59 +2757,69 @@ public abstract class LuceneTestCase extends Assert { } points.intersect( - new PointValues.IntersectVisitor() { - @Override - public void visit(int docID) { - throw new UnsupportedOperationException(); - } + new PointValues.IntersectVisitor() { + @Override + public void visit(int docID) { + throw new UnsupportedOperationException(); + } - @Override - public void visit(int docID, byte[] packedValue) throws IOException { - int topDocID = ctx.docBase + docID; - if (docValues.containsKey(topDocID) == false) { - docValues.put(topDocID, new HashSet()); - } - docValues.get(topDocID).add(new BytesRef(packedValue.clone())); - } + @Override + public void visit(int docID, byte[] packedValue) throws IOException { + int topDocID = ctx.docBase + docID; + if (docValues.containsKey(topDocID) == false) { + docValues.put(topDocID, new HashSet()); + } + docValues.get(topDocID).add(new BytesRef(packedValue.clone())); + } - @Override - public PointValues.Relation compare(byte[] minPackedValue, byte[] maxPackedValue) { - // We pretend our query shape is so hairy that it crosses every single cell: - return PointValues.Relation.CELL_CROSSES_QUERY; - } - }); + @Override + public PointValues.Relation compare(byte[] minPackedValue, byte[] maxPackedValue) { + // We pretend our query shape is so hairy that it crosses every single cell: + return PointValues.Relation.CELL_CROSSES_QUERY; + } + }); } return docValues; } - public void assertPointsEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException { + public void assertPointsEquals(String info, IndexReader leftReader, IndexReader rightReader) + throws IOException { FieldInfos fieldInfos1 = FieldInfos.getMergedFieldInfos(leftReader); FieldInfos fieldInfos2 = FieldInfos.getMergedFieldInfos(rightReader); - for(FieldInfo fieldInfo1 : fieldInfos1) { + for (FieldInfo fieldInfo1 : fieldInfos1) { if (fieldInfo1.getPointDimensionCount() != 0) { FieldInfo fieldInfo2 = fieldInfos2.fieldInfo(fieldInfo1.name); // same data dimension count? - assertEquals(info, fieldInfo2.getPointDimensionCount(), fieldInfo2.getPointDimensionCount()); + assertEquals( + info, fieldInfo2.getPointDimensionCount(), fieldInfo2.getPointDimensionCount()); // same index dimension count? - assertEquals(info, fieldInfo2.getPointIndexDimensionCount(), fieldInfo2.getPointIndexDimensionCount()); + assertEquals( + info, + fieldInfo2.getPointIndexDimensionCount(), + fieldInfo2.getPointIndexDimensionCount()); // same bytes per dimension? 
assertEquals(info, fieldInfo2.getPointNumBytes(), fieldInfo2.getPointNumBytes()); - assertEquals(info + " field=" + fieldInfo1.name, - uninvert(fieldInfo1.name, leftReader), - uninvert(fieldInfo1.name, rightReader)); + assertEquals( + info + " field=" + fieldInfo1.name, + uninvert(fieldInfo1.name, leftReader), + uninvert(fieldInfo1.name, rightReader)); } } // make sure FieldInfos2 doesn't have any point fields that FieldInfo1 didn't have - for(FieldInfo fieldInfo2 : fieldInfos2) { + for (FieldInfo fieldInfo2 : fieldInfos2) { if (fieldInfo2.getPointDimensionCount() != 0) { FieldInfo fieldInfo1 = fieldInfos1.fieldInfo(fieldInfo2.name); // same data dimension count? - assertEquals(info, fieldInfo2.getPointDimensionCount(), fieldInfo1.getPointDimensionCount()); + assertEquals( + info, fieldInfo2.getPointDimensionCount(), fieldInfo1.getPointDimensionCount()); // same index dimension count? - assertEquals(info, fieldInfo2.getPointIndexDimensionCount(), fieldInfo1.getPointIndexDimensionCount()); + assertEquals( + info, + fieldInfo2.getPointIndexDimensionCount(), + fieldInfo1.getPointIndexDimensionCount()); // same bytes per dimension? assertEquals(info, fieldInfo2.getPointNumBytes(), fieldInfo1.getPointNumBytes()); @@ -2694,26 +2827,41 @@ public abstract class LuceneTestCase extends Assert { } } } - + /** Inspects stack trace to figure out if a method of a specific class called us. */ public static boolean callStackContains(Class clazz, String methodName) { final String className = clazz.getName(); - return StackWalker.getInstance().walk(s -> s.skip(1) // exclude this utility method - .anyMatch(f -> className.equals(f.getClassName()) && methodName.equals(f.getMethodName()))); + return StackWalker.getInstance() + .walk( + s -> + s.skip(1) // exclude this utility method + .anyMatch( + f -> + className.equals(f.getClassName()) + && methodName.equals(f.getMethodName()))); } - /** Inspects stack trace to figure out if one of the given method names (no class restriction) called us. */ + /** + * Inspects stack trace to figure out if one of the given method names (no class restriction) + * called us. + */ public static boolean callStackContainsAnyOf(String... methodNames) { - return StackWalker.getInstance().walk(s -> s.skip(1) // exclude this utility method - .map(StackFrame::getMethodName) - .anyMatch(Set.of(methodNames)::contains)); + return StackWalker.getInstance() + .walk( + s -> + s.skip(1) // exclude this utility method + .map(StackFrame::getMethodName) + .anyMatch(Set.of(methodNames)::contains)); } /** Inspects stack trace if the given class called us. */ public static boolean callStackContains(Class clazz) { - return StackWalker.getInstance().walk(s -> s.skip(1) // exclude this utility method - .map(StackFrame::getClassName) - .anyMatch(clazz.getName()::equals)); + return StackWalker.getInstance() + .walk( + s -> + s.skip(1) // exclude this utility method + .map(StackFrame::getClassName) + .anyMatch(clazz.getName()::equals)); } /** A runnable that can throw any checked exception. */ @@ -2723,12 +2871,17 @@ public abstract class LuceneTestCase extends Assert { } /** Checks a specific exception class is thrown by the given runnable, and returns it. 
*/ - public static T expectThrows(Class expectedType, ThrowingRunnable runnable) { - return expectThrows(expectedType, "Expected exception "+ expectedType.getSimpleName() + " but no exception was thrown", runnable); + public static T expectThrows( + Class expectedType, ThrowingRunnable runnable) { + return expectThrows( + expectedType, + "Expected exception " + expectedType.getSimpleName() + " but no exception was thrown", + runnable); } /** Checks a specific exception class is thrown by the given runnable, and returns it. */ - public static T expectThrows(Class expectedType, String noExceptionMessage, ThrowingRunnable runnable) { + public static T expectThrows( + Class expectedType, String noExceptionMessage, ThrowingRunnable runnable) { final Throwable thrown = _expectThrows(Collections.singletonList(expectedType), runnable); if (expectedType.isInstance(thrown)) { return expectedType.cast(thrown); @@ -2736,13 +2889,19 @@ public abstract class LuceneTestCase extends Assert { if (null == thrown) { throw new AssertionFailedError(noExceptionMessage); } - AssertionFailedError assertion = new AssertionFailedError("Unexpected exception type, expected " + expectedType.getSimpleName() + " but got " + thrown); + AssertionFailedError assertion = + new AssertionFailedError( + "Unexpected exception type, expected " + + expectedType.getSimpleName() + + " but got " + + thrown); assertion.initCause(thrown); throw assertion; } /** Checks a specific exception class is thrown by the given runnable, and returns it. */ - public static T expectThrowsAnyOf(List> expectedTypes, ThrowingRunnable runnable) { + public static T expectThrowsAnyOf( + List> expectedTypes, ThrowingRunnable runnable) { if (expectedTypes.isEmpty()) { throw new AssertionError("At least one expected exception type is required?"); } @@ -2756,68 +2915,88 @@ public abstract class LuceneTestCase extends Assert { } } - List exceptionTypes = expectedTypes.stream().map(c -> c.getSimpleName()).collect(Collectors.toList()); + List exceptionTypes = + expectedTypes.stream().map(c -> c.getSimpleName()).collect(Collectors.toList()); if (thrown != null) { - AssertionFailedError assertion = new AssertionFailedError("Unexpected exception type, expected any of " + - exceptionTypes + - " but got: " + thrown); + AssertionFailedError assertion = + new AssertionFailedError( + "Unexpected exception type, expected any of " + + exceptionTypes + + " but got: " + + thrown); assertion.initCause(thrown); throw assertion; } else { - throw new AssertionFailedError("Expected any of the following exception types: " + - exceptionTypes+ " but no exception was thrown."); + throw new AssertionFailedError( + "Expected any of the following exception types: " + + exceptionTypes + + " but no exception was thrown."); } } /** - * Checks that specific wrapped and outer exception classes are thrown - * by the given runnable, and returns the wrapped exception. + * Checks that specific wrapped and outer exception classes are thrown by the given runnable, and + * returns the wrapped exception. 
*/ - public static TW expectThrows - (Class expectedOuterType, Class expectedWrappedType, ThrowingRunnable runnable) { + public static TW expectThrows( + Class expectedOuterType, Class expectedWrappedType, ThrowingRunnable runnable) { final Throwable thrown = _expectThrows(Collections.singletonList(expectedOuterType), runnable); if (null == thrown) { - throw new AssertionFailedError("Expected outer exception " + expectedOuterType.getSimpleName() - + " but no exception was thrown."); + throw new AssertionFailedError( + "Expected outer exception " + + expectedOuterType.getSimpleName() + + " but no exception was thrown."); } if (expectedOuterType.isInstance(thrown)) { Throwable cause = thrown.getCause(); if (expectedWrappedType.isInstance(cause)) { return expectedWrappedType.cast(cause); } else { - AssertionFailedError assertion = new AssertionFailedError - ("Unexpected wrapped exception type, expected " + expectedWrappedType.getSimpleName() - + " but got: " + cause); + AssertionFailedError assertion = + new AssertionFailedError( + "Unexpected wrapped exception type, expected " + + expectedWrappedType.getSimpleName() + + " but got: " + + cause); assertion.initCause(thrown); throw assertion; } } - AssertionFailedError assertion = new AssertionFailedError - ("Unexpected outer exception type, expected " + expectedOuterType.getSimpleName() - + " but got: " + thrown); + AssertionFailedError assertion = + new AssertionFailedError( + "Unexpected outer exception type, expected " + + expectedOuterType.getSimpleName() + + " but got: " + + thrown); assertion.initCause(thrown); throw assertion; } /** - * Checks that one of the specified wrapped and outer exception classes are thrown - * by the given runnable, and returns the outer exception. - * - * This method accepts outer exceptions with no wrapped exception; - * an empty list of expected wrapped exception types indicates no wrapped exception. + * Checks that one of the specified wrapped and outer exception classes are thrown by the given + * runnable, and returns the outer exception. + * + *
<p>
    This method accepts outer exceptions with no wrapped exception; an empty list of expected + * wrapped exception types indicates no wrapped exception. */ - public static TO expectThrowsAnyOf - (LinkedHashMap,List>> expectedOuterToWrappedTypes, ThrowingRunnable runnable) { - final List> outerClasses = expectedOuterToWrappedTypes.keySet().stream().collect(Collectors.toList()); + public static TO expectThrowsAnyOf( + LinkedHashMap, List>> expectedOuterToWrappedTypes, + ThrowingRunnable runnable) { + final List> outerClasses = + expectedOuterToWrappedTypes.keySet().stream().collect(Collectors.toList()); final Throwable thrown = _expectThrows(outerClasses, runnable); - + if (null == thrown) { - List outerTypes = outerClasses.stream().map(Class::getSimpleName).collect(Collectors.toList()); - throw new AssertionFailedError("Expected any of the following outer exception types: " + outerTypes - + " but no exception was thrown."); + List outerTypes = + outerClasses.stream().map(Class::getSimpleName).collect(Collectors.toList()); + throw new AssertionFailedError( + "Expected any of the following outer exception types: " + + outerTypes + + " but no exception was thrown."); } - for (Map.Entry, List>> entry : expectedOuterToWrappedTypes.entrySet()) { + for (Map.Entry, List>> entry : + expectedOuterToWrappedTypes.entrySet()) { Class expectedOuterType = entry.getKey(); List> expectedWrappedTypes = entry.getValue(); Throwable cause = thrown.getCause(); @@ -2830,29 +3009,41 @@ public abstract class LuceneTestCase extends Assert { return expectedOuterType.cast(thrown); } } - List wrappedTypes = expectedWrappedTypes.stream().map(Class::getSimpleName).collect(Collectors.toList()); - AssertionFailedError assertion = new AssertionFailedError - ("Unexpected wrapped exception type, expected one of " + wrappedTypes + " but got: " + cause); + List wrappedTypes = + expectedWrappedTypes.stream().map(Class::getSimpleName).collect(Collectors.toList()); + AssertionFailedError assertion = + new AssertionFailedError( + "Unexpected wrapped exception type, expected one of " + + wrappedTypes + + " but got: " + + cause); assertion.initCause(thrown); throw assertion; } } } - List outerTypes = outerClasses.stream().map(Class::getSimpleName).collect(Collectors.toList()); - AssertionFailedError assertion = new AssertionFailedError - ("Unexpected outer exception type, expected one of " + outerTypes + " but got: " + thrown); + List outerTypes = + outerClasses.stream().map(Class::getSimpleName).collect(Collectors.toList()); + AssertionFailedError assertion = + new AssertionFailedError( + "Unexpected outer exception type, expected one of " + + outerTypes + + " but got: " + + thrown); assertion.initCause(thrown); throw assertion; } /** - * Helper method for {@link #expectThrows} and {@link #expectThrowsAnyOf} that takes care of propagating - * any {@link AssertionError} or {@link AssumptionViolatedException} instances thrown if and only if they - * are super classes of the expectedTypes. Otherwise simply returns any {@link Throwable} - * thrown, regardless of type, or null if the runnable completed w/o error. + * Helper method for {@link #expectThrows} and {@link #expectThrowsAnyOf} that takes care of + * propagating any {@link AssertionError} or {@link AssumptionViolatedException} instances thrown + * if and only if they are super classes of the expectedTypes. Otherwise simply + * returns any {@link Throwable} thrown, regardless of type, or null if the runnable + * completed w/o error. 
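+ *
+ * <p>Callers are expected to turn a {@code null} return into their own {@link
+ * AssertionFailedError}, as the {@code expectThrows} overloads above do.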
*/ - private static Throwable _expectThrows(List> expectedTypes, ThrowingRunnable runnable) { - + private static Throwable _expectThrows( + List> expectedTypes, ThrowingRunnable runnable) { + try { runnable.run(); } catch (AssertionError | AssumptionViolatedException ae) { @@ -2867,11 +3058,11 @@ public abstract class LuceneTestCase extends Assert { } return null; } - - /** Returns true if the file exists (can be opened), false - * if it cannot be opened, and (unlike Java's - * File.exists) throws IOException if there's some - * unexpected error. */ + + /** + * Returns true if the file exists (can be opened), false if it cannot be opened, and (unlike + * Java's File.exists) throws IOException if there's some unexpected error. + */ public static boolean slowFileExists(Directory dir, String fileName) throws IOException { try { dir.openInput(fileName, IOContext.DEFAULT).close(); @@ -2882,18 +3073,17 @@ public abstract class LuceneTestCase extends Assert { } /** - * This method is deprecated for a reason. Do not use it. Call {@link #createTempDir()} - * or {@link #createTempDir(String)} or {@link #createTempFile(String, String)}. + * This method is deprecated for a reason. Do not use it. Call {@link #createTempDir()} or {@link + * #createTempDir(String)} or {@link #createTempFile(String, String)}. */ @Deprecated public static Path getBaseTempDirForTestClass() { return tempFilesCleanupRule.getPerTestClassTempDir(); } - /** * Creates an empty, temporary folder (when the name of the folder is of no importance). - * + * * @see #createTempDir(String) */ public static Path createTempDir() { @@ -2901,24 +3091,22 @@ public abstract class LuceneTestCase extends Assert { } /** - * Creates an empty, temporary folder with the given name prefix under the - * test class's {@link #getBaseTempDirForTestClass()}. - * - *
<p>
    The folder will be automatically removed after the - * test class completes successfully. The test should close any file handles that would prevent - * the folder from being removed. + * Creates an empty, temporary folder with the given name prefix under the test class's {@link + * #getBaseTempDirForTestClass()}. + * + *
<p>
    The folder will be automatically removed after the test class completes successfully. The + * test should close any file handles that would prevent the folder from being removed. */ public static Path createTempDir(String prefix) { return tempFilesCleanupRule.createTempDir(prefix); } - + /** - * Creates an empty file with the given prefix and suffix under the - * test class's {@link #getBaseTempDirForTestClass()}. - * - *
<p>
    The file will be automatically removed after the - * test class completes successfully. The test should close any file handles that would prevent - * the folder from being removed. + * Creates an empty file with the given prefix and suffix under the test class's {@link + * #getBaseTempDirForTestClass()}. + * + *
<p>
    The file will be automatically removed after the test class completes successfully. The test + * should close any file handles that would prevent the folder from being removed. */ public static Path createTempFile(String prefix, String suffix) throws IOException { return tempFilesCleanupRule.createTempFile(prefix, suffix); @@ -2926,28 +3114,33 @@ public abstract class LuceneTestCase extends Assert { /** * Creates an empty temporary file. - * - * @see #createTempFile(String, String) + * + * @see #createTempFile(String, String) */ public static Path createTempFile() throws IOException { return createTempFile("tempFile", ".tmp"); } - - /** - * Runs a code part with restricted permissions (be sure to add all required permissions, - * because it would start with empty permissions). You cannot grant more permissions than - * our policy file allows, but you may restrict writing to several dirs... - *
<p>
    Note: This assumes a {@link SecurityManager} enabled, otherwise it - * stops test execution. If enabled, it needs the following {@link SecurityPermission}: - * {@code "createAccessControlContext"} + + /** + * Runs a code part with restricted permissions (be sure to add all required permissions, because + * it would start with empty permissions). You cannot grant more permissions than our policy file + * allows, but you may restrict writing to several dirs... + * + *
<p>
    Note: This assumes a {@link SecurityManager} enabled, otherwise it stops test + * execution. If enabled, it needs the following {@link SecurityPermission}: {@code + * "createAccessControlContext"} */ - public static T runWithRestrictedPermissions(PrivilegedExceptionAction action, Permission... permissions) throws Exception { - assumeTrue("runWithRestrictedPermissions requires a SecurityManager enabled", System.getSecurityManager() != null); + public static T runWithRestrictedPermissions( + PrivilegedExceptionAction action, Permission... permissions) throws Exception { + assumeTrue( + "runWithRestrictedPermissions requires a SecurityManager enabled", + System.getSecurityManager() != null); // be sure to have required permission, otherwise doPrivileged runs with *no* permissions: AccessController.checkPermission(new SecurityPermission("createAccessControlContext")); final PermissionCollection perms = new Permissions(); Arrays.stream(permissions).forEach(perms::add); - final AccessControlContext ctx = new AccessControlContext(new ProtectionDomain[] { new ProtectionDomain(null, perms) }); + final AccessControlContext ctx = + new AccessControlContext(new ProtectionDomain[] {new ProtectionDomain(null, perms)}); try { return AccessController.doPrivileged(action, ctx); } catch (PrivilegedActionException e) { @@ -2963,11 +3156,11 @@ public abstract class LuceneTestCase extends Assert { assert enabled = true; // Intentional side-effect!!! assertsAreEnabled = enabled; } - - /** - * Compares two strings with a collator, also looking to see if the the strings - * are impacted by jdk bugs. may not avoid all jdk bugs in tests. - * see https://bugs.openjdk.java.net/browse/JDK-8071862 + + /** + * Compares two strings with a collator, also looking to see if the the strings are impacted by + * jdk bugs. may not avoid all jdk bugs in tests. see + * https://bugs.openjdk.java.net/browse/JDK-8071862 */ @SuppressForbidden(reason = "dodges JDK-8071862") public static int collate(Collator collator, String s1, String s2) { @@ -2978,10 +3171,7 @@ public abstract class LuceneTestCase extends Assert { return v1; } - - /** - * Ensures that the MergePolicy has sane values for tests that test with lots of documents. - */ + /** Ensures that the MergePolicy has sane values for tests that test with lots of documents. */ protected static IndexWriterConfig ensureSaneIWCOnNightly(IndexWriterConfig conf) { if (LuceneTestCase.TEST_NIGHTLY) { // newIWConfig makes smallish max seg size, which diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/NullInfoStream.java b/lucene/test-framework/src/java/org/apache/lucene/util/NullInfoStream.java index 0a121c9d182..fb69c684b8a 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/NullInfoStream.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/NullInfoStream.java @@ -19,8 +19,9 @@ package org.apache.lucene.util; import java.io.IOException; /** - * Prints nothing. Just to make sure tests pass w/ and without enabled InfoStream - * without actually making noise. + * Prints nothing. Just to make sure tests pass w/ and without enabled InfoStream without actually + * making noise. 
+ * * @lucene.experimental */ public class NullInfoStream extends InfoStream { @@ -30,7 +31,7 @@ public class NullInfoStream extends InfoStream { assert component != null; assert message != null; } - + @Override public boolean isEnabled(String component) { assert component != null; @@ -38,6 +39,5 @@ public class NullInfoStream extends InfoStream { } @Override - public void close() throws IOException { - } + public void close() throws IOException {} } diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/QuickPatchThreadsFilter.java b/lucene/test-framework/src/java/org/apache/lucene/util/QuickPatchThreadsFilter.java index 8447e90b904..c9e590fb9f4 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/QuickPatchThreadsFilter.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/QuickPatchThreadsFilter.java @@ -18,12 +18,10 @@ package org.apache.lucene.util; import com.carrotsearch.randomizedtesting.ThreadFilter; -/** - * Last minute patches. - */ +/** Last minute patches. */ public class QuickPatchThreadsFilter implements ThreadFilter { static final boolean isJ9; - + static { isJ9 = Constants.JAVA_VENDOR.startsWith("IBM"); } @@ -37,8 +35,9 @@ public class QuickPatchThreadsFilter implements ThreadFilter { } // LUCENE-4736 - StackTraceElement [] stack = t.getStackTrace(); - if (stack.length > 0 && stack[stack.length - 1].getClassName().equals("java.util.Timer$TimerImpl")) { + StackTraceElement[] stack = t.getStackTrace(); + if (stack.length > 0 + && stack[stack.length - 1].getClassName().equals("java.util.Timer$TimerImpl")) { return true; } } diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/RamUsageTester.java b/lucene/test-framework/src/java/org/apache/lucene/util/RamUsageTester.java index 5ff5c992b6d..39d25563c88 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/RamUsageTester.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/RamUsageTester.java @@ -45,33 +45,39 @@ import java.util.stream.StreamSupport; /** Crawls object graph to collect RAM usage for testing */ public final class RamUsageTester { - /** An accumulator of object references. This class allows for customizing RAM usage estimation. */ + /** + * An accumulator of object references. This class allows for customizing RAM usage estimation. + */ public static class Accumulator { - /** Accumulate transitive references for the provided fields of the given - * object into queue and return the shallow size of this object. */ - public long accumulateObject(Object o, long shallowSize, Map fieldValues, Collection queue) { + /** + * Accumulate transitive references for the provided fields of the given object into queue + * and return the shallow size of this object. + */ + public long accumulateObject( + Object o, long shallowSize, Map fieldValues, Collection queue) { queue.addAll(fieldValues.values()); return shallowSize; } - /** Accumulate transitive references for the provided values of the given - * array into queue and return the shallow size of this array. */ - public long accumulateArray(Object array, long shallowSize, List values, Collection queue) { + /** + * Accumulate transitive references for the provided values of the given array into queue + * and return the shallow size of this array. + */ + public long accumulateArray( + Object array, long shallowSize, List values, Collection queue) { queue.addAll(values); return shallowSize; } - } /** - * Estimates the RAM usage by the given object. 
It will - * walk the object tree and sum up all referenced objects. + * Estimates the RAM usage by the given object. It will walk the object tree and sum up all + * referenced objects. * - *
<p><b>Resource Usage:</b> This method internally uses a set of - * every object seen during traversals so it does allocate memory - * (it isn't side-effect free). After the method exits, this memory - * should be GCed.</p>
    + *
<p>
    Resource Usage: This method internally uses a set of every object seen during + * traversals so it does allocate memory (it isn't side-effect free). After the method exits, this + * memory should be GCed. */ public static long sizeOf(Object obj, Accumulator accumulator) { return measureObjectSize(obj, accumulator); @@ -84,6 +90,7 @@ public final class RamUsageTester { /** * Return a human-readable size of a given object. + * * @see #sizeOf(Object) * @see RamUsageEstimator#humanReadableUnits(long) */ @@ -100,9 +107,9 @@ public final class RamUsageTester { private static long measureObjectSize(Object root, Accumulator accumulator) { // Objects seen so far. final Set seen = Collections.newSetFromMap(new IdentityHashMap()); - // Class cache with reference Field and precalculated shallow size. + // Class cache with reference Field and precalculated shallow size. final IdentityHashMap, ClassCache> classCache = new IdentityHashMap<>(); - // Stack of objects pending traversal. Recursion caused stack overflows. + // Stack of objects pending traversal. Recursion caused stack overflows. final ArrayList stack = new ArrayList<>(); stack.add(root); @@ -117,7 +124,8 @@ public final class RamUsageTester { final long obSize; final Class obClazz = ob.getClass(); - assert obClazz != null : "jvm bug detected (Object.getClass() == null). please report this to your vendor"; + assert obClazz != null + : "jvm bug detected (Object.getClass() == null). please report this to your vendor"; if (obClazz.isArray()) { obSize = handleArray(accumulator, stack, ob, obClazz); } else { @@ -137,7 +145,12 @@ public final class RamUsageTester { return totalSize; } - private static long handleOther(Accumulator accumulator, IdentityHashMap, ClassCache> classCache, ArrayList stack, Object ob, Class obClazz) { + private static long handleOther( + Accumulator accumulator, + IdentityHashMap, ClassCache> classCache, + ArrayList stack, + Object ob, + Class obClazz) { /* * Consider an object. Push any references it has to the processing stack * and accumulate this object's shallow size. @@ -146,30 +159,45 @@ public final class RamUsageTester { if (Constants.JRE_IS_MINIMUM_JAVA9) { long alignedShallowInstanceSize = RamUsageEstimator.shallowSizeOf(ob); - Predicate> isJavaModule = (clazz) -> { - return clazz.getName().startsWith("java."); - }; + Predicate> isJavaModule = + (clazz) -> { + return clazz.getName().startsWith("java."); + }; - // Java 9: Best guess for some known types, as we cannot precisely look into runtime classes: + // Java 9: Best guess for some known types, as we cannot precisely look into runtime + // classes: final ToLongFunction func = SIMPLE_TYPES.get(obClazz); - if (func != null) { // some simple type like String where the size is easy to get from public properties - return accumulator.accumulateObject(ob, alignedShallowInstanceSize + func.applyAsLong(ob), - Collections.emptyMap(), stack); + if (func + != null) { // some simple type like String where the size is easy to get from public + // properties + return accumulator.accumulateObject( + ob, alignedShallowInstanceSize + func.applyAsLong(ob), Collections.emptyMap(), stack); } else if (ob instanceof Enum) { return alignedShallowInstanceSize; } else if (ob instanceof ByteBuffer) { // Approximate ByteBuffers with their underlying storage (ignores field overhead). 
return byteArraySize(((ByteBuffer) ob).capacity()); - } else if (isJavaModule.test(obClazz) && ob instanceof Map) { - final List values = ((Map) ob).entrySet().stream() - .flatMap(e -> Stream.of(e.getKey(), e.getValue())) - .collect(Collectors.toList()); - return accumulator.accumulateArray(ob, alignedShallowInstanceSize + RamUsageEstimator.NUM_BYTES_ARRAY_HEADER, values, stack) + } else if (isJavaModule.test(obClazz) && ob instanceof Map) { + final List values = + ((Map) ob) + .entrySet().stream() + .flatMap(e -> Stream.of(e.getKey(), e.getValue())) + .collect(Collectors.toList()); + return accumulator.accumulateArray( + ob, + alignedShallowInstanceSize + RamUsageEstimator.NUM_BYTES_ARRAY_HEADER, + values, + stack) + RamUsageEstimator.NUM_BYTES_ARRAY_HEADER; } else if (isJavaModule.test(obClazz) && ob instanceof Iterable) { - final List values = StreamSupport.stream(((Iterable) ob).spliterator(), false) - .collect(Collectors.toList()); - return accumulator.accumulateArray(ob, alignedShallowInstanceSize + RamUsageEstimator.NUM_BYTES_ARRAY_HEADER, values, stack) + final List values = + StreamSupport.stream(((Iterable) ob).spliterator(), false) + .collect(Collectors.toList()); + return accumulator.accumulateArray( + ob, + alignedShallowInstanceSize + RamUsageEstimator.NUM_BYTES_ARRAY_HEADER, + values, + stack) + RamUsageEstimator.NUM_BYTES_ARRAY_HEADER; } else { // Fallback to reflective access. @@ -185,14 +213,16 @@ public final class RamUsageTester { for (Field f : cachedInfo.referenceFields) { fieldValues.put(f, f.get(ob)); } - return accumulator.accumulateObject(ob, cachedInfo.alignedShallowInstanceSize, fieldValues, stack); + return accumulator.accumulateObject( + ob, cachedInfo.alignedShallowInstanceSize, fieldValues, stack); } catch (IllegalAccessException e) { // this should never happen as we enabled setAccessible(). throw new RuntimeException("Reflective field access failed?", e); } } - private static long handleArray(Accumulator accumulator, ArrayList stack, Object ob, Class obClazz) { + private static long handleArray( + Accumulator accumulator, ArrayList stack, Object ob, Class obClazz) { /* * Consider an array, possibly of primitive types. Push any of its references to * the processing stack and accumulate this array's shallow size. @@ -204,58 +234,67 @@ public final class RamUsageTester { if (componentClazz.isPrimitive()) { values = Collections.emptyList(); } else { - values = new AbstractList() { + values = + new AbstractList() { - @Override - public Object get(int index) { - return Array.get(ob, index); - } + @Override + public Object get(int index) { + return Array.get(ob, index); + } - @Override - public int size() { - return len; - } - - }; + @Override + public int size() { + return len; + } + }; } return accumulator.accumulateArray(ob, shallowSize, values, stack); } /** - * This map contains a function to calculate sizes of some "simple types" like String just from their public properties. - * This is needed for Java 9, which does not allow to look into runtime class fields. + * This map contains a function to calculate sizes of some "simple types" like String just from + * their public properties. This is needed for Java 9, which does not allow to look into runtime + * class fields. 
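+ *
+ * <p>Lookup sketch: {@code SIMPLE_TYPES.get(String.class)} yields the function registered below,
+ * which charges {@code charArraySize(s.length())} for the backing array of a string {@code s}.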
*/ @SuppressWarnings("serial") - private static final Map, ToLongFunction> SIMPLE_TYPES = Collections.unmodifiableMap(new IdentityHashMap, ToLongFunction>() { - { init(); } - - @SuppressForbidden(reason = "We measure some forbidden classes") - private void init() { - // String types: - a(String.class, v -> charArraySize(v.length())); // may not be correct with Java 9's compact strings! - a(StringBuilder.class, v -> charArraySize(v.capacity())); - a(StringBuffer.class, v -> charArraySize(v.capacity())); - // Types with large buffers: - a(ByteArrayOutputStream.class, v -> byteArraySize(v.size())); - // For File and Path, we just take the length of String representation as approximation: - a(File.class, v -> charArraySize(v.toString().length())); - a(Path.class, v -> charArraySize(v.toString().length())); - a(ByteOrder.class, v -> 0); // Instances of ByteOrder are constants - } - - @SuppressWarnings("unchecked") - private void a(Class clazz, ToLongFunction func) { - put(clazz, (ToLongFunction) func); - } - - private long charArraySize(int len) { - return RamUsageEstimator.alignObjectSize((long)RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long)Character.BYTES * len); - } - }); - - /** - * Cached information about a given class. - */ + private static final Map, ToLongFunction> SIMPLE_TYPES = + Collections.unmodifiableMap( + new IdentityHashMap, ToLongFunction>() { + { + init(); + } + + @SuppressForbidden(reason = "We measure some forbidden classes") + private void init() { + // String types: + a( + String.class, + v -> + charArraySize( + v.length())); // may not be correct with Java 9's compact strings! + a(StringBuilder.class, v -> charArraySize(v.capacity())); + a(StringBuffer.class, v -> charArraySize(v.capacity())); + // Types with large buffers: + a(ByteArrayOutputStream.class, v -> byteArraySize(v.size())); + // For File and Path, we just take the length of String representation as + // approximation: + a(File.class, v -> charArraySize(v.toString().length())); + a(Path.class, v -> charArraySize(v.toString().length())); + a(ByteOrder.class, v -> 0); // Instances of ByteOrder are constants + } + + @SuppressWarnings("unchecked") + private void a(Class clazz, ToLongFunction func) { + put(clazz, (ToLongFunction) func); + } + + private long charArraySize(int len) { + return RamUsageEstimator.alignObjectSize( + (long) RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Character.BYTES * len); + } + }); + + /** Cached information about a given class. */ private static final class ClassCache { public final long alignedShallowInstanceSize; public final Field[] referenceFields; @@ -263,49 +302,52 @@ public final class RamUsageTester { public ClassCache(long alignedShallowInstanceSize, Field[] referenceFields) { this.alignedShallowInstanceSize = alignedShallowInstanceSize; this.referenceFields = referenceFields; - } + } } - - /** - * Create a cached information about shallow size and reference fields for - * a given class. - */ + + /** Create a cached information about shallow size and reference fields for a given class. 
*/ @SuppressForbidden(reason = "We need to access private fields of measured objects.") private static ClassCache createCacheEntry(final Class clazz) { - return AccessController.doPrivileged((PrivilegedAction) () -> { - ClassCache cachedInfo; - long shallowInstanceSize = RamUsageEstimator.NUM_BYTES_OBJECT_HEADER; - final ArrayList referenceFields = new ArrayList<>(32); - for (Class c = clazz; c != null; c = c.getSuperclass()) { - if (c == Class.class) { - // prevent inspection of Class' fields, throws SecurityException in Java 9! - continue; - } - final Field[] fields = c.getDeclaredFields(); - for (final Field f : fields) { - if (!Modifier.isStatic(f.getModifiers())) { - shallowInstanceSize = RamUsageEstimator.adjustForField(shallowInstanceSize, f); - - if (!f.getType().isPrimitive()) { - try { - f.setAccessible(true); - referenceFields.add(f); - } catch (RuntimeException re) { - throw new RuntimeException(String.format(Locale.ROOT, - "Can't access field '%s' of class '%s' for RAM estimation.", - f.getName(), - clazz.getName()), re); + return AccessController.doPrivileged( + (PrivilegedAction) + () -> { + ClassCache cachedInfo; + long shallowInstanceSize = RamUsageEstimator.NUM_BYTES_OBJECT_HEADER; + final ArrayList referenceFields = new ArrayList<>(32); + for (Class c = clazz; c != null; c = c.getSuperclass()) { + if (c == Class.class) { + // prevent inspection of Class' fields, throws SecurityException in Java 9! + continue; + } + final Field[] fields = c.getDeclaredFields(); + for (final Field f : fields) { + if (!Modifier.isStatic(f.getModifiers())) { + shallowInstanceSize = RamUsageEstimator.adjustForField(shallowInstanceSize, f); + + if (!f.getType().isPrimitive()) { + try { + f.setAccessible(true); + referenceFields.add(f); + } catch (RuntimeException re) { + throw new RuntimeException( + String.format( + Locale.ROOT, + "Can't access field '%s' of class '%s' for RAM estimation.", + f.getName(), + clazz.getName()), + re); + } + } + } + } } - } - } - } - } - - cachedInfo = new ClassCache( - RamUsageEstimator.alignObjectSize(shallowInstanceSize), - referenceFields.toArray(new Field[referenceFields.size()])); - return cachedInfo; - }); + + cachedInfo = + new ClassCache( + RamUsageEstimator.alignObjectSize(shallowInstanceSize), + referenceFields.toArray(new Field[referenceFields.size()])); + return cachedInfo; + }); } private static long byteArraySize(int len) { diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/RemoveUponClose.java b/lucene/test-framework/src/java/org/apache/lucene/util/RemoveUponClose.java index af4ec85d1dd..5d5c17207e4 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/RemoveUponClose.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/RemoveUponClose.java @@ -21,9 +21,7 @@ import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; -/** - * A {@link Closeable} that attempts to remove a given file/folder. - */ +/** A {@link Closeable} that attempts to remove a given file/folder. 
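+ *
+ * <p>If removal fails, {@code close()} rethrows an {@link IOException} that includes the stack
+ * trace captured when the file was created.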
*/ final class RemoveUponClose implements Closeable { private final Path path; private final TestRuleMarkFailure failureMarker; @@ -49,10 +47,13 @@ final class RemoveUponClose implements Closeable { IOUtils.rm(path); } catch (IOException e) { throw new IOException( - "Could not remove temporary location '" - + path.toAbsolutePath() + "', created at stack trace:\n" + creationStack, e); + "Could not remove temporary location '" + + path.toAbsolutePath() + + "', created at stack trace:\n" + + creationStack, + e); } } } } -} \ No newline at end of file +} diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/Rethrow.java b/lucene/test-framework/src/java/org/apache/lucene/util/Rethrow.java index 9e52a1975d9..f80155128f2 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/Rethrow.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/Rethrow.java @@ -17,25 +17,23 @@ package org.apache.lucene.util; /** - * Sneaky: rethrowing checked exceptions as unchecked - * ones. Eh, it is sometimes useful... + * Sneaky: rethrowing checked exceptions as unchecked ones. Eh, it is sometimes useful... * - *
<p>Pulled from Java Puzzlers.
- * @see http://www.amazon.com/Java-Puzzlers-Traps-Pitfalls-Corner/dp/032133678X + *
<p>
    Pulled from Java Puzzlers. + * + * @see http://www.amazon.com/Java-Puzzlers-Traps-Pitfalls-Corner/dp/032133678X */ public final class Rethrow { private Rethrow() {} - /** - * Rethrows t (identical object). - */ + /** Rethrows t (identical object). */ public static void rethrow(Throwable t) { Rethrow.rethrow0(t); } - + @SuppressWarnings("unchecked") private static void rethrow0(Throwable t) throws T { throw (T) t; } } - diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/RunListenerPrintReproduceInfo.java b/lucene/test-framework/src/java/org/apache/lucene/util/RunListenerPrintReproduceInfo.java index 6a8917136a8..e088a7130b7 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/RunListenerPrintReproduceInfo.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/RunListenerPrintReproduceInfo.java @@ -18,34 +18,29 @@ package org.apache.lucene.util; import static org.apache.lucene.util.LuceneTestCase.*; +import com.carrotsearch.randomizedtesting.LifecycleScope; +import com.carrotsearch.randomizedtesting.RandomizedContext; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.regex.Pattern; - import org.junit.runner.Description; import org.junit.runner.Result; import org.junit.runner.notification.Failure; import org.junit.runner.notification.RunListener; -import com.carrotsearch.randomizedtesting.LifecycleScope; -import com.carrotsearch.randomizedtesting.RandomizedContext; - /** - * A suite listener printing a "reproduce string". This ensures test result - * events are always captured properly even if exceptions happen at - * initialization or suite/ hooks level. + * A suite listener printing a "reproduce string". This ensures test result events are always + * captured properly even if exceptions happen at initialization or suite/ hooks level. */ public final class RunListenerPrintReproduceInfo extends RunListener { /** - * A list of all test suite classes executed so far in this JVM (ehm, - * under this class's classloader). + * A list of all test suite classes executed so far in this JVM (ehm, under this class's + * classloader). */ private static List testClassesRun = new ArrayList<>(); - /** - * The currently executing scope. - */ + /** The currently executing scope. */ private LifecycleScope scope; /** Current test failed. */ @@ -56,11 +51,10 @@ public final class RunListenerPrintReproduceInfo extends RunListener { /** A marker to print full env. diagnostics after the suite. 
*/ private boolean printDiagnosticsAfterClass; - + /** true if we should skip the reproduce string (diagnostics are independent) */ private boolean suppressReproduceLine; - @Override public void testRunStarted(Description description) throws Exception { suiteFailed = false; @@ -68,7 +62,8 @@ public final class RunListenerPrintReproduceInfo extends RunListener { scope = LifecycleScope.SUITE; Class targetClass = RandomizedContext.current().getTargetClass(); - suppressReproduceLine = targetClass.isAnnotationPresent(LuceneTestCase.SuppressReproduceLine.class); + suppressReproduceLine = + targetClass.isAnnotationPresent(LuceneTestCase.SuppressReproduceLine.class); testClassesRun.add(targetClass.getSimpleName()); } @@ -91,19 +86,17 @@ public final class RunListenerPrintReproduceInfo extends RunListener { @Override public void testFinished(Description description) throws Exception { if (testFailed) { - reportAdditionalFailureInfo( - stripTestNameAugmentations( - description.getMethodName())); + reportAdditionalFailureInfo(stripTestNameAugmentations(description.getMethodName())); } scope = LifecycleScope.SUITE; testFailed = false; } /** - * The {@link Description} object in JUnit does not expose the actual test method, - * instead it has the concept of a unique "name" of a test. To run the same method (tests) - * repeatedly, randomizedtesting must make those "names" unique: it appends the current iteration - * and seeds to the test method's name. We strip this information here. + * The {@link Description} object in JUnit does not expose the actual test method, instead it has + * the concept of a unique "name" of a test. To run the same method (tests) repeatedly, + * randomizedtesting must make those "names" unique: it appends the current iteration and seeds to + * the test method's name. We strip this information here. */ private String stripTestNameAugmentations(String methodName) { if (methodName != null) { @@ -126,22 +119,30 @@ public final class RunListenerPrintReproduceInfo extends RunListener { /** print some useful debugging information about the environment */ private static void printDebuggingInformation() { if (classEnvRule != null && classEnvRule.isInitialized()) { - System.err.println("NOTE: test params are: codec=" + classEnvRule.codec + - ", sim=" + classEnvRule.similarity + - ", locale=" + classEnvRule.locale.toLanguageTag() + - ", timezone=" + (classEnvRule.timeZone == null ? "(null)" : classEnvRule.timeZone.getID())); + System.err.println( + ("NOTE: test params are: codec=" + classEnvRule.codec) + + (", sim=" + classEnvRule.similarity) + + (", locale=" + classEnvRule.locale.toLanguageTag()) + + (", timezone=" + + (classEnvRule.timeZone == null ? "(null)" : classEnvRule.timeZone.getID()))); } - System.err.println("NOTE: " + System.getProperty("os.name") + " " - + System.getProperty("os.version") + " " - + System.getProperty("os.arch") + "/" - + System.getProperty("java.vendor") + " " - + System.getProperty("java.version") + " " - + (Constants.JRE_IS_64BIT ? 
"(64-bit)" : "(32-bit)") + "/" - + "cpus=" + Runtime.getRuntime().availableProcessors() + "," - + "threads=" + Thread.activeCount() + "," - + "free=" + Runtime.getRuntime().freeMemory() + "," - + "total=" + Runtime.getRuntime().totalMemory()); - System.err.println("NOTE: All tests run in this JVM: " + Arrays.toString(testClassesRun.toArray())); + System.err.println( + "NOTE: " + + (System.getProperty("os.name") + " ") + + (System.getProperty("os.version") + " ") + + (System.getProperty("os.arch") + "/" + System.getProperty("java.vendor")) + + (" " + System.getProperty("java.version")) + + (" " + + (Constants.JRE_IS_64BIT ? "(64-bit)" : "(32-bit)") + + "/" + + "cpus=" + + Runtime.getRuntime().availableProcessors() + + ",") + + ("threads=" + Thread.activeCount() + ",") + + ("free=" + Runtime.getRuntime().freeMemory() + ",") + + ("total=" + Runtime.getRuntime().totalMemory())); + System.err.println( + "NOTE: All tests run in this JVM: " + Arrays.toString(testClassesRun.toArray())); } private void reportAdditionalFailureInfo(final String testName) { @@ -149,8 +150,9 @@ public final class RunListenerPrintReproduceInfo extends RunListener { return; } if (TEST_LINE_DOCS_FILE.endsWith(JENKINS_LARGE_LINE_DOCS_FILE)) { - System.err.println("NOTE: download the large Jenkins line-docs file by running " + - "'ant get-jenkins-line-docs' in the lucene directory."); + System.err.println( + "NOTE: download the large Jenkins line-docs file by running " + + "'ant get-jenkins-line-docs' in the lucene directory."); } final StringBuilder b = new StringBuilder(); @@ -178,12 +180,15 @@ public final class RunListenerPrintReproduceInfo extends RunListener { // Codec, postings, directories. if (!TEST_CODEC.equals("random")) addVmOpt(b, "tests.codec", TEST_CODEC); - if (!TEST_POSTINGSFORMAT.equals("random")) addVmOpt(b, "tests.postingsformat", TEST_POSTINGSFORMAT); - if (!TEST_DOCVALUESFORMAT.equals("random")) addVmOpt(b, "tests.docvaluesformat", TEST_DOCVALUESFORMAT); + if (!TEST_POSTINGSFORMAT.equals("random")) + addVmOpt(b, "tests.postingsformat", TEST_POSTINGSFORMAT); + if (!TEST_DOCVALUESFORMAT.equals("random")) + addVmOpt(b, "tests.docvaluesformat", TEST_DOCVALUESFORMAT); if (!TEST_DIRECTORY.equals("random")) addVmOpt(b, "tests.directory", TEST_DIRECTORY); // Environment. - if (!TEST_LINE_DOCS_FILE.equals(DEFAULT_LINE_DOCS_FILE)) addVmOpt(b, "tests.linedocsfile", TEST_LINE_DOCS_FILE); + if (!TEST_LINE_DOCS_FILE.equals(DEFAULT_LINE_DOCS_FILE)) + addVmOpt(b, "tests.linedocsfile", TEST_LINE_DOCS_FILE); if (classEnvRule != null && classEnvRule.isInitialized()) { addVmOpt(b, "tests.locale", classEnvRule.locale.toLanguageTag()); if (classEnvRule.timeZone != null) { @@ -203,8 +208,8 @@ public final class RunListenerPrintReproduceInfo extends RunListener { } /** - * Append a VM option (-Dkey=value) to a {@link StringBuilder}. Add quotes if - * spaces or other funky characters are detected. + * Append a VM option (-Dkey=value) to a {@link StringBuilder}. Add quotes if spaces or other + * funky characters are detected. 
*/ static void addVmOpt(StringBuilder b, String key, Object value) { if (value == null) return; diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleAssertionsRequired.java b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleAssertionsRequired.java index baf8e32fe91..7663d5eb5f7 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleAssertionsRequired.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleAssertionsRequired.java @@ -20,9 +20,7 @@ import org.junit.rules.TestRule; import org.junit.runner.Description; import org.junit.runners.model.Statement; -/** - * Require assertions for Lucene/Solr packages. - */ +/** Require assertions for Lucene/Solr packages. */ public class TestRuleAssertionsRequired implements TestRule { @Override public Statement apply(final Statement base, final Description description) { @@ -40,7 +38,7 @@ public class TestRuleAssertionsRequired implements TestRule { } if (LuceneTestCase.TEST_ASSERTS_ENABLED) { msg += " but -Dtests.asserts=true"; - } else { + } else { msg += " but -Dtests.asserts=false"; } System.err.println(msg); diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleDelegate.java b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleDelegate.java index 515afd68534..29ba4e42777 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleDelegate.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleDelegate.java @@ -17,14 +17,13 @@ package org.apache.lucene.util; import java.util.concurrent.atomic.AtomicReference; - import org.junit.rules.TestRule; import org.junit.runner.Description; import org.junit.runners.model.Statement; /** - * A {@link TestRule} that delegates to another {@link TestRule} via a delegate - * contained in a an {@link AtomicReference}. + * A {@link TestRule} that delegates to another {@link TestRule} via a delegate contained in a an + * {@link AtomicReference}. */ final class TestRuleDelegate implements TestRule { private AtomicReference delegate; diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleIgnoreAfterMaxFailures.java b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleIgnoreAfterMaxFailures.java index 34357617615..b0f0b3038ba 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleIgnoreAfterMaxFailures.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleIgnoreAfterMaxFailures.java @@ -16,34 +16,29 @@ */ package org.apache.lucene.util; +import com.carrotsearch.randomizedtesting.RandomizedTest; +import com.carrotsearch.randomizedtesting.annotations.Repeat; import org.junit.Assert; import org.junit.internal.AssumptionViolatedException; import org.junit.rules.TestRule; import org.junit.runner.Description; import org.junit.runners.model.Statement; -import com.carrotsearch.randomizedtesting.RandomizedTest; -import com.carrotsearch.randomizedtesting.annotations.Repeat; - /** - * This rule keeps a count of failed tests (suites) and will result in an - * {@link AssumptionViolatedException} after a given number of failures for all - * tests following this condition. - * - *
<p>
    - * Aborting quickly on failed tests can be useful when used in combination with - * test repeats (via the {@link Repeat} annotation or system property). + * This rule keeps a count of failed tests (suites) and will result in an {@link + * AssumptionViolatedException} after a given number of failures for all tests following this + * condition. + * + *
<p>
    Aborting quickly on failed tests can be useful when used in combination with test repeats (via + * the {@link Repeat} annotation or system property). */ public final class TestRuleIgnoreAfterMaxFailures implements TestRule { - /** - * Maximum failures. Package scope for tests. - */ + /** Maximum failures. Package scope for tests. */ int maxFailures; - + /** - * @param maxFailures - * The number of failures after which all tests are ignored. Must be - * greater or equal 1. + * @param maxFailures The number of failures after which all tests are ignored. Must be greater or + * equal 1. */ public TestRuleIgnoreAfterMaxFailures(int maxFailures) { Assert.assertTrue("maxFailures must be >= 1: " + maxFailures, maxFailures >= 1); @@ -57,8 +52,9 @@ public final class TestRuleIgnoreAfterMaxFailures implements TestRule { public void evaluate() throws Throwable { int failuresSoFar = FailureMarker.getFailures(); if (failuresSoFar >= maxFailures) { - RandomizedTest.assumeTrue("Ignored, failures limit reached (" + - failuresSoFar + " >= " + maxFailures + ").", false); + RandomizedTest.assumeTrue( + "Ignored, failures limit reached (" + failuresSoFar + " >= " + maxFailures + ").", + false); } s.evaluate(); diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleIgnoreTestSuites.java b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleIgnoreTestSuites.java index bf4805eb41d..7182e34b7b3 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleIgnoreTestSuites.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleIgnoreTestSuites.java @@ -21,44 +21,35 @@ import org.junit.runner.Description; import org.junit.runners.model.Statement; /** - * This rule will cause the suite to be assumption-ignored if - * the test class implements a given marker interface and a special - * property is not set. - * - *
<p>
    This is a workaround for problems with certain JUnit containers (IntelliJ) - * which automatically discover test suites and attempt to run nested classes - * that we use for testing the test framework itself. + * This rule will cause the suite to be assumption-ignored if the test class implements a given + * marker interface and a special property is not set. + * + *
<p>
    This is a workaround for problems with certain JUnit containers (IntelliJ) which automatically + * discover test suites and attempt to run nested classes that we use for testing the test framework + * itself. */ public final class TestRuleIgnoreTestSuites implements TestRule { - /** - * Marker interface for nested suites that should be ignored - * if executed in stand-alone mode. - */ + /** Marker interface for nested suites that should be ignored if executed in stand-alone mode. */ public static interface NestedTestSuite {} - - /** - * A boolean system property indicating nested suites should be executed - * normally. - */ - public final static String PROPERTY_RUN_NESTED = "tests.runnested"; - + + /** A boolean system property indicating nested suites should be executed normally. */ + public static final String PROPERTY_RUN_NESTED = "tests.runnested"; + @Override public Statement apply(final Statement s, final Description d) { return new Statement() { @Override public void evaluate() throws Throwable { if (NestedTestSuite.class.isAssignableFrom(d.getTestClass())) { - LuceneTestCase.assumeTrue("Nested suite class ignored (started as stand-alone).", - isRunningNested()); + LuceneTestCase.assumeTrue( + "Nested suite class ignored (started as stand-alone).", isRunningNested()); } s.evaluate(); } }; } - /** - * Check if a suite class is running as a nested test. - */ + /** Check if a suite class is running as a nested test. */ public static boolean isRunningNested() { return Boolean.getBoolean(PROPERTY_RUN_NESTED); } diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleLimitSysouts.java b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleLimitSysouts.java index 029f3174c88..cfa8689ed1a 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleLimitSysouts.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleLimitSysouts.java @@ -16,6 +16,8 @@ */ package org.apache.lucene.util; +import com.carrotsearch.randomizedtesting.RandomizedTest; +import com.carrotsearch.randomizedtesting.rules.TestRuleAdapter; import java.io.IOException; import java.io.OutputStream; import java.io.PrintStream; @@ -32,21 +34,18 @@ import java.util.List; import java.util.Locale; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; - -import com.carrotsearch.randomizedtesting.RandomizedTest; -import com.carrotsearch.randomizedtesting.rules.TestRuleAdapter; import org.apache.lucene.util.LuceneTestCase.Monster; import org.apache.lucene.util.LuceneTestCase.SuppressSysoutChecks; - /** * This test rule serves two purposes: - *
<ul>
- * <li>it fails the test if it prints too much to stdout and stderr (tests that chatter too much
- * are discouraged)</li>
- * <li>the rule ensures an absolute hard limit of stuff written to stdout and stderr to prevent
- * accidental infinite loops from filling all available disk space with persisted output.</li>
- * </ul>
+ *
+ * <ul>
+ *   <li>it fails the test if it prints too much to stdout and stderr (tests that chatter too much
+ *       are discouraged)
+ *   <li>the rule ensures an absolute hard limit of stuff written to stdout and stderr to prevent
+ *       accidental infinite loops from filling all available disk space with persisted output.
+ * </ul>
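+ *
+ * <p>For example, annotating a suite with {@code @TestRuleLimitSysouts.Limit(bytes = 16384)} sets
+ * its per-class sysout cap to 16 KB (the value is illustrative; see {@link #DEFAULT_LIMIT}).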
    * * The rule is not enforced for certain test types (see {@link #isEnforced()}). */ @@ -56,25 +55,20 @@ public class TestRuleLimitSysouts extends TestRuleAdapter { private static final long GB = MB * 1024; /** - * Max limit of bytes printed to either {@link System#out} or {@link System#err}. - * This limit is enforced per-class (suite). + * Max limit of bytes printed to either {@link System#out} or {@link System#err}. This limit is + * enforced per-class (suite). */ - public final static long DEFAULT_LIMIT = 8 * KB; + public static final long DEFAULT_LIMIT = 8 * KB; + + /** Max hard limit of sysout bytes. */ + public static final long DEFAULT_HARD_LIMIT = 2 * GB; /** - * Max hard limit of sysout bytes. + * Maximum limit allowed for {@link Limit#bytes()} before sysout check suppression is suggested. */ - public final static long DEFAULT_HARD_LIMIT = 2 * GB; + public static final int MAX_LIMIT = 1 * 1024 * 1024; - /** - * Maximum limit allowed for {@link Limit#bytes()} before sysout check suppression - * is suggested. - */ - public final static int MAX_LIMIT = 1 * 1024 * 1024; - - /** - * An annotation specifying the limit of bytes per class. - */ + /** An annotation specifying the limit of bytes per class. */ @Documented @Inherited @Retention(RetentionPolicy.RUNTIME) @@ -87,27 +81,26 @@ public class TestRuleLimitSysouts extends TestRuleAdapter { long bytes(); /** - * Maximum number of bytes passed to actual stdout or stderr. Any writes beyond this limit will be - * ignored (will actually cause an IOException on the underlying output, but this is silently ignored - * by PrintStreams). + * Maximum number of bytes passed to actual stdout or stderr. Any writes beyond this limit will + * be ignored (will actually cause an IOException on the underlying output, but this is silently + * ignored by PrintStreams). */ long hardLimit() default DEFAULT_HARD_LIMIT; } - private final static AtomicLong bytesWritten = new AtomicLong(); + private static final AtomicLong bytesWritten = new AtomicLong(); - private final static PrintStream capturedSystemOut; - private final static PrintStream capturedSystemErr; + private static final PrintStream capturedSystemOut; + private static final PrintStream capturedSystemErr; - private final static AtomicLong hardLimit; + private static final AtomicLong hardLimit; /** - * We capture system output and error streams as early as possible because - * certain components (like the Java logging system) steal these references and - * never refresh them. - * - * Also, for this exact reason, we cannot change delegate streams for every suite. - * This isn't as elegant as it should be, but there's no workaround for this. + * We capture system output and error streams as early as possible because certain components + * (like the Java logging system) steal these references and never refresh them. + * + *
<p>
    Also, for this exact reason, we cannot change delegate streams for every suite. This isn't + * as elegant as it should be, but there's no workaround for this. */ static { PrintStream sout = System.out; @@ -117,22 +110,25 @@ public class TestRuleLimitSysouts extends TestRuleAdapter { serr.flush(); hardLimit = new AtomicLong(Integer.MAX_VALUE); - LimitPredicate limitCheck = (before, after) -> { - long limit = hardLimit.get(); - if (after > limit) { - if (before < limit) { - // Crossing the boundary. Write directly to stderr. - serr.println("\nNOTE: Hard limit on sysout exceeded, further output truncated.\n"); - serr.flush(); - } - throw new IOException("Hard limit on sysout exceeded."); - } - }; + LimitPredicate limitCheck = + (before, after) -> { + long limit = hardLimit.get(); + if (after > limit) { + if (before < limit) { + // Crossing the boundary. Write directly to stderr. + serr.println("\nNOTE: Hard limit on sysout exceeded, further output truncated.\n"); + serr.flush(); + } + throw new IOException("Hard limit on sysout exceeded."); + } + }; final String csn = Charset.defaultCharset().name(); try { - capturedSystemOut = new PrintStream(new DelegateStream(sout, bytesWritten, limitCheck), true, csn); - capturedSystemErr = new PrintStream(new DelegateStream(serr, bytesWritten, limitCheck), true, csn); + capturedSystemOut = + new PrintStream(new DelegateStream(sout, bytesWritten, limitCheck), true, csn); + capturedSystemErr = + new PrintStream(new DelegateStream(serr, bytesWritten, limitCheck), true, csn); } catch (UnsupportedEncodingException e) { throw new UncheckedIOException(e); } @@ -141,9 +137,7 @@ public class TestRuleLimitSysouts extends TestRuleAdapter { System.setErr(capturedSystemErr); } - /** - * Test failures from any tests or rules before. - */ + /** Test failures from any tests or rules before. */ private final TestRuleMarkFailure failureMarker; static interface LimitPredicate { @@ -151,15 +145,16 @@ public class TestRuleLimitSysouts extends TestRuleAdapter { } /** - * Tracks the number of bytes written to an underlying stream by - * incrementing an {@link AtomicInteger}. + * Tracks the number of bytes written to an underlying stream by incrementing an {@link + * AtomicInteger}. */ - final static class DelegateStream extends OutputStream { + static final class DelegateStream extends OutputStream { private final OutputStream delegate; private final LimitPredicate limitPredicate; private final AtomicLong bytesCounter; - public DelegateStream(OutputStream delegate, AtomicLong bytesCounter, LimitPredicate limitPredicate) { + public DelegateStream( + OutputStream delegate, AtomicLong bytesCounter, LimitPredicate limitPredicate) { this.delegate = delegate; this.bytesCounter = bytesCounter; this.limitPredicate = limitPredicate; @@ -169,7 +164,7 @@ public class TestRuleLimitSysouts extends TestRuleAdapter { public void write(byte[] b) throws IOException { this.write(b, 0, b.length); } - + @Override public void write(byte[] b, int off, int len) throws IOException { if (len > 0) { @@ -221,38 +216,43 @@ public class TestRuleLimitSysouts extends TestRuleAdapter { Limit limitAnn = target.getAnnotation(Limit.class); long bytes = limitAnn.bytes(); if (bytes < 0 || bytes > MAX_LIMIT) { - throw new AssertionError("This sysout limit is very high: " + bytes + ". 
Did you want to use " - + "@" + LuceneTestCase.SuppressSysoutChecks.class.getName() + " annotation to " - + "avoid sysout checks entirely (this is discouraged)?"); + throw new AssertionError( + "This sysout limit is very high: " + + bytes + + ". Did you want to use " + + "@" + + LuceneTestCase.SuppressSysoutChecks.class.getName() + + " annotation to " + + "avoid sysout checks entirely (this is discouraged)?"); } hardLimit.set(limitAnn.hardLimit()); } } - /** - * Ensures {@link System#out} and {@link System#err} point to delegate streams. - */ + /** Ensures {@link System#out} and {@link System#err} point to delegate streams. */ public static void checkCaptureStreams() { // Make sure we still hold the right references to wrapper streams. if (System.out != capturedSystemOut) { - throw new AssertionError("Something has changed System.out to: " + System.out.getClass().getName()); + throw new AssertionError( + "Something has changed System.out to: " + System.out.getClass().getName()); } if (System.err != capturedSystemErr) { - throw new AssertionError("Something has changed System.err to: " + System.err.getClass().getName()); + throw new AssertionError( + "Something has changed System.err to: " + System.err.getClass().getName()); } } protected boolean isEnforced() { Class target = RandomizedTest.getContext().getTargetClass(); - if (LuceneTestCase.VERBOSE || - LuceneTestCase.INFOSTREAM || - target.isAnnotationPresent(Monster.class) || - target.isAnnotationPresent(SuppressSysoutChecks.class)) { + if (LuceneTestCase.VERBOSE + || LuceneTestCase.INFOSTREAM + || target.isAnnotationPresent(Monster.class) + || target.isAnnotationPresent(SuppressSysoutChecks.class)) { return false; } - + if (!target.isAnnotationPresent(Limit.class)) { return false; } @@ -261,33 +261,35 @@ public class TestRuleLimitSysouts extends TestRuleAdapter { } /** - * We're only interested in failing the suite if it was successful (otherwise - * just propagate the original problem and don't bother doing anything else). + * We're only interested in failing the suite if it was successful (otherwise just propagate the + * original problem and don't bother doing anything else). */ @Override protected void afterIfSuccessful() throws Throwable { if (isEnforced()) { checkCaptureStreams(); - + // Flush any buffers. capturedSystemOut.flush(); capturedSystemErr.flush(); - + // Check for offenders, but only if everything was successful so far. Limit ann = RandomizedTest.getContext().getTargetClass().getAnnotation(Limit.class); long limit = ann.bytes(); long hardLimit = ann.hardLimit(); long written = bytesWritten.get(); if (written >= limit && failureMarker.wasSuccessful()) { - throw new AssertionError(String.format(Locale.ENGLISH, - "The test or suite printed %d bytes to stdout and stderr," + - " even though the limit was set to %d bytes.%s Increase the limit with @%s, ignore it completely" + - " with @%s or run with -Dtests.verbose=true", - written, - limit, - written <= hardLimit ? "" : "Hard limit was enforced so output is truncated.", - Limit.class.getSimpleName(), - SuppressSysoutChecks.class.getSimpleName())); + throw new AssertionError( + String.format( + Locale.ENGLISH, + "The test or suite printed %d bytes to stdout and stderr," + + " even though the limit was set to %d bytes.%s Increase the limit with @%s, ignore it completely" + + " with @%s or run with -Dtests.verbose=true", + written, + limit, + written <= hardLimit ? 
"" : "Hard limit was enforced so output is truncated.", + Limit.class.getSimpleName(), + SuppressSysoutChecks.class.getSimpleName())); } } } @@ -304,4 +306,3 @@ public class TestRuleLimitSysouts extends TestRuleAdapter { hardLimit.set(Integer.MAX_VALUE); } } - diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleMarkFailure.java b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleMarkFailure.java index 11558db4bee..e706af2803c 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleMarkFailure.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleMarkFailure.java @@ -18,23 +18,20 @@ package org.apache.lucene.util; import java.util.ArrayList; import java.util.List; - import org.junit.internal.AssumptionViolatedException; import org.junit.rules.TestRule; import org.junit.runner.Description; import org.junit.runners.model.Statement; -/** - * A rule for marking failed tests and suites. - */ +/** A rule for marking failed tests and suites. */ public final class TestRuleMarkFailure implements TestRule { - private final TestRuleMarkFailure [] chained; + private final TestRuleMarkFailure[] chained; private volatile boolean failures; public TestRuleMarkFailure(TestRuleMarkFailure... chained) { this.chained = chained; } - + @Override public Statement apply(final Statement s, Description d) { return new Statement() { @@ -56,8 +53,7 @@ public final class TestRuleMarkFailure implements TestRule { } /** - * Is a given exception (or a MultipleFailureException) an - * {@link AssumptionViolatedException}? + * Is a given exception (or a MultipleFailureException) an {@link AssumptionViolatedException}? */ public static boolean isAssumption(Throwable t) { for (Throwable t2 : expandFromMultiple(t)) { @@ -68,9 +64,7 @@ public final class TestRuleMarkFailure implements TestRule { return true; } - /** - * Expand from multi-exception wrappers. - */ + /** Expand from multi-exception wrappers. */ private static List expandFromMultiple(Throwable t) { return expandFromMultiple(t, new ArrayList()); } @@ -88,9 +82,7 @@ public final class TestRuleMarkFailure implements TestRule { return list; } - /** - * Taints this object and any chained as having failures. - */ + /** Taints this object and any chained as having failures. */ public void markFailed() { failures = true; for (TestRuleMarkFailure next : chained) { @@ -98,16 +90,12 @@ public final class TestRuleMarkFailure implements TestRule { } } - /** - * Check if this object had any marked failures. - */ + /** Check if this object had any marked failures. */ public boolean hadFailures() { return failures; } - /** - * Check if this object was successful (the opposite of {@link #hadFailures()}). - */ + /** Check if this object was successful (the opposite of {@link #hadFailures()}). 
*/ public boolean wasSuccessful() { return !hadFailures(); } diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleRestoreSystemProperties.java b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleRestoreSystemProperties.java index c554fefa19e..23ff9b58f8f 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleRestoreSystemProperties.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleRestoreSystemProperties.java @@ -16,30 +16,28 @@ */ package org.apache.lucene.util; +import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule; +import com.carrotsearch.randomizedtesting.rules.TestRuleAdapter; import java.util.HashMap; import java.util.List; import java.util.Map; -import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule; -import com.carrotsearch.randomizedtesting.rules.TestRuleAdapter; - /** - * Restore a given set of system properties to a snapshot taken at the beginning - * of the rule. - * - * This is semantically similar to {@link SystemPropertiesRestoreRule} but - * the list of properties to restore must be provided explicitly (because the security - * manager prevents us from accessing the whole set of properties). - * - * All properties to be restored must have r/w property permission. + * Restore a given set of system properties to a snapshot taken at the beginning of the rule. + * + *

<p>This is semantically similar to {@link SystemPropertiesRestoreRule} but the list of properties + * to restore must be provided explicitly (because the security manager prevents us from accessing + * the whole set of properties). + * + *
<p>
    All properties to be restored must have r/w property permission. */ -public class TestRuleRestoreSystemProperties extends TestRuleAdapter { +public class TestRuleRestoreSystemProperties extends TestRuleAdapter { private final String[] propertyNames; private final Map restore = new HashMap(); public TestRuleRestoreSystemProperties(String... propertyNames) { this.propertyNames = propertyNames; - + if (propertyNames.length == 0) { throw new IllegalArgumentException("No properties to restore? Odd."); } @@ -54,7 +52,7 @@ public class TestRuleRestoreSystemProperties extends TestRuleAdapter { restore.put(key, System.getProperty(key)); } } - + @Override protected void afterAlways(List errors) throws Throwable { for (String key : propertyNames) { diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleSetupAndRestoreClassEnv.java b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleSetupAndRestoreClassEnv.java index 1d83913b976..feaafe2f76d 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleSetupAndRestoreClassEnv.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleSetupAndRestoreClassEnv.java @@ -16,15 +16,25 @@ */ package org.apache.lucene.util; +import static org.apache.lucene.util.LuceneTestCase.INFOSTREAM; +import static org.apache.lucene.util.LuceneTestCase.TEST_CODEC; +import static org.apache.lucene.util.LuceneTestCase.TEST_DOCVALUESFORMAT; +import static org.apache.lucene.util.LuceneTestCase.TEST_POSTINGSFORMAT; +import static org.apache.lucene.util.LuceneTestCase.VERBOSE; +import static org.apache.lucene.util.LuceneTestCase.assumeFalse; +import static org.apache.lucene.util.LuceneTestCase.localeForLanguageTag; +import static org.apache.lucene.util.LuceneTestCase.random; +import static org.apache.lucene.util.LuceneTestCase.randomLocale; +import static org.apache.lucene.util.LuceneTestCase.randomTimeZone; + +import com.carrotsearch.randomizedtesting.RandomizedContext; +import com.carrotsearch.randomizedtesting.generators.RandomPicks; import java.io.PrintStream; import java.util.Arrays; import java.util.HashSet; import java.util.Locale; import java.util.Random; import java.util.TimeZone; - -import com.carrotsearch.randomizedtesting.RandomizedContext; -import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.DocValuesFormat; import org.apache.lucene.codecs.PostingsFormat; @@ -44,21 +54,7 @@ import org.apache.lucene.util.LuceneTestCase.LiveIWCFlushMode; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; import org.junit.internal.AssumptionViolatedException; -import static org.apache.lucene.util.LuceneTestCase.INFOSTREAM; -import static org.apache.lucene.util.LuceneTestCase.TEST_CODEC; -import static org.apache.lucene.util.LuceneTestCase.TEST_DOCVALUESFORMAT; -import static org.apache.lucene.util.LuceneTestCase.TEST_POSTINGSFORMAT; -import static org.apache.lucene.util.LuceneTestCase.VERBOSE; -import static org.apache.lucene.util.LuceneTestCase.assumeFalse; -import static org.apache.lucene.util.LuceneTestCase.localeForLanguageTag; -import static org.apache.lucene.util.LuceneTestCase.random; -import static org.apache.lucene.util.LuceneTestCase.randomLocale; -import static org.apache.lucene.util.LuceneTestCase.randomTimeZone; - -/** - * Setup and restore suite-level environment (fine grained junk that - * doesn't fit anywhere else). 
- */ +/** Setup and restore suite-level environment (fine grained junk that doesn't fit anywhere else). */ final class TestRuleSetupAndRestoreClassEnv extends AbstractBeforeAfterRule { private Codec savedCodec; private Locale savedLocale; @@ -70,14 +66,10 @@ final class TestRuleSetupAndRestoreClassEnv extends AbstractBeforeAfterRule { Similarity similarity; Codec codec; - /** - * Indicates whether the rule has executed its {@link #before()} method fully. - */ + /** Indicates whether the rule has executed its {@link #before()} method fully. */ private boolean initialized; - /** - * @see SuppressCodecs - */ + /** @see SuppressCodecs */ HashSet avoidCodecs; static class ThreadNameFixingPrintStreamInfoStream extends PrintStreamInfoStream { @@ -98,10 +90,11 @@ final class TestRuleSetupAndRestoreClassEnv extends AbstractBeforeAfterRule { } else { name = Thread.currentThread().getName(); } - stream.println(component + " " + messageID + " [" + getTimestamp() + "; " + name + "]: " + message); + stream.println( + component + " " + messageID + " [" + getTimestamp() + "; " + name + "]: " + message); } } - + public boolean isInitialized() { return initialized; } @@ -136,15 +129,16 @@ final class TestRuleSetupAndRestoreClassEnv extends AbstractBeforeAfterRule { SuppressCodecs a = targetClass.getAnnotation(SuppressCodecs.class); avoidCodecs.addAll(Arrays.asList(a.value())); } - + savedCodec = Codec.getDefault(); int randomVal = random.nextInt(11); if ("default".equals(TEST_CODEC)) { codec = savedCodec; // just use the default, don't randomize - } else if (("random".equals(TEST_POSTINGSFORMAT) == false) || ("random".equals(TEST_DOCVALUESFORMAT) == false)) { + } else if (("random".equals(TEST_POSTINGSFORMAT) == false) + || ("random".equals(TEST_DOCVALUESFORMAT) == false)) { // the user wired postings or DV: this is messy // refactor into RandomCodec.... 
- + final PostingsFormat format; if ("random".equals(TEST_POSTINGSFORMAT)) { format = new AssertingPostingsFormat(); @@ -153,42 +147,53 @@ final class TestRuleSetupAndRestoreClassEnv extends AbstractBeforeAfterRule { } else { format = PostingsFormat.forName(TEST_POSTINGSFORMAT); } - + final DocValuesFormat dvFormat; if ("random".equals(TEST_DOCVALUESFORMAT)) { dvFormat = new AssertingDocValuesFormat(); } else { dvFormat = DocValuesFormat.forName(TEST_DOCVALUESFORMAT); } - - codec = new AssertingCodec() { - @Override - public PostingsFormat getPostingsFormatForField(String field) { - return format; - } - @Override - public DocValuesFormat getDocValuesFormatForField(String field) { - return dvFormat; - } + codec = + new AssertingCodec() { + @Override + public PostingsFormat getPostingsFormatForField(String field) { + return format; + } - @Override - public String toString() { - return super.toString() + ": " + format.toString() + ", " + dvFormat.toString(); - } - }; - } else if ("SimpleText".equals(TEST_CODEC) || ("random".equals(TEST_CODEC) && randomVal == 9 && LuceneTestCase.rarely(random) && !shouldAvoidCodec("SimpleText"))) { + @Override + public DocValuesFormat getDocValuesFormatForField(String field) { + return dvFormat; + } + + @Override + public String toString() { + return super.toString() + ": " + format.toString() + ", " + dvFormat.toString(); + } + }; + } else if ("SimpleText".equals(TEST_CODEC) + || ("random".equals(TEST_CODEC) + && randomVal == 9 + && LuceneTestCase.rarely(random) + && !shouldAvoidCodec("SimpleText"))) { codec = new SimpleTextCodec(); - } else if ("CheapBastard".equals(TEST_CODEC) || ("random".equals(TEST_CODEC) && randomVal == 8 && !shouldAvoidCodec("CheapBastard") && !shouldAvoidCodec("Lucene41"))) { + } else if ("CheapBastard".equals(TEST_CODEC) + || ("random".equals(TEST_CODEC) + && randomVal == 8 + && !shouldAvoidCodec("CheapBastard") + && !shouldAvoidCodec("Lucene41"))) { // we also avoid this codec if Lucene41 is avoided, since thats the postings format it uses. 
codec = new CheapBastardCodec(); - } else if ("Asserting".equals(TEST_CODEC) || ("random".equals(TEST_CODEC) && randomVal == 7 && !shouldAvoidCodec("Asserting"))) { + } else if ("Asserting".equals(TEST_CODEC) + || ("random".equals(TEST_CODEC) && randomVal == 7 && !shouldAvoidCodec("Asserting"))) { codec = new AssertingCodec(); - } else if ("Compressing".equals(TEST_CODEC) || ("random".equals(TEST_CODEC) && randomVal == 6 && !shouldAvoidCodec("Compressing"))) { + } else if ("Compressing".equals(TEST_CODEC) + || ("random".equals(TEST_CODEC) && randomVal == 6 && !shouldAvoidCodec("Compressing"))) { codec = CompressingCodec.randomInstance(random); - } else if ("Lucene90".equals(TEST_CODEC) || ("random".equals(TEST_CODEC) && randomVal == 5 && !shouldAvoidCodec("Lucene90"))) { - codec = new Lucene90Codec(RandomPicks.randomFrom(random, Lucene90Codec.Mode.values()) - ); + } else if ("Lucene90".equals(TEST_CODEC) + || ("random".equals(TEST_CODEC) && randomVal == 5 && !shouldAvoidCodec("Lucene90"))) { + codec = new Lucene90Codec(RandomPicks.randomFrom(random, Lucene90Codec.Mode.values())); } else if (!"random".equals(TEST_CODEC)) { codec = Codec.forName(TEST_CODEC); } else if ("random".equals(TEST_POSTINGSFORMAT)) { @@ -218,26 +223,31 @@ final class TestRuleSetupAndRestoreClassEnv extends AbstractBeforeAfterRule { try { checkCodecRestrictions(codec); } catch (AssumptionViolatedException e) { - System.err.println("NOTE: " + e.getMessage() + " Suppressed codecs: " + - Arrays.toString(avoidCodecs.toArray())); + System.err.println( + "NOTE: " + + e.getMessage() + + " Suppressed codecs: " + + Arrays.toString(avoidCodecs.toArray())); throw e; } - // We have "stickiness" so that sometimes all we do is vary the RAM buffer size, other times just the doc count to flush by, else both. - // This way the assertMemory in DocumentsWriterFlushControl sometimes runs (when we always flush by RAM). + // We have "stickiness" so that sometimes all we do is vary the RAM buffer size, other times + // just the doc count to flush by, else both. + // This way the assertMemory in DocumentsWriterFlushControl sometimes runs (when we always flush + // by RAM). LiveIWCFlushMode flushMode; switch (random().nextInt(3)) { - case 0: - flushMode = LiveIWCFlushMode.BY_RAM; - break; - case 1: - flushMode = LiveIWCFlushMode.BY_DOCS; - break; - case 2: - flushMode = LiveIWCFlushMode.EITHER; - break; - default: - throw new AssertionError(); + case 0: + flushMode = LiveIWCFlushMode.BY_RAM; + break; + case 1: + flushMode = LiveIWCFlushMode.BY_DOCS; + break; + case 2: + flushMode = LiveIWCFlushMode.EITHER; + break; + default: + throw new AssertionError(); } LuceneTestCase.setLiveIWCFlushMode(flushMode); @@ -247,31 +257,32 @@ final class TestRuleSetupAndRestoreClassEnv extends AbstractBeforeAfterRule { /** * Check codec restrictions. - * + * * @throws AssumptionViolatedException if the class does not work with a given codec. 
*/ private void checkCodecRestrictions(Codec codec) { - assumeFalse("Class not allowed to use codec: " + codec.getName() + ".", + assumeFalse( + "Class not allowed to use codec: " + codec.getName() + ".", shouldAvoidCodec(codec.getName())); if (codec instanceof RandomCodec && !avoidCodecs.isEmpty()) { - for (String name : ((RandomCodec)codec).formatNames) { - assumeFalse("Class not allowed to use postings format: " + name + ".", - shouldAvoidCodec(name)); + for (String name : ((RandomCodec) codec).formatNames) { + assumeFalse( + "Class not allowed to use postings format: " + name + ".", shouldAvoidCodec(name)); } } PostingsFormat pf = codec.postingsFormat(); - assumeFalse("Class not allowed to use postings format: " + pf.getName() + ".", + assumeFalse( + "Class not allowed to use postings format: " + pf.getName() + ".", shouldAvoidCodec(pf.getName())); - assumeFalse("Class not allowed to use postings format: " + LuceneTestCase.TEST_POSTINGSFORMAT + ".", + assumeFalse( + "Class not allowed to use postings format: " + LuceneTestCase.TEST_POSTINGSFORMAT + ".", shouldAvoidCodec(LuceneTestCase.TEST_POSTINGSFORMAT)); } - /** - * After suite cleanup (always invoked). - */ + /** After suite cleanup (always invoked). */ @Override protected void after() throws Exception { Codec.setDefault(savedCodec); @@ -280,9 +291,7 @@ final class TestRuleSetupAndRestoreClassEnv extends AbstractBeforeAfterRule { if (savedTimeZone != null) TimeZone.setDefault(savedTimeZone); } - /** - * Should a given codec be avoided for the currently executing suite? - */ + /** Should a given codec be avoided for the currently executing suite? */ private boolean shouldAvoidCodec(String codec) { return !avoidCodecs.isEmpty() && avoidCodecs.contains(codec); } diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleSetupAndRestoreInstanceEnv.java b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleSetupAndRestoreInstanceEnv.java index b5128f389a0..c97a05d97d1 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleSetupAndRestoreInstanceEnv.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleSetupAndRestoreInstanceEnv.java @@ -19,8 +19,8 @@ package org.apache.lucene.util; import org.apache.lucene.search.IndexSearcher; /** - * Prepares and restores {@link LuceneTestCase} at instance level - * (fine grained junk that doesn't fit anywhere else). + * Prepares and restores {@link LuceneTestCase} at instance level (fine grained junk that doesn't + * fit anywhere else). */ final class TestRuleSetupAndRestoreInstanceEnv extends AbstractBeforeAfterRule { private int savedBoolMaxClauseCount; diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleSetupTeardownChained.java b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleSetupTeardownChained.java index a169269b0bf..8871bdf9ee4 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleSetupTeardownChained.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleSetupTeardownChained.java @@ -22,21 +22,16 @@ import org.junit.runner.Description; import org.junit.runners.model.Statement; /** - * Make sure {@link LuceneTestCase#setUp()} and {@link LuceneTestCase#tearDown()} were invoked even if they - * have been overriden. We assume nobody will call these out of non-overriden - * methods (they have to be public by contract, unfortunately). The top-level - * methods just set a flag that is checked upon successful execution of each test - * case. 
+ * Make sure {@link LuceneTestCase#setUp()} and {@link LuceneTestCase#tearDown()} were invoked even + * if they have been overriden. We assume nobody will call these out of non-overriden methods (they + * have to be public by contract, unfortunately). The top-level methods just set a flag that is + * checked upon successful execution of each test case. */ class TestRuleSetupTeardownChained implements TestRule { - /** - * @see TestRuleSetupTeardownChained - */ + /** @see TestRuleSetupTeardownChained */ public boolean setupCalled; - /** - * @see TestRuleSetupTeardownChained - */ + /** @see TestRuleSetupTeardownChained */ public boolean teardownCalled; @Override @@ -50,13 +45,13 @@ class TestRuleSetupTeardownChained implements TestRule { // I assume we don't want to check teardown chaining if something happens in the // test because this would obscure the original exception? - if (!setupCalled) { + if (!setupCalled) { Assert.fail("One of the overrides of setUp does not propagate the call."); } - if (!teardownCalled) { + if (!teardownCalled) { Assert.fail("One of the overrides of tearDown does not propagate the call."); } } }; } -} \ No newline at end of file +} diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleStoreClassName.java b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleStoreClassName.java index 8e3332f9fee..6ed86333cc3 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleStoreClassName.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleStoreClassName.java @@ -20,10 +20,7 @@ import org.junit.rules.TestRule; import org.junit.runner.Description; import org.junit.runners.model.Statement; -/** - * Stores the suite name so you can retrieve it - * from {@link #getTestClass()} - */ +/** Stores the suite name so you can retrieve it from {@link #getTestClass()} */ public class TestRuleStoreClassName implements TestRule { private volatile Description description; @@ -37,7 +34,7 @@ public class TestRuleStoreClassName implements TestRule { @Override public void evaluate() throws Throwable { try { - description = d; + description = d; s.evaluate(); } finally { description = null; @@ -45,10 +42,8 @@ public class TestRuleStoreClassName implements TestRule { } }; } - - /** - * Returns the test class currently executing in this rule. - */ + + /** Returns the test class currently executing in this rule. 
*/ public Class getTestClass() { Description localDescription = description; if (localDescription == null) { diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleTemporaryFilesCleanup.java b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleTemporaryFilesCleanup.java index 48675548540..56ce8c72ec7 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleTemporaryFilesCleanup.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleTemporaryFilesCleanup.java @@ -16,6 +16,8 @@ */ package org.apache.lucene.util; +import com.carrotsearch.randomizedtesting.RandomizedContext; +import com.carrotsearch.randomizedtesting.rules.TestRuleAdapter; import java.io.IOException; import java.net.URI; import java.nio.file.FileSystem; @@ -31,7 +33,6 @@ import java.util.List; import java.util.Locale; import java.util.Random; import java.util.Set; - import org.apache.lucene.mockfile.DisableFsyncFS; import org.apache.lucene.mockfile.ExtrasFS; import org.apache.lucene.mockfile.HandleLimitFS; @@ -43,55 +44,40 @@ import org.apache.lucene.util.LuceneTestCase.SuppressFileSystems; import org.apache.lucene.util.LuceneTestCase.SuppressFsync; import org.apache.lucene.util.LuceneTestCase.SuppressTempFileChecks; -import com.carrotsearch.randomizedtesting.RandomizedContext; -import com.carrotsearch.randomizedtesting.rules.TestRuleAdapter; - /** * Checks and cleans up temporary files. - * + * * @see LuceneTestCase#createTempDir() * @see LuceneTestCase#createTempFile() */ final class TestRuleTemporaryFilesCleanup extends TestRuleAdapter { - /** - * Retry to create temporary file name this many times. - */ + /** Retry to create temporary file name this many times. */ private static final int TEMP_NAME_RETRY_THRESHOLD = 9999; - /** - * Writeable temporary base folder. - */ + /** Writeable temporary base folder. */ private Path javaTempDir; - /** - * Per-test class temporary folder. - */ + /** Per-test class temporary folder. */ private Path tempDirBase; - - /** - * Per-test filesystem - */ + + /** Per-test filesystem */ private FileSystem fileSystem; - /** - * Suite failure marker. - */ + /** Suite failure marker. */ private final TestRuleMarkFailure failureMarker; /** - * A queue of temporary resources to be removed after the - * suite completes. + * A queue of temporary resources to be removed after the suite completes. + * * @see #registerToRemoveAfterSuite(Path) */ - private final static List cleanupQueue = new ArrayList(); + private static final List cleanupQueue = new ArrayList(); public TestRuleTemporaryFilesCleanup(TestRuleMarkFailure failureMarker) { this.failureMarker = failureMarker; } - /** - * Register temporary folder for removal after the suite completes. - */ + /** Register temporary folder for removal after the suite completes. */ void registerToRemoveAfterSuite(Path f) { assert f != null; @@ -113,11 +99,11 @@ final class TestRuleTemporaryFilesCleanup extends TestRuleAdapter { fileSystem = initializeFileSystem(); javaTempDir = initializeJavaTempDir(); } - + // os/config-independent limit for too many open files // TODO: can we make this lower? 
private static final int MAX_OPEN_FILES = 2048; - + private boolean allowed(Set avoid, Class clazz) { if (avoid.contains("*") || avoid.contains(clazz.getSimpleName())) { return false; @@ -125,7 +111,7 @@ final class TestRuleTemporaryFilesCleanup extends TestRuleAdapter { return true; } } - + private FileSystem initializeFileSystem() { Class targetClass = RandomizedContext.current().getTargetClass(); Set avoid = new HashSet<>(); @@ -135,26 +121,31 @@ final class TestRuleTemporaryFilesCleanup extends TestRuleAdapter { } FileSystem fs = FileSystems.getDefault(); if (LuceneTestCase.VERBOSE && allowed(avoid, VerboseFS.class)) { - fs = new VerboseFS(fs, new TestRuleSetupAndRestoreClassEnv.ThreadNameFixingPrintStreamInfoStream(System.out)).getFileSystem(null); + fs = + new VerboseFS( + fs, + new TestRuleSetupAndRestoreClassEnv.ThreadNameFixingPrintStreamInfoStream( + System.out)) + .getFileSystem(null); } - + Random random = RandomizedContext.current().getRandom(); - + // speed up tests by omitting actual fsync calls to the hardware most of the time. if (targetClass.isAnnotationPresent(SuppressFsync.class) || random.nextInt(100) > 0) { if (allowed(avoid, DisableFsyncFS.class)) { fs = new DisableFsyncFS(fs).getFileSystem(null); } } - + // impacts test reproducibility across platforms. if (random.nextInt(100) > 0) { if (allowed(avoid, ShuffleFS.class)) { fs = new ShuffleFS(fs, random.nextLong()).getFileSystem(null); } } - - // otherwise, wrap with mockfilesystems for additional checks. some + + // otherwise, wrap with mockfilesystems for additional checks. some // of these have side effects (e.g. concurrency) so it doesn't always happen. if (random.nextInt(10) > 0) { if (allowed(avoid, LeakFS.class)) { @@ -182,12 +173,12 @@ final class TestRuleTemporaryFilesCleanup extends TestRuleAdapter { } private Path initializeJavaTempDir() throws IOException { - Path javaTempDir = fileSystem.getPath(System.getProperty("tempDir", System.getProperty("java.io.tmpdir"))); - + Path javaTempDir = + fileSystem.getPath(System.getProperty("tempDir", System.getProperty("java.io.tmpdir"))); + Files.createDirectories(javaTempDir); - assert Files.isDirectory(javaTempDir) && - Files.isWritable(javaTempDir); + assert Files.isDirectory(javaTempDir) && Files.isWritable(javaTempDir); return javaTempDir.toRealPath(); } @@ -195,14 +186,14 @@ final class TestRuleTemporaryFilesCleanup extends TestRuleAdapter { @Override protected void afterAlways(List errors) throws Throwable { // Drain cleanup queue and clear it. - final Path [] everything; + final Path[] everything; final String tempDirBasePath; synchronized (cleanupQueue) { tempDirBasePath = (tempDirBase != null ? 
tempDirBase.toAbsolutePath().toString() : null); tempDirBase = null; Collections.reverse(cleanupQueue); - everything = new Path [cleanupQueue.size()]; + everything = new Path[cleanupQueue.size()]; cleanupQueue.toArray(everything); cleanupQueue.clear(); } @@ -217,9 +208,11 @@ final class TestRuleTemporaryFilesCleanup extends TestRuleAdapter { } catch (IOException e) { Class suiteClass = RandomizedContext.current().getTargetClass(); if (suiteClass.isAnnotationPresent(SuppressTempFileChecks.class)) { - System.err.println("WARNING: Leftover undeleted temporary files (bugUrl: " - + suiteClass.getAnnotation(SuppressTempFileChecks.class).bugUrl() + "): " - + e.getMessage()); + System.err.println( + "WARNING: Leftover undeleted temporary files (bugUrl: " + + suiteClass.getAnnotation(SuppressTempFileChecks.class).bugUrl() + + "): " + + e.getMessage()); return; } throw e; @@ -233,7 +226,7 @@ final class TestRuleTemporaryFilesCleanup extends TestRuleAdapter { } } } - + Path getPerTestClassTempDir() { if (tempDirBase == null) { RandomizedContext ctx = RandomizedContext.current(); @@ -249,14 +242,20 @@ final class TestRuleTemporaryFilesCleanup extends TestRuleAdapter { if (attempt++ >= TEMP_NAME_RETRY_THRESHOLD) { throw new RuntimeException( "Failed to get a temporary name too many times, check your temp directory and consider manually cleaning it: " - + javaTempDir.toAbsolutePath()); + + javaTempDir.toAbsolutePath()); } - f = javaTempDir.resolve(prefix + "_" + ctx.getRunnerSeedAsString() - + "-" + String.format(Locale.ENGLISH, "%03d", attempt)); + f = + javaTempDir.resolve( + prefix + + "_" + + ctx.getRunnerSeedAsString() + + "-" + + String.format(Locale.ENGLISH, "%03d", attempt)); try { Files.createDirectory(f); success = true; - } catch (IOException ignore) {} + } catch (IOException ignore) { + } } while (!success); tempDirBase = f; @@ -264,10 +263,8 @@ final class TestRuleTemporaryFilesCleanup extends TestRuleAdapter { } return tempDirBase; } - - /** - * @see LuceneTestCase#createTempDir() - */ + + /** @see LuceneTestCase#createTempDir() */ public Path createTempDir(String prefix) { Path base = getPerTestClassTempDir(); @@ -278,22 +275,21 @@ final class TestRuleTemporaryFilesCleanup extends TestRuleAdapter { if (attempt++ >= TEMP_NAME_RETRY_THRESHOLD) { throw new RuntimeException( "Failed to get a temporary name too many times, check your temp directory and consider manually cleaning it: " - + base.toAbsolutePath()); + + base.toAbsolutePath()); } f = base.resolve(prefix + "-" + String.format(Locale.ENGLISH, "%03d", attempt)); try { Files.createDirectory(f); success = true; - } catch (IOException ignore) {} + } catch (IOException ignore) { + } } while (!success); registerToRemoveAfterSuite(f); return f; } - /** - * @see LuceneTestCase#createTempFile() - */ + /** @see LuceneTestCase#createTempFile() */ public Path createTempFile(String prefix, String suffix) throws IOException { Path base = getPerTestClassTempDir(); @@ -304,13 +300,14 @@ final class TestRuleTemporaryFilesCleanup extends TestRuleAdapter { if (attempt++ >= TEMP_NAME_RETRY_THRESHOLD) { throw new RuntimeException( "Failed to get a temporary name too many times, check your temp directory and consider manually cleaning it: " - + base.toAbsolutePath()); + + base.toAbsolutePath()); } f = base.resolve(prefix + "-" + String.format(Locale.ENGLISH, "%03d", attempt) + suffix); try { Files.createFile(f); success = true; - } catch (IOException ignore) {} + } catch (IOException ignore) { + } } while (!success); registerToRemoveAfterSuite(f); diff 
--git a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleThreadAndTestName.java b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleThreadAndTestName.java index 059fcea6086..a9aef8e7529 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleThreadAndTestName.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleThreadAndTestName.java @@ -20,19 +20,16 @@ import org.junit.rules.TestRule; import org.junit.runner.Description; import org.junit.runners.model.Statement; -/** - * Saves the executing thread and method name of the test case. - */ +/** Saves the executing thread and method name of the test case. */ final class TestRuleThreadAndTestName implements TestRule { - /** + /** * The thread executing the current test case. + * * @see LuceneTestCase#isTestThread() */ public volatile Thread testCaseThread; - /** - * Test method name. - */ + /** Test method name. */ public volatile String testMethodName = ""; @Override @@ -53,4 +50,4 @@ final class TestRuleThreadAndTestName implements TestRule { } }; } -} \ No newline at end of file +} diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/TestSecurityManager.java b/lucene/test-framework/src/java/org/apache/lucene/util/TestSecurityManager.java index 2eaccbe6d91..622572b3e05 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/TestSecurityManager.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/TestSecurityManager.java @@ -21,51 +21,62 @@ import java.util.Locale; import java.util.function.Predicate; /** - * A {@link SecurityManager} that prevents tests calling {@link System#exit(int)}. - * Only the test runner itself is allowed to exit the JVM. - * All other security checks are handled by the default security policy. - *

<p> - * Use this with {@code -Djava.security.manager=org.apache.lucene.util.TestSecurityManager}. - */ + * A {@link SecurityManager} that prevents tests calling {@link System#exit(int)}. Only the test + * runner itself is allowed to exit the JVM. All other security checks are handled by the default + * security policy. + * + *
<p>
    Use this with {@code -Djava.security.manager=org.apache.lucene.util.TestSecurityManager}. + */ public final class TestSecurityManager extends SecurityManager { - + private static final String JUNIT4_TEST_RUNNER_PACKAGE = "com.carrotsearch.ant.tasks.junit4."; - private static final String ECLIPSE_TEST_RUNNER_PACKAGE = "org.eclipse.jdt.internal.junit.runner."; + private static final String ECLIPSE_TEST_RUNNER_PACKAGE = + "org.eclipse.jdt.internal.junit.runner."; private static final String IDEA_TEST_RUNNER_PACKAGE = "com.intellij.rt.execution.junit."; - private static final String GRADLE_TEST_RUNNER_PACKAGE = "worker.org.gradle.process.internal.worker."; + private static final String GRADLE_TEST_RUNNER_PACKAGE = + "worker.org.gradle.process.internal.worker."; private static final String SYSTEM_CLASS_NAME = System.class.getName(); private static final String RUNTIME_CLASS_NAME = Runtime.class.getName(); - + /** - * Creates a new TestSecurityManager. This ctor is called on JVM startup, - * when {@code -Djava.security.manager=org.apache.lucene.util.TestSecurityManager} - * is passed to JVM. + * Creates a new TestSecurityManager. This ctor is called on JVM startup, when {@code + * -Djava.security.manager=org.apache.lucene.util.TestSecurityManager} is passed to JVM. */ public TestSecurityManager() { super(); } - + /** * {@inheritDoc} - *

<p>This method inspects the stack trace and checks who is calling - * {@link System#exit(int)} and similar methods + * + *
<p>
    This method inspects the stack trace and checks who is calling {@link System#exit(int)} and + * similar methods + * * @throws SecurityException if the caller of this method is not the test runner itself. */ @Override public void checkExit(final int status) { - if (StackWalker.getInstance().walk(s -> s - .dropWhile(Predicate.not(TestSecurityManager::isExitStackFrame)) // skip all internal stack frames - .dropWhile(TestSecurityManager::isExitStackFrame) // skip all exit()/halt() stack frames - .limit(1) // only look at one more frame (caller of exit) - .map(StackFrame::getClassName) - .noneMatch(c -> c.startsWith(JUNIT4_TEST_RUNNER_PACKAGE) || - c.startsWith(ECLIPSE_TEST_RUNNER_PACKAGE) || - c.startsWith(IDEA_TEST_RUNNER_PACKAGE) || - c.startsWith(GRADLE_TEST_RUNNER_PACKAGE)))) { - throw new SecurityException(String.format(Locale.ENGLISH, - "System/Runtime.exit(%1$d) or halt(%1$d) calls are not allowed because they terminate the test runner's JVM.", - status)); + if (StackWalker.getInstance() + .walk( + s -> + // skip all internal stack frames + s.dropWhile(Predicate.not(TestSecurityManager::isExitStackFrame)) + // skip all exit()/halt() stack frames + .dropWhile(TestSecurityManager::isExitStackFrame) + .limit(1) // only look at one more frame (caller of exit) + .map(StackFrame::getClassName) + .noneMatch( + c -> + c.startsWith(JUNIT4_TEST_RUNNER_PACKAGE) + || c.startsWith(ECLIPSE_TEST_RUNNER_PACKAGE) + || c.startsWith(IDEA_TEST_RUNNER_PACKAGE) + || c.startsWith(GRADLE_TEST_RUNNER_PACKAGE)))) { + throw new SecurityException( + String.format( + Locale.ENGLISH, + "System/Runtime.exit(%1$d) or halt(%1$d) calls are not allowed because they terminate the test runner's JVM.", + status)); } // we passed the stack check, delegate to super, so default policy can still deny permission: super.checkExit(status); @@ -73,8 +84,7 @@ public final class TestSecurityManager extends SecurityManager { private static boolean isExitStackFrame(StackFrame f) { final String methodName = f.getMethodName(), className = f.getClassName(); - return ("exit".equals(methodName) || "halt".equals(methodName)) && - (SYSTEM_CLASS_NAME.equals(className) || RUNTIME_CLASS_NAME.equals(className)); + return ("exit".equals(methodName) || "halt".equals(methodName)) + && (SYSTEM_CLASS_NAME.equals(className) || RUNTIME_CLASS_NAME.equals(className)); } - } diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java b/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java index be27b0c23c5..1321a43f880 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java @@ -16,6 +16,8 @@ */ package org.apache.lucene.util; +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; +import com.carrotsearch.randomizedtesting.generators.RandomPicks; import java.io.BufferedInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; @@ -45,9 +47,6 @@ import java.util.regex.Pattern; import java.util.regex.PatternSyntaxException; import java.util.zip.ZipEntry; import java.util.zip.ZipInputStream; - -import com.carrotsearch.randomizedtesting.generators.RandomNumbers; -import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.DocValuesFormat; import org.apache.lucene.codecs.PostingsFormat; @@ -103,38 +102,37 @@ import org.apache.lucene.store.IOContext; import org.apache.lucene.store.NoLockFactory; import 
org.junit.Assert; -/** - * General utility methods for Lucene unit tests. - */ +/** General utility methods for Lucene unit tests. */ public final class TestUtil { private TestUtil() { // } - /** - * A comparator that compares UTF-16 strings / char sequences according to Unicode - * code point order. This can be used to verify {@link BytesRef} order. - *
<p>
    - * Warning: This comparator is rather inefficient, because - * it converts the strings to a {@code int[]} array on each invocation. - * */ - public static final Comparator STRING_CODEPOINT_COMPARATOR = (a, b) -> { - final int[] aCodePoints = a.codePoints().toArray(); - final int[] bCodePoints = b.codePoints().toArray(); - for(int i = 0, c = Math.min(aCodePoints.length, bCodePoints.length); i < c; i++) { - if (aCodePoints[i] < bCodePoints[i]) { - return -1; - } else if (aCodePoints[i] > bCodePoints[i]) { - return 1; - } - } - return aCodePoints.length - bCodePoints.length; - }; - - /** + /** + * A comparator that compares UTF-16 strings / char sequences according to Unicode code point + * order. This can be used to verify {@link BytesRef} order. + * + *
<p>
    Warning: This comparator is rather inefficient, because it converts the strings to a + * {@code int[]} array on each invocation. + */ + public static final Comparator STRING_CODEPOINT_COMPARATOR = + (a, b) -> { + final int[] aCodePoints = a.codePoints().toArray(); + final int[] bCodePoints = b.codePoints().toArray(); + for (int i = 0, c = Math.min(aCodePoints.length, bCodePoints.length); i < c; i++) { + if (aCodePoints[i] < bCodePoints[i]) { + return -1; + } else if (aCodePoints[i] > bCodePoints[i]) { + return 1; + } + } + return aCodePoints.length - bCodePoints.length; + }; + + /** * Convenience method unzipping zipName into destDir. You must pass it a clean destDir. * - * Closes the given InputStream after extracting! + *
<p>
    Closes the given InputStream after extracting! */ public static void unzip(InputStream in, Path destDir) throws IOException { in = new BufferedInputStream(in); @@ -144,14 +142,14 @@ public final class TestUtil { byte[] buffer = new byte[8192]; while ((entry = zipInput.getNextEntry()) != null) { Path targetFile = destDir.resolve(entry.getName()); - + // be on the safe side: do not rely on that directories are always extracted // before their children (although this makes sense, but is it guaranteed?) Files.createDirectories(targetFile.getParent()); if (!entry.isDirectory()) { OutputStream out = Files.newOutputStream(targetFile); int len; - while((len = zipInput.read(buffer)) >= 0) { + while ((len = zipInput.read(buffer)) >= 0) { out.write(buffer, 0, len); } out.close(); @@ -160,15 +158,16 @@ public final class TestUtil { } } } - - /** + + /** * Checks that the provided iterator is well-formed. + * *

<ul> *   <li>is read-only: does not allow {@code remove} *   <li>returns {@code expectedSize} number of elements *   <li>does not return null elements, unless {@code allowNull} is true. - *   <li>throws NoSuchElementException if {@code next} is called - *       after {@code hasNext} returns false. + *   <li>throws NoSuchElementException if {@code next} is called after {@code hasNext} returns + *       false. * </ul>
    */ public static void checkIterator(Iterator iterator, long expectedSize, boolean allowNull) { @@ -195,14 +194,15 @@ public final class TestUtil { // ok } } - - /** + + /** * Checks that the provided iterator is well-formed. + * *
<ul> *   <li>is read-only: does not allow {@code remove} *   <li>does not return null elements. - *   <li>throws NoSuchElementException if {@code next} is called - *       after {@code hasNext} returns false. + *   <li>throws NoSuchElementException if {@code next} is called after {@code hasNext} returns + *       false. * </ul>
    */ public static void checkIterator(Iterator iterator) { @@ -226,6 +226,7 @@ public final class TestUtil { /** * Checks that the provided collection is read-only. + * * @see #checkIterator(Iterator) */ public static void checkReadOnly(Collection coll) { @@ -235,8 +236,13 @@ public final class TestUtil { size += 1; } if (size != coll.size()) { - throw new AssertionError("broken collection, reported size is " - + coll.size() + " but iterator has " + size + " elements: " + coll); + throw new AssertionError( + "broken collection, reported size is " + + coll.size() + + " but iterator has " + + size + + " elements: " + + coll); } if (coll.isEmpty() == false) { @@ -270,35 +276,41 @@ public final class TestUtil { } public static void syncConcurrentMerges(MergeScheduler ms) { - if (ms instanceof ConcurrentMergeScheduler) - ((ConcurrentMergeScheduler) ms).sync(); + if (ms instanceof ConcurrentMergeScheduler) ((ConcurrentMergeScheduler) ms).sync(); } - /** This runs the CheckIndex tool on the index in. If any - * issues are hit, a RuntimeException is thrown; else, - * true is returned. */ + /** + * This runs the CheckIndex tool on the index in. If any issues are hit, a RuntimeException is + * thrown; else, true is returned. + */ public static CheckIndex.Status checkIndex(Directory dir) throws IOException { return checkIndex(dir, true); } - public static CheckIndex.Status checkIndex(Directory dir, boolean doSlowChecks) throws IOException { + public static CheckIndex.Status checkIndex(Directory dir, boolean doSlowChecks) + throws IOException { return checkIndex(dir, doSlowChecks, false, null); } - /** If failFast is true, then throw the first exception when index corruption is hit, instead of moving on to other fields/segments to - * look for any other corruption. */ - public static CheckIndex.Status checkIndex(Directory dir, boolean doSlowChecks, boolean failFast, ByteArrayOutputStream output) throws IOException { + /** + * If failFast is true, then throw the first exception when index corruption is hit, instead of + * moving on to other fields/segments to look for any other corruption. + */ + public static CheckIndex.Status checkIndex( + Directory dir, boolean doSlowChecks, boolean failFast, ByteArrayOutputStream output) + throws IOException { if (output == null) { output = new ByteArrayOutputStream(1024); } // TODO: actually use the dir's locking, unless test uses a special method? // some tests e.g. exception tests become much more complicated if they have to close the writer - try (CheckIndex checker = new CheckIndex(dir, NoLockFactory.INSTANCE.obtainLock(dir, "bogus"))) { + try (CheckIndex checker = + new CheckIndex(dir, NoLockFactory.INSTANCE.obtainLock(dir, "bogus"))) { checker.setDoSlowChecks(doSlowChecks); checker.setFailFast(failFast); checker.setInfoStream(new PrintStream(output, false, IOUtils.UTF_8), false); CheckIndex.Status indexStatus = checker.checkIndex(null); - + if (indexStatus == null || indexStatus.clean == false) { System.out.println("CheckIndex failed"); System.out.println(output.toString(IOUtils.UTF_8)); @@ -311,15 +323,17 @@ public final class TestUtil { } } } - - /** This runs the CheckIndex tool on the Reader. If any - * issues are hit, a RuntimeException is thrown */ + + /** + * This runs the CheckIndex tool on the Reader. 
If any issues are hit, a RuntimeException is + * thrown + */ public static void checkReader(IndexReader reader) throws IOException { for (LeafReaderContext context : reader.leaves()) { checkReader(context.reader(), true); } } - + public static void checkReader(LeafReader reader, boolean doSlowChecks) throws IOException { ByteArrayOutputStream bos = new ByteArrayOutputStream(1024); PrintStream infoStream = new PrintStream(bos, false, IOUtils.UTF_8); @@ -339,18 +353,18 @@ public final class TestUtil { CheckIndex.testTermVectors(codecReader, infoStream, false, doSlowChecks, true); CheckIndex.testDocValues(codecReader, infoStream, true); CheckIndex.testPoints(codecReader, infoStream, true); - + // some checks really against the reader API checkReaderSanity(reader); - + if (LuceneTestCase.INFOSTREAM) { System.out.println(bos.toString(IOUtils.UTF_8)); } - + LeafReader unwrapped = FilterLeafReader.unwrap(reader); if (unwrapped instanceof SegmentReader) { SegmentReader sr = (SegmentReader) unwrapped; - long bytesUsed = sr.ramBytesUsed(); + long bytesUsed = sr.ramBytesUsed(); if (sr.ramBytesUsed() < 0) { throw new IllegalStateException("invalid ramBytesUsed for reader: " + bytesUsed); } @@ -359,70 +373,72 @@ public final class TestUtil { // FieldInfos should be cached at the reader and always return the same instance if (reader.getFieldInfos() != reader.getFieldInfos()) { - throw new RuntimeException("getFieldInfos() returned different instances for class: "+reader.getClass()); + throw new RuntimeException( + "getFieldInfos() returned different instances for class: " + reader.getClass()); } } - + // used by TestUtil.checkReader to check some things really unrelated to the index, // just looking for bugs in indexreader implementations. private static void checkReaderSanity(LeafReader reader) throws IOException { for (FieldInfo info : reader.getFieldInfos()) { - + // reader shouldn't return normValues if the field does not have them if (!info.hasNorms()) { if (reader.getNormValues(info.name) != null) { throw new RuntimeException("field: " + info.name + " should omit norms but has them!"); } } - + // reader shouldn't return docValues if the field does not have them // reader shouldn't return multiple docvalues types for the same field. 
- switch(info.getDocValuesType()) { + switch (info.getDocValuesType()) { case NONE: - if (reader.getBinaryDocValues(info.name) != null || - reader.getNumericDocValues(info.name) != null || - reader.getSortedDocValues(info.name) != null || - reader.getSortedSetDocValues(info.name) != null) { - throw new RuntimeException("field: " + info.name + " has docvalues but should omit them!"); + if (reader.getBinaryDocValues(info.name) != null + || reader.getNumericDocValues(info.name) != null + || reader.getSortedDocValues(info.name) != null + || reader.getSortedSetDocValues(info.name) != null) { + throw new RuntimeException( + "field: " + info.name + " has docvalues but should omit them!"); } break; case SORTED: - if (reader.getBinaryDocValues(info.name) != null || - reader.getNumericDocValues(info.name) != null || - reader.getSortedNumericDocValues(info.name) != null || - reader.getSortedSetDocValues(info.name) != null) { + if (reader.getBinaryDocValues(info.name) != null + || reader.getNumericDocValues(info.name) != null + || reader.getSortedNumericDocValues(info.name) != null + || reader.getSortedSetDocValues(info.name) != null) { throw new RuntimeException(info.name + " returns multiple docvalues types!"); } break; case SORTED_NUMERIC: - if (reader.getBinaryDocValues(info.name) != null || - reader.getNumericDocValues(info.name) != null || - reader.getSortedSetDocValues(info.name) != null || - reader.getSortedDocValues(info.name) != null) { + if (reader.getBinaryDocValues(info.name) != null + || reader.getNumericDocValues(info.name) != null + || reader.getSortedSetDocValues(info.name) != null + || reader.getSortedDocValues(info.name) != null) { throw new RuntimeException(info.name + " returns multiple docvalues types!"); } break; case SORTED_SET: - if (reader.getBinaryDocValues(info.name) != null || - reader.getNumericDocValues(info.name) != null || - reader.getSortedNumericDocValues(info.name) != null || - reader.getSortedDocValues(info.name) != null) { + if (reader.getBinaryDocValues(info.name) != null + || reader.getNumericDocValues(info.name) != null + || reader.getSortedNumericDocValues(info.name) != null + || reader.getSortedDocValues(info.name) != null) { throw new RuntimeException(info.name + " returns multiple docvalues types!"); } break; case BINARY: - if (reader.getNumericDocValues(info.name) != null || - reader.getSortedDocValues(info.name) != null || - reader.getSortedNumericDocValues(info.name) != null || - reader.getSortedSetDocValues(info.name) != null) { + if (reader.getNumericDocValues(info.name) != null + || reader.getSortedDocValues(info.name) != null + || reader.getSortedNumericDocValues(info.name) != null + || reader.getSortedSetDocValues(info.name) != null) { throw new RuntimeException(info.name + " returns multiple docvalues types!"); } break; case NUMERIC: - if (reader.getBinaryDocValues(info.name) != null || - reader.getSortedDocValues(info.name) != null || - reader.getSortedNumericDocValues(info.name) != null || - reader.getSortedSetDocValues(info.name) != null) { + if (reader.getBinaryDocValues(info.name) != null + || reader.getSortedDocValues(info.name) != null + || reader.getSortedNumericDocValues(info.name) != null + || reader.getSortedSetDocValues(info.name) != null) { throw new RuntimeException(info.name + " returns multiple docvalues types!"); } break; @@ -440,22 +456,22 @@ public final class TestUtil { /** start and end are BOTH inclusive */ public static long nextLong(Random r, long start, long end) { assert end >= start : "start=" + start + ",end=" + end; - 
final BigInteger range = BigInteger.valueOf(end).add(BigInteger.valueOf(1)).subtract(BigInteger.valueOf(start)); + final BigInteger range = + BigInteger.valueOf(end).add(BigInteger.valueOf(1)).subtract(BigInteger.valueOf(start)); if (range.compareTo(BigInteger.valueOf(Integer.MAX_VALUE)) <= 0) { return start + r.nextInt(range.intValue()); } else { // probably not evenly distributed when range is large, but OK for tests - final BigInteger augend = new BigDecimal(range).multiply(new BigDecimal(r.nextDouble())).toBigInteger(); + final BigInteger augend = + new BigDecimal(range).multiply(new BigDecimal(r.nextDouble())).toBigInteger(); final long result = BigInteger.valueOf(start).add(augend).longValue(); assert result >= start; assert result <= end; return result; } } - - /** - * Returns a randomish big integer with {@code 1 .. maxBytes} storage. - */ + + /** Returns a randomish big integer with {@code 1 .. maxBytes} storage. */ public static BigInteger nextBigInteger(Random random, int maxBytes) { int length = TestUtil.nextInt(random, 1, maxBytes); byte[] buffer = new byte[length]; @@ -466,7 +482,7 @@ public final class TestUtil { public static String randomSimpleString(Random r, int maxLength) { return randomSimpleString(r, 0, maxLength); } - + public static String randomSimpleString(Random r, int minLength, int maxLength) { final int end = nextInt(r, minLength, maxLength); if (end == 0) { @@ -480,7 +496,8 @@ public final class TestUtil { return new String(buffer, 0, end); } - public static String randomSimpleStringRange(Random r, char minChar, char maxChar, int maxLength) { + public static String randomSimpleStringRange( + Random r, char minChar, char maxChar, int maxLength) { final int end = nextInt(r, 0, maxLength); if (end == 0) { // allow 0 length @@ -502,9 +519,7 @@ public final class TestUtil { return randomUnicodeString(r, 20); } - /** - * Returns a random string up to a certain length. - */ + /** Returns a random string up to a certain length. */ public static String randomUnicodeString(Random r, int maxLength) { final int end = nextInt(r, 0, maxLength); if (end == 0) { @@ -516,14 +531,12 @@ public final class TestUtil { return new String(buffer, 0, end); } - /** - * Fills provided char[] with valid random unicode code - * unit sequence. - */ - public static void randomFixedLengthUnicodeString(Random random, char[] chars, int offset, int length) { + /** Fills provided char[] with valid random unicode code unit sequence. */ + public static void randomFixedLengthUnicodeString( + Random random, char[] chars, int offset, int length) { int i = offset; final int end = offset + length; - while(i < end) { + while (i < end) { final int t = random.nextInt(5); if (0 == t && i < length - 1) { // Make a surrogate pair @@ -542,44 +555,44 @@ public final class TestUtil { } } } - + /** - * Returns a String thats "regexpish" (contains lots of operators typically found in regular expressions) - * If you call this enough times, you might get a valid regex! + * Returns a String thats "regexpish" (contains lots of operators typically found in regular + * expressions) If you call this enough times, you might get a valid regex! */ public static String randomRegexpishString(Random r) { return randomRegexpishString(r, 20); } /** - * Maximum recursion bound for '+' and '*' replacements in - * {@link #randomRegexpishString(Random, int)}. + * Maximum recursion bound for '+' and '*' replacements in {@link #randomRegexpishString(Random, + * int)}. 
*/ - private final static int maxRecursionBound = 5; + private static final int maxRecursionBound = 5; + + /** Operators for {@link #randomRegexpishString(Random, int)}. */ + private static final List ops = + Arrays.asList( + ".", + "?", + "{0," + maxRecursionBound + "}", // bounded replacement for '*' + "{1," + maxRecursionBound + "}", // bounded replacement for '+' + "(", + ")", + "-", + "[", + "]", + "|"); /** - * Operators for {@link #randomRegexpishString(Random, int)}. - */ - private final static List ops = Arrays.asList( - ".", "?", - "{0," + maxRecursionBound + "}", // bounded replacement for '*' - "{1," + maxRecursionBound + "}", // bounded replacement for '+' - "(", - ")", - "-", - "[", - "]", - "|" - ); - - /** - * Returns a String thats "regexpish" (contains lots of operators typically found in regular expressions) - * If you call this enough times, you might get a valid regex! - * - *
<p>
    Note: to avoid practically endless backtracking patterns we replace asterisk and plus + * Returns a String thats "regexpish" (contains lots of operators typically found in regular + * expressions) If you call this enough times, you might get a valid regex! + * + *
<p>
    Note: to avoid practically endless backtracking patterns we replace asterisk and plus * operators with bounded repetitions. See LUCENE-4111 for more info. - * - * @param maxLength A hint about maximum length of the regexpish string. It may be exceeded by a few characters. + * + * @param maxLength A hint about maximum length of the regexpish string. It may be exceeded by a + * few characters. */ public static String randomRegexpishString(Random r, int maxLength) { final StringBuilder regexp = new StringBuilder(maxLength); @@ -594,41 +607,267 @@ public final class TestUtil { } private static final String[] HTML_CHAR_ENTITIES = { - "AElig", "Aacute", "Acirc", "Agrave", "Alpha", "AMP", "Aring", "Atilde", - "Auml", "Beta", "COPY", "Ccedil", "Chi", "Dagger", "Delta", "ETH", - "Eacute", "Ecirc", "Egrave", "Epsilon", "Eta", "Euml", "Gamma", "GT", - "Iacute", "Icirc", "Igrave", "Iota", "Iuml", "Kappa", "Lambda", "LT", - "Mu", "Ntilde", "Nu", "OElig", "Oacute", "Ocirc", "Ograve", "Omega", - "Omicron", "Oslash", "Otilde", "Ouml", "Phi", "Pi", "Prime", "Psi", - "QUOT", "REG", "Rho", "Scaron", "Sigma", "THORN", "Tau", "Theta", - "Uacute", "Ucirc", "Ugrave", "Upsilon", "Uuml", "Xi", "Yacute", "Yuml", - "Zeta", "aacute", "acirc", "acute", "aelig", "agrave", "alefsym", - "alpha", "amp", "and", "ang", "apos", "aring", "asymp", "atilde", - "auml", "bdquo", "beta", "brvbar", "bull", "cap", "ccedil", "cedil", - "cent", "chi", "circ", "clubs", "cong", "copy", "crarr", "cup", - "curren", "dArr", "dagger", "darr", "deg", "delta", "diams", "divide", - "eacute", "ecirc", "egrave", "empty", "emsp", "ensp", "epsilon", - "equiv", "eta", "eth", "euml", "euro", "exist", "fnof", "forall", - "frac12", "frac14", "frac34", "frasl", "gamma", "ge", "gt", "hArr", - "harr", "hearts", "hellip", "iacute", "icirc", "iexcl", "igrave", - "image", "infin", "int", "iota", "iquest", "isin", "iuml", "kappa", - "lArr", "lambda", "lang", "laquo", "larr", "lceil", "ldquo", "le", - "lfloor", "lowast", "loz", "lrm", "lsaquo", "lsquo", "lt", "macr", - "mdash", "micro", "middot", "minus", "mu", "nabla", "nbsp", "ndash", - "ne", "ni", "not", "notin", "nsub", "ntilde", "nu", "oacute", "ocirc", - "oelig", "ograve", "oline", "omega", "omicron", "oplus", "or", "ordf", - "ordm", "oslash", "otilde", "otimes", "ouml", "para", "part", "permil", - "perp", "phi", "pi", "piv", "plusmn", "pound", "prime", "prod", "prop", - "psi", "quot", "rArr", "radic", "rang", "raquo", "rarr", "rceil", - "rdquo", "real", "reg", "rfloor", "rho", "rlm", "rsaquo", "rsquo", - "sbquo", "scaron", "sdot", "sect", "shy", "sigma", "sigmaf", "sim", - "spades", "sub", "sube", "sum", "sup", "sup1", "sup2", "sup3", "supe", - "szlig", "tau", "there4", "theta", "thetasym", "thinsp", "thorn", - "tilde", "times", "trade", "uArr", "uacute", "uarr", "ucirc", "ugrave", - "uml", "upsih", "upsilon", "uuml", "weierp", "xi", "yacute", "yen", - "yuml", "zeta", "zwj", "zwnj" + "AElig", + "Aacute", + "Acirc", + "Agrave", + "Alpha", + "AMP", + "Aring", + "Atilde", + "Auml", + "Beta", + "COPY", + "Ccedil", + "Chi", + "Dagger", + "Delta", + "ETH", + "Eacute", + "Ecirc", + "Egrave", + "Epsilon", + "Eta", + "Euml", + "Gamma", + "GT", + "Iacute", + "Icirc", + "Igrave", + "Iota", + "Iuml", + "Kappa", + "Lambda", + "LT", + "Mu", + "Ntilde", + "Nu", + "OElig", + "Oacute", + "Ocirc", + "Ograve", + "Omega", + "Omicron", + "Oslash", + "Otilde", + "Ouml", + "Phi", + "Pi", + "Prime", + "Psi", + "QUOT", + "REG", + "Rho", + "Scaron", + "Sigma", + "THORN", + "Tau", + "Theta", + "Uacute", + "Ucirc", + 
"Ugrave", + "Upsilon", + "Uuml", + "Xi", + "Yacute", + "Yuml", + "Zeta", + "aacute", + "acirc", + "acute", + "aelig", + "agrave", + "alefsym", + "alpha", + "amp", + "and", + "ang", + "apos", + "aring", + "asymp", + "atilde", + "auml", + "bdquo", + "beta", + "brvbar", + "bull", + "cap", + "ccedil", + "cedil", + "cent", + "chi", + "circ", + "clubs", + "cong", + "copy", + "crarr", + "cup", + "curren", + "dArr", + "dagger", + "darr", + "deg", + "delta", + "diams", + "divide", + "eacute", + "ecirc", + "egrave", + "empty", + "emsp", + "ensp", + "epsilon", + "equiv", + "eta", + "eth", + "euml", + "euro", + "exist", + "fnof", + "forall", + "frac12", + "frac14", + "frac34", + "frasl", + "gamma", + "ge", + "gt", + "hArr", + "harr", + "hearts", + "hellip", + "iacute", + "icirc", + "iexcl", + "igrave", + "image", + "infin", + "int", + "iota", + "iquest", + "isin", + "iuml", + "kappa", + "lArr", + "lambda", + "lang", + "laquo", + "larr", + "lceil", + "ldquo", + "le", + "lfloor", + "lowast", + "loz", + "lrm", + "lsaquo", + "lsquo", + "lt", + "macr", + "mdash", + "micro", + "middot", + "minus", + "mu", + "nabla", + "nbsp", + "ndash", + "ne", + "ni", + "not", + "notin", + "nsub", + "ntilde", + "nu", + "oacute", + "ocirc", + "oelig", + "ograve", + "oline", + "omega", + "omicron", + "oplus", + "or", + "ordf", + "ordm", + "oslash", + "otilde", + "otimes", + "ouml", + "para", + "part", + "permil", + "perp", + "phi", + "pi", + "piv", + "plusmn", + "pound", + "prime", + "prod", + "prop", + "psi", + "quot", + "rArr", + "radic", + "rang", + "raquo", + "rarr", + "rceil", + "rdquo", + "real", + "reg", + "rfloor", + "rho", + "rlm", + "rsaquo", + "rsquo", + "sbquo", + "scaron", + "sdot", + "sect", + "shy", + "sigma", + "sigmaf", + "sim", + "spades", + "sub", + "sube", + "sum", + "sup", + "sup1", + "sup2", + "sup3", + "supe", + "szlig", + "tau", + "there4", + "theta", + "thetasym", + "thinsp", + "thorn", + "tilde", + "times", + "trade", + "uArr", + "uacute", + "uarr", + "ucirc", + "ugrave", + "uml", + "upsih", + "upsilon", + "uuml", + "weierp", + "xi", + "yacute", + "yen", + "yuml", + "zeta", + "zwj", + "zwnj" }; - + public static String randomHtmlishString(Random random, int numElements) { final int end = nextInt(random, 0, numElements); if (end == 0) { @@ -638,107 +877,158 @@ public final class TestUtil { StringBuilder sb = new StringBuilder(); for (int i = 0; i < end; i++) { int val = random.nextInt(25); - switch(val) { - case 0: sb.append("
<p>
    "); break; - case 1: { - sb.append("<"); - sb.append(" ".substring(nextInt(random, 0, 4))); - sb.append(randomSimpleString(random)); - for (int j = 0 ; j < nextInt(random, 0, 10) ; ++j) { - sb.append(' '); + switch (val) { + case 0: + sb.append("
<p>
    "); + break; + case 1: + { + sb.append("<"); + sb.append(" ".substring(nextInt(random, 0, 4))); sb.append(randomSimpleString(random)); - sb.append(" ".substring(nextInt(random, 0, 1))); - sb.append('='); - sb.append(" ".substring(nextInt(random, 0, 1))); - sb.append("\"".substring(nextInt(random, 0, 1))); + for (int j = 0; j < nextInt(random, 0, 10); ++j) { + sb.append(' '); + sb.append(randomSimpleString(random)); + sb.append(" ".substring(nextInt(random, 0, 1))); + sb.append('='); + sb.append(" ".substring(nextInt(random, 0, 1))); + sb.append("\"".substring(nextInt(random, 0, 1))); + sb.append(randomSimpleString(random)); + sb.append("\"".substring(nextInt(random, 0, 1))); + } + sb.append(" ".substring(nextInt(random, 0, 4))); + sb.append("/".substring(nextInt(random, 0, 1))); + sb.append(">".substring(nextInt(random, 0, 1))); + break; + } + case 2: + { + sb.append("".substring(nextInt(random, 0, 1))); + break; } - sb.append(" ".substring(nextInt(random, 0, 4))); - sb.append("/".substring(nextInt(random, 0, 1))); - sb.append(">".substring(nextInt(random, 0, 1))); + case 3: + sb.append(">"); break; - } - case 2: { - sb.append("".substring(nextInt(random, 0, 1))); + case 4: + sb.append("
</p>
    "); break; - } - case 3: sb.append(">"); break; - case 4: sb.append("
</p>
    "); break; - case 5: sb.append(""); break; - case 16: { - sb.append("&"); - switch(nextInt(random, 0, 2)) { - case 0: sb.append(randomSimpleString(random)); break; - case 1: sb.append(HTML_CHAR_ENTITIES[random.nextInt(HTML_CHAR_ENTITIES.length)]); break; - } - sb.append(";".substring(nextInt(random, 0, 1))); + case 5: + sb.append(""); + break; + case 16: + { + sb.append("&"); + switch (nextInt(random, 0, 2)) { + case 0: + sb.append(randomSimpleString(random)); + break; + case 1: + sb.append(HTML_CHAR_ENTITIES[random.nextInt(HTML_CHAR_ENTITIES.length)]); + break; + } sb.append(";".substring(nextInt(random, 0, 1))); + break; } + case 17: + { + sb.append("&#"); + if (0 == nextInt(random, 0, 1)) { + sb.append(nextInt(random, 0, Integer.MAX_VALUE - 1)); + sb.append(";".substring(nextInt(random, 0, 1))); + } + break; + } + case 18: + { + sb.append("&#x"); + if (0 == nextInt(random, 0, 1)) { + sb.append(Integer.toString(nextInt(random, 0, Integer.MAX_VALUE - 1), 16)); + sb.append(";".substring(nextInt(random, 0, 1))); + } + break; + } + + case 19: + sb.append(";"); break; - } - case 18: { - sb.append("&#x"); - if (0 == nextInt(random, 0, 1)) { - sb.append(Integer.toString(nextInt(random, 0, Integer.MAX_VALUE - 1), 16)); - sb.append(";".substring(nextInt(random, 0, 1))); - } + case 20: + sb.append(nextInt(random, 0, Integer.MAX_VALUE - 1)); break; - } - - case 19: sb.append(";"); break; - case 20: sb.append(nextInt(random, 0, Integer.MAX_VALUE - 1)); break; - case 21: sb.append("\n"); break; - case 22: sb.append(" ".substring(nextInt(random, 0, 10))); break; - case 23: { - sb.append("<"); - if (0 == nextInt(random, 0, 3)) { - sb.append(" ".substring(nextInt(random, 1, 10))); - } - if (0 == nextInt(random, 0, 1)) { - sb.append("/"); + case 21: + sb.append("\n"); + break; + case 22: + sb.append(" ".substring(nextInt(random, 0, 10))); + break; + case 23: + { + sb.append("<"); if (0 == nextInt(random, 0, 3)) { sb.append(" ".substring(nextInt(random, 1, 10))); } + if (0 == nextInt(random, 0, 1)) { + sb.append("/"); + if (0 == nextInt(random, 0, 3)) { + sb.append(" ".substring(nextInt(random, 1, 10))); + } + } + switch (nextInt(random, 0, 3)) { + case 0: + sb.append(randomlyRecaseCodePoints(random, "script")); + break; + case 1: + sb.append(randomlyRecaseCodePoints(random, "style")); + break; + case 2: + sb.append(randomlyRecaseCodePoints(random, "br")); + break; + // default: append nothing + } + sb.append(">".substring(nextInt(random, 0, 1))); + break; } - switch (nextInt(random, 0, 3)) { - case 0: sb.append(randomlyRecaseCodePoints(random, "script")); break; - case 1: sb.append(randomlyRecaseCodePoints(random, "style")); break; - case 2: sb.append(randomlyRecaseCodePoints(random, "br")); break; - // default: append nothing - } - sb.append(">".substring(nextInt(random, 0, 1))); - break; - } - default: sb.append(randomSimpleString(random)); + default: + sb.append(randomSimpleString(random)); } } return sb.toString(); } - /** - * Randomly upcases, downcases, or leaves intact each code point in the given string - */ + /** Randomly upcases, downcases, or leaves intact each code point in the given string */ public static String randomlyRecaseCodePoints(Random random, String str) { StringBuilder builder = new StringBuilder(); int pos = 0; @@ -746,77 +1036,79 @@ public final class TestUtil { int codePoint = str.codePointAt(pos); pos += Character.charCount(codePoint); switch (nextInt(random, 0, 2)) { - case 0: builder.appendCodePoint(Character.toUpperCase(codePoint)); break; - case 1: 
builder.appendCodePoint(Character.toLowerCase(codePoint)); break; - case 2: builder.appendCodePoint(codePoint); // leave intact + case 0: + builder.appendCodePoint(Character.toUpperCase(codePoint)); + break; + case 1: + builder.appendCodePoint(Character.toLowerCase(codePoint)); + break; + case 2: + builder.appendCodePoint(codePoint); // leave intact } } return builder.toString(); } private static final int[] blockStarts = { - 0x0000, 0x0080, 0x0100, 0x0180, 0x0250, 0x02B0, 0x0300, 0x0370, 0x0400, - 0x0500, 0x0530, 0x0590, 0x0600, 0x0700, 0x0750, 0x0780, 0x07C0, 0x0800, - 0x0900, 0x0980, 0x0A00, 0x0A80, 0x0B00, 0x0B80, 0x0C00, 0x0C80, 0x0D00, - 0x0D80, 0x0E00, 0x0E80, 0x0F00, 0x1000, 0x10A0, 0x1100, 0x1200, 0x1380, - 0x13A0, 0x1400, 0x1680, 0x16A0, 0x1700, 0x1720, 0x1740, 0x1760, 0x1780, - 0x1800, 0x18B0, 0x1900, 0x1950, 0x1980, 0x19E0, 0x1A00, 0x1A20, 0x1B00, - 0x1B80, 0x1C00, 0x1C50, 0x1CD0, 0x1D00, 0x1D80, 0x1DC0, 0x1E00, 0x1F00, - 0x2000, 0x2070, 0x20A0, 0x20D0, 0x2100, 0x2150, 0x2190, 0x2200, 0x2300, - 0x2400, 0x2440, 0x2460, 0x2500, 0x2580, 0x25A0, 0x2600, 0x2700, 0x27C0, - 0x27F0, 0x2800, 0x2900, 0x2980, 0x2A00, 0x2B00, 0x2C00, 0x2C60, 0x2C80, - 0x2D00, 0x2D30, 0x2D80, 0x2DE0, 0x2E00, 0x2E80, 0x2F00, 0x2FF0, 0x3000, - 0x3040, 0x30A0, 0x3100, 0x3130, 0x3190, 0x31A0, 0x31C0, 0x31F0, 0x3200, - 0x3300, 0x3400, 0x4DC0, 0x4E00, 0xA000, 0xA490, 0xA4D0, 0xA500, 0xA640, - 0xA6A0, 0xA700, 0xA720, 0xA800, 0xA830, 0xA840, 0xA880, 0xA8E0, 0xA900, - 0xA930, 0xA960, 0xA980, 0xAA00, 0xAA60, 0xAA80, 0xABC0, 0xAC00, 0xD7B0, - 0xE000, 0xF900, 0xFB00, 0xFB50, 0xFE00, 0xFE10, - 0xFE20, 0xFE30, 0xFE50, 0xFE70, 0xFF00, 0xFFF0, - 0x10000, 0x10080, 0x10100, 0x10140, 0x10190, 0x101D0, 0x10280, 0x102A0, - 0x10300, 0x10330, 0x10380, 0x103A0, 0x10400, 0x10450, 0x10480, 0x10800, - 0x10840, 0x10900, 0x10920, 0x10A00, 0x10A60, 0x10B00, 0x10B40, 0x10B60, - 0x10C00, 0x10E60, 0x11080, 0x12000, 0x12400, 0x13000, 0x1D000, 0x1D100, - 0x1D200, 0x1D300, 0x1D360, 0x1D400, 0x1F000, 0x1F030, 0x1F100, 0x1F200, - 0x20000, 0x2A700, 0x2F800, 0xE0000, 0xE0100, 0xF0000, 0x100000 - }; - - private static final int[] blockEnds = { - 0x007F, 0x00FF, 0x017F, 0x024F, 0x02AF, 0x02FF, 0x036F, 0x03FF, 0x04FF, - 0x052F, 0x058F, 0x05FF, 0x06FF, 0x074F, 0x077F, 0x07BF, 0x07FF, 0x083F, - 0x097F, 0x09FF, 0x0A7F, 0x0AFF, 0x0B7F, 0x0BFF, 0x0C7F, 0x0CFF, 0x0D7F, - 0x0DFF, 0x0E7F, 0x0EFF, 0x0FFF, 0x109F, 0x10FF, 0x11FF, 0x137F, 0x139F, - 0x13FF, 0x167F, 0x169F, 0x16FF, 0x171F, 0x173F, 0x175F, 0x177F, 0x17FF, - 0x18AF, 0x18FF, 0x194F, 0x197F, 0x19DF, 0x19FF, 0x1A1F, 0x1AAF, 0x1B7F, - 0x1BBF, 0x1C4F, 0x1C7F, 0x1CFF, 0x1D7F, 0x1DBF, 0x1DFF, 0x1EFF, 0x1FFF, - 0x206F, 0x209F, 0x20CF, 0x20FF, 0x214F, 0x218F, 0x21FF, 0x22FF, 0x23FF, - 0x243F, 0x245F, 0x24FF, 0x257F, 0x259F, 0x25FF, 0x26FF, 0x27BF, 0x27EF, - 0x27FF, 0x28FF, 0x297F, 0x29FF, 0x2AFF, 0x2BFF, 0x2C5F, 0x2C7F, 0x2CFF, - 0x2D2F, 0x2D7F, 0x2DDF, 0x2DFF, 0x2E7F, 0x2EFF, 0x2FDF, 0x2FFF, 0x303F, - 0x309F, 0x30FF, 0x312F, 0x318F, 0x319F, 0x31BF, 0x31EF, 0x31FF, 0x32FF, - 0x33FF, 0x4DBF, 0x4DFF, 0x9FFF, 0xA48F, 0xA4CF, 0xA4FF, 0xA63F, 0xA69F, - 0xA6FF, 0xA71F, 0xA7FF, 0xA82F, 0xA83F, 0xA87F, 0xA8DF, 0xA8FF, 0xA92F, - 0xA95F, 0xA97F, 0xA9DF, 0xAA5F, 0xAA7F, 0xAADF, 0xABFF, 0xD7AF, 0xD7FF, - 0xF8FF, 0xFAFF, 0xFB4F, 0xFDFF, 0xFE0F, 0xFE1F, - 0xFE2F, 0xFE4F, 0xFE6F, 0xFEFF, 0xFFEF, 0xFFFF, - 0x1007F, 0x100FF, 0x1013F, 0x1018F, 0x101CF, 0x101FF, 0x1029F, 0x102DF, - 0x1032F, 0x1034F, 0x1039F, 0x103DF, 0x1044F, 0x1047F, 0x104AF, 0x1083F, - 0x1085F, 0x1091F, 0x1093F, 0x10A5F, 0x10A7F, 0x10B3F, 0x10B5F, 0x10B7F, 
- 0x10C4F, 0x10E7F, 0x110CF, 0x123FF, 0x1247F, 0x1342F, 0x1D0FF, 0x1D1FF, - 0x1D24F, 0x1D35F, 0x1D37F, 0x1D7FF, 0x1F02F, 0x1F09F, 0x1F1FF, 0x1F2FF, - 0x2A6DF, 0x2B73F, 0x2FA1F, 0xE007F, 0xE01EF, 0xFFFFF, 0x10FFFF + 0x0000, 0x0080, 0x0100, 0x0180, 0x0250, 0x02B0, 0x0300, 0x0370, 0x0400, 0x0500, 0x0530, 0x0590, + 0x0600, 0x0700, 0x0750, 0x0780, 0x07C0, 0x0800, 0x0900, 0x0980, 0x0A00, 0x0A80, 0x0B00, 0x0B80, + 0x0C00, 0x0C80, 0x0D00, 0x0D80, 0x0E00, 0x0E80, 0x0F00, 0x1000, 0x10A0, 0x1100, 0x1200, 0x1380, + 0x13A0, 0x1400, 0x1680, 0x16A0, 0x1700, 0x1720, 0x1740, 0x1760, 0x1780, 0x1800, 0x18B0, 0x1900, + 0x1950, 0x1980, 0x19E0, 0x1A00, 0x1A20, 0x1B00, 0x1B80, 0x1C00, 0x1C50, 0x1CD0, 0x1D00, 0x1D80, + 0x1DC0, 0x1E00, 0x1F00, 0x2000, 0x2070, 0x20A0, 0x20D0, 0x2100, 0x2150, 0x2190, 0x2200, 0x2300, + 0x2400, 0x2440, 0x2460, 0x2500, 0x2580, 0x25A0, 0x2600, 0x2700, 0x27C0, 0x27F0, 0x2800, 0x2900, + 0x2980, 0x2A00, 0x2B00, 0x2C00, 0x2C60, 0x2C80, 0x2D00, 0x2D30, 0x2D80, 0x2DE0, 0x2E00, 0x2E80, + 0x2F00, 0x2FF0, 0x3000, 0x3040, 0x30A0, 0x3100, 0x3130, 0x3190, 0x31A0, 0x31C0, 0x31F0, 0x3200, + 0x3300, 0x3400, 0x4DC0, 0x4E00, 0xA000, 0xA490, 0xA4D0, 0xA500, 0xA640, 0xA6A0, 0xA700, 0xA720, + 0xA800, 0xA830, 0xA840, 0xA880, 0xA8E0, 0xA900, 0xA930, 0xA960, 0xA980, 0xAA00, 0xAA60, 0xAA80, + 0xABC0, 0xAC00, 0xD7B0, 0xE000, 0xF900, 0xFB00, 0xFB50, 0xFE00, 0xFE10, 0xFE20, 0xFE30, 0xFE50, + 0xFE70, 0xFF00, 0xFFF0, 0x10000, 0x10080, 0x10100, 0x10140, 0x10190, 0x101D0, 0x10280, 0x102A0, + 0x10300, 0x10330, 0x10380, 0x103A0, 0x10400, 0x10450, 0x10480, 0x10800, 0x10840, 0x10900, + 0x10920, 0x10A00, 0x10A60, 0x10B00, 0x10B40, 0x10B60, 0x10C00, 0x10E60, 0x11080, 0x12000, + 0x12400, 0x13000, 0x1D000, 0x1D100, 0x1D200, 0x1D300, 0x1D360, 0x1D400, 0x1F000, 0x1F030, + 0x1F100, 0x1F200, 0x20000, 0x2A700, 0x2F800, 0xE0000, 0xE0100, 0xF0000, 0x100000 }; - /** Returns random string of length between 0-20 codepoints, all codepoints within the same unicode block. 
*/ + private static final int[] blockEnds = { + 0x007F, 0x00FF, 0x017F, 0x024F, 0x02AF, 0x02FF, 0x036F, 0x03FF, 0x04FF, 0x052F, 0x058F, 0x05FF, + 0x06FF, 0x074F, 0x077F, 0x07BF, 0x07FF, 0x083F, 0x097F, 0x09FF, 0x0A7F, 0x0AFF, 0x0B7F, 0x0BFF, + 0x0C7F, 0x0CFF, 0x0D7F, 0x0DFF, 0x0E7F, 0x0EFF, 0x0FFF, 0x109F, 0x10FF, 0x11FF, 0x137F, 0x139F, + 0x13FF, 0x167F, 0x169F, 0x16FF, 0x171F, 0x173F, 0x175F, 0x177F, 0x17FF, 0x18AF, 0x18FF, 0x194F, + 0x197F, 0x19DF, 0x19FF, 0x1A1F, 0x1AAF, 0x1B7F, 0x1BBF, 0x1C4F, 0x1C7F, 0x1CFF, 0x1D7F, 0x1DBF, + 0x1DFF, 0x1EFF, 0x1FFF, 0x206F, 0x209F, 0x20CF, 0x20FF, 0x214F, 0x218F, 0x21FF, 0x22FF, 0x23FF, + 0x243F, 0x245F, 0x24FF, 0x257F, 0x259F, 0x25FF, 0x26FF, 0x27BF, 0x27EF, 0x27FF, 0x28FF, 0x297F, + 0x29FF, 0x2AFF, 0x2BFF, 0x2C5F, 0x2C7F, 0x2CFF, 0x2D2F, 0x2D7F, 0x2DDF, 0x2DFF, 0x2E7F, 0x2EFF, + 0x2FDF, 0x2FFF, 0x303F, 0x309F, 0x30FF, 0x312F, 0x318F, 0x319F, 0x31BF, 0x31EF, 0x31FF, 0x32FF, + 0x33FF, 0x4DBF, 0x4DFF, 0x9FFF, 0xA48F, 0xA4CF, 0xA4FF, 0xA63F, 0xA69F, 0xA6FF, 0xA71F, 0xA7FF, + 0xA82F, 0xA83F, 0xA87F, 0xA8DF, 0xA8FF, 0xA92F, 0xA95F, 0xA97F, 0xA9DF, 0xAA5F, 0xAA7F, 0xAADF, + 0xABFF, 0xD7AF, 0xD7FF, 0xF8FF, 0xFAFF, 0xFB4F, 0xFDFF, 0xFE0F, 0xFE1F, 0xFE2F, 0xFE4F, 0xFE6F, + 0xFEFF, 0xFFEF, 0xFFFF, 0x1007F, 0x100FF, 0x1013F, 0x1018F, 0x101CF, 0x101FF, 0x1029F, 0x102DF, + 0x1032F, 0x1034F, 0x1039F, 0x103DF, 0x1044F, 0x1047F, 0x104AF, 0x1083F, 0x1085F, 0x1091F, + 0x1093F, 0x10A5F, 0x10A7F, 0x10B3F, 0x10B5F, 0x10B7F, 0x10C4F, 0x10E7F, 0x110CF, 0x123FF, + 0x1247F, 0x1342F, 0x1D0FF, 0x1D1FF, 0x1D24F, 0x1D35F, 0x1D37F, 0x1D7FF, 0x1F02F, 0x1F09F, + 0x1F1FF, 0x1F2FF, 0x2A6DF, 0x2B73F, 0x2FA1F, 0xE007F, 0xE01EF, 0xFFFFF, 0x10FFFF + }; + + /** + * Returns random string of length between 0-20 codepoints, all codepoints within the same unicode + * block. + */ public static String randomRealisticUnicodeString(Random r) { return randomRealisticUnicodeString(r, 20); } - - /** Returns random string of length up to maxLength codepoints , all codepoints within the same unicode block. */ + + /** + * Returns random string of length up to maxLength codepoints , all codepoints within the same + * unicode block. + */ public static String randomRealisticUnicodeString(Random r, int maxLength) { return randomRealisticUnicodeString(r, 0, maxLength); } - /** Returns random string of length between min and max codepoints, all codepoints within the same unicode block. */ + /** + * Returns random string of length between min and max codepoints, all codepoints within the same + * unicode block. 
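[Illustrative note, not part of the patch: the two parallel tables above drive the randomRealisticUnicodeString variants. A minimal self-contained sketch of the technique — fix one block index, then draw every code point from that block's inclusive range; the class name is hypothetical:]

import java.util.Random;

class BlockStringSketch {
  // Mirrors the approach in randomRealisticUnicodeString: one random block,
  // all code points drawn from [starts[block], ends[block]] inclusive.
  static String oneBlockString(Random r, int[] starts, int[] ends, int len) {
    int block = r.nextInt(starts.length); // pick the block once, up front
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < len; i++) {
      int cp = starts[block] + r.nextInt(ends[block] - starts[block] + 1);
      sb.appendCodePoint(cp); // supplementary code points become surrogate pairs
    }
    return sb.toString();
  }
}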
+ */ public static String randomRealisticUnicodeString(Random r, int minLength, int maxLength) { final int end = nextInt(r, minLength, maxLength); final int block = r.nextInt(blockStarts.length); @@ -825,11 +1117,11 @@ public final class TestUtil { sb.appendCodePoint(nextInt(r, blockStarts[block], blockEnds[block])); return sb.toString(); } - - /** Returns random string, with a given UTF-8 byte length*/ + + /** Returns random string, with a given UTF-8 byte length */ public static String randomFixedByteLengthUnicodeString(Random r, int length) { - - final char[] buffer = new char[length*3]; + + final char[] buffer = new char[length * 3]; int bytes = length; int i = 0; for (; i < buffer.length && bytes != 0; i++) { @@ -863,7 +1155,6 @@ public final class TestUtil { buffer[i] = (char) nextInt(r, 0xdc00, 0xdfff); bytes -= 4; } - } return new String(buffer, 0, i); } @@ -876,10 +1167,11 @@ public final class TestUtil { b.length = length; return b; } - - /** Return a Codec that can read any of the - * default codecs and formats, but always writes in the specified - * format. */ + + /** + * Return a Codec that can read any of the default codecs and formats, but always writes in the + * specified format. + */ public static Codec alwaysPostingsFormat(final PostingsFormat format) { // TODO: we really need for postings impls etc to announce themselves // (and maybe their params, too) to infostream on flush and merge. @@ -894,10 +1186,11 @@ public final class TestUtil { } }; } - - /** Return a Codec that can read any of the - * default codecs and formats, but always writes in the specified - * format. */ + + /** + * Return a Codec that can read any of the default codecs and formats, but always writes in the + * specified format. + */ public static Codec alwaysDocValuesFormat(final DocValuesFormat format) { // TODO: we really need for docvalues impls etc to announce themselves // (and maybe their params, too) to infostream on flush and merge. @@ -912,43 +1205,51 @@ public final class TestUtil { } }; } - - /** - * Returns the actual default codec (e.g. LuceneMNCodec) for this version of Lucene. - * This may be different than {@link Codec#getDefault()} because that is randomized. + + /** + * Returns the actual default codec (e.g. LuceneMNCodec) for this version of Lucene. This may be + * different than {@link Codec#getDefault()} because that is randomized. */ public static Codec getDefaultCodec() { return new Lucene90Codec(); } - - /** - * Returns the actual default postings format (e.g. LuceneMNPostingsFormat for this version of Lucene. + + /** + * Returns the actual default postings format (e.g. LuceneMNPostingsFormat for this version of + * Lucene. */ public static PostingsFormat getDefaultPostingsFormat() { return new Lucene84PostingsFormat(); } - - /** - * Returns the actual default postings format (e.g. LuceneMNPostingsFormat for this version of Lucene. + + /** + * Returns the actual default postings format (e.g. LuceneMNPostingsFormat for this version of + * Lucene. 
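[Illustrative note, not part of the patch: a hedged usage sketch of TestUtil.alwaysPostingsFormat as it appears above — the wrapped codec writes every field with the given format while still reading the defaults. Package names are as of this commit; the directory and analyzer choices are placeholders:]

import java.util.Random;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.codecs.lucene84.Lucene84PostingsFormat;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.TestUtil;

class AlwaysFormatSketch {
  static void demo() throws Exception {
    try (Directory dir = new ByteBuffersDirectory()) {
      IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(new Random()));
      // Force all fields onto one postings format for this test run:
      iwc.setCodec(TestUtil.alwaysPostingsFormat(new Lucene84PostingsFormat()));
      try (IndexWriter w = new IndexWriter(dir, iwc)) {
        w.addDocument(new Document());
      }
    }
  }
}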
+ * * @lucene.internal this may disappear at any time */ - public static PostingsFormat getDefaultPostingsFormat(int minItemsPerBlock, int maxItemsPerBlock) { + public static PostingsFormat getDefaultPostingsFormat( + int minItemsPerBlock, int maxItemsPerBlock) { return new Lucene84PostingsFormat(minItemsPerBlock, maxItemsPerBlock); } - + /** Returns a random postings format that supports term ordinals */ public static PostingsFormat getPostingsFormatWithOrds(Random r) { switch (r.nextInt(2)) { - case 0: return new LuceneFixedGap(); - case 1: return new BlockTreeOrdsPostingsFormat(); - // TODO: these don't actually support ords! - //case 2: return new FSTOrdPostingsFormat(); - default: throw new AssertionError(); + case 0: + return new LuceneFixedGap(); + case 1: + return new BlockTreeOrdsPostingsFormat(); + // TODO: these don't actually support ords! + // case 2: return new FSTOrdPostingsFormat(); + default: + throw new AssertionError(); } } - - /** - * Returns the actual default docvalues format (e.g. LuceneMNDocValuesFormat for this version of Lucene. + + /** + * Returns the actual default docvalues format (e.g. LuceneMNDocValuesFormat for this version of + * Lucene. */ public static DocValuesFormat getDefaultDocValuesFormat() { return new Lucene80DocValuesFormat(); @@ -959,11 +1260,11 @@ public final class TestUtil { public static String getPostingsFormat(String field) { return getPostingsFormat(Codec.getDefault(), field); } - + public static String getPostingsFormat(Codec codec, String field) { PostingsFormat p = codec.postingsFormat(); if (p instanceof PerFieldPostingsFormat) { - return ((PerFieldPostingsFormat)p).getPostingsFormatForField(field).getName(); + return ((PerFieldPostingsFormat) p).getPostingsFormatForField(field).getName(); } else { return p.getName(); } @@ -972,7 +1273,7 @@ public final class TestUtil { public static String getDocValuesFormat(String field) { return getDocValuesFormat(Codec.getDefault(), field); } - + public static String getDocValuesFormat(Codec codec, String field) { DocValuesFormat f = codec.docValuesFormat(); if (f instanceof PerFieldDocValuesFormat) { @@ -999,8 +1300,9 @@ public final class TestUtil { return false; } } - - public static void addIndexesSlowly(IndexWriter writer, DirectoryReader... readers) throws IOException { + + public static void addIndexesSlowly(IndexWriter writer, DirectoryReader... 
readers) + throws IOException { List leaves = new ArrayList<>(); for (DirectoryReader reader : readers) { for (LeafReaderContext context : reader.leaves()) { @@ -1010,8 +1312,7 @@ public final class TestUtil { writer.addIndexes(leaves.toArray(new CodecReader[leaves.size()])); } - /** just tries to configure things to keep the open file - * count lowish */ + /** just tries to configure things to keep the open file count lowish */ public static void reduceOpenFiles(IndexWriter w) { // keep number of open files lowish MergePolicy mp = w.getConfig().getMergePolicy(); @@ -1031,25 +1332,28 @@ public final class TestUtil { } } - /** Checks some basic behaviour of an AttributeImpl + /** + * Checks some basic behaviour of an AttributeImpl + * * @param reflectedValues contains a map with "AttributeClass#key" as values */ - public static void assertAttributeReflection(final AttributeImpl att, Map reflectedValues) { - final Map map = new HashMap<>(); - att.reflectWith(new AttributeReflector() { - @Override - public void reflect(Class attClass, String key, Object value) { - map.put(attClass.getName() + '#' + key, value); - } - }); + public static void assertAttributeReflection( + final AttributeImpl att, Map reflectedValues) { + final Map map = new HashMap<>(); + att.reflectWith( + new AttributeReflector() { + @Override + public void reflect(Class attClass, String key, Object value) { + map.put(attClass.getName() + '#' + key, value); + } + }); Assert.assertEquals("Reflection does not produce same map", reflectedValues, map); } - /** - * Assert that the given {@link TopDocs} have the same top docs and consistent hit counts. - */ + /** Assert that the given {@link TopDocs} have the same top docs and consistent hit counts. */ public static void assertConsistent(TopDocs expected, TopDocs actual) { - Assert.assertEquals("wrong total hits", expected.totalHits.value == 0, actual.totalHits.value == 0); + Assert.assertEquals( + "wrong total hits", expected.totalHits.value == 0, actual.totalHits.value == 0); if (expected.totalHits.relation == TotalHits.Relation.EQUAL_TO) { if (actual.totalHits.relation == TotalHits.Relation.EQUAL_TO) { Assert.assertEquals("wrong total hits", expected.totalHits.value, actual.totalHits.value); @@ -1060,16 +1364,17 @@ public final class TestUtil { Assert.assertTrue("wrong total hits", expected.totalHits.value <= actual.totalHits.value); } Assert.assertEquals("wrong hit count", expected.scoreDocs.length, actual.scoreDocs.length); - for(int hitIDX=0;hitIDX wordLength) { sb.setLength(wordLength); - if (Character.isHighSurrogate(sb.charAt(wordLength-1))) { - sb.setLength(wordLength-1); + if (Character.isHighSurrogate(sb.charAt(wordLength - 1))) { + sb.setLength(wordLength - 1); } } @@ -1284,9 +1604,10 @@ public final class TestUtil { } } - /** For debugging: tries to include br.utf8ToString(), but if that - * fails (because it's not valid utf8, which is fine!), just - * use ordinary toString. */ + /** + * For debugging: tries to include br.utf8ToString(), but if that fails (because it's not valid + * utf8, which is fine!), just use ordinary toString. + */ public static String bytesRefToString(BytesRef br) { if (br == null) { return "(null)"; @@ -1301,15 +1622,13 @@ public final class TestUtil { } } } - - /** - * Returns a copy of the source directory, with file contents stored - * in RAM. - */ + + /** Returns a copy of the source directory, with file contents stored in RAM. 
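[Illustrative note, not part of the patch: the typical pattern behind TestUtil.ramCopyOf shown above — snapshot an on-disk index into RAM so later mutation of the original doesn't affect the test. The path is a placeholder:]

import java.nio.file.Paths;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.TestUtil;

class RamCopySketch {
  static Directory snapshot() throws Exception {
    try (Directory onDisk = FSDirectory.open(Paths.get("/tmp/test-index"))) {
      // Copies segments_N plus all codec files into a ByteBuffersDirectory.
      return TestUtil.ramCopyOf(onDisk);
    }
  }
}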
*/ public static Directory ramCopyOf(Directory dir) throws IOException { Directory ram = new ByteBuffersDirectory(); for (String file : dir.listAll()) { - if (file.startsWith(IndexFileNames.SEGMENTS) || IndexFileNames.CODEC_FILE_PATTERN.matcher(file).matches()) { + if (file.startsWith(IndexFileNames.SEGMENTS) + || IndexFileNames.CODEC_FILE_PATTERN.matcher(file).matches()) { ram.copyFrom(dir, file, file, IOContext.DEFAULT); } } diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/ThrottledIndexOutput.java b/lucene/test-framework/src/java/org/apache/lucene/util/ThrottledIndexOutput.java index d6366f92ba0..692945da20a 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/ThrottledIndexOutput.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/ThrottledIndexOutput.java @@ -17,13 +17,10 @@ package org.apache.lucene.util; import java.io.IOException; - import org.apache.lucene.store.DataInput; import org.apache.lucene.store.IndexOutput; -/** - * Intentionally slow IndexOutput for testing. - */ +/** Intentionally slow IndexOutput for testing. */ public class ThrottledIndexOutput extends IndexOutput { public static final int DEFAULT_MIN_WRITTEN_BYTES = 1024; private final int bytesPerSecond; @@ -37,18 +34,27 @@ public class ThrottledIndexOutput extends IndexOutput { private final byte[] bytes = new byte[1]; public ThrottledIndexOutput newFromDelegate(IndexOutput output) { - return new ThrottledIndexOutput(bytesPerSecond, flushDelayMillis, - closeDelayMillis, seekDelayMillis, minBytesWritten, output); + return new ThrottledIndexOutput( + bytesPerSecond, + flushDelayMillis, + closeDelayMillis, + seekDelayMillis, + minBytesWritten, + output); } - public ThrottledIndexOutput(int bytesPerSecond, long delayInMillis, - IndexOutput delegate) { - this(bytesPerSecond, delayInMillis, delayInMillis, delayInMillis, - DEFAULT_MIN_WRITTEN_BYTES, delegate); + public ThrottledIndexOutput(int bytesPerSecond, long delayInMillis, IndexOutput delegate) { + this( + bytesPerSecond, + delayInMillis, + delayInMillis, + delayInMillis, + DEFAULT_MIN_WRITTEN_BYTES, + delegate); } - public ThrottledIndexOutput(int bytesPerSecond, long delays, - int minBytesWritten, IndexOutput delegate) { + public ThrottledIndexOutput( + int bytesPerSecond, long delays, int minBytesWritten, IndexOutput delegate) { this(bytesPerSecond, delays, delays, delays, minBytesWritten, delegate); } @@ -56,8 +62,12 @@ public class ThrottledIndexOutput extends IndexOutput { return mbits * 125000000; } - public ThrottledIndexOutput(int bytesPerSecond, long flushDelayMillis, - long closeDelayMillis, long seekDelayMillis, long minBytesWritten, + public ThrottledIndexOutput( + int bytesPerSecond, + long flushDelayMillis, + long closeDelayMillis, + long seekDelayMillis, + long minBytesWritten, IndexOutput delegate) { super("ThrottledIndexOutput(" + delegate + ")", delegate == null ? 
"n/a" : delegate.getName()); assert bytesPerSecond > 0; @@ -105,15 +115,14 @@ public class ThrottledIndexOutput extends IndexOutput { if (pendingBytes > 0 && (closing || pendingBytes > minBytesWritten)) { long actualBps = (timeElapsed / pendingBytes) * 1000000000l; // nano to sec if (actualBps > bytesPerSecond) { - long expected = (pendingBytes * 1000l / bytesPerSecond) ; - final long delay = expected - (timeElapsed / 1000000l) ; + long expected = (pendingBytes * 1000l / bytesPerSecond); + final long delay = expected - (timeElapsed / 1000000l); pendingBytes = 0; timeElapsed = 0; return delay; } } return 0; - } private static final void sleep(long ms) { @@ -126,7 +135,7 @@ public class ThrottledIndexOutput extends IndexOutput { throw new ThreadInterruptedException(e); } } - + @Override public void copyBytes(DataInput input, long numBytes) throws IOException { delegate.copyBytes(input, numBytes); diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/TimeUnits.java b/lucene/test-framework/src/java/org/apache/lucene/util/TimeUnits.java index 431364c765c..43190650e0b 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/TimeUnits.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/TimeUnits.java @@ -25,5 +25,5 @@ public final class TimeUnits { /** 1 minute in milliseconds */ public static final int MINUTE = 60 * SECOND; /** 1 hour in milliseconds */ - public static final int HOUR = 60 * MINUTE; + public static final int HOUR = 60 * MINUTE; } diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/automaton/AutomatonTestUtil.java b/lucene/test-framework/src/java/org/apache/lucene/util/automaton/AutomatonTestUtil.java index e3f26e46032..4809794e5a1 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/automaton/AutomatonTestUtil.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/automaton/AutomatonTestUtil.java @@ -24,7 +24,6 @@ import java.util.List; import java.util.Map; import java.util.Random; import java.util.Set; - import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.IntsRef; import org.apache.lucene.util.IntsRefBuilder; @@ -33,15 +32,12 @@ import org.apache.lucene.util.UnicodeUtil; /** * Utilities for testing automata. - *
<p>
    - * Capable of generating random regular expressions, - * and automata, and also provides a number of very - * basic unoptimized implementations (*slow) for testing. + * + *
<p>
    Capable of generating random regular expressions, and automata, and also provides a number of + * very basic unoptimized implementations (*slow) for testing. */ public class AutomatonTestUtil { - /** - * Default maximum number of states that {@link Operations#determinize} should create. - */ + /** Default maximum number of states that {@link Operations#determinize} should create. */ public static final int DEFAULT_MAX_DETERMINIZED_STATES = 1000000; /** Returns random string, including full unicode range. */ @@ -49,12 +45,12 @@ public class AutomatonTestUtil { while (true) { String regexp = randomRegexpString(r); // we will also generate some undefined unicode queries - if (!UnicodeUtil.validUTF16String(regexp)) - continue; + if (!UnicodeUtil.validUTF16String(regexp)) continue; try { new RegExp(regexp, RegExp.NONE); return regexp; - } catch (Exception e) {} + } catch (Exception e) { + } } } @@ -73,8 +69,7 @@ public class AutomatonTestUtil { buffer[i++] = (char) TestUtil.nextInt(r, 0xd800, 0xdbff); // Low surrogate buffer[i] = (char) TestUtil.nextInt(r, 0xdc00, 0xdfff); - } - else if (t <= 1) buffer[i] = (char) r.nextInt(0x80); + } else if (t <= 1) buffer[i] = (char) r.nextInt(0x80); else if (2 == t) buffer[i] = (char) TestUtil.nextInt(r, 0x80, 0x800); else if (3 == t) buffer[i] = (char) TestUtil.nextInt(r, 0x800, 0xd7ff); else if (4 == t) buffer[i] = (char) TestUtil.nextInt(r, 0xe000, 0xffff); @@ -91,35 +86,37 @@ public class AutomatonTestUtil { } return new String(buffer, 0, end); } - - /** picks a random int code point, avoiding surrogates; - * throws IllegalArgumentException if this transition only - * accepts surrogates */ + + /** + * picks a random int code point, avoiding surrogates; throws IllegalArgumentException if this + * transition only accepts surrogates + */ private static int getRandomCodePoint(final Random r, int min, int max) { final int code; - if (max < UnicodeUtil.UNI_SUR_HIGH_START || - min > UnicodeUtil.UNI_SUR_HIGH_END) { + if (max < UnicodeUtil.UNI_SUR_HIGH_START || min > UnicodeUtil.UNI_SUR_HIGH_END) { // easy: entire range is before or after surrogates - code = min+r.nextInt(max-min+1); + code = min + r.nextInt(max - min + 1); } else if (min >= UnicodeUtil.UNI_SUR_HIGH_START) { if (max > UnicodeUtil.UNI_SUR_LOW_END) { // after surrogates - code = 1+UnicodeUtil.UNI_SUR_LOW_END+r.nextInt(max-UnicodeUtil.UNI_SUR_LOW_END); + code = 1 + UnicodeUtil.UNI_SUR_LOW_END + r.nextInt(max - UnicodeUtil.UNI_SUR_LOW_END); } else { - throw new IllegalArgumentException("transition accepts only surrogates: min=" + min + " max=" + max); + throw new IllegalArgumentException( + "transition accepts only surrogates: min=" + min + " max=" + max); } } else if (max <= UnicodeUtil.UNI_SUR_LOW_END) { if (min < UnicodeUtil.UNI_SUR_HIGH_START) { // before surrogates code = min + r.nextInt(UnicodeUtil.UNI_SUR_HIGH_START - min); } else { - throw new IllegalArgumentException("transition accepts only surrogates: min=" + min + " max=" + max); + throw new IllegalArgumentException( + "transition accepts only surrogates: min=" + min + " max=" + max); } } else { // range includes all surrogates int gap1 = UnicodeUtil.UNI_SUR_HIGH_START - min; int gap2 = max - UnicodeUtil.UNI_SUR_LOW_END; - int c = r.nextInt(gap1+gap2); + int c = r.nextInt(gap1 + gap2); if (c < gap1) { code = min + c; } else { @@ -127,21 +124,22 @@ public class AutomatonTestUtil { } } - assert code >= min && code <= max && (code < UnicodeUtil.UNI_SUR_HIGH_START || code > UnicodeUtil.UNI_SUR_LOW_END): - "code=" + code + " min=" + min + 
" max=" + max; + assert code >= min + && code <= max + && (code < UnicodeUtil.UNI_SUR_HIGH_START || code > UnicodeUtil.UNI_SUR_LOW_END) + : "code=" + code + " min=" + min + " max=" + max; return code; } /** - * Lets you retrieve random strings accepted - * by an Automaton. - *
<p>
    - * Once created, call {@link #getRandomAcceptedString(Random)} - * to get a new string (in UTF-32 codepoints). + * Lets you retrieve random strings accepted by an Automaton. + * + *
<p>
    Once created, call {@link #getRandomAcceptedString(Random)} to get a new string (in UTF-32 + * codepoints). */ public static class RandomAcceptedStrings { - private final Map leadsToAccept; + private final Map leadsToAccept; private final Automaton a; private final Transition[][] transitions; @@ -163,7 +161,7 @@ public class AutomatonTestUtil { this.transitions = a.getSortedTransitions(); leadsToAccept = new HashMap<>(); - final Map> allArriving = new HashMap<>(); + final Map> allArriving = new HashMap<>(); final LinkedList q = new LinkedList<>(); final Set seen = new HashSet<>(); @@ -171,8 +169,8 @@ public class AutomatonTestUtil { // reverse map the transitions, so we can quickly look // up all arriving transitions to a given state int numStates = a.getNumStates(); - for(int s=0;s tl = allArriving.get(t.dest); if (tl == null) { tl = new ArrayList<>(); @@ -192,7 +190,7 @@ public class AutomatonTestUtil { final int s = q.removeFirst(); List arriving = allArriving.get(s); if (arriving != null) { - for(ArrivingTransition at : arriving) { + for (ArrivingTransition at : arriving) { final int from = at.from; if (!seen.contains(from)) { q.add(from); @@ -210,8 +208,8 @@ public class AutomatonTestUtil { int s = 0; - while(true) { - + while (true) { + if (a.isAccept(s)) { if (a.getNumTransitions(s) == 0) { // stop now @@ -234,7 +232,7 @@ public class AutomatonTestUtil { // pick a transition that we know is the fastest // path to an accept state List toAccept = new ArrayList<>(); - for(Transition t0 : transitions[s]) { + for (Transition t0 : transitions[s]) { if (leadsToAccept.containsKey(t0)) { toAccept.add(t0); } @@ -259,7 +257,8 @@ public class AutomatonTestUtil { private static Automaton randomSingleAutomaton(Random random) { while (true) { try { - Automaton a1 = new RegExp(AutomatonTestUtil.randomRegexp(random), RegExp.NONE).toAutomaton(); + Automaton a1 = + new RegExp(AutomatonTestUtil.randomRegexp(random), RegExp.NONE).toAutomaton(); if (random.nextBoolean()) { a1 = Operations.complement(a1, DEFAULT_MAX_DETERMINIZED_STATES); } @@ -269,7 +268,7 @@ public class AutomatonTestUtil { } } } - + /** return a random NFA/DFA for testing */ public static Automaton randomAutomaton(Random random) { // get two random Automata from regexps @@ -278,24 +277,28 @@ public class AutomatonTestUtil { // combine them in random ways switch (random.nextInt(4)) { - case 0: return Operations.concatenate(a1, a2); - case 1: return Operations.union(a1, a2); - case 2: return Operations.intersection(a1, a2); - default: return Operations.minus(a1, a2, DEFAULT_MAX_DETERMINIZED_STATES); + case 0: + return Operations.concatenate(a1, a2); + case 1: + return Operations.union(a1, a2); + case 2: + return Operations.intersection(a1, a2); + default: + return Operations.minus(a1, a2, DEFAULT_MAX_DETERMINIZED_STATES); } } - - /** - * below are original, unoptimized implementations of DFA operations for testing. - * These are from brics automaton, full license (BSD) below: + + /** + * below are original, unoptimized implementations of DFA operations for testing. These are from + * brics automaton, full license (BSD) below: */ - + /* * dk.brics.automaton - * + * * Copyright (c) 2001-2009 Anders Moeller * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: @@ -306,7 +309,7 @@ public class AutomatonTestUtil { * documentation and/or other materials provided with the distribution. * 3. 
The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. - * + * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. @@ -319,9 +322,7 @@ public class AutomatonTestUtil { * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ - /** - * Simple, original brics implementation of Brzozowski minimize() - */ + /** Simple, original brics implementation of Brzozowski minimize() */ public static Automaton minimizeSimple(Automaton a) { Set initialSet = new HashSet(); a = determinizeSimple(Operations.reverse(a, initialSet), initialSet); @@ -329,19 +330,17 @@ public class AutomatonTestUtil { a = determinizeSimple(Operations.reverse(a, initialSet), initialSet); return a; } - - /** - * Simple, original brics implementation of determinize() - */ + + /** Simple, original brics implementation of determinize() */ public static Automaton determinizeSimple(Automaton a) { Set initialset = new HashSet<>(); initialset.add(0); return determinizeSimple(a, initialset); } - /** - * Simple, original brics implementation of determinize() - * Determinizes the given automaton using the given set of initial states. + /** + * Simple, original brics implementation of determinize() Determinizes the given automaton using + * the given set of initial states. */ public static Automaton determinizeSimple(Automaton a, Set initialset) { if (a.getNumStates() == 0) { @@ -371,7 +370,7 @@ public class AutomatonTestUtil { Set p = new HashSet<>(); for (int q : s) { int count = a.initTransition(q, t); - for(int i=0;iReturns the set of accepted strings, assuming that at most - * limit strings are accepted. If more than limit - * strings are accepted, the first limit strings found are returned. If limit<0, then - * the limit is infinite. + *
<p>
    Returns the set of accepted strings, assuming that at most limit strings are + * accepted. If more than limit strings are accepted, the first limit strings found + * are returned. If limit<0, then the limit is infinite. * - *
<p>
    This implementation is recursive: it uses one stack - * frame for each digit in the returned strings (ie, max - * is the max length returned string). + *
<p>
    This implementation is recursive: it uses one stack frame for each digit in the returned + * strings (ie, max is the max length returned string). */ public static Set getFiniteStringsRecursive(Automaton a, int limit) { HashSet strings = new HashSet<>(); @@ -420,16 +417,20 @@ public class AutomatonTestUtil { } /** - * Returns the strings that can be produced from the given state, or - * false if more than limit strings are found. - * limit<0 means "infinite". + * Returns the strings that can be produced from the given state, or false if more than + * limit strings are found. limit<0 means "infinite". */ - private static boolean getFiniteStrings(Automaton a, int s, HashSet pathstates, - HashSet strings, IntsRefBuilder path, int limit) { + private static boolean getFiniteStrings( + Automaton a, + int s, + HashSet pathstates, + HashSet strings, + IntsRefBuilder path, + int limit) { pathstates.add(s); Transition t = new Transition(); int count = a.initTransition(s, t); - for (int i=0;i - * WARNING: this method is slow, it will blow up if the automaton is large. - * this is only used to test the correctness of our faster implementation. + * + *
<p>
    WARNING: this method is slow, it will blow up if the automaton is large. this is only used + * to test the correctness of our faster implementation. */ public static boolean isFiniteSlow(Automaton a) { if (a.getNumStates() == 0) { @@ -464,10 +465,10 @@ public class AutomatonTestUtil { } return isFiniteSlow(a, 0, new HashSet()); } - + /** - * Checks whether there is a loop containing s. (This is sufficient since - * there are never transitions to dead states.) + * Checks whether there is a loop containing s. (This is sufficient since there are never + * transitions to dead states.) */ // TODO: not great that this is recursive... in theory a // large automata could exceed java's stack @@ -475,7 +476,7 @@ public class AutomatonTestUtil { path.add(s); Transition t = new Transition(); int count = a.initTransition(s, t); - for (int i=0;i { @@ -59,7 +58,13 @@ public class FSTTester { long nodeCount; long arcCount; - public FSTTester(Random random, Directory dir, int inputMode, List> pairs, Outputs outputs, boolean doReverseLookup) { + public FSTTester( + Random random, + Directory dir, + int inputMode, + List> pairs, + Outputs outputs, + boolean doReverseLookup) { this.random = random; this.dir = dir; this.inputMode = inputMode; @@ -86,8 +91,8 @@ public class FSTTester { private static BytesRef toBytesRef(IntsRef ir) { BytesRef br = new BytesRef(ir.length); - for(int i=0;i= 0 && x <= 255; br.bytes[i] = (byte) x; } @@ -96,8 +101,9 @@ public class FSTTester { } /** - * [LUCENE-9600] This was made public because a misc module test depends on it. - * It is not recommended for generic usecase; consider {@link com.carrotsearch.randomizedtesting.generators.RandomStrings} to generate random strings. + * [LUCENE-9600] This was made public because a misc module test depends on it. It is not + * recommended for generic usecase; consider {@link + * com.carrotsearch.randomizedtesting.generators.RandomStrings} to generate random strings. 
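[Illustrative note, not part of the patch: a hedged sketch of the FSTCompiler.Builder flow that FSTTester's doTest exercises below — add sorted (input, output) pairs, compile, then look one back up. Class and method names are as of this commit; inputs must be added in sorted order, and PositiveIntOutputs requires non-negative values:]

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IntsRefBuilder;
import org.apache.lucene.util.fst.FST;
import org.apache.lucene.util.fst.FSTCompiler;
import org.apache.lucene.util.fst.PositiveIntOutputs;
import org.apache.lucene.util.fst.Util;

class FstBuildSketch {
  static void demo() throws Exception {
    PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton();
    FSTCompiler<Long> c =
        new FSTCompiler.Builder<>(FST.INPUT_TYPE.BYTE1, outputs).build();
    IntsRefBuilder scratch = new IntsRefBuilder();
    c.add(Util.toIntsRef(new BytesRef("cat"), scratch), 5L); // sorted order!
    c.add(Util.toIntsRef(new BytesRef("dog"), scratch), 7L);
    FST<Long> fst = c.compile();
    Long out = Util.get(fst, new BytesRef("dog")); // -> 7
  }
}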
*/ public static String getRandomString(Random random) { final String term; @@ -144,8 +150,8 @@ public class FSTTester { int charIdx = 0; int intIdx = 0; ir.clear(); - while(charIdx < charLength) { - ir.grow(intIdx+1); + while (charIdx < charLength) { + ir.grow(intIdx + 1); final int utf32 = s.codePointAt(charIdx); ir.append(utf32); charIdx += Character.charCount(utf32); @@ -157,8 +163,8 @@ public class FSTTester { static IntsRef toIntsRef(BytesRef br, IntsRefBuilder ir) { ir.grow(br.length); ir.clear(); - for(int i=0;i { if (testPruning) { // simple pruning doTest(TestUtil.nextInt(random, 1, 1 + pairs.size()), 0, true); - + // leafy pruning doTest(0, TestUtil.nextInt(random, 1, 1 + pairs.size()), true); } @@ -207,14 +213,16 @@ public class FSTTester { T output = NO_OUTPUT; final FST.BytesReader fstReader = fst.getBytesReader(); - for(int i=0;i<=term.length;i++) { + for (int i = 0; i <= term.length; i++) { final int label; if (i == term.length) { label = FST.END_LABEL; } else { - label = term.ints[term.offset+i]; + label = term.ints[term.offset + i]; } - // System.out.println(" loop i=" + i + " label=" + label + " output=" + fst.outputs.outputToString(output) + " curArc: target=" + arc.target + " isFinal?=" + arc.isFinal()); + // System.out.println(" loop i=" + i + " label=" + label + " output=" + + // fst.outputs.outputToString(output) + " curArc: target=" + arc.target + " isFinal?=" + + // arc.isFinal()); if (fst.findTargetArc(label, arc, arc, fstReader) == null) { // System.out.println(" not found"); if (prefixLength != null) { @@ -243,15 +251,15 @@ public class FSTTester { T output = NO_OUTPUT; final FST.BytesReader fstReader = fst.getBytesReader(); - while(true) { + while (true) { // read all arcs: fst.readFirstTargetArc(arc, arc, fstReader); arcs.add(new FST.Arc().copyFrom(arc)); - while(!arc.isLast()) { + while (!arc.isLast()) { fst.readNextArc(arc, fstReader); arcs.add(new FST.Arc().copyFrom(arc)); } - + // pick one arc = arcs.get(random.nextInt(arcs.size())); arcs.clear(); @@ -270,25 +278,29 @@ public class FSTTester { return output; } - FST doTest(int prune1, int prune2, boolean allowRandomSuffixSharing) throws IOException { if (LuceneTestCase.VERBOSE) { System.out.println("\nTEST: prune1=" + prune1 + " prune2=" + prune2); } - final FSTCompiler fstCompiler = new FSTCompiler.Builder<>(inputMode == 0 ? FST.INPUT_TYPE.BYTE1 : FST.INPUT_TYPE.BYTE4, outputs) - .minSuffixCount1(prune1) - .minSuffixCount2(prune2) - .shouldShareSuffix(prune1==0 && prune2==0) - .shouldShareNonSingletonNodes(allowRandomSuffixSharing ? random.nextBoolean() : true) - .shareMaxTailLength(allowRandomSuffixSharing ? TestUtil.nextInt(random, 1, 10) : Integer.MAX_VALUE) - .build(); + final FSTCompiler fstCompiler = + new FSTCompiler.Builder<>( + inputMode == 0 ? FST.INPUT_TYPE.BYTE1 : FST.INPUT_TYPE.BYTE4, outputs) + .minSuffixCount1(prune1) + .minSuffixCount2(prune2) + .shouldShareSuffix(prune1 == 0 && prune2 == 0) + .shouldShareNonSingletonNodes(allowRandomSuffixSharing ? random.nextBoolean() : true) + .shareMaxTailLength( + allowRandomSuffixSharing ? 
TestUtil.nextInt(random, 1, 10) : Integer.MAX_VALUE) + .build(); - for(InputOutput pair : pairs) { + for (InputOutput pair : pairs) { if (pair.output instanceof List) { - @SuppressWarnings("unchecked") List longValues = (List) pair.output; - @SuppressWarnings("unchecked") final FSTCompiler fstCompilerObject = (FSTCompiler) fstCompiler; - for(Long value : longValues) { + @SuppressWarnings("unchecked") + List longValues = (List) pair.output; + @SuppressWarnings("unchecked") + final FSTCompiler fstCompilerObject = (FSTCompiler) fstCompiler; + for (Long value : longValues) { fstCompilerObject.add(pair.input, value); } } else { @@ -323,7 +335,12 @@ public class FSTTester { if (fst == null) { System.out.println(" fst has 0 nodes (fully pruned)"); } else { - System.out.println(" fst has " + fstCompiler.getNodeCount() + " nodes and " + fstCompiler.getArcCount() + " arcs"); + System.out.println( + " fst has " + + fstCompiler.getNodeCount() + + " nodes and " + + fstCompiler.getArcCount() + + " arcs"); } } @@ -353,10 +370,11 @@ public class FSTTester { long maxLong = Long.MIN_VALUE; if (doReverseLookup) { - @SuppressWarnings("unchecked") FST fstLong0 = (FST) fst; + @SuppressWarnings("unchecked") + FST fstLong0 = (FST) fst; fstLong = fstLong0; validOutputs = new HashSet<>(); - for(InputOutput pair: pairs) { + for (InputOutput pair : pairs) { Long output = (Long) pair.output; maxLong = Math.max(maxLong, output); minLong = Math.min(minLong, output); @@ -374,11 +392,15 @@ public class FSTTester { if (LuceneTestCase.VERBOSE) { System.out.println("TEST: now verify " + pairs.size() + " terms"); - for(InputOutput pair : pairs) { + for (InputOutput pair : pairs) { assertNotNull(pair); assertNotNull(pair.input); assertNotNull(pair.output); - System.out.println(" " + inputToString(inputMode, pair.input) + ": " + outputs.outputToString(pair.output)); + System.out.println( + " " + + inputToString(inputMode, pair.input) + + ": " + + outputs.outputToString(pair.output)); } } @@ -392,10 +414,14 @@ public class FSTTester { } { IntsRefFSTEnum fstEnum = new IntsRefFSTEnum<>(fst); - for(InputOutput pair : pairs) { + for (InputOutput pair : pairs) { IntsRef term = pair.input; if (LuceneTestCase.VERBOSE) { - System.out.println("TEST: check term=" + inputToString(inputMode, term) + " output=" + fst.outputs.outputToString(pair.output)); + System.out.println( + "TEST: check term=" + + inputToString(inputMode, term) + + " output=" + + fst.outputs.outputToString(pair.output)); } T output = run(fst, term, null); assertNotNull("term " + inputToString(inputMode, term) + " is not accepted", output); @@ -404,25 +430,31 @@ public class FSTTester { // verify enum's next IntsRefFSTEnum.InputOutput t = fstEnum.next(); assertNotNull(t); - assertEquals("expected input=" + inputToString(inputMode, term) + " but fstEnum returned " + inputToString(inputMode, t.input), term, t.input); + assertEquals( + "expected input=" + + inputToString(inputMode, term) + + " but fstEnum returned " + + inputToString(inputMode, t.input), + term, + t.input); assertTrue(outputsEqual(pair.output, t.output)); } assertNull(fstEnum.next()); } - final Map termsMap = new HashMap<>(); - for(InputOutput pair : pairs) { + final Map termsMap = new HashMap<>(); + for (InputOutput pair : pairs) { termsMap.put(pair.input, pair.output); } if (doReverseLookup && maxLong > minLong) { // Do random lookups so we test null (output doesn't // exist) case: - assertNull(Util.getByOutput(fstLong, minLong-7)); - assertNull(Util.getByOutput(fstLong, maxLong+7)); + 
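[Illustrative note, not part of the patch: the verification loop below leans on IntsRefFSTEnum, which walks every (input, output) pair of an FST in sorted input order. A minimal traversal sketch, with a hypothetical class name:]

import org.apache.lucene.util.fst.FST;
import org.apache.lucene.util.fst.IntsRefFSTEnum;

class FstEnumSketch {
  static <T> void dump(FST<T> fst) throws Exception {
    IntsRefFSTEnum<T> e = new IntsRefFSTEnum<>(fst);
    IntsRefFSTEnum.InputOutput<T> io;
    while ((io = e.next()) != null) { // null marks exhaustion
      System.out.println(io.input + " -> " + io.output);
    }
  }
}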
assertNull(Util.getByOutput(fstLong, minLong - 7)); + assertNull(Util.getByOutput(fstLong, maxLong + 7)); final int num = LuceneTestCase.atLeast(random, 100); - for(int iter=0;iter { } final IntsRefBuilder scratch = new IntsRefBuilder(); int num = LuceneTestCase.atLeast(random, 500); - for(int iter=0;iter fstEnum = new IntsRefFSTEnum<>(fst); num = LuceneTestCase.atLeast(random, 100); - for(int iter=0;iter seekResult; if (random.nextInt(3) == 0) { if (LuceneTestCase.VERBOSE) { - System.out.println(" do non-exist seekExact term=" + inputToString(inputMode, term)); + System.out.println( + " do non-exist seekExact term=" + inputToString(inputMode, term)); } seekResult = fstEnum.seekExact(term); pos = -1; } else if (random.nextBoolean()) { if (LuceneTestCase.VERBOSE) { - System.out.println(" do non-exist seekFloor term=" + inputToString(inputMode, term)); + System.out.println( + " do non-exist seekFloor term=" + inputToString(inputMode, term)); } seekResult = fstEnum.seekFloor(term); pos--; } else { if (LuceneTestCase.VERBOSE) { - System.out.println(" do non-exist seekCeil term=" + inputToString(inputMode, term)); + System.out.println( + " do non-exist seekCeil term=" + inputToString(inputMode, term)); } seekResult = fstEnum.seekCeil(term); } if (pos != -1 && pos < pairs.size()) { - //System.out.println(" got " + inputToString(inputMode,seekResult.input) + " output=" + fst.outputs.outputToString(seekResult.output)); - assertNotNull("got null but expected term=" + inputToString(inputMode, pairs.get(pos).input), seekResult); + // System.out.println(" got " + inputToString(inputMode,seekResult.input) + " + // output=" + fst.outputs.outputToString(seekResult.output)); + assertNotNull( + "got null but expected term=" + inputToString(inputMode, pairs.get(pos).input), + seekResult); if (LuceneTestCase.VERBOSE) { System.out.println(" got " + inputToString(inputMode, seekResult.input)); } - assertEquals("expected " + inputToString(inputMode, pairs.get(pos).input) + " but got " + inputToString(inputMode, seekResult.input), pairs.get(pos).input, seekResult.input); + assertEquals( + "expected " + + inputToString(inputMode, pairs.get(pos).input) + + " but got " + + inputToString(inputMode, seekResult.input), + pairs.get(pos).input, + seekResult.input); assertTrue(outputsEqual(pairs.get(pos).output, seekResult.output)); } else { // seeked before start or beyond end - //System.out.println("seek=" + seekTerm); - assertNull("expected null but got " + (seekResult==null ? "null" : inputToString(inputMode, seekResult.input)), seekResult); + // System.out.println("seek=" + seekTerm); + assertNull( + "expected null but got " + + (seekResult == null ? 
"null" : inputToString(inputMode, seekResult.input)), + seekResult); if (LuceneTestCase.VERBOSE) { System.out.println(" got null"); } @@ -514,7 +563,8 @@ public class FSTTester { final IntsRefFSTEnum.InputOutput seekResult; if (random.nextInt(3) == 2) { if (LuceneTestCase.VERBOSE) { - System.out.println(" do exists seekExact term=" + inputToString(inputMode, pair.input)); + System.out.println( + " do exists seekExact term=" + inputToString(inputMode, pair.input)); } seekResult = fstEnum.seekExact(pair.input); } else if (random.nextBoolean()) { @@ -529,7 +579,13 @@ public class FSTTester { seekResult = fstEnum.seekCeil(pair.input); } assertNotNull(seekResult); - assertEquals("got " + inputToString(inputMode, seekResult.input) + " but expected " + inputToString(inputMode, pair.input), pair.input, seekResult.input); + assertEquals( + "got " + + inputToString(inputMode, seekResult.input) + + " but expected " + + inputToString(inputMode, pair.input), + pair.input, + seekResult.input); assertTrue(outputsEqual(pair.output, seekResult.output)); } } @@ -540,16 +596,16 @@ public class FSTTester { // test mixed next/seek num = LuceneTestCase.atLeast(random, 100); - for(int iter=0;iter(fst); int upto = -1; - while(true) { + while (true) { boolean isDone = false; - if (upto == pairs.size()-1 || random.nextBoolean()) { + if (upto == pairs.size() - 1 || random.nextBoolean()) { // next upto++; if (LuceneTestCase.VERBOSE) { @@ -558,23 +614,25 @@ public class FSTTester { isDone = fstEnum.next() == null; } else if (upto != -1 && upto < 0.75 * pairs.size() && random.nextBoolean()) { int attempt = 0; - for(;attempt<10;attempt++) { + for (; attempt < 10; attempt++) { IntsRef term = toIntsRef(getRandomString(random), inputMode); if (!termsMap.containsKey(term) && term.compareTo(pairs.get(upto).input) > 0) { int pos = Collections.binarySearch(pairs, new InputOutput(term, null)); assert pos < 0; - upto = -(pos+1); + upto = -(pos + 1); if (random.nextBoolean()) { upto--; assertTrue(upto != -1); if (LuceneTestCase.VERBOSE) { - System.out.println(" do non-exist seekFloor(" + inputToString(inputMode, term) + ")"); + System.out.println( + " do non-exist seekFloor(" + inputToString(inputMode, term) + ")"); } isDone = fstEnum.seekFloor(term) == null; } else { if (LuceneTestCase.VERBOSE) { - System.out.println(" do non-exist seekCeil(" + inputToString(inputMode, term) + ")"); + System.out.println( + " do non-exist seekCeil(" + inputToString(inputMode, term) + ")"); } isDone = fstEnum.seekCeil(term) == null; } @@ -585,7 +643,7 @@ public class FSTTester { if (attempt == 10) { continue; } - + } else { final int inc = random.nextInt(pairs.size() - upto - 1); upto += inc; @@ -595,12 +653,14 @@ public class FSTTester { if (random.nextBoolean()) { if (LuceneTestCase.VERBOSE) { - System.out.println(" do seekCeil(" + inputToString(inputMode, pairs.get(upto).input) + ")"); + System.out.println( + " do seekCeil(" + inputToString(inputMode, pairs.get(upto).input) + ")"); } isDone = fstEnum.seekCeil(pairs.get(upto).input) == null; } else { if (LuceneTestCase.VERBOSE) { - System.out.println(" do seekFloor(" + inputToString(inputMode, pairs.get(upto).input) + ")"); + System.out.println( + " do seekFloor(" + inputToString(inputMode, pairs.get(upto).input) + ")"); } isDone = fstEnum.seekFloor(pairs.get(upto).input) == null; } @@ -656,8 +716,12 @@ public class FSTTester { if (LuceneTestCase.VERBOSE) { System.out.println("TEST: now verify pruned " + pairs.size() + " terms; outputs=" + outputs); - for(InputOutput pair : pairs) { - 
System.out.println(" " + inputToString(inputMode, pair.input) + ": " + outputs.outputToString(pair.output)); + for (InputOutput pair : pairs) { + System.out.println( + " " + + inputToString(inputMode, pair.input) + + ": " + + outputs.outputToString(pair.output)); } } @@ -668,14 +732,14 @@ public class FSTTester { // NOTE: Crazy RAM intensive!! - //System.out.println("TEST: tally prefixes"); + // System.out.println("TEST: tally prefixes"); // build all prefixes - final Map> prefixes = new HashMap<>(); + final Map> prefixes = new HashMap<>(); final IntsRefBuilder scratch = new IntsRefBuilder(); - for(InputOutput pair: pairs) { + for (InputOutput pair : pairs) { scratch.copyInts(pair.input); - for(int idx=0;idx<=pair.input.length;idx++) { + for (int idx = 0; idx <= pair.input.length; idx++) { scratch.setLength(idx); CountMinOutput cmo = prefixes.get(scratch.get()); if (cmo == null) { @@ -707,13 +771,23 @@ public class FSTTester { } // prune 'em - final Iterator>> it = prefixes.entrySet().iterator(); - while(it.hasNext()) { - Map.Entry> ent = it.next(); + final Iterator>> it = prefixes.entrySet().iterator(); + while (it.hasNext()) { + Map.Entry> ent = it.next(); final IntsRef prefix = ent.getKey(); final CountMinOutput cmo = ent.getValue(); if (LuceneTestCase.VERBOSE) { - System.out.println(" term prefix=" + inputToString(inputMode, prefix, false) + " count=" + cmo.count + " isLeaf=" + cmo.isLeaf + " output=" + outputs.outputToString(cmo.output) + " isFinal=" + cmo.isFinal); + System.out.println( + " term prefix=" + + inputToString(inputMode, prefix, false) + + " count=" + + cmo.count + + " isLeaf=" + + cmo.isLeaf + + " output=" + + outputs.outputToString(cmo.output) + + " isFinal=" + + cmo.isFinal); } final boolean keep; if (prune1 > 0) { @@ -724,11 +798,14 @@ public class FSTTester { keep = true; } else if (prefix.length > 0) { // consult our parent - scratch.setLength(prefix.length-1); + scratch.setLength(prefix.length - 1); System.arraycopy(prefix.ints, prefix.offset, scratch.ints(), 0, scratch.length()); final CountMinOutput cmo2 = prefixes.get(scratch.get()); - //System.out.println(" parent count = " + (cmo2 == null ? -1 : cmo2.count)); - keep = cmo2 != null && ((prune2 > 1 && cmo2.count >= prune2) || (prune2 == 1 && (cmo2.count >= 2 || prefix.length <= 1))); + // System.out.println(" parent count = " + (cmo2 == null ? 
-1 : cmo2.count)); + keep = + cmo2 != null + && ((prune2 > 1 && cmo2.count >= prune2) + || (prune2 == 1 && (cmo2.count >= 2 || prefix.length <= 1))); } else if (cmo.count >= prune2) { keep = true; } else { @@ -738,16 +815,16 @@ public class FSTTester { if (!keep) { it.remove(); - //System.out.println(" remove"); + // System.out.println(" remove"); } else { // clear isLeaf for all ancestors - //System.out.println(" keep"); + // System.out.println(" keep"); scratch.copyInts(prefix); scratch.setLength(scratch.length() - 1); - while(scratch.length() >= 0) { + while (scratch.length() >= 0) { final CountMinOutput cmo2 = prefixes.get(scratch.get()); if (cmo2 != null) { - //System.out.println(" clear isLeaf " + inputToString(inputMode, scratch)); + // System.out.println(" clear isLeaf " + inputToString(inputMode, scratch)); cmo2.isLeaf = false; } scratch.setLength(scratch.length() - 1); @@ -757,10 +834,17 @@ public class FSTTester { if (LuceneTestCase.VERBOSE) { System.out.println("TEST: after prune"); - for(Map.Entry> ent : prefixes.entrySet()) { - System.out.println(" " + inputToString(inputMode, ent.getKey(), false) + ": isLeaf=" + ent.getValue().isLeaf + " isFinal=" + ent.getValue().isFinal); + for (Map.Entry> ent : prefixes.entrySet()) { + System.out.println( + " " + + inputToString(inputMode, ent.getKey(), false) + + ": isLeaf=" + + ent.getValue().isLeaf + + " isFinal=" + + ent.getValue().isFinal); if (ent.getValue().isFinal) { - System.out.println(" finalOutput=" + outputs.outputToString(ent.getValue().finalOutput)); + System.out.println( + " finalOutput=" + outputs.outputToString(ent.getValue().finalOutput)); } } } @@ -778,14 +862,18 @@ public class FSTTester { } IntsRefFSTEnum fstEnum = new IntsRefFSTEnum<>(fst); IntsRefFSTEnum.InputOutput current; - while((current = fstEnum.next()) != null) { + while ((current = fstEnum.next()) != null) { if (LuceneTestCase.VERBOSE) { - System.out.println(" fstEnum.next prefix=" + inputToString(inputMode, current.input, false) + " output=" + outputs.outputToString(current.output)); + System.out.println( + " fstEnum.next prefix=" + + inputToString(inputMode, current.input, false) + + " output=" + + outputs.outputToString(current.output)); } final CountMinOutput cmo = prefixes.get(current.input); assertNotNull(cmo); assertTrue(cmo.isLeaf || cmo.isFinal); - //if (cmo.isFinal && !cmo.isLeaf) { + // if (cmo.isFinal && !cmo.isLeaf) { if (cmo.isFinal) { assertEquals(cmo.finalOutput, current.output); } else { @@ -798,12 +886,16 @@ public class FSTTester { System.out.println("TEST: verify all prefixes"); } final int[] stopNode = new int[1]; - for(Map.Entry> ent : prefixes.entrySet()) { + for (Map.Entry> ent : prefixes.entrySet()) { if (ent.getKey().length > 0) { final CountMinOutput cmo = ent.getValue(); final T output = run(fst, ent.getKey(), stopNode); if (LuceneTestCase.VERBOSE) { - System.out.println("TEST: verify prefix=" + inputToString(inputMode, ent.getKey(), false) + " output=" + outputs.outputToString(cmo.output)); + System.out.println( + "TEST: verify prefix=" + + inputToString(inputMode, ent.getKey(), false) + + " output=" + + outputs.outputToString(cmo.output)); } // if (cmo.isFinal && !cmo.isLeaf) { if (cmo.isFinal) { @@ -816,4 +908,3 @@ public class FSTTester { } } } - diff --git a/lucene/test-framework/src/test/org/apache/lucene/analysis/TestLookaheadTokenFilter.java b/lucene/test-framework/src/test/org/apache/lucene/analysis/TestLookaheadTokenFilter.java index cccaf6b82e5..aa3d735e7e9 100644 --- 
a/lucene/test-framework/src/test/org/apache/lucene/analysis/TestLookaheadTokenFilter.java
+++ b/lucene/test-framework/src/test/org/apache/lucene/analysis/TestLookaheadTokenFilter.java
@@ -22,20 +22,22 @@ import java.util.Random;
 
 public class TestLookaheadTokenFilter extends BaseTokenStreamTestCase {
   public void testRandomStrings() throws Exception {
-    Analyzer a = new Analyzer() {
-      @Override
-      protected TokenStreamComponents createComponents(String fieldName) {
-        Random random = random();
-        Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, random.nextBoolean());
-        TokenStream output = new MockRandomLookaheadTokenFilter(random, tokenizer);
-        return new TokenStreamComponents(tokenizer, output);
-      }
-    };
+    Analyzer a =
+        new Analyzer() {
+          @Override
+          protected TokenStreamComponents createComponents(String fieldName) {
+            Random random = random();
+            Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, random.nextBoolean());
+            TokenStream output = new MockRandomLookaheadTokenFilter(random, tokenizer);
+            return new TokenStreamComponents(tokenizer, output);
+          }
+        };
     int maxLength = TEST_NIGHTLY ? 8192 : 1024;
-    checkRandomData(random(), a, 50*RANDOM_MULTIPLIER, maxLength);
+    checkRandomData(random(), a, 50 * RANDOM_MULTIPLIER, maxLength);
   }
 
-  private static class NeverPeeksLookaheadTokenFilter extends LookaheadTokenFilter<LookaheadTokenFilter.Position> {
+  private static class NeverPeeksLookaheadTokenFilter
+      extends LookaheadTokenFilter<LookaheadTokenFilter.Position> {
     public NeverPeeksLookaheadTokenFilter(TokenStream input) {
       super(input);
     }
@@ -52,45 +54,49 @@ public class TestLookaheadTokenFilter extends BaseTokenStreamTestCase {
   }
 
   public void testNeverCallingPeek() throws Exception {
-    Analyzer a = new Analyzer() {
-      @Override
-      protected TokenStreamComponents createComponents(String fieldName) {
-        Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, random().nextBoolean());
-        TokenStream output = new NeverPeeksLookaheadTokenFilter(tokenizer);
-        return new TokenStreamComponents(tokenizer, output);
-      }
-    };
+    Analyzer a =
+        new Analyzer() {
+          @Override
+          protected TokenStreamComponents createComponents(String fieldName) {
+            Tokenizer tokenizer =
+                new MockTokenizer(MockTokenizer.WHITESPACE, random().nextBoolean());
+            TokenStream output = new NeverPeeksLookaheadTokenFilter(tokenizer);
+            return new TokenStreamComponents(tokenizer, output);
+          }
+        };
     int maxLength = TEST_NIGHTLY ? 8192 : 1024;
-    checkRandomData(random(), a, 50*RANDOM_MULTIPLIER, maxLength);
+    checkRandomData(random(), a, 50 * RANDOM_MULTIPLIER, maxLength);
   }
 
   public void testMissedFirstToken() throws Exception {
-    Analyzer analyzer = new Analyzer() {
-      @Override
-      protected TokenStreamComponents createComponents(String fieldName) {
-        Tokenizer source = new MockTokenizer(MockTokenizer.WHITESPACE, false);
-        TrivialLookaheadFilter filter = new TrivialLookaheadFilter(source);
-        return new TokenStreamComponents(source, filter);
-      }
-    };
+    Analyzer analyzer =
+        new Analyzer() {
+          @Override
+          protected TokenStreamComponents createComponents(String fieldName) {
+            Tokenizer source = new MockTokenizer(MockTokenizer.WHITESPACE, false);
+            TrivialLookaheadFilter filter = new TrivialLookaheadFilter(source);
+            return new TokenStreamComponents(source, filter);
+          }
+        };
 
-    assertAnalyzesTo(analyzer,
+    assertAnalyzesTo(
+        analyzer,
         "Only he who is running knows .",
-        new String[]{
-          "Only",
-          "Only-huh?",
-          "he",
-          "he-huh?",
-          "who",
-          "who-huh?",
-          "is",
-          "is-huh?",
-          "running",
-          "running-huh?",
-          "knows",
-          "knows-huh?",
-          ".",
-          ".-huh?"
+        new String[] {
+          "Only",
+          "Only-huh?",
+          "he",
+          "he-huh?",
+          "who",
+          "who-huh?",
+          "is",
+          "is-huh?",
+          "running",
+          "running-huh?",
+          "knows",
+          "knows-huh?",
+          ".",
+          ".-huh?"
         });
   }
 }
diff --git a/lucene/test-framework/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java b/lucene/test-framework/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java
index 6e73206e77f..c3fccc5514d 100644
--- a/lucene/test-framework/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java
+++ b/lucene/test-framework/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java
@@ -16,18 +16,19 @@
  */
 package org.apache.lucene.analysis;
 
+import static org.apache.lucene.util.automaton.Operations.DEFAULT_MAX_DETERMINIZED_STATES;
+
 import java.io.Reader;
 import java.io.StringReader;
 import java.util.Arrays;
 import java.util.Random;
-
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
-import org.apache.lucene.index.PostingsEnum;
 import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.PostingsEnum;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
@@ -39,177 +40,166 @@ import org.apache.lucene.util.automaton.CharacterRunAutomaton;
 import org.apache.lucene.util.automaton.Operations;
 import org.apache.lucene.util.automaton.RegExp;
 
-import static org.apache.lucene.util.automaton.Operations.DEFAULT_MAX_DETERMINIZED_STATES;
-
 public class TestMockAnalyzer extends BaseTokenStreamTestCase {
 
   /** Test a configuration that behaves a lot like WhitespaceAnalyzer */
   public void testWhitespace() throws Exception {
     Analyzer a = new MockAnalyzer(random());
-    assertAnalyzesTo(a, "A bc defg hiJklmn opqrstuv wxy z ",
-        new String[] { "a", "bc", "defg", "hijklmn", "opqrstuv", "wxy", "z" });
-    assertAnalyzesTo(a, "aba cadaba shazam",
-        new String[] { "aba", "cadaba", "shazam" });
-    assertAnalyzesTo(a, "break on whitespace",
-        new String[] { "break", "on", "whitespace" });
+    assertAnalyzesTo(
+        a,
+        "A bc defg hiJklmn opqrstuv wxy z ",
+        new String[] {"a", "bc", "defg", "hijklmn", "opqrstuv", "wxy", "z"});
+    assertAnalyzesTo(a, "aba cadaba shazam", new String[] {"aba", "cadaba", "shazam"});
+    assertAnalyzesTo(a, "break on whitespace", new String[] {"break", "on", "whitespace"});
   }
-  
+
   /** Test a configuration that behaves a lot like SimpleAnalyzer */
   public void testSimple() throws Exception {
     Analyzer a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true);
-    assertAnalyzesTo(a, "a-bc123 defg+hijklmn567opqrstuv78wxy_z ",
-        new String[] { "a", "bc", "defg", "hijklmn", "opqrstuv", "wxy", "z" });
-    assertAnalyzesTo(a, "aba4cadaba-Shazam",
-        new String[] { "aba", "cadaba", "shazam" });
-    assertAnalyzesTo(a, "break+on/Letters",
-        new String[] { "break", "on", "letters" });
+    assertAnalyzesTo(
+        a,
+        "a-bc123 defg+hijklmn567opqrstuv78wxy_z ",
+        new String[] {"a", "bc", "defg", "hijklmn", "opqrstuv", "wxy", "z"});
+    assertAnalyzesTo(a, "aba4cadaba-Shazam", new String[] {"aba", "cadaba", "shazam"});
+    assertAnalyzesTo(a, "break+on/Letters", new String[] {"break", "on", "letters"});
   }
-  
+
   /** Test a configuration that behaves a lot like KeywordAnalyzer */
   public void testKeyword() throws Exception {
     Analyzer a = new MockAnalyzer(random(), MockTokenizer.KEYWORD, false);
-    assertAnalyzesTo(a, "a-bc123 defg+hijklmn567opqrstuv78wxy_z ",
-        new String[] { "a-bc123 
defg+hijklmn567opqrstuv78wxy_z " }); - assertAnalyzesTo(a, "aba4cadaba-Shazam", - new String[] { "aba4cadaba-Shazam" }); - assertAnalyzesTo(a, "break+on/Nothing", - new String[] { "break+on/Nothing" }); + assertAnalyzesTo( + a, + "a-bc123 defg+hijklmn567opqrstuv78wxy_z ", + new String[] {"a-bc123 defg+hijklmn567opqrstuv78wxy_z "}); + assertAnalyzesTo(a, "aba4cadaba-Shazam", new String[] {"aba4cadaba-Shazam"}); + assertAnalyzesTo(a, "break+on/Nothing", new String[] {"break+on/Nothing"}); // currently though emits no tokens for empty string: maybe we can do it, // but we don't want to emit tokens infinitely... assertAnalyzesTo(a, "", new String[0]); } - + // Test some regular expressions as tokenization patterns /** Test a configuration where each character is a term */ public void testSingleChar() throws Exception { - CharacterRunAutomaton single = - new CharacterRunAutomaton(new RegExp(".").toAutomaton()); + CharacterRunAutomaton single = new CharacterRunAutomaton(new RegExp(".").toAutomaton()); Analyzer a = new MockAnalyzer(random(), single, false); - assertAnalyzesTo(a, "foobar", - new String[] { "f", "o", "o", "b", "a", "r" }, - new int[] { 0, 1, 2, 3, 4, 5 }, - new int[] { 1, 2, 3, 4, 5, 6 } - ); + assertAnalyzesTo( + a, + "foobar", + new String[] {"f", "o", "o", "b", "a", "r"}, + new int[] {0, 1, 2, 3, 4, 5}, + new int[] {1, 2, 3, 4, 5, 6}); checkRandomData(random(), a, 100); } - + /** Test a configuration where two characters makes a term */ public void testTwoChars() throws Exception { - CharacterRunAutomaton single = - new CharacterRunAutomaton(new RegExp("..").toAutomaton()); + CharacterRunAutomaton single = new CharacterRunAutomaton(new RegExp("..").toAutomaton()); Analyzer a = new MockAnalyzer(random(), single, false); - assertAnalyzesTo(a, "foobar", - new String[] { "fo", "ob", "ar"}, - new int[] { 0, 2, 4 }, - new int[] { 2, 4, 6 } - ); + assertAnalyzesTo( + a, "foobar", new String[] {"fo", "ob", "ar"}, new int[] {0, 2, 4}, new int[] {2, 4, 6}); // make sure when last term is a "partial" match that end() is correct - assertTokenStreamContents(a.tokenStream("bogus", "fooba"), - new String[] { "fo", "ob" }, - new int[] { 0, 2 }, - new int[] { 2, 4 }, - new int[] { 1, 1 }, - 5 - ); + assertTokenStreamContents( + a.tokenStream("bogus", "fooba"), + new String[] {"fo", "ob"}, + new int[] {0, 2}, + new int[] {2, 4}, + new int[] {1, 1}, + 5); checkRandomData(random(), a, 100); } - + /** Test a configuration where three characters makes a term */ public void testThreeChars() throws Exception { - CharacterRunAutomaton single = - new CharacterRunAutomaton(new RegExp("...").toAutomaton()); + CharacterRunAutomaton single = new CharacterRunAutomaton(new RegExp("...").toAutomaton()); Analyzer a = new MockAnalyzer(random(), single, false); - assertAnalyzesTo(a, "foobar", - new String[] { "foo", "bar"}, - new int[] { 0, 3 }, - new int[] { 3, 6 } - ); + assertAnalyzesTo(a, "foobar", new String[] {"foo", "bar"}, new int[] {0, 3}, new int[] {3, 6}); // make sure when last term is a "partial" match that end() is correct - assertTokenStreamContents(a.tokenStream("bogus", "fooba"), - new String[] { "foo" }, - new int[] { 0 }, - new int[] { 3 }, - new int[] { 1 }, - 5 - ); + assertTokenStreamContents( + a.tokenStream("bogus", "fooba"), + new String[] {"foo"}, + new int[] {0}, + new int[] {3}, + new int[] {1}, + 5); checkRandomData(random(), a, 100); } - + /** Test a configuration where word starts with one uppercase */ public void testUppercase() throws Exception { CharacterRunAutomaton single = new 
CharacterRunAutomaton(new RegExp("[A-Z][a-z]*").toAutomaton()); Analyzer a = new MockAnalyzer(random(), single, false); - assertAnalyzesTo(a, "FooBarBAZ", - new String[] { "Foo", "Bar", "B", "A", "Z"}, - new int[] { 0, 3, 6, 7, 8 }, - new int[] { 3, 6, 7, 8, 9 } - ); - assertAnalyzesTo(a, "aFooBar", - new String[] { "Foo", "Bar" }, - new int[] { 1, 4 }, - new int[] { 4, 7 } - ); + assertAnalyzesTo( + a, + "FooBarBAZ", + new String[] {"Foo", "Bar", "B", "A", "Z"}, + new int[] {0, 3, 6, 7, 8}, + new int[] {3, 6, 7, 8, 9}); + assertAnalyzesTo(a, "aFooBar", new String[] {"Foo", "Bar"}, new int[] {1, 4}, new int[] {4, 7}); checkRandomData(random(), a, 100); } - + /** Test a configuration that behaves a lot like StopAnalyzer */ public void testStop() throws Exception { - Analyzer a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET); - assertAnalyzesTo(a, "the quick brown a fox", - new String[] { "quick", "brown", "fox" }, - new int[] { 2, 1, 2 }); + Analyzer a = + new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET); + assertAnalyzesTo( + a, "the quick brown a fox", new String[] {"quick", "brown", "fox"}, new int[] {2, 1, 2}); } - + /** Test a configuration that behaves a lot like KeepWordFilter */ public void testKeep() throws Exception { - CharacterRunAutomaton keepWords = - new CharacterRunAutomaton( - Operations.complement( - Operations.union( - Arrays.asList(Automata.makeString("foo"), Automata.makeString("bar"))), - DEFAULT_MAX_DETERMINIZED_STATES)); + CharacterRunAutomaton keepWords = + new CharacterRunAutomaton( + Operations.complement( + Operations.union( + Arrays.asList(Automata.makeString("foo"), Automata.makeString("bar"))), + DEFAULT_MAX_DETERMINIZED_STATES)); Analyzer a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, keepWords); - assertAnalyzesTo(a, "quick foo brown bar bar fox foo", - new String[] { "foo", "bar", "bar", "foo" }, - new int[] { 2, 2, 1, 2 }); + assertAnalyzesTo( + a, + "quick foo brown bar bar fox foo", + new String[] {"foo", "bar", "bar", "foo"}, + new int[] {2, 2, 1, 2}); } - + /** Test a configuration that behaves a lot like LengthFilter */ public void testLength() throws Exception { CharacterRunAutomaton length5 = new CharacterRunAutomaton(new RegExp(".{5,}").toAutomaton()); Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true, length5); - assertAnalyzesTo(a, "ok toolong fine notfine", - new String[] { "ok", "fine" }, - new int[] { 1, 2 }); + assertAnalyzesTo(a, "ok toolong fine notfine", new String[] {"ok", "fine"}, new int[] {1, 2}); } - + /** Test MockTokenizer encountering a too long token */ public void testTooLongToken() throws Exception { - Analyzer whitespace = new Analyzer() { - @Override - protected TokenStreamComponents createComponents(String fieldName) { - Tokenizer t = new MockTokenizer(MockTokenizer.WHITESPACE, false, 5); - return new TokenStreamComponents(t, t); - } - }; - - assertTokenStreamContents(whitespace.tokenStream("bogus", "test 123 toolong ok "), - new String[] { "test", "123", "toolo", "ng", "ok" }, - new int[] { 0, 5, 9, 14, 17 }, - new int[] { 4, 8, 14, 16, 19 }, + Analyzer whitespace = + new Analyzer() { + @Override + protected TokenStreamComponents createComponents(String fieldName) { + Tokenizer t = new MockTokenizer(MockTokenizer.WHITESPACE, false, 5); + return new TokenStreamComponents(t, t); + } + }; + + assertTokenStreamContents( + whitespace.tokenStream("bogus", "test 123 toolong ok "), + new String[] {"test", "123", "toolo", 
"ng", "ok"}, + new int[] {0, 5, 9, 14, 17}, + new int[] {4, 8, 14, 16, 19}, 20); - - assertTokenStreamContents(whitespace.tokenStream("bogus", "test 123 toolo"), - new String[] { "test", "123", "toolo" }, - new int[] { 0, 5, 9 }, - new int[] { 4, 8, 14 }, + + assertTokenStreamContents( + whitespace.tokenStream("bogus", "test 123 toolo"), + new String[] {"test", "123", "toolo"}, + new int[] {0, 5, 9}, + new int[] {4, 8, 14}, 14); } - + public void testLUCENE_3042() throws Exception { String testString = "t"; - + Analyzer analyzer = new MockAnalyzer(random()); try (TokenStream stream = analyzer.tokenStream("dummy", testString)) { stream.reset(); @@ -218,34 +208,36 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase { } stream.end(); } - - assertAnalyzesTo(analyzer, testString, new String[] { "t" }); + + assertAnalyzesTo(analyzer, testString, new String[] {"t"}); } /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { checkRandomData(random(), new MockAnalyzer(random()), atLeast(1000)); } - + /** blast some random strings through differently configured tokenizers */ public void testRandomRegexps() throws Exception { int iters = TEST_NIGHTLY ? atLeast(30) : atLeast(1); for (int i = 0; i < iters; i++) { - final CharacterRunAutomaton dfa = new CharacterRunAutomaton(AutomatonTestUtil.randomAutomaton(random()), Integer.MAX_VALUE); + final CharacterRunAutomaton dfa = + new CharacterRunAutomaton(AutomatonTestUtil.randomAutomaton(random()), Integer.MAX_VALUE); final boolean lowercase = random().nextBoolean(); final int limit = TestUtil.nextInt(random(), 0, 500); - Analyzer a = new Analyzer() { - @Override - protected TokenStreamComponents createComponents(String fieldName) { - Tokenizer t = new MockTokenizer(dfa, lowercase, limit); - return new TokenStreamComponents(t, t); - } - }; + Analyzer a = + new Analyzer() { + @Override + protected TokenStreamComponents createComponents(String fieldName) { + Tokenizer t = new MockTokenizer(dfa, lowercase, limit); + return new TokenStreamComponents(t, t); + } + }; checkRandomData(random(), a, 100); a.close(); } } - + public void testForwardOffsets() throws Exception { int num = atLeast(1000); for (int i = 0; i < num; i++) { @@ -255,32 +247,31 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase { MockAnalyzer analyzer = new MockAnalyzer(random()); try (TokenStream ts = analyzer.tokenStream("bogus", charfilter)) { ts.reset(); - while (ts.incrementToken()) { - ; - } + while (ts.incrementToken()) {} ts.end(); } } } - + public void testWrapReader() throws Exception { // LUCENE-5153: test that wrapping an analyzer's reader is allowed final Random random = random(); - + final Analyzer delegate = new MockAnalyzer(random); - Analyzer a = new AnalyzerWrapper(delegate.getReuseStrategy()) { - - @Override - protected Reader wrapReader(String fieldName, Reader reader) { - return new MockCharFilter(reader, 7); - } - - @Override - protected Analyzer getWrappedAnalyzer(String fieldName) { - return delegate; - } - }; - + Analyzer a = + new AnalyzerWrapper(delegate.getReuseStrategy()) { + + @Override + protected Reader wrapReader(String fieldName, Reader reader) { + return new MockCharFilter(reader, 7); + } + + @Override + protected Analyzer getWrappedAnalyzer(String fieldName) { + return delegate; + } + }; + checkOneTerm(a, "abc", "aabc"); } @@ -289,20 +280,23 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase { final int positionGap = random().nextInt(1000); final int offsetGap = 
random().nextInt(1000); final Analyzer delegate = new MockAnalyzer(random()); - final Analyzer a = new DelegatingAnalyzerWrapper(delegate.getReuseStrategy()) { - @Override - protected Analyzer getWrappedAnalyzer(String fieldName) { - return delegate; - } - @Override - public int getPositionIncrementGap(String fieldName) { - return positionGap; - } - @Override - public int getOffsetGap(String fieldName) { - return offsetGap; - } - }; + final Analyzer a = + new DelegatingAnalyzerWrapper(delegate.getReuseStrategy()) { + @Override + protected Analyzer getWrappedAnalyzer(String fieldName) { + return delegate; + } + + @Override + public int getPositionIncrementGap(String fieldName) { + return positionGap; + } + + @Override + public int getOffsetGap(String fieldName) { + return offsetGap; + } + }; final RandomIndexWriter writer = new RandomIndexWriter(random(), newDirectory(), a); final Document doc = new Document(); @@ -333,5 +327,4 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase { writer.close(); writer.w.getDirectory().close(); } - } diff --git a/lucene/test-framework/src/test/org/apache/lucene/analysis/TestMockCharFilter.java b/lucene/test-framework/src/test/org/apache/lucene/analysis/TestMockCharFilter.java index d8072b350a6..2b321e4ef89 100644 --- a/lucene/test-framework/src/test/org/apache/lucene/analysis/TestMockCharFilter.java +++ b/lucene/test-framework/src/test/org/apache/lucene/analysis/TestMockCharFilter.java @@ -20,38 +20,28 @@ import java.io.IOException; import java.io.Reader; public class TestMockCharFilter extends BaseTokenStreamTestCase { - + public void test() throws IOException { - Analyzer analyzer = new Analyzer() { + Analyzer analyzer = + new Analyzer() { - @Override - protected TokenStreamComponents createComponents(String fieldName) { - Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false); - return new TokenStreamComponents(tokenizer, tokenizer); - } + @Override + protected TokenStreamComponents createComponents(String fieldName) { + Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false); + return new TokenStreamComponents(tokenizer, tokenizer); + } - @Override - protected Reader initReader(String fieldName, Reader reader) { - return new MockCharFilter(reader, 7); - } - }; - - assertAnalyzesTo(analyzer, "ab", - new String[] { "aab" }, - new int[] { 0 }, - new int[] { 2 } - ); - - assertAnalyzesTo(analyzer, "aba", - new String[] { "aabaa" }, - new int[] { 0 }, - new int[] { 3 } - ); - - assertAnalyzesTo(analyzer, "abcdefga", - new String[] { "aabcdefgaa" }, - new int[] { 0 }, - new int[] { 8 } - ); + @Override + protected Reader initReader(String fieldName, Reader reader) { + return new MockCharFilter(reader, 7); + } + }; + + assertAnalyzesTo(analyzer, "ab", new String[] {"aab"}, new int[] {0}, new int[] {2}); + + assertAnalyzesTo(analyzer, "aba", new String[] {"aabaa"}, new int[] {0}, new int[] {3}); + + assertAnalyzesTo( + analyzer, "abcdefga", new String[] {"aabcdefgaa"}, new int[] {0}, new int[] {8}); } } diff --git a/lucene/test-framework/src/test/org/apache/lucene/analysis/TestMockSynonymFilter.java b/lucene/test-framework/src/test/org/apache/lucene/analysis/TestMockSynonymFilter.java index fb0d0657744..17f02e84914 100644 --- a/lucene/test-framework/src/test/org/apache/lucene/analysis/TestMockSynonymFilter.java +++ b/lucene/test-framework/src/test/org/apache/lucene/analysis/TestMockSynonymFilter.java @@ -23,129 +23,156 @@ public class TestMockSynonymFilter extends BaseTokenStreamTestCase { /** test the mock 
synonym filter */ public void test() throws IOException { - Analyzer analyzer = new Analyzer() { - @Override - protected TokenStreamComponents createComponents(String fieldName) { - MockTokenizer tokenizer = new MockTokenizer(); - return new TokenStreamComponents(tokenizer, new MockSynonymFilter(tokenizer)); - } - }; + Analyzer analyzer = + new Analyzer() { + @Override + protected TokenStreamComponents createComponents(String fieldName) { + MockTokenizer tokenizer = new MockTokenizer(); + return new TokenStreamComponents(tokenizer, new MockSynonymFilter(tokenizer)); + } + }; - assertAnalyzesTo(analyzer, "dogs", - new String[]{"dogs", "dog"}, - new int[]{0, 0}, // start offset - new int[]{4, 4}, // end offset + assertAnalyzesTo( + analyzer, + "dogs", + new String[] {"dogs", "dog"}, + new int[] {0, 0}, // start offset + new int[] {4, 4}, // end offset null, - new int[]{1, 0}, // position increment - new int[]{1, 1}, // position length + new int[] {1, 0}, // position increment + new int[] {1, 1}, // position length true); // check that offsets are correct - assertAnalyzesTo(analyzer, "small dogs", - new String[]{"small", "dogs", "dog"}, - new int[]{0, 6, 6}, // start offset - new int[]{5, 10, 10}, // end offset + assertAnalyzesTo( + analyzer, + "small dogs", + new String[] {"small", "dogs", "dog"}, + new int[] {0, 6, 6}, // start offset + new int[] {5, 10, 10}, // end offset null, - new int[]{1, 1, 0}, // position increment - new int[]{1, 1, 1}, // position length + new int[] {1, 1, 0}, // position increment + new int[] {1, 1, 1}, // position length true); // check that offsets are correct - assertAnalyzesTo(analyzer, "dogs running", - new String[]{"dogs", "dog", "running"}, - new int[]{0, 0, 5}, // start offset - new int[]{4, 4, 12}, // end offset + assertAnalyzesTo( + analyzer, + "dogs running", + new String[] {"dogs", "dog", "running"}, + new int[] {0, 0, 5}, // start offset + new int[] {4, 4, 12}, // end offset null, - new int[]{1, 0, 1}, // position increment - new int[]{1, 1, 1}, // position length + new int[] {1, 0, 1}, // position increment + new int[] {1, 1, 1}, // position length true); // check that offsets are correct - assertAnalyzesTo(analyzer, "small dogs running", - new String[]{"small", "dogs", "dog", "running"}, - new int[]{0, 6, 6, 11}, // start offset - new int[]{5, 10, 10, 18}, // end offset + assertAnalyzesTo( + analyzer, + "small dogs running", + new String[] {"small", "dogs", "dog", "running"}, + new int[] {0, 6, 6, 11}, // start offset + new int[] {5, 10, 10, 18}, // end offset null, - new int[]{1, 1, 0, 1}, // position increment - new int[]{1, 1, 1, 1}, // position length + new int[] {1, 1, 0, 1}, // position increment + new int[] {1, 1, 1, 1}, // position length true); // check that offsets are correct - assertAnalyzesTo(analyzer, "guinea", - new String[]{"guinea"}, - new int[]{0}, // start offset - new int[]{6}, // end offset + assertAnalyzesTo( + analyzer, + "guinea", + new String[] {"guinea"}, + new int[] {0}, // start offset + new int[] {6}, // end offset null, - new int[]{1}, // position increment - new int[]{1}, // position length + new int[] {1}, // position increment + new int[] {1}, // position length true); // check that offsets are correct - assertAnalyzesTo(analyzer, "pig", - new String[]{"pig"}, - new int[]{0}, // start offset - new int[]{3}, // end offset + assertAnalyzesTo( + analyzer, + "pig", + new String[] {"pig"}, + new int[] {0}, // start offset + new int[] {3}, // end offset null, - new int[]{1}, // position increment - new int[]{1}, // position 
length + new int[] {1}, // position increment + new int[] {1}, // position length true); // check that offsets are correct - assertAnalyzesTo(analyzer, "guinea pig", - new String[]{"guinea", "cavy", "pig"}, - new int[]{0, 0, 7}, // start offset - new int[]{6, 10, 10}, // end offset + assertAnalyzesTo( + analyzer, + "guinea pig", + new String[] {"guinea", "cavy", "pig"}, + new int[] {0, 0, 7}, // start offset + new int[] {6, 10, 10}, // end offset null, - new int[]{1, 0, 1}, // position increment - new int[]{1, 2, 1}, // position length + new int[] {1, 0, 1}, // position increment + new int[] {1, 2, 1}, // position length true); // check that offsets are correct - assertAnalyzesTo(analyzer, "guinea dogs", - new String[]{"guinea", "dogs", "dog"}, - new int[]{0, 7, 7}, // start offset - new int[]{6, 11, 11}, // end offset + assertAnalyzesTo( + analyzer, + "guinea dogs", + new String[] {"guinea", "dogs", "dog"}, + new int[] {0, 7, 7}, // start offset + new int[] {6, 11, 11}, // end offset null, - new int[]{1, 1, 0}, // position increment - new int[]{1, 1, 1}, // position length + new int[] {1, 1, 0}, // position increment + new int[] {1, 1, 1}, // position length true); // check that offsets are correct - assertAnalyzesTo(analyzer, "dogs guinea", - new String[]{"dogs", "dog", "guinea"}, - new int[]{0, 0, 5}, // start offset - new int[]{4, 4, 11}, // end offset + assertAnalyzesTo( + analyzer, + "dogs guinea", + new String[] {"dogs", "dog", "guinea"}, + new int[] {0, 0, 5}, // start offset + new int[] {4, 4, 11}, // end offset null, - new int[]{1, 0, 1}, // position increment - new int[]{1, 1, 1}, // position length + new int[] {1, 0, 1}, // position increment + new int[] {1, 1, 1}, // position length true); // check that offsets are correct - assertAnalyzesTo(analyzer, "dogs guinea pig", - new String[]{"dogs", "dog", "guinea", "cavy", "pig"}, - new int[]{0, 0, 5, 5, 12}, // start offset - new int[]{4, 4, 11, 15, 15}, // end offset + assertAnalyzesTo( + analyzer, + "dogs guinea pig", + new String[] {"dogs", "dog", "guinea", "cavy", "pig"}, + new int[] {0, 0, 5, 5, 12}, // start offset + new int[] {4, 4, 11, 15, 15}, // end offset null, - new int[]{1, 0, 1, 0, 1}, // position increment - new int[]{1, 1, 1, 2, 1}, // position length + new int[] {1, 0, 1, 0, 1}, // position increment + new int[] {1, 1, 1, 2, 1}, // position length true); // check that offsets are correct - assertAnalyzesTo(analyzer, "guinea pig dogs", - new String[]{"guinea", "cavy", "pig", "dogs", "dog"}, - new int[]{0, 0, 7, 11, 11}, // start offset - new int[]{6, 10, 10, 15, 15}, // end offset + assertAnalyzesTo( + analyzer, + "guinea pig dogs", + new String[] {"guinea", "cavy", "pig", "dogs", "dog"}, + new int[] {0, 0, 7, 11, 11}, // start offset + new int[] {6, 10, 10, 15, 15}, // end offset null, - new int[]{1, 0, 1, 1, 0}, // position increment - new int[]{1, 2, 1, 1, 1}, // position length + new int[] {1, 0, 1, 1, 0}, // position increment + new int[] {1, 2, 1, 1, 1}, // position length true); // check that offsets are correct - assertAnalyzesTo(analyzer, "small dogs and guinea pig running", - new String[]{"small", "dogs", "dog", "and", "guinea", "cavy", "pig", "running"}, - new int[]{0, 6, 6, 11, 15, 15, 22, 26}, // start offset - new int[]{5, 10, 10, 14, 21, 25, 25, 33}, // end offset + assertAnalyzesTo( + analyzer, + "small dogs and guinea pig running", + new String[] {"small", "dogs", "dog", "and", "guinea", "cavy", "pig", "running"}, + new int[] {0, 6, 6, 11, 15, 15, 22, 26}, // start offset + new int[] {5, 10, 10, 14, 
21, 25, 25, 33}, // end offset null, - new int[]{1, 1, 0, 1, 1, 0, 1, 1}, // position increment - new int[]{1, 1, 1, 1, 1, 2, 1, 1}, // position length + new int[] {1, 1, 0, 1, 1, 0, 1, 1}, // position increment + new int[] {1, 1, 1, 1, 1, 2, 1, 1}, // position length true); // check that offsets are correct - assertAnalyzesTo(analyzer, "small guinea pig and dogs running", - new String[]{"small", "guinea", "cavy", "pig", "and", "dogs", "dog", "running"}, - new int[]{0, 6, 6, 13, 17, 21, 21, 26}, // start offset - new int[]{5, 12, 16, 16, 20, 25, 25, 33}, // end offset + assertAnalyzesTo( + analyzer, + "small guinea pig and dogs running", + new String[] {"small", "guinea", "cavy", "pig", "and", "dogs", "dog", "running"}, + new int[] {0, 6, 6, 13, 17, 21, 21, 26}, // start offset + new int[] {5, 12, 16, 16, 20, 25, 25, 33}, // end offset null, - new int[]{1, 1, 0, 1, 1, 1, 0, 1}, // position increment - new int[]{1, 1, 2, 1, 1, 1, 1, 1}, // position length + new int[] {1, 1, 0, 1, 1, 1, 0, 1}, // position increment + new int[] {1, 1, 2, 1, 1, 1, 1, 1}, // position length true); // check that offsets are correct } } diff --git a/lucene/test-framework/src/test/org/apache/lucene/analysis/TestPosition.java b/lucene/test-framework/src/test/org/apache/lucene/analysis/TestPosition.java index 24087fccf9f..7726044040b 100644 --- a/lucene/test-framework/src/test/org/apache/lucene/analysis/TestPosition.java +++ b/lucene/test-framework/src/test/org/apache/lucene/analysis/TestPosition.java @@ -16,12 +16,9 @@ */ package org.apache.lucene.analysis; - import org.junit.Ignore; -/** - * Trivial position class. - */ +/** Trivial position class. */ @Ignore public class TestPosition extends LookaheadTokenFilter.Position { private String fact; diff --git a/lucene/test-framework/src/test/org/apache/lucene/analysis/TrivialLookaheadFilter.java b/lucene/test-framework/src/test/org/apache/lucene/analysis/TrivialLookaheadFilter.java index c370d89a759..d4f57107886 100644 --- a/lucene/test-framework/src/test/org/apache/lucene/analysis/TrivialLookaheadFilter.java +++ b/lucene/test-framework/src/test/org/apache/lucene/analysis/TrivialLookaheadFilter.java @@ -19,18 +19,16 @@ package org.apache.lucene.analysis; import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; -/** - * Simple example of a filter that seems to show some problems with LookaheadTokenFilter. - */ -final public class TrivialLookaheadFilter extends LookaheadTokenFilter { +/** Simple example of a filter that seems to show some problems with LookaheadTokenFilter. 
*/ +public final class TrivialLookaheadFilter extends LookaheadTokenFilter { private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); - private final PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class); + private final PositionIncrementAttribute posIncAtt = + addAttribute(PositionIncrementAttribute.class); private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class); private int insertUpto; @@ -46,8 +44,10 @@ final public class TrivialLookaheadFilter extends LookaheadTokenFilter seen = new ArrayList<>(); try (DirectoryStream stream = Files.newDirectoryStream(dir.resolve("foobar"))) { for (Path path : stream) { @@ -52,12 +54,12 @@ public class TestExtrasFS extends MockFileSystemTestCase { assertEquals(Arrays.asList("extra0"), seen); assertTrue(Files.isRegularFile(dir.resolve("foobar").resolve("extra0"))); } - + /** test where extra directory is created */ public void testExtraDirectory() throws Exception { Path dir = wrap(createTempDir(), true, true); Files.createDirectory(dir.resolve("foobar")); - + List seen = new ArrayList<>(); try (DirectoryStream stream = Files.newDirectoryStream(dir.resolve("foobar"))) { for (Path path : stream) { @@ -67,7 +69,7 @@ public class TestExtrasFS extends MockFileSystemTestCase { assertEquals(Arrays.asList("extra0"), seen); assertTrue(Files.isDirectory(dir.resolve("foobar").resolve("extra0"))); } - + /** test where no extras are created: its a no-op */ public void testNoExtras() throws Exception { Path dir = wrap(createTempDir(), false, false); diff --git a/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestHandleLimitFS.java b/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestHandleLimitFS.java index 4374fdafbd6..1c0434aff67 100644 --- a/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestHandleLimitFS.java +++ b/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestHandleLimitFS.java @@ -24,40 +24,41 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; import java.util.List; - import org.apache.lucene.util.IOUtils; /** Basic tests for HandleLimitFS */ public class TestHandleLimitFS extends MockFileSystemTestCase { - + @Override protected Path wrap(Path path) { return wrap(path, 4096); } - + Path wrap(Path path, int limit) { - FileSystem fs = new HandleLimitFS(path.getFileSystem(), limit).getFileSystem(URI.create("file:///")); + FileSystem fs = + new HandleLimitFS(path.getFileSystem(), limit).getFileSystem(URI.create("file:///")); return new FilterPath(path, fs); } - + /** set a limit at n files, then open more than that and ensure we hit exception */ public void testTooManyOpenFiles() throws IOException { int n = 60; Path dir = wrap(createTempDir(), n); - + // create open files to exact limit List toClose = new ArrayList<>(); for (int i = 0; i < n; i++) { Path p = Files.createTempFile(dir, null, null); toClose.add(Files.newOutputStream(p)); } - + // now exceed - IOException e = expectThrows(IOException.class, () -> - Files.newOutputStream(Files.createTempFile(dir, null, null))); + IOException e = + expectThrows( + IOException.class, () -> Files.newOutputStream(Files.createTempFile(dir, null, null))); assertTrue(e.getMessage().contains("Too many open files")); - + IOUtils.close(toClose); } } diff --git a/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestHandleTrackingFS.java b/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestHandleTrackingFS.java index e42dcf48f71..8bf78d79aed 100644 
--- a/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestHandleTrackingFS.java +++ b/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestHandleTrackingFS.java @@ -28,27 +28,30 @@ import java.nio.file.Path; /** Basic tests for HandleTrackingFS */ public class TestHandleTrackingFS extends MockFileSystemTestCase { - + @Override protected Path wrap(Path path) { FileSystem fs = new LeakFS(path.getFileSystem()).getFileSystem(URI.create("file:///")); return new FilterPath(path, fs); } - + /** Test that the delegate gets closed on exception in HandleTrackingFS#onClose */ public void testOnCloseThrowsException() throws IOException { - Path path = wrap(createTempDir()); // we are using LeakFS under the hood if we don't get closed the test fails - FileSystem fs = new HandleTrackingFS("test://", path.getFileSystem()) { - @Override - protected void onClose(Path path, Object stream) throws IOException { - throw new IOException("boom"); - } + Path path = + wrap(createTempDir()); // we are using LeakFS under the hood if we don't get closed the test + // fails + FileSystem fs = + new HandleTrackingFS("test://", path.getFileSystem()) { + @Override + protected void onClose(Path path, Object stream) throws IOException { + throw new IOException("boom"); + } - @Override - protected void onOpen(Path path, Object stream) throws IOException { - // - } - }.getFileSystem(URI.create("file:///")); + @Override + protected void onOpen(Path path, Object stream) throws IOException { + // + } + }.getFileSystem(URI.create("file:///")); Path dir = new FilterPath(path, fs); OutputStream file = Files.newOutputStream(dir.resolve("somefile")); @@ -66,20 +69,21 @@ public class TestHandleTrackingFS extends MockFileSystemTestCase { expectThrows(IOException.class, dirStream::close); } - /** Test that the delegate gets closed on exception in HandleTrackingFS#onOpen */ public void testOnOpenThrowsException() throws IOException { - Path path = wrap(createTempDir()); // we are using LeakFS under the hood if we don't get closed the test fails - FileSystem fs = new HandleTrackingFS("test://", path.getFileSystem()) { - @Override - protected void onClose(Path path, Object stream) throws IOException { - } + Path path = + wrap(createTempDir()); // we are using LeakFS under the hood if we don't get closed the test + // fails + FileSystem fs = + new HandleTrackingFS("test://", path.getFileSystem()) { + @Override + protected void onClose(Path path, Object stream) throws IOException {} - @Override - protected void onOpen(Path path, Object stream) throws IOException { - throw new IOException("boom"); - } - }.getFileSystem(URI.create("file:///")); + @Override + protected void onOpen(Path path, Object stream) throws IOException { + throw new IOException("boom"); + } + }.getFileSystem(URI.create("file:///")); Path dir = new FilterPath(path, fs); expectThrows(IOException.class, () -> Files.newOutputStream(dir.resolve("somefile"))); diff --git a/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestLeakFS.java b/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestLeakFS.java index d2a53ba78f0..30438890459 100644 --- a/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestLeakFS.java +++ b/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestLeakFS.java @@ -34,17 +34,17 @@ import org.apache.lucene.util.NamedThreadFactory; /** Basic tests for LeakFS */ public class TestLeakFS extends MockFileSystemTestCase { - + @Override protected Path wrap(Path path) { FileSystem fs = new 
LeakFS(path.getFileSystem()).getFileSystem(URI.create("file:///")); return new FilterPath(path, fs); } - + /** Test leaks via Files.newInputStream */ public void testLeakInputStream() throws IOException { Path dir = wrap(createTempDir()); - + OutputStream file = Files.newOutputStream(dir.resolve("stillopen")); file.write(5); file.close(); @@ -54,21 +54,21 @@ public class TestLeakFS extends MockFileSystemTestCase { assertTrue(e.getMessage().contains("file handle leaks")); leak.close(); } - + /** Test leaks via Files.newOutputStream */ public void testLeakOutputStream() throws IOException { Path dir = wrap(createTempDir()); - + OutputStream leak = Files.newOutputStream(dir.resolve("leaky")); Exception e = expectThrows(Exception.class, () -> dir.getFileSystem().close()); assertTrue(e.getMessage().contains("file handle leaks")); leak.close(); } - + /** Test leaks via FileChannel.open */ public void testLeakFileChannel() throws IOException { Path dir = wrap(createTempDir()); - + OutputStream file = Files.newOutputStream(dir.resolve("stillopen")); file.write(5); file.close(); @@ -78,20 +78,21 @@ public class TestLeakFS extends MockFileSystemTestCase { assertTrue(e.getMessage().contains("file handle leaks")); leak.close(); } - + /** Test leaks via AsynchronousFileChannel.open */ public void testLeakAsyncFileChannel() throws IOException, InterruptedException { Path dir = wrap(createTempDir()); - + OutputStream file = Files.newOutputStream(dir.resolve("stillopen")); file.write(5); file.close(); - ExecutorService executorService = Executors.newFixedThreadPool(1, - new NamedThreadFactory("async-io")); + ExecutorService executorService = + Executors.newFixedThreadPool(1, new NamedThreadFactory("async-io")); try { - AsynchronousFileChannel leak = AsynchronousFileChannel.open(dir.resolve("stillopen"), - Collections.emptySet(), executorService); + AsynchronousFileChannel leak = + AsynchronousFileChannel.open( + dir.resolve("stillopen"), Collections.emptySet(), executorService); Exception e = expectThrows(Exception.class, () -> dir.getFileSystem().close()); assertTrue(e.getMessage().contains("file handle leaks")); leak.close(); @@ -100,11 +101,11 @@ public class TestLeakFS extends MockFileSystemTestCase { executorService.awaitTermination(5, TimeUnit.SECONDS); } } - + /** Test leaks via Files.newByteChannel */ public void testLeakByteChannel() throws IOException { Path dir = wrap(createTempDir()); - + OutputStream file = Files.newOutputStream(dir.resolve("stillopen")); file.write(5); file.close(); diff --git a/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestShuffleFS.java b/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestShuffleFS.java index 5327d574224..6fa80c9a640 100644 --- a/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestShuffleFS.java +++ b/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestShuffleFS.java @@ -28,38 +28,38 @@ import java.util.List; /** Basic tests for ShuffleFS */ public class TestShuffleFS extends MockFileSystemTestCase { - + @Override protected Path wrap(Path path) { return wrap(path, random().nextLong()); } - + Path wrap(Path path, long seed) { FileSystem fs = new ShuffleFS(path.getFileSystem(), seed).getFileSystem(URI.create("file:///")); return new FilterPath(path, fs); } - + /** test that we return directory listings correctly */ public void testShuffleWorks() throws IOException { Path dir = wrap(createTempDir()); - + Files.createFile(dir.resolve("file1")); Files.createFile(dir.resolve("file2")); 
Files.createFile(dir.resolve("file3")); - + List seen = new ArrayList<>(); try (DirectoryStream stream = Files.newDirectoryStream(dir)) { for (Path path : stream) { seen.add(path); } } - + assertEquals(3, seen.size()); assertTrue(seen.contains(dir.resolve("file1"))); assertTrue(seen.contains(dir.resolve("file2"))); assertTrue(seen.contains(dir.resolve("file3"))); } - + /** test that we change order of directory listings */ public void testActuallyShuffles() throws IOException { Path dir = createTempDir(); @@ -67,25 +67,25 @@ public class TestShuffleFS extends MockFileSystemTestCase { Files.createFile(dir.resolve("file" + i)); } List expected = new ArrayList<>(); - + // get the raw listing from the actual filesystem try (DirectoryStream stream = Files.newDirectoryStream(dir)) { for (Path path : stream) { expected.add(path.getFileName().toString()); } } - + // shuffle until the order changes. for (int i = 0; i < 10000; i++) { Path wrapped = wrap(dir, random().nextLong()); - + List seen = new ArrayList<>(); try (DirectoryStream stream = Files.newDirectoryStream(wrapped)) { for (Path path : stream) { seen.add(path.getFileName().toString()); } } - + // we should always see the same files. assertEquals(new HashSet<>(expected), new HashSet<>(seen)); if (!expected.equals(seen)) { @@ -94,17 +94,17 @@ public class TestShuffleFS extends MockFileSystemTestCase { } fail("ordering never changed"); } - - /** - * shuffle underlying contents randomly with different seeds, - * and ensure shuffling that again with the same seed is consistent. + + /** + * shuffle underlying contents randomly with different seeds, and ensure shuffling that again with + * the same seed is consistent. */ public void testConsistentOrder() throws IOException { Path raw = createTempDir(); for (int i = 0; i < 100; i++) { Files.createFile(raw.resolve("file" + i)); } - + long seed = random().nextLong(); Path dirExpected = wrap(raw, seed); @@ -115,55 +115,52 @@ public class TestShuffleFS extends MockFileSystemTestCase { expected.add(path.getFileName().toString()); } } - + // shuffle wrapping a different scrambled ordering each time, it should always be the same. 
for (int i = 0; i < 100; i++) { Path scrambled = wrap(raw, random().nextLong()); Path ordered = wrap(scrambled, seed); - + List seen = new ArrayList<>(); try (DirectoryStream stream = Files.newDirectoryStream(ordered)) { for (Path path : stream) { seen.add(path.getFileName().toString()); } } - + // we should always see the same files in the same order assertEquals(expected, seen); } } - - /** - * test that we give a consistent order - * for the same file names within different directories - */ + + /** test that we give a consistent order for the same file names within different directories */ public void testFileNameOnly() throws IOException { Path dir = wrap(createTempDir()); - + Files.createFile(dir.resolve("file1")); Files.createFile(dir.resolve("file2")); Files.createFile(dir.resolve("file3")); - + List expected = new ArrayList<>(); try (DirectoryStream stream = Files.newDirectoryStream(dir)) { for (Path path : stream) { expected.add(path.getFileName().toString()); } } - + Path subdir = dir.resolve("subdir"); Files.createDirectory(subdir); Files.createFile(subdir.resolve("file3")); Files.createFile(subdir.resolve("file2")); Files.createFile(subdir.resolve("file1")); - + List actual = new ArrayList<>(); try (DirectoryStream stream = Files.newDirectoryStream(subdir)) { for (Path path : stream) { actual.add(path.getFileName().toString()); } } - + assertEquals(expected, actual); } } diff --git a/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestVerboseFS.java b/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestVerboseFS.java index fe0c501815f..2278c31182b 100644 --- a/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestVerboseFS.java +++ b/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestVerboseFS.java @@ -32,7 +32,6 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; - import org.apache.lucene.util.InfoStream; import org.apache.lucene.util.NamedThreadFactory; @@ -45,7 +44,8 @@ public class TestVerboseFS extends MockFileSystemTestCase { } Path wrap(Path path, InfoStream stream) { - FileSystem fs = new VerboseFS(path.getFileSystem(), stream).getFileSystem(URI.create("file:///")); + FileSystem fs = + new VerboseFS(path.getFileSystem(), stream).getFileSystem(URI.create("file:///")); return new FilterPath(path, fs); } @@ -121,7 +121,8 @@ public class TestVerboseFS extends MockFileSystemTestCase { Files.copy(dir.resolve("foobar"), dir.resolve("baz")); assertTrue(stream.sawMessage()); - expectThrows(IOException.class, () -> Files.copy(dir.resolve("nonexistent"), dir.resolve("something"))); + expectThrows( + IOException.class, () -> Files.copy(dir.resolve("nonexistent"), dir.resolve("something"))); } /** Test move */ @@ -132,7 +133,8 @@ public class TestVerboseFS extends MockFileSystemTestCase { Files.move(dir.resolve("foobar"), dir.resolve("baz")); assertTrue(stream.sawMessage()); - expectThrows(IOException.class, () -> Files.move(dir.resolve("nonexistent"), dir.resolve("something"))); + expectThrows( + IOException.class, () -> Files.move(dir.resolve("nonexistent"), dir.resolve("something"))); } /** Test newOutputStream */ @@ -143,19 +145,32 @@ public class TestVerboseFS extends MockFileSystemTestCase { assertTrue(stream.sawMessage()); file.close(); - expectThrows(IOException.class, () -> Files.newOutputStream(dir.resolve("output"), StandardOpenOption.CREATE_NEW)); + expectThrows( + IOException.class, + () -> 
Files.newOutputStream(dir.resolve("output"), StandardOpenOption.CREATE_NEW)); } /** Test FileChannel.open */ public void testFileChannel() throws IOException { InfoStreamListener stream = new InfoStreamListener("newFileChannel"); Path dir = wrap(createTempDir(), stream); - FileChannel channel = FileChannel.open(dir.resolve("foobar"), StandardOpenOption.CREATE_NEW, StandardOpenOption.READ, StandardOpenOption.WRITE); + FileChannel channel = + FileChannel.open( + dir.resolve("foobar"), + StandardOpenOption.CREATE_NEW, + StandardOpenOption.READ, + StandardOpenOption.WRITE); assertTrue(stream.sawMessage()); channel.close(); - expectThrows(IOException.class, () -> FileChannel.open(dir.resolve("foobar"), - StandardOpenOption.CREATE_NEW, StandardOpenOption.READ, StandardOpenOption.WRITE)); + expectThrows( + IOException.class, + () -> + FileChannel.open( + dir.resolve("foobar"), + StandardOpenOption.CREATE_NEW, + StandardOpenOption.READ, + StandardOpenOption.WRITE)); } /** Test AsynchronousFileChannel.open */ @@ -163,21 +178,22 @@ public class TestVerboseFS extends MockFileSystemTestCase { InfoStreamListener stream = new InfoStreamListener("newAsynchronousFileChannel"); Path dir = wrap(createTempDir(), stream); - ExecutorService executorService = Executors.newFixedThreadPool(1, - new NamedThreadFactory("async-io")); + ExecutorService executorService = + Executors.newFixedThreadPool(1, new NamedThreadFactory("async-io")); try { - Set opts = Set - .of(StandardOpenOption.CREATE_NEW, StandardOpenOption.READ, - StandardOpenOption.WRITE); - AsynchronousFileChannel channel = AsynchronousFileChannel - .open(dir.resolve("foobar"), opts, executorService); + Set opts = + Set.of(StandardOpenOption.CREATE_NEW, StandardOpenOption.READ, StandardOpenOption.WRITE); + AsynchronousFileChannel channel = + AsynchronousFileChannel.open(dir.resolve("foobar"), opts, executorService); assertTrue(stream.sawMessage()); channel.close(); - expectThrows(IOException.class, () -> AsynchronousFileChannel.open(dir.resolve("foobar"), - opts, executorService)); + expectThrows( + IOException.class, + () -> AsynchronousFileChannel.open(dir.resolve("foobar"), opts, executorService)); - expectThrows(NoSuchFileException.class, + expectThrows( + NoSuchFileException.class, () -> AsynchronousFileChannel.open(dir.resolve("doesNotExist.rip"))); } finally { executorService.shutdown(); @@ -189,18 +205,30 @@ public class TestVerboseFS extends MockFileSystemTestCase { public void testByteChannel() throws IOException { InfoStreamListener stream = new InfoStreamListener("newByteChannel"); Path dir = wrap(createTempDir(), stream); - SeekableByteChannel channel = Files.newByteChannel(dir.resolve("foobar"), StandardOpenOption.CREATE_NEW, StandardOpenOption.READ, StandardOpenOption.WRITE); + SeekableByteChannel channel = + Files.newByteChannel( + dir.resolve("foobar"), + StandardOpenOption.CREATE_NEW, + StandardOpenOption.READ, + StandardOpenOption.WRITE); assertTrue(stream.sawMessage()); channel.close(); - expectThrows(IOException.class, () -> Files.newByteChannel(dir.resolve("foobar"), - StandardOpenOption.CREATE_NEW, StandardOpenOption.READ, StandardOpenOption.WRITE)); + expectThrows( + IOException.class, + () -> + Files.newByteChannel( + dir.resolve("foobar"), + StandardOpenOption.CREATE_NEW, + StandardOpenOption.READ, + StandardOpenOption.WRITE)); } /** Test that verbose does not corrupt file not found exceptions */ public void testVerboseFSNoSuchFileException() { Path dir = wrap(createTempDir()); - expectThrows(NoSuchFileException.class, () -> 
FileChannel.open(dir.resolve("doesNotExist.rip"))); + expectThrows( + NoSuchFileException.class, () -> FileChannel.open(dir.resolve("doesNotExist.rip"))); expectThrows(NoSuchFileException.class, () -> Files.newByteChannel(dir.resolve("stillopen"))); } } diff --git a/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestVirusCheckingFS.java b/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestVirusCheckingFS.java index 91e9aa86d1e..0f9dff7cf74 100644 --- a/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestVirusCheckingFS.java +++ b/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestVirusCheckingFS.java @@ -27,13 +27,15 @@ import java.nio.file.Path; /** Basic tests for VirusCheckingFS */ public class TestVirusCheckingFS extends MockFileSystemTestCase { - + @Override protected Path wrap(Path path) { - FileSystem fs = new VirusCheckingFS(path.getFileSystem(), random().nextLong()).getFileSystem(URI.create("file:///")); + FileSystem fs = + new VirusCheckingFS(path.getFileSystem(), random().nextLong()) + .getFileSystem(URI.create("file:///")); return new FilterPath(path, fs); } - + /** Test Files.delete fails if a file has an open inputstream against it */ public void testDeleteSometimesFails() throws IOException { Path dir = wrap(createTempDir()); @@ -52,7 +54,8 @@ public class TestVirusCheckingFS extends MockFileSystemTestCase { Files.delete(path); } catch (AccessDeniedException ade) { // expected (sometimes) - assertTrue(ade.getMessage().contains("VirusCheckingFS is randomly refusing to delete file ")); + assertTrue( + ade.getMessage().contains("VirusCheckingFS is randomly refusing to delete file ")); break; } diff --git a/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestWindowsFS.java b/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestWindowsFS.java index bb33b8506c3..665b1be2905 100644 --- a/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestWindowsFS.java +++ b/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestWindowsFS.java @@ -28,12 +28,11 @@ import java.nio.file.Path; import java.nio.file.StandardCopyOption; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.atomic.AtomicBoolean; - import org.apache.lucene.util.Constants; /** Basic tests for WindowsFS */ public class TestWindowsFS extends MockFileSystemTestCase { - + @Override public void setUp() throws Exception { super.setUp(); @@ -46,11 +45,11 @@ public class TestWindowsFS extends MockFileSystemTestCase { FileSystem fs = new WindowsFS(path.getFileSystem()).getFileSystem(URI.create("file:///")); return new FilterPath(path, fs); } - + /** Test Files.delete fails if a file has an open inputstream against it */ public void testDeleteOpenFile() throws IOException { Path dir = wrap(createTempDir()); - + OutputStream file = Files.newOutputStream(dir.resolve("stillopen")); file.write(5); file.close(); @@ -60,33 +59,40 @@ public class TestWindowsFS extends MockFileSystemTestCase { assertTrue(e.getMessage().contains("access denied")); is.close(); } - + /** Test Files.deleteIfExists fails if a file has an open inputstream against it */ public void testDeleteIfExistsOpenFile() throws IOException { Path dir = wrap(createTempDir()); - + OutputStream file = Files.newOutputStream(dir.resolve("stillopen")); file.write(5); file.close(); InputStream is = Files.newInputStream(dir.resolve("stillopen")); - IOException e = expectThrows(IOException.class, () -> Files.deleteIfExists(dir.resolve("stillopen"))); + IOException e = + 
expectThrows(IOException.class, () -> Files.deleteIfExists(dir.resolve("stillopen"))); assertTrue(e.getMessage().contains("access denied")); is.close(); } - + /** Test Files.rename fails if a file has an open inputstream against it */ // TODO: what does windows do here? public void testRenameOpenFile() throws IOException { Path dir = wrap(createTempDir()); - + OutputStream file = Files.newOutputStream(dir.resolve("stillopen")); file.write(5); file.close(); InputStream is = Files.newInputStream(dir.resolve("stillopen")); - IOException e = expectThrows(IOException.class, () -> - Files.move(dir.resolve("stillopen"), dir.resolve("target"), StandardCopyOption.ATOMIC_MOVE)); + IOException e = + expectThrows( + IOException.class, + () -> + Files.move( + dir.resolve("stillopen"), + dir.resolve("target"), + StandardCopyOption.ATOMIC_MOVE)); assertTrue(e.getMessage().contains("access denied")); is.close(); } @@ -96,31 +102,32 @@ public class TestWindowsFS extends MockFileSystemTestCase { final Path file = dir.resolve("thefile"); final CyclicBarrier barrier = new CyclicBarrier(2); final AtomicBoolean stopped = new AtomicBoolean(false); - Thread t = new Thread() { - @Override - public void run() { - try { - barrier.await(); - } catch (Exception ex) { - throw new RuntimeException(ex); - } - while (stopped.get() == false) { - try { - if (random().nextBoolean()) { - Files.delete(file); - } else if (random().nextBoolean()) { - Files.deleteIfExists(file); - } else { - Path target = file.resolveSibling("other"); - Files.move(file, target); - Files.delete(target); + Thread t = + new Thread() { + @Override + public void run() { + try { + barrier.await(); + } catch (Exception ex) { + throw new RuntimeException(ex); + } + while (stopped.get() == false) { + try { + if (random().nextBoolean()) { + Files.delete(file); + } else if (random().nextBoolean()) { + Files.deleteIfExists(file); + } else { + Path target = file.resolveSibling("other"); + Files.move(file, target); + Files.delete(target); + } + } catch (IOException ex) { + // continue + } } - } catch (IOException ex) { - // continue } - } - } - }; + }; t.start(); barrier.await(); try { @@ -132,10 +139,16 @@ public class TestWindowsFS extends MockFileSystemTestCase { stream.write(0); // just create } catch (FileNotFoundException | NoSuchFileException ex) { - assertEquals("File handle leaked - file is closed but still registered", 0, ((WindowsFS) dir.getFileSystem().provider()).openFiles.size()); + assertEquals( + "File handle leaked - file is closed but still registered", + 0, + ((WindowsFS) dir.getFileSystem().provider()).openFiles.size()); assertFalse("caught FNF on close", opened); } - assertEquals("File handle leaked - file is closed but still registered", 0, ((WindowsFS) dir.getFileSystem().provider()).openFiles.size()); + assertEquals( + "File handle leaked - file is closed but still registered", + 0, + ((WindowsFS) dir.getFileSystem().provider()).openFiles.size()); Files.deleteIfExists(file); } } finally { @@ -159,7 +172,8 @@ public class TestWindowsFS extends MockFileSystemTestCase { file.write(2); file.close(); - Files.move(dir.resolve("otherFile"), dir.resolve("target"), StandardCopyOption.REPLACE_EXISTING); + Files.move( + dir.resolve("otherFile"), dir.resolve("target"), StandardCopyOption.REPLACE_EXISTING); assertTrue(Files.exists(dir.resolve("target"))); assertFalse(Files.exists(dir.resolve("otherFile"))); try (InputStream stream = Files.newInputStream(dir.resolve("target"))) { diff --git 
a/lucene/test-framework/src/test/org/apache/lucene/search/TestBaseExplanationTestCase.java b/lucene/test-framework/src/test/org/apache/lucene/search/TestBaseExplanationTestCase.java index 1889d5e1884..e70648bd4f6 100644 --- a/lucene/test-framework/src/test/org/apache/lucene/search/TestBaseExplanationTestCase.java +++ b/lucene/test-framework/src/test/org/apache/lucene/search/TestBaseExplanationTestCase.java @@ -20,74 +20,90 @@ import java.io.IOException; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.Term; - /** - * Tests that the {@link BaseExplanationTestCase} helper code, as well as - * {@link CheckHits#checkNoMatchExplanations} are checking what they are suppose to. + * Tests that the {@link BaseExplanationTestCase} helper code, as well as {@link + * CheckHits#checkNoMatchExplanations} are checking what they are supposed to. */ public class TestBaseExplanationTestCase extends BaseExplanationTestCase { public void testQueryNoMatchWhenExpected() throws Exception { - expectThrows(AssertionError.class, () -> { - qtest(new TermQuery(new Term(FIELD, "BOGUS")), new int[] { 3 /* none */ }); - }); + expectThrows( + AssertionError.class, + () -> { + qtest(new TermQuery(new Term(FIELD, "BOGUS")), new int[] {3 /* none */}); + }); } + public void testQueryMatchWhenNotExpected() throws Exception { - expectThrows(AssertionError.class, () -> { - qtest(new TermQuery(new Term(FIELD, "w1")), new int[] { 0, 1 /*, 2, 3 */ }); - }); + expectThrows( + AssertionError.class, + () -> { + qtest(new TermQuery(new Term(FIELD, "w1")), new int[] {0, 1 /*, 2, 3 */}); + }); } public void testIncorrectExplainScores() throws Exception { // sanity check what a real TermQuery matches - qtest(new TermQuery(new Term(FIELD, "zz")), new int[] { 1, 3 }); + qtest(new TermQuery(new Term(FIELD, "zz")), new int[] {1, 3}); // ensure when the Explanations are broken, we get an error about those matches - expectThrows(AssertionError.class, () -> { - qtest(new BrokenExplainTermQuery(new Term(FIELD, "zz"), false, true), new int[] { 1, 3 }); - - }); + expectThrows( + AssertionError.class, + () -> { + qtest(new BrokenExplainTermQuery(new Term(FIELD, "zz"), false, true), new int[] {1, 3}); + }); } public void testIncorrectExplainMatches() throws Exception { // sanity check what a real TermQuery matches - qtest(new TermQuery(new Term(FIELD, "zz")), new int[] { 1, 3 }); - + qtest(new TermQuery(new Term(FIELD, "zz")), new int[] {1, 3}); + // ensure when the Explanations are broken, we get an error about the non matches - expectThrows(AssertionError.class, () -> { - CheckHits.checkNoMatchExplanations(new BrokenExplainTermQuery(new Term(FIELD, "zz"), true, false), - FIELD, searcher, new int[] { 1, 3 }); - }); + expectThrows( + AssertionError.class, + () -> { + CheckHits.checkNoMatchExplanations( + new BrokenExplainTermQuery(new Term(FIELD, "zz"), true, false), + FIELD, + searcher, + new int[] {1, 3}); + }); } - public static final class BrokenExplainTermQuery extends TermQuery { public final boolean toggleExplainMatch; public final boolean breakExplainScores; + public BrokenExplainTermQuery(Term t, boolean toggleExplainMatch, boolean breakExplainScores) { super(t); this.toggleExplainMatch = toggleExplainMatch; this.breakExplainScores = breakExplainScores; } + @Override - public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { - return new BrokenExplainWeight(this, super.createWeight(searcher,scoreMode, boost)); + public Weight createWeight(IndexSearcher searcher, 
ScoreMode scoreMode, float boost) + throws IOException { + return new BrokenExplainWeight(this, super.createWeight(searcher, scoreMode, boost)); } } - + public static final class BrokenExplainWeight extends FilterWeight { public BrokenExplainWeight(BrokenExplainTermQuery q, Weight in) { super(q, in); } + public BulkScorer bulkScorer(LeafReaderContext context) throws IOException { return in.bulkScorer(context); } + public Explanation explain(LeafReaderContext context, int doc) throws IOException { BrokenExplainTermQuery q = (BrokenExplainTermQuery) this.getQuery(); Explanation result = in.explain(context, doc); if (result.isMatch()) { if (q.breakExplainScores) { - result = Explanation.match(-1F * result.getValue().doubleValue(), "Broken Explanation Score", result); + result = + Explanation.match( + -1F * result.getValue().doubleValue(), "Broken Explanation Score", result); } if (q.toggleExplainMatch) { result = Explanation.noMatch("Broken Explanation Matching", result); diff --git a/lucene/test-framework/src/test/org/apache/lucene/store/TestMockDirectoryWrapper.java b/lucene/test-framework/src/test/org/apache/lucene/store/TestMockDirectoryWrapper.java index a41ff69553c..75196ae3539 100644 --- a/lucene/test-framework/src/test/org/apache/lucene/store/TestMockDirectoryWrapper.java +++ b/lucene/test-framework/src/test/org/apache/lucene/store/TestMockDirectoryWrapper.java @@ -21,13 +21,13 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.nio.file.NoSuchFileException; import java.nio.file.Path; - import org.apache.lucene.document.Document; import org.apache.lucene.index.RandomIndexWriter; -// See: https://issues.apache.org/jira/browse/SOLR-12028 Tests cannot remove files on Windows machines occasionally +// See: https://issues.apache.org/jira/browse/SOLR-12028 Tests cannot remove files on Windows +// machines occasionally public class TestMockDirectoryWrapper extends BaseDirectoryTestCase { - + @Override protected Directory getDirectory(Path path) throws IOException { final MockDirectoryWrapper dir; @@ -38,7 +38,7 @@ public class TestMockDirectoryWrapper extends BaseDirectoryTestCase { } return dir; } - + // we wrap the directory in slow stuff, so only run nightly @Override @Nightly @@ -50,7 +50,7 @@ public class TestMockDirectoryWrapper extends BaseDirectoryTestCase { // test writeBytes MockDirectoryWrapper dir = newMockDirectory(); dir.setMaxSizeInBytes(3); - final byte[] bytes = new byte[] { 1, 2}; + final byte[] bytes = new byte[] {1, 2}; IndexOutput out = dir.createOutput("foo", IOContext.DEFAULT); out.writeBytes(bytes, bytes.length); // first write should succeed // close() to ensure the written bytes are not buffered and counted @@ -61,7 +61,7 @@ public class TestMockDirectoryWrapper extends BaseDirectoryTestCase { expectThrows(IOException.class, () -> out2.writeBytes(bytes, bytes.length)); out2.close(); dir.close(); - + // test copyBytes dir = newMockDirectory(); dir.setMaxSizeInBytes(3); @@ -72,11 +72,12 @@ public class TestMockDirectoryWrapper extends BaseDirectoryTestCase { out.close(); IndexOutput out3 = dir.createOutput("bar", IOContext.DEFAULT); - expectThrows(IOException.class, () -> out3.copyBytes(new ByteArrayDataInput(bytes), bytes.length)); + expectThrows( + IOException.class, () -> out3.copyBytes(new ByteArrayDataInput(bytes), bytes.length)); out3.close(); dir.close(); } - + public void testMDWinsideOfMDW() throws Exception { // add MDW inside another MDW Directory dir = new MockDirectoryWrapper(random(), newMockDirectory()); @@ -96,23 +97,22 @@ 
public class TestMockDirectoryWrapper extends BaseDirectoryTestCase { } @Override - public void close() { - } + public void close() {} } public void testCorruptOnCloseIsWorkingFSDir() throws Exception { Path path = createTempDir(); - try(Directory dir = newFSDirectory(path)) { + try (Directory dir = newFSDirectory(path)) { testCorruptOnCloseIsWorking(dir); } } public void testCorruptOnCloseIsWorkingOnByteBuffersDirectory() throws Exception { - try(Directory dir = new ByteBuffersDirectory()) { + try (Directory dir = new ByteBuffersDirectory()) { testCorruptOnCloseIsWorking(dir); } } - + private void testCorruptOnCloseIsWorking(Directory dir) throws Exception { dir = new PreventCloseDirectoryWrapper(dir); @@ -126,10 +126,10 @@ public class TestMockDirectoryWrapper extends BaseDirectoryTestCase { RandomIndexWriter iw = new RandomIndexWriter(random(), dir); iw.addDocument(new Document()); iw.close(); - + // not sync'd! try (IndexOutput out = wrapped.createOutput("foo", IOContext.DEFAULT)) { - for(int i=0;i<100;i++) { + for (int i = 0; i < 100; i++) { out.writeInt(i); } } @@ -146,7 +146,7 @@ public class TestMockDirectoryWrapper extends BaseDirectoryTestCase { changed = true; } if (in != null) { - for(int i=0;i<100;i++) { + for (int i = 0; i < 100; i++) { int x; try { x = in.readInt(); @@ -163,7 +163,8 @@ public class TestMockDirectoryWrapper extends BaseDirectoryTestCase { in.close(); } - assertTrue("MockDirectoryWrapper on dir=" + dir + " failed to corrupt an unsync'd file", changed); + assertTrue( + "MockDirectoryWrapper on dir=" + dir + " failed to corrupt an unsync'd file", changed); } public void testAbuseClosedIndexInput() throws Exception { diff --git a/lucene/test-framework/src/test/org/apache/lucene/util/SorePoint.java b/lucene/test-framework/src/test/org/apache/lucene/util/SorePoint.java index 41f9715dac5..8be80295ea6 100644 --- a/lucene/test-framework/src/test/org/apache/lucene/util/SorePoint.java +++ b/lucene/test-framework/src/test/org/apache/lucene/util/SorePoint.java @@ -16,10 +16,7 @@ */ package org.apache.lucene.util; -/** - * A pointcut-like definition where we should trigger - * an assumption or error. - */ +/** A pointcut-like definition where we should trigger an assumption or error. */ public enum SorePoint { // STATIC_INITIALIZER, // I assume this will result in JUnit failure to load a suite. 
BEFORE_CLASS, @@ -29,4 +26,4 @@ public enum SorePoint { TEST, AFTER, AFTER_CLASS -} \ No newline at end of file +} diff --git a/lucene/test-framework/src/test/org/apache/lucene/util/TestBeforeAfterOverrides.java b/lucene/test-framework/src/test/org/apache/lucene/util/TestBeforeAfterOverrides.java index c1b892da037..5ccf6db32c1 100644 --- a/lucene/test-framework/src/test/org/apache/lucene/util/TestBeforeAfterOverrides.java +++ b/lucene/test-framework/src/test/org/apache/lucene/util/TestBeforeAfterOverrides.java @@ -31,10 +31,12 @@ public class TestBeforeAfterOverrides extends WithNestedTests { public static class Before1 extends WithNestedTests.AbstractNestedTest { @Before public void before() {} - + public void testEmpty() {} } + public static class Before2 extends Before1 {} + public static class Before3 extends Before2 { @Override @Before @@ -44,10 +46,12 @@ public class TestBeforeAfterOverrides extends WithNestedTests { public static class After1 extends WithNestedTests.AbstractNestedTest { @After public void after() {} - + public void testEmpty() {} } + public static class After2 extends Before1 {} + public static class After3 extends Before2 { @After public void after() {} @@ -57,13 +61,15 @@ public class TestBeforeAfterOverrides extends WithNestedTests { public void testBefore() { Result result = JUnitCore.runClasses(Before3.class); Assert.assertEquals(1, result.getFailureCount()); - Assert.assertTrue(result.getFailures().get(0).getTrace().contains("There are overridden methods")); + Assert.assertTrue( + result.getFailures().get(0).getTrace().contains("There are overridden methods")); } - + @Test public void testAfter() { Result result = JUnitCore.runClasses(Before3.class); Assert.assertEquals(1, result.getFailureCount()); - Assert.assertTrue(result.getFailures().get(0).getTrace().contains("There are overridden methods")); - } + Assert.assertTrue( + result.getFailures().get(0).getTrace().contains("There are overridden methods")); + } } diff --git a/lucene/test-framework/src/test/org/apache/lucene/util/TestCodecReported.java b/lucene/test-framework/src/test/org/apache/lucene/util/TestCodecReported.java index a1f8f422813..910d939c62e 100644 --- a/lucene/test-framework/src/test/org/apache/lucene/util/TestCodecReported.java +++ b/lucene/test-framework/src/test/org/apache/lucene/util/TestCodecReported.java @@ -26,7 +26,7 @@ public class TestCodecReported extends WithNestedTests { public TestCodecReported() { super(true); } - + public static class Nested1 extends WithNestedTests.AbstractNestedTest { public static String codecName; @@ -40,7 +40,6 @@ public class TestCodecReported extends WithNestedTests { public void testCorrectCodecReported() { Result r = JUnitCore.runClasses(Nested1.class); Assert.assertEquals(1, r.getFailureCount()); - Assert.assertTrue(super.getSysErr(), - super.getSysErr().contains("codec=" + Nested1.codecName)); + Assert.assertTrue(super.getSysErr(), super.getSysErr().contains("codec=" + Nested1.codecName)); } } diff --git a/lucene/test-framework/src/test/org/apache/lucene/util/TestExceptionInBeforeClassHooks.java b/lucene/test-framework/src/test/org/apache/lucene/util/TestExceptionInBeforeClassHooks.java index 16c64cf9de7..470262a098c 100644 --- a/lucene/test-framework/src/test/org/apache/lucene/util/TestExceptionInBeforeClassHooks.java +++ b/lucene/test-framework/src/test/org/apache/lucene/util/TestExceptionInBeforeClassHooks.java @@ -19,7 +19,6 @@ package org.apache.lucene.util; import java.util.*; import java.util.regex.Matcher; import java.util.regex.Pattern; - 
import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; @@ -36,12 +35,13 @@ public class TestExceptionInBeforeClassHooks extends WithNestedTests { public static class Nested1 extends WithNestedTests.AbstractNestedTest { @BeforeClass public static void beforeClass() throws Exception { - Thread t = new Thread() { - @Override - public void run() { - throw new RuntimeException("foobar"); - } - }; + Thread t = + new Thread() { + @Override + public void run() { + throw new RuntimeException("foobar"); + } + }; t.start(); t.join(); } @@ -51,54 +51,57 @@ public class TestExceptionInBeforeClassHooks extends WithNestedTests { public static class Nested2 extends WithNestedTests.AbstractNestedTest { public void test1() throws Exception { - Thread t = new Thread() { - @Override - public void run() { - throw new RuntimeException("foobar1"); - } - }; + Thread t = + new Thread() { + @Override + public void run() { + throw new RuntimeException("foobar1"); + } + }; t.start(); t.join(); } public void test2() throws Exception { - Thread t = new Thread() { - @Override - public void run() { - throw new RuntimeException("foobar2"); - } - }; + Thread t = + new Thread() { + @Override + public void run() { + throw new RuntimeException("foobar2"); + } + }; t.start(); t.join(); } - + public void test3() throws Exception { - Thread t = new Thread() { - @Override - public void run() { - throw new RuntimeException("foobar3"); - } - }; + Thread t = + new Thread() { + @Override + public void run() { + throw new RuntimeException("foobar3"); + } + }; t.start(); t.join(); - } + } } public static class Nested3 extends WithNestedTests.AbstractNestedTest { @Before public void runBeforeTest() throws Exception { - Thread t = new Thread() { - @Override - public void run() { - throw new RuntimeException("foobar"); - } - }; + Thread t = + new Thread() { + @Override + public void run() { + throw new RuntimeException("foobar"); + } + }; t.start(); t.join(); } - public void test1() throws Exception { - } + public void test1() throws Exception {} } @Test @@ -114,7 +117,7 @@ public class TestExceptionInBeforeClassHooks extends WithNestedTests { Result runClasses = JUnitCore.runClasses(Nested2.class); assertFailureCount(3, runClasses); Assert.assertEquals(3, runClasses.getRunCount()); - + ArrayList foobars = new ArrayList<>(); for (Failure f : runClasses.getFailures()) { Matcher m = Pattern.compile("foobar[0-9]+").matcher(f.getTrace()); @@ -124,16 +127,14 @@ public class TestExceptionInBeforeClassHooks extends WithNestedTests { } Collections.sort(foobars); - Assert.assertEquals("[foobar1, foobar2, foobar3]", - Arrays.toString(foobars.toArray())); + Assert.assertEquals("[foobar1, foobar2, foobar3]", Arrays.toString(foobars.toArray())); } - + @Test public void testExceptionWithinBefore() { Result runClasses = JUnitCore.runClasses(Nested3.class); assertFailureCount(1, runClasses); Assert.assertEquals(1, runClasses.getRunCount()); Assert.assertTrue(runClasses.getFailures().get(0).getTrace().contains("foobar")); - } - + } } diff --git a/lucene/test-framework/src/test/org/apache/lucene/util/TestExpectThrows.java b/lucene/test-framework/src/test/org/apache/lucene/util/TestExpectThrows.java index cfc70be9f2c..d29b28ce6af 100644 --- a/lucene/test-framework/src/test/org/apache/lucene/util/TestExpectThrows.java +++ b/lucene/test-framework/src/test/org/apache/lucene/util/TestExpectThrows.java @@ -16,11 +16,10 @@ */ package org.apache.lucene.util; -import java.util.concurrent.atomic.AtomicBoolean; import java.io.IOException; - 
+import java.util.concurrent.atomic.AtomicBoolean; import org.junit.internal.AssumptionViolatedException; - + public class TestExpectThrows extends LuceneTestCase { private static class HuperDuperException extends IOException { @@ -28,34 +27,40 @@ public class TestExpectThrows extends LuceneTestCase { /* No-Op */ } } - - /** - * Tests that {@link #expectThrows} behaves correctly when the Runnable throws (an - * instance of a subclass of) the expected Exception type: by returning that Exception. + + /** + * Tests that {@link #expectThrows} behaves correctly when the Runnable throws (an instance of a + * subclass of) the expected Exception type: by returning that Exception. */ public void testPass() { final AtomicBoolean ran = new AtomicBoolean(false); - final IOException returned = expectThrows(IOException.class, () -> { - ran.getAndSet(true); - throw new HuperDuperException(); - }); + final IOException returned = + expectThrows( + IOException.class, + () -> { + ran.getAndSet(true); + throw new HuperDuperException(); + }); assertTrue(ran.get()); assertNotNull(returned); assertEquals(HuperDuperException.class, returned.getClass()); } - - /** - * Tests that {@link #expectThrows} behaves correctly when the Runnable does not throw (an - * instance of a subclass of) the expected Exception type: by throwing an assertion to - * FAIL the test. + + /** + * Tests that {@link #expectThrows} behaves correctly when the Runnable does not throw (an + * instance of a subclass of) the expected Exception type: by throwing an assertion to FAIL + * the test. */ public void testFail() { final AtomicBoolean ran = new AtomicBoolean(false); AssertionError caught = null; try { - final IOException returned = expectThrows(IOException.class, () -> { - ran.getAndSet(true); - }); + final IOException returned = + expectThrows( + IOException.class, + () -> { + ran.getAndSet(true); + }); fail("must not complete"); // NOTE: we don't use expectThrows to test expectThrows } catch (AssertionError ae) { caught = ae; @@ -63,22 +68,23 @@ public class TestExpectThrows extends LuceneTestCase { assertTrue(ran.get()); assertNotNull(caught); assertEquals("Expected exception IOException but no exception was thrown", caught.getMessage()); - } - /** - * Tests that {@link #expectThrows} behaves correctly when the Runnable contains an - * assertion that does not pass: by allowing that assertion to propogate and - * FAIL the test. + /** + * Tests that {@link #expectThrows} behaves correctly when the Runnable contains an assertion that + * does not pass: by allowing that assertion to propagate and FAIL the test. */ public void testNestedFail() { final AtomicBoolean ran = new AtomicBoolean(false); AssertionError caught = null; try { - final IOException returned = expectThrows(IOException.class, () -> { - ran.getAndSet(true); - fail("this failure should propogate"); - }); + final IOException returned = + expectThrows( + IOException.class, + () -> { + ran.getAndSet(true); + fail("this failure should propogate"); + }); fail("must not complete"); // NOTE: we don't use expectThrows to test expectThrows } catch (AssertionError ae) { caught = ae; @@ -87,20 +93,23 @@ public class TestExpectThrows extends LuceneTestCase { assertNotNull(caught); assertEquals("this failure should propogate", caught.getMessage()); } - - /** - * Tests that {@link #expectThrows} behaves correctly when the Runnable contains an - * assumption that does not pass: by allowing that assumption to propogate and cause - * the test to SKIP. 
+ + /** + * Tests that {@link #expectThrows} behaves correctly when the Runnable contains an assumption + * that does not pass: by allowing that assumption to propagate and cause the test to SKIP + * . */ public void testNestedAssume() { final AtomicBoolean ran = new AtomicBoolean(false); AssumptionViolatedException caught = null; try { - final IOException returned = expectThrows(IOException.class, () -> { - ran.getAndSet(true); - assumeTrue("this assumption should propogate", false); - }); + final IOException returned = + expectThrows( + IOException.class, + () -> { + ran.getAndSet(true); + assumeTrue("this assumption should propogate", false); + }); fail("must not complete"); // NOTE: we don't use expectThrows to test expectThrows } catch (AssumptionViolatedException ave) { caught = ave; @@ -110,19 +119,22 @@ public class TestExpectThrows extends LuceneTestCase { assertEquals("this assumption should propogate", caught.getMessage()); } - /** - * Tests that {@link #expectThrows} behaves correctly when the Runnable contains an - * assertion that does not pass but the caller has explicitly said they expect an Exception of that type: - * by returning that assertion failure Exception. + /** + * Tests that {@link #expectThrows} behaves correctly when the Runnable contains an assertion that + * does not pass but the caller has explicitly said they expect an Exception of that type: by + * returning that assertion failure Exception. */ public void testExpectingNestedFail() { final AtomicBoolean ran = new AtomicBoolean(false); AssertionError returned = null; try { - returned = expectThrows(AssertionError.class, () -> { - ran.getAndSet(true); - fail("this failure should be returned, not propogated"); - }); + returned = + expectThrows( + AssertionError.class, + () -> { + ran.getAndSet(true); + fail("this failure should be returned, not propogated"); + }); } catch (AssertionError caught) { // NOTE: we don't use expectThrows to test expectThrows assertNull("An exception should not have been thrown", caught); } @@ -130,26 +142,30 @@ public class TestExpectThrows extends LuceneTestCase { assertNotNull(returned); assertEquals("this failure should be returned, not propogated", returned.getMessage()); } - - /** - * Tests that {@link #expectThrows} behaves correctly when the Runnable contains an - * assumption that does not pass but the caller has explicitly said they expect an Exception of that type: - * by returning that assumption failure Exception. + + /** + * Tests that {@link #expectThrows} behaves correctly when the Runnable contains an assumption + * that does not pass but the caller has explicitly said they expect an Exception of that type: by + * returning that assumption failure Exception. 
*/ public void testExpectingNestedAssume() { final AtomicBoolean ran = new AtomicBoolean(false); AssumptionViolatedException returned = null; try { - returned = expectThrows(AssumptionViolatedException.class, () -> { - ran.getAndSet(true); - assumeTrue("this assumption should be returned, not propogated", false); - }); - } catch (AssumptionViolatedException caught) { // NOTE: we don't use expectThrows to test expectThrows + returned = + expectThrows( + AssumptionViolatedException.class, + () -> { + ran.getAndSet(true); + assumeTrue("this assumption should be returned, not propogated", false); + }); + } catch ( + AssumptionViolatedException + caught) { // NOTE: we don't use expectThrows to test expectThrows assertNull("An exception should not have been thrown", caught); } assertTrue(ran.get()); assertNotNull(returned); assertEquals("this assumption should be returned, not propogated", returned.getMessage()); } - } diff --git a/lucene/test-framework/src/test/org/apache/lucene/util/TestFailIfDirectoryNotClosed.java b/lucene/test-framework/src/test/org/apache/lucene/util/TestFailIfDirectoryNotClosed.java index a367d4e6dc1..a5ec8285e3d 100644 --- a/lucene/test-framework/src/test/org/apache/lucene/util/TestFailIfDirectoryNotClosed.java +++ b/lucene/test-framework/src/test/org/apache/lucene/util/TestFailIfDirectoryNotClosed.java @@ -16,14 +16,13 @@ */ package org.apache.lucene.util; +import com.carrotsearch.randomizedtesting.RandomizedTest; import org.apache.lucene.store.Directory; import org.junit.Assert; import org.junit.Test; import org.junit.runner.JUnitCore; import org.junit.runner.Result; -import com.carrotsearch.randomizedtesting.RandomizedTest; - public class TestFailIfDirectoryNotClosed extends WithNestedTests { public TestFailIfDirectoryNotClosed() { super(true); @@ -39,9 +38,10 @@ public class TestFailIfDirectoryNotClosed extends WithNestedTests { @Test public void testFailIfDirectoryNotClosed() { Result r = JUnitCore.runClasses(Nested1.class); - RandomizedTest.assumeTrue("Ignoring nested test, very likely zombie threads present.", - r.getIgnoreCount() == 0); + RandomizedTest.assumeTrue( + "Ignoring nested test, very likely zombie threads present.", r.getIgnoreCount() == 0); assertFailureCount(1, r); - Assert.assertTrue(r.getFailures().get(0).toString().contains("Resource in scope SUITE failed to close")); + Assert.assertTrue( + r.getFailures().get(0).toString().contains("Resource in scope SUITE failed to close")); } } diff --git a/lucene/test-framework/src/test/org/apache/lucene/util/TestFailIfUnreferencedFiles.java b/lucene/test-framework/src/test/org/apache/lucene/util/TestFailIfUnreferencedFiles.java index 891dcb03857..c28d3b6c5af 100644 --- a/lucene/test-framework/src/test/org/apache/lucene/util/TestFailIfUnreferencedFiles.java +++ b/lucene/test-framework/src/test/org/apache/lucene/util/TestFailIfUnreferencedFiles.java @@ -16,8 +16,8 @@ */ package org.apache.lucene.util; +import com.carrotsearch.randomizedtesting.RandomizedTest; import java.util.Collections; - import org.apache.lucene.document.Document; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; @@ -29,14 +29,13 @@ import org.junit.Test; import org.junit.runner.JUnitCore; import org.junit.runner.Result; import org.junit.runner.notification.Failure; -import com.carrotsearch.randomizedtesting.RandomizedTest; // LUCENE-4456: Test that we fail if there are unreferenced files public class TestFailIfUnreferencedFiles extends WithNestedTests { public TestFailIfUnreferencedFiles() { 
super(true); } - + public static class Nested1 extends WithNestedTests.AbstractNestedTest { public void testDummy() throws Exception { MockDirectoryWrapper dir = newMockDirectory(); @@ -55,17 +54,17 @@ public class TestFailIfUnreferencedFiles extends WithNestedTests { @Test public void testFailIfUnreferencedFiles() { Result r = JUnitCore.runClasses(Nested1.class); - RandomizedTest.assumeTrue("Ignoring nested test, very likely zombie threads present.", - r.getIgnoreCount() == 0); + RandomizedTest.assumeTrue( + "Ignoring nested test, very likely zombie threads present.", r.getIgnoreCount() == 0); // We are suppressing output anyway so dump the failures. for (Failure f : r.getFailures()) { System.out.println(f.getTrace()); } - Assert.assertEquals("Expected exactly one failure.", - 1, r.getFailureCount()); - Assert.assertTrue("Expected unreferenced files assertion.", + Assert.assertEquals("Expected exactly one failure.", 1, r.getFailureCount()); + Assert.assertTrue( + "Expected unreferenced files assertion.", r.getFailures().get(0).getTrace().contains("unreferenced files:")); } } diff --git a/lucene/test-framework/src/test/org/apache/lucene/util/TestGroupFiltering.java b/lucene/test-framework/src/test/org/apache/lucene/util/TestGroupFiltering.java index 9093a8cd932..76f1a06cbe6 100644 --- a/lucene/test-framework/src/test/org/apache/lucene/util/TestGroupFiltering.java +++ b/lucene/test-framework/src/test/org/apache/lucene/util/TestGroupFiltering.java @@ -16,20 +16,19 @@ */ package org.apache.lucene.util; +import com.carrotsearch.randomizedtesting.annotations.TestGroup; import java.lang.annotation.Documented; import java.lang.annotation.Inherited; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; -import com.carrotsearch.randomizedtesting.annotations.TestGroup; - public class TestGroupFiltering extends LuceneTestCase { @Documented @Inherited @Retention(RetentionPolicy.RUNTIME) @TestGroup(enabled = false) public @interface Foo {} - + @Documented @Inherited @Retention(RetentionPolicy.RUNTIME) @@ -43,11 +42,12 @@ public class TestGroupFiltering extends LuceneTestCase { public @interface Jira { String bug(); } - + @Foo public void testFoo() {} - - @Foo @Bar + + @Foo + @Bar public void testFooBar() {} @Bar diff --git a/lucene/test-framework/src/test/org/apache/lucene/util/TestJUnitRuleOrder.java b/lucene/test-framework/src/test/org/apache/lucene/util/TestJUnitRuleOrder.java index 92494798020..686b7c84999 100644 --- a/lucene/test-framework/src/test/org/apache/lucene/util/TestJUnitRuleOrder.java +++ b/lucene/test-framework/src/test/org/apache/lucene/util/TestJUnitRuleOrder.java @@ -18,7 +18,6 @@ package org.apache.lucene.util; import java.util.Arrays; import java.util.Stack; - import org.junit.After; import org.junit.AfterClass; import org.junit.Assert; @@ -32,9 +31,8 @@ import org.junit.runner.JUnitCore; import org.junit.runners.model.Statement; /** - * This verifies that JUnit {@link Rule}s are invoked before - * {@link Before} and {@link After} hooks. This should be the - * case from JUnit 4.10 on. + * This verifies that JUnit {@link Rule}s are invoked before {@link Before} and {@link After} hooks. + * This should be the case from JUnit 4.10 on. 
*/ public class TestJUnitRuleOrder extends WithNestedTests { static Stack stack; @@ -42,35 +40,38 @@ public class TestJUnitRuleOrder extends WithNestedTests { public TestJUnitRuleOrder() { super(true); } - + public static class Nested extends WithNestedTests.AbstractNestedTest { @Before public void before() { stack.push("@Before"); } - + @After public void after() { stack.push("@After"); } @Rule - public TestRule testRule = new TestRule() { - @Override - public Statement apply(final Statement base, Description description) { - return new Statement() { + public TestRule testRule = + new TestRule() { @Override - public void evaluate() throws Throwable { - stack.push("@Rule before"); - base.evaluate(); - stack.push("@Rule after"); + public Statement apply(final Statement base, Description description) { + return new Statement() { + @Override + public void evaluate() throws Throwable { + stack.push("@Rule before"); + base.evaluate(); + stack.push("@Rule after"); + } + }; } }; - } - }; @Test - public void test() {/* empty */} + public void test() { + /* empty */ + } @BeforeClass public static void beforeClassCleanup() { @@ -80,13 +81,14 @@ public class TestJUnitRuleOrder extends WithNestedTests { @AfterClass public static void afterClassCheck() { stack.push("@AfterClass"); - } + } } @Test public void testRuleOrder() { JUnitCore.runClasses(Nested.class); Assert.assertEquals( - Arrays.toString(stack.toArray()), "[@Rule before, @Before, @After, @Rule after, @AfterClass]"); + Arrays.toString(stack.toArray()), + "[@Rule before, @Before, @After, @Rule after, @AfterClass]"); } } diff --git a/lucene/test-framework/src/test/org/apache/lucene/util/TestJvmInfo.java b/lucene/test-framework/src/test/org/apache/lucene/util/TestJvmInfo.java index e59937edaf1..c580eed003a 100644 --- a/lucene/test-framework/src/test/org/apache/lucene/util/TestJvmInfo.java +++ b/lucene/test-framework/src/test/org/apache/lucene/util/TestJvmInfo.java @@ -17,22 +17,20 @@ package org.apache.lucene.util; import com.carrotsearch.randomizedtesting.RandomizedTest; +import java.util.Locale; import org.junit.Test; -import java.util.Locale; - -/** - * - */ +/** */ public class TestJvmInfo extends RandomizedTest { - @Test - public void testEchoJvmInfo() { - System.out.println(String.format(Locale.ROOT, - "This test runs with Java %s (%s, %s %s).", - System.getProperty("java.version"), - System.getProperty("java.vendor"), - System.getProperty("java.vm.name"), - System.getProperty("java.vm.version") - )); - } + @Test + public void testEchoJvmInfo() { + System.out.println( + String.format( + Locale.ROOT, + "This test runs with Java %s (%s, %s %s).", + System.getProperty("java.version"), + System.getProperty("java.vendor"), + System.getProperty("java.vm.name"), + System.getProperty("java.vm.version"))); + } } diff --git a/lucene/test-framework/src/test/org/apache/lucene/util/TestMaxFailuresRule.java b/lucene/test-framework/src/test/org/apache/lucene/util/TestMaxFailuresRule.java index 2859f62bc2c..21927eebcef 100644 --- a/lucene/test-framework/src/test/org/apache/lucene/util/TestMaxFailuresRule.java +++ b/lucene/test-framework/src/test/org/apache/lucene/util/TestMaxFailuresRule.java @@ -16,8 +16,14 @@ */ package org.apache.lucene.util; +import com.carrotsearch.randomizedtesting.annotations.Repeat; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakAction; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope; +import 
com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies.Consequence; import java.util.concurrent.CountDownLatch; - import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; @@ -27,17 +33,7 @@ import org.junit.runner.Result; import org.junit.runner.notification.Failure; import org.junit.runner.notification.RunListener; -import com.carrotsearch.randomizedtesting.annotations.Repeat; -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakAction; -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering; -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope; -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope; -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies; -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies.Consequence; - -/** - * @see TestRuleIgnoreAfterMaxFailures - */ +/** @see TestRuleIgnoreAfterMaxFailures */ public class TestMaxFailuresRule extends WithNestedTests { public TestMaxFailuresRule() { super(true); @@ -55,8 +51,7 @@ public class TestMaxFailuresRule extends WithNestedTests { boolean fail = random().nextInt(5) == 0; if (fail) numFails++; // some seeds are really lucky ... so cheat. - if (numFails < DESIRED_FAILURES && - DESIRED_FAILURES <= TOTAL_ITERS - numIters) { + if (numFails < DESIRED_FAILURES && DESIRED_FAILURES <= TOTAL_ITERS - numIters) { fail = true; } assertFalse(fail); @@ -68,29 +63,30 @@ public class TestMaxFailuresRule extends WithNestedTests { LuceneTestCase.replaceMaxFailureRule(new TestRuleIgnoreAfterMaxFailures(2)); JUnitCore core = new JUnitCore(); final StringBuilder results = new StringBuilder(); - core.addListener(new RunListener() { - char lastTest; + core.addListener( + new RunListener() { + char lastTest; - @Override - public void testStarted(Description description) throws Exception { - lastTest = 'S'; // success. - } + @Override + public void testStarted(Description description) throws Exception { + lastTest = 'S'; // success. + } - @Override - public void testAssumptionFailure(Failure failure) { - lastTest = 'A'; // assumption failure. - } + @Override + public void testAssumptionFailure(Failure failure) { + lastTest = 'A'; // assumption failure. + } - @Override - public void testFailure(Failure failure) throws Exception { - lastTest = 'F'; // failure - } + @Override + public void testFailure(Failure failure) throws Exception { + lastTest = 'F'; // failure + } - @Override - public void testFinished(Description description) throws Exception { - results.append(lastTest); - } - }); + @Override + public void testFinished(Description description) throws Exception { + results.append(lastTest); + } + }); Result result = core.run(Nested.class); Assert.assertEquals(500, result.getRunCount()); @@ -99,8 +95,7 @@ public class TestMaxFailuresRule extends WithNestedTests { // Make sure we had exactly two failures followed by assumption-failures // resulting from ignored tests. 
- Assert.assertTrue(results.toString(), - results.toString().matches("(S*F){2}A+")); + Assert.assertTrue(results.toString(), results.toString().matches("(S*F){2}A+")); } @ThreadLeakZombies(Consequence.IGNORE_REMAINING_TESTS) @@ -112,7 +107,7 @@ public class TestMaxFailuresRule extends WithNestedTests { public static CountDownLatch die; public static Thread zombie; public static int testNum; - + @BeforeClass public static void setup() { assert zombie == null; @@ -123,17 +118,20 @@ public class TestMaxFailuresRule extends WithNestedTests { @Repeat(iterations = TOTAL_ITERS) public void testLeaveZombie() { if (++testNum == 2) { - zombie = new Thread() { - @Override - public void run() { - while (true) { - try { - die.await(); - return; - } catch (Exception e) { /* ignore */ } - } - } - }; + zombie = + new Thread() { + @Override + public void run() { + while (true) { + try { + die.await(); + return; + } catch (Exception e) { + /* ignore */ + } + } + } + }; zombie.start(); } } @@ -144,39 +142,40 @@ public class TestMaxFailuresRule extends WithNestedTests { LuceneTestCase.replaceMaxFailureRule(new TestRuleIgnoreAfterMaxFailures(1)); JUnitCore core = new JUnitCore(); final StringBuilder results = new StringBuilder(); - core.addListener(new RunListener() { - char lastTest; + core.addListener( + new RunListener() { + char lastTest; - @Override - public void testStarted(Description description) throws Exception { - lastTest = 'S'; // success. - } + @Override + public void testStarted(Description description) throws Exception { + lastTest = 'S'; // success. + } - @Override - public void testAssumptionFailure(Failure failure) { - lastTest = 'A'; // assumption failure. - } + @Override + public void testAssumptionFailure(Failure failure) { + lastTest = 'A'; // assumption failure. + } - @Override - public void testFailure(Failure failure) throws Exception { - lastTest = 'F'; // failure - System.out.println(failure.getMessage()); - } + @Override + public void testFailure(Failure failure) throws Exception { + lastTest = 'F'; // failure + System.out.println(failure.getMessage()); + } - @Override - public void testFinished(Description description) throws Exception { - results.append(lastTest); - } - }); + @Override + public void testFinished(Description description) throws Exception { + results.append(lastTest); + } + }); Result result = core.run(Nested2.class); if (Nested2.die != null) { Nested2.die.countDown(); Nested2.zombie.join(); } - + super.prevSysOut.println(results.toString()); Assert.assertEquals(Nested2.TOTAL_ITERS, result.getRunCount()); Assert.assertEquals(results.toString(), "SFAAAAAAAA", results.toString()); - } + } } diff --git a/lucene/test-framework/src/test/org/apache/lucene/util/TestPleaseFail.java b/lucene/test-framework/src/test/org/apache/lucene/util/TestPleaseFail.java index b019875c6e0..99d3c375fe6 100644 --- a/lucene/test-framework/src/test/org/apache/lucene/util/TestPleaseFail.java +++ b/lucene/test-framework/src/test/org/apache/lucene/util/TestPleaseFail.java @@ -19,9 +19,8 @@ package org.apache.lucene.util; import org.junit.Assert; /** - * This test is for manually causing assertion failures or errors (to - * trigger the event for occasional needs of testing the test framework - * itself from command line). + * This test is for manually causing assertion failures or errors (to trigger the event for + * occasional needs of testing the test framework itself from command line). 
*/ public class TestPleaseFail extends LuceneTestCase { public void testFail() { diff --git a/lucene/test-framework/src/test/org/apache/lucene/util/TestRamUsageTesterOnWildAnimals.java b/lucene/test-framework/src/test/org/apache/lucene/util/TestRamUsageTesterOnWildAnimals.java index 6c40218a201..c810e2f466f 100644 --- a/lucene/test-framework/src/test/org/apache/lucene/util/TestRamUsageTesterOnWildAnimals.java +++ b/lucene/test-framework/src/test/org/apache/lucene/util/TestRamUsageTesterOnWildAnimals.java @@ -18,9 +18,7 @@ package org.apache.lucene.util; import org.junit.Assert; -/** - * Check large and special graphs. - */ +/** Check large and special graphs. */ public class TestRamUsageTesterOnWildAnimals extends LuceneTestCase { public static class ListElement { ListElement next; @@ -30,7 +28,7 @@ public class TestRamUsageTesterOnWildAnimals extends LuceneTestCase { int UPPERLIMIT = atLeast(10000); int lower = 0; int upper = UPPERLIMIT; - + while (lower + 1 < upper) { int mid = (lower + upper) / 2; try { @@ -49,5 +47,5 @@ public class TestRamUsageTesterOnWildAnimals extends LuceneTestCase { if (lower + 1 < UPPERLIMIT) { Assert.fail("Max object chain length till stack overflow: " + lower); } - } + } } diff --git a/lucene/test-framework/src/test/org/apache/lucene/util/TestReproduceMessage.java b/lucene/test-framework/src/test/org/apache/lucene/util/TestReproduceMessage.java index ad1017cbcec..21f66962a40 100644 --- a/lucene/test-framework/src/test/org/apache/lucene/util/TestReproduceMessage.java +++ b/lucene/test-framework/src/test/org/apache/lucene/util/TestReproduceMessage.java @@ -28,13 +28,11 @@ import org.junit.rules.TestRule; import org.junit.runner.JUnitCore; import org.junit.runners.model.Statement; -/** - * Test reproduce message is right. - */ +/** Test reproduce message is right. */ public class TestReproduceMessage extends WithNestedTests { public static SorePoint where; - public static SoreType type; - + public static SoreType type; + public static class Nested extends AbstractNestedTest { @BeforeClass public static void beforeClass() { @@ -44,13 +42,15 @@ public class TestReproduceMessage extends WithNestedTests { } @Rule - public TestRule rule = (base, description) -> new Statement() { - @Override - public void evaluate() throws Throwable { - triggerOn(SorePoint.RULE); - base.evaluate(); - } - }; + public TestRule rule = + (base, description) -> + new Statement() { + @Override + public void evaluate() throws Throwable { + triggerOn(SorePoint.RULE); + base.evaluate(); + } + }; /** Class initializer block/ default constructor. */ public Nested() { @@ -60,24 +60,24 @@ public class TestReproduceMessage extends WithNestedTests { @Before public void before() { triggerOn(SorePoint.BEFORE); - } + } @Test public void test() { triggerOn(SorePoint.TEST); } - + @After public void after() { triggerOn(SorePoint.AFTER); - } + } @AfterClass public static void afterClass() { if (isRunningNested()) { triggerOn(SorePoint.AFTER_CLASS); } - } + } /** */ private static void triggerOn(SorePoint pt) { @@ -99,56 +99,56 @@ public class TestReproduceMessage extends WithNestedTests { /* * ASSUMPTIONS. 
*/ - + public TestReproduceMessage() { super(true); } @Test - public void testAssumeBeforeClass() throws Exception { - type = SoreType.ASSUMPTION; + public void testAssumeBeforeClass() throws Exception { + type = SoreType.ASSUMPTION; where = SorePoint.BEFORE_CLASS; Assert.assertTrue(runAndReturnSyserr().isEmpty()); } @Test - public void testAssumeInitializer() throws Exception { - type = SoreType.ASSUMPTION; + public void testAssumeInitializer() throws Exception { + type = SoreType.ASSUMPTION; where = SorePoint.INITIALIZER; Assert.assertTrue(runAndReturnSyserr().isEmpty()); } @Test - public void testAssumeRule() throws Exception { - type = SoreType.ASSUMPTION; + public void testAssumeRule() throws Exception { + type = SoreType.ASSUMPTION; where = SorePoint.RULE; Assert.assertEquals("", runAndReturnSyserr()); } @Test - public void testAssumeBefore() throws Exception { - type = SoreType.ASSUMPTION; + public void testAssumeBefore() throws Exception { + type = SoreType.ASSUMPTION; where = SorePoint.BEFORE; Assert.assertTrue(runAndReturnSyserr().isEmpty()); } @Test - public void testAssumeTest() throws Exception { - type = SoreType.ASSUMPTION; + public void testAssumeTest() throws Exception { + type = SoreType.ASSUMPTION; where = SorePoint.TEST; Assert.assertTrue(runAndReturnSyserr().isEmpty()); } @Test - public void testAssumeAfter() throws Exception { - type = SoreType.ASSUMPTION; + public void testAssumeAfter() throws Exception { + type = SoreType.ASSUMPTION; where = SorePoint.AFTER; Assert.assertTrue(runAndReturnSyserr().isEmpty()); } @Test - public void testAssumeAfterClass() throws Exception { - type = SoreType.ASSUMPTION; + public void testAssumeAfterClass() throws Exception { + type = SoreType.ASSUMPTION; where = SorePoint.AFTER_CLASS; Assert.assertTrue(runAndReturnSyserr().isEmpty()); } @@ -156,17 +156,17 @@ public class TestReproduceMessage extends WithNestedTests { /* * FAILURES */ - + @Test - public void testFailureBeforeClass() throws Exception { - type = SoreType.FAILURE; + public void testFailureBeforeClass() throws Exception { + type = SoreType.FAILURE; where = SorePoint.BEFORE_CLASS; Assert.assertTrue(runAndReturnSyserr().contains("NOTE: reproduce with:")); } @Test - public void testFailureInitializer() throws Exception { - type = SoreType.FAILURE; + public void testFailureInitializer() throws Exception { + type = SoreType.FAILURE; where = SorePoint.INITIALIZER; Assert.assertTrue(runAndReturnSyserr().contains("NOTE: reproduce with:")); } @@ -177,36 +177,36 @@ public class TestReproduceMessage extends WithNestedTests { } @Test - public void testFailureRule() throws Exception { - type = SoreType.FAILURE; + public void testFailureRule() throws Exception { + type = SoreType.FAILURE; where = SorePoint.RULE; checkTestName(runAndReturnSyserr(), Nested.class.getSimpleName() + ".test"); } @Test - public void testFailureBefore() throws Exception { - type = SoreType.FAILURE; + public void testFailureBefore() throws Exception { + type = SoreType.FAILURE; where = SorePoint.BEFORE; checkTestName(runAndReturnSyserr(), Nested.class.getSimpleName() + ".test"); } @Test - public void testFailureTest() throws Exception { - type = SoreType.FAILURE; + public void testFailureTest() throws Exception { + type = SoreType.FAILURE; where = SorePoint.TEST; checkTestName(runAndReturnSyserr(), Nested.class.getSimpleName() + ".test"); } @Test - public void testFailureAfter() throws Exception { - type = SoreType.FAILURE; + public void testFailureAfter() throws Exception { + type = SoreType.FAILURE; where = 
SorePoint.AFTER; checkTestName(runAndReturnSyserr(), Nested.class.getSimpleName() + ".test"); } @Test - public void testFailureAfterClass() throws Exception { - type = SoreType.FAILURE; + public void testFailureAfterClass() throws Exception { + type = SoreType.FAILURE; where = SorePoint.AFTER_CLASS; Assert.assertTrue(runAndReturnSyserr().contains("NOTE: reproduce with:")); } @@ -214,52 +214,52 @@ public class TestReproduceMessage extends WithNestedTests { /* * ERRORS */ - + @Test - public void testErrorBeforeClass() throws Exception { - type = SoreType.ERROR; + public void testErrorBeforeClass() throws Exception { + type = SoreType.ERROR; where = SorePoint.BEFORE_CLASS; Assert.assertTrue(runAndReturnSyserr().contains("NOTE: reproduce with:")); } @Test - public void testErrorInitializer() throws Exception { - type = SoreType.ERROR; + public void testErrorInitializer() throws Exception { + type = SoreType.ERROR; where = SorePoint.INITIALIZER; Assert.assertTrue(runAndReturnSyserr().contains("NOTE: reproduce with:")); } @Test - public void testErrorRule() throws Exception { - type = SoreType.ERROR; + public void testErrorRule() throws Exception { + type = SoreType.ERROR; where = SorePoint.RULE; checkTestName(runAndReturnSyserr(), Nested.class.getSimpleName() + ".test"); } @Test - public void testErrorBefore() throws Exception { - type = SoreType.ERROR; + public void testErrorBefore() throws Exception { + type = SoreType.ERROR; where = SorePoint.BEFORE; checkTestName(runAndReturnSyserr(), Nested.class.getSimpleName() + ".test"); } @Test - public void testErrorTest() throws Exception { - type = SoreType.ERROR; + public void testErrorTest() throws Exception { + type = SoreType.ERROR; where = SorePoint.TEST; checkTestName(runAndReturnSyserr(), Nested.class.getSimpleName() + ".test"); } @Test - public void testErrorAfter() throws Exception { - type = SoreType.ERROR; + public void testErrorAfter() throws Exception { + type = SoreType.ERROR; where = SorePoint.AFTER; checkTestName(runAndReturnSyserr(), Nested.class.getSimpleName() + ".test"); } @Test - public void testErrorAfterClass() throws Exception { - type = SoreType.ERROR; + public void testErrorAfterClass() throws Exception { + type = SoreType.ERROR; where = SorePoint.AFTER_CLASS; Assert.assertTrue(runAndReturnSyserr().contains("NOTE: reproduce with:")); } diff --git a/lucene/test-framework/src/test/org/apache/lucene/util/TestReproduceMessageWithRepeated.java b/lucene/test-framework/src/test/org/apache/lucene/util/TestReproduceMessageWithRepeated.java index 00dc38bcdf8..bfe4e6aadb3 100644 --- a/lucene/test-framework/src/test/org/apache/lucene/util/TestReproduceMessageWithRepeated.java +++ b/lucene/test-framework/src/test/org/apache/lucene/util/TestReproduceMessageWithRepeated.java @@ -16,14 +16,11 @@ */ package org.apache.lucene.util; +import com.carrotsearch.randomizedtesting.annotations.Repeat; import org.junit.Test; import org.junit.runner.JUnitCore; -import com.carrotsearch.randomizedtesting.annotations.Repeat; - -/** - * Test reproduce message is right with {@link Repeat} annotation. - */ +/** Test reproduce message is right with {@link Repeat} annotation. 
*/ public class TestReproduceMessageWithRepeated extends WithNestedTests { public static class Nested extends AbstractNestedTest { @Test @@ -39,7 +36,8 @@ public class TestReproduceMessageWithRepeated extends WithNestedTests { @Test public void testRepeatedMessage() throws Exception { - TestReproduceMessage.checkTestName(runAndReturnSyserr(), Nested.class.getSimpleName() + ".testMe"); + TestReproduceMessage.checkTestName( + runAndReturnSyserr(), Nested.class.getSimpleName() + ".testMe"); } private String runAndReturnSyserr() { diff --git a/lucene/test-framework/src/test/org/apache/lucene/util/TestRunWithRestrictedPermissions.java b/lucene/test-framework/src/test/org/apache/lucene/util/TestRunWithRestrictedPermissions.java index 103937f5612..0e41b172cf0 100644 --- a/lucene/test-framework/src/test/org/apache/lucene/util/TestRunWithRestrictedPermissions.java +++ b/lucene/test-framework/src/test/org/apache/lucene/util/TestRunWithRestrictedPermissions.java @@ -28,27 +28,32 @@ public class TestRunWithRestrictedPermissions extends LuceneTestCase { } public void testNormallyAllowedStuff() throws Exception { - expectThrows(SecurityException.class, () -> runWithRestrictedPermissions(this::doSomeForbiddenStuff)); + expectThrows( + SecurityException.class, () -> runWithRestrictedPermissions(this::doSomeForbiddenStuff)); } public void testCompletelyForbidden1() throws Exception { - expectThrows(SecurityException.class, () -> runWithRestrictedPermissions(this::doSomeCompletelyForbiddenStuff)); + expectThrows( + SecurityException.class, + () -> runWithRestrictedPermissions(this::doSomeCompletelyForbiddenStuff)); } public void testCompletelyForbidden2() throws Exception { - expectThrows(SecurityException.class, () -> - runWithRestrictedPermissions(this::doSomeCompletelyForbiddenStuff, new AllPermission())); + expectThrows( + SecurityException.class, + () -> + runWithRestrictedPermissions( + this::doSomeCompletelyForbiddenStuff, new AllPermission())); } private Void doSomeForbiddenStuff() throws IOException { createTempDir("cannot_create_temp_folder"); return null; // Void } - + // something like this should never pass!! private Void doSomeCompletelyForbiddenStuff() throws IOException { Files.createFile(Paths.get("denied")); return null; // Void } - } diff --git a/lucene/test-framework/src/test/org/apache/lucene/util/TestSeedFromUncaught.java b/lucene/test-framework/src/test/org/apache/lucene/util/TestSeedFromUncaught.java index 81cf5e6568b..08fba0c4c1f 100644 --- a/lucene/test-framework/src/test/org/apache/lucene/util/TestSeedFromUncaught.java +++ b/lucene/test-framework/src/test/org/apache/lucene/util/TestSeedFromUncaught.java @@ -22,20 +22,18 @@ import org.junit.runner.JUnitCore; import org.junit.runner.Result; import org.junit.runner.notification.Failure; -/** - * Check that uncaught exceptions result in seed info being dumped to - * console. - */ +/** Check that uncaught exceptions result in seed info being dumped to console. */ public class TestSeedFromUncaught extends WithNestedTests { public static class ThrowInUncaught extends AbstractNestedTest { @Test public void testFoo() throws Exception { - Thread t = new Thread() { - @Override - public void run() { - throw new RuntimeException("foobar"); - } - }; + Thread t = + new Thread() { + @Override + public void run() { + throw new RuntimeException("foobar"); + } + }; t.start(); t.join(); } @@ -45,9 +43,7 @@ public class TestSeedFromUncaught extends WithNestedTests { super(/* suppress normal output. 
*/ true); } - /** - * Verify super method calls on {@link LuceneTestCase#setUp()}. - */ + /** Verify super method calls on {@link LuceneTestCase#setUp()}. */ @Test public void testUncaughtDumpsSeed() { Result result = JUnitCore.runClasses(ThrowInUncaught.class); diff --git a/lucene/test-framework/src/test/org/apache/lucene/util/TestSetupTeardownChaining.java b/lucene/test-framework/src/test/org/apache/lucene/util/TestSetupTeardownChaining.java index b2ddbd44e8a..bc6803d84c2 100644 --- a/lucene/test-framework/src/test/org/apache/lucene/util/TestSetupTeardownChaining.java +++ b/lucene/test-framework/src/test/org/apache/lucene/util/TestSetupTeardownChaining.java @@ -23,8 +23,7 @@ import org.junit.runner.Result; import org.junit.runner.notification.Failure; /** - * Ensures proper functions of {@link LuceneTestCase#setUp()} - * and {@link LuceneTestCase#tearDown()}. + * Ensures proper functions of {@link LuceneTestCase#setUp()} and {@link LuceneTestCase#tearDown()}. */ public class TestSetupTeardownChaining extends WithNestedTests { public static class NestedSetupChain extends AbstractNestedTest { @@ -35,8 +34,7 @@ public class TestSetupTeardownChaining extends WithNestedTests { } @Test - public void testMe() { - } + public void testMe() {} } public static class NestedTeardownChain extends AbstractNestedTest { @@ -46,35 +44,34 @@ public class TestSetupTeardownChaining extends WithNestedTests { } @Test - public void testMe() { - } + public void testMe() {} } public TestSetupTeardownChaining() { super(true); } - - /** - * Verify super method calls on {@link LuceneTestCase#setUp()}. - */ + + /** Verify super method calls on {@link LuceneTestCase#setUp()}. */ @Test public void testSetupChaining() { Result result = JUnitCore.runClasses(NestedSetupChain.class); Assert.assertEquals(1, result.getFailureCount()); Failure failure = result.getFailures().get(0); - Assert.assertTrue(failure.getMessage() - .contains("One of the overrides of setUp does not propagate the call.")); + Assert.assertTrue( + failure + .getMessage() + .contains("One of the overrides of setUp does not propagate the call.")); } - - /** - * Verify super method calls on {@link LuceneTestCase#tearDown()}. - */ + + /** Verify super method calls on {@link LuceneTestCase#tearDown()}. 
*/ @Test public void testTeardownChaining() { Result result = JUnitCore.runClasses(NestedTeardownChain.class); Assert.assertEquals(1, result.getFailureCount()); Failure failure = result.getFailures().get(0); - Assert.assertTrue(failure.getMessage() - .contains("One of the overrides of tearDown does not propagate the call.")); + Assert.assertTrue( + failure + .getMessage() + .contains("One of the overrides of tearDown does not propagate the call.")); } } diff --git a/lucene/test-framework/src/test/org/apache/lucene/util/TestSysoutsLimits.java b/lucene/test-framework/src/test/org/apache/lucene/util/TestSysoutsLimits.java index 76c724b9306..ddeb3e80766 100644 --- a/lucene/test-framework/src/test/org/apache/lucene/util/TestSysoutsLimits.java +++ b/lucene/test-framework/src/test/org/apache/lucene/util/TestSysoutsLimits.java @@ -16,25 +16,22 @@ */ package org.apache.lucene.util; -import java.util.stream.Collectors; - import com.carrotsearch.randomizedtesting.RandomizedTest; +import java.util.stream.Collectors; import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; import org.junit.runner.JUnitCore; import org.junit.runner.Result; -/** - * @see TestRuleLimitSysouts - */ +/** @see TestRuleLimitSysouts */ public class TestSysoutsLimits extends WithNestedTests { public TestSysoutsLimits() { super(false); } public static class ParentNestedTest extends LuceneTestCase - implements TestRuleIgnoreTestSuites.NestedTestSuite { + implements TestRuleIgnoreTestSuites.NestedTestSuite { @BeforeClass public static void onlyWhenNested() { assumeTrue("Only runs when nested", TestRuleIgnoreTestSuites.isRunningNested()); @@ -53,9 +50,10 @@ public class TestSysoutsLimits extends WithNestedTests { JUnitCore core = new JUnitCore(); Result result = core.run(OverSoftLimit.class); - String msg = result.getFailures().stream() - .map(failure -> failure.getMessage()) - .collect(Collectors.joining("\n")); + String msg = + result.getFailures().stream() + .map(failure -> failure.getMessage()) + .collect(Collectors.joining("\n")); Assert.assertTrue(msg, msg.contains("The test or suite printed 10 bytes")); } @@ -72,9 +70,10 @@ public class TestSysoutsLimits extends WithNestedTests { JUnitCore core = new JUnitCore(); Result result = core.run(UnderLimit.class); - String msg = result.getFailures().stream() - .map(failure -> failure.getMessage()) - .collect(Collectors.joining("\n")); + String msg = + result.getFailures().stream() + .map(failure -> failure.getMessage()) + .collect(Collectors.joining("\n")); Assert.assertTrue(msg, msg.isEmpty()); } @@ -84,9 +83,12 @@ public class TestSysoutsLimits extends WithNestedTests { public void testWrite() { System.out.print("1234567890"); System.out.print("-marker1-"); - System.out.print("-marker2-"); System.out.flush(); - System.out.print("-marker3-"); System.out.flush(); - System.out.print("-marker4-"); System.out.flush(); + System.out.print("-marker2-"); + System.out.flush(); + System.out.print("-marker3-"); + System.out.flush(); + System.out.print("-marker4-"); + System.out.flush(); } } @@ -95,9 +97,10 @@ public class TestSysoutsLimits extends WithNestedTests { JUnitCore core = new JUnitCore(); Result result = core.run(OverHardLimit.class); - String msg = result.getFailures().stream() - .map(failure -> failure.getMessage()) - .collect(Collectors.joining("\n")); + String msg = + result.getFailures().stream() + .map(failure -> failure.getMessage()) + .collect(Collectors.joining("\n")); Assert.assertTrue(msg, msg.contains("Hard limit was enforced")); 
     Assert.assertTrue(msg, msg.contains("The test or suite printed 46 bytes"));
diff --git a/lucene/test-framework/src/test/org/apache/lucene/util/TestWorstCaseTestBehavior.java b/lucene/test-framework/src/test/org/apache/lucene/util/TestWorstCaseTestBehavior.java
index c07c8dabb9c..d169408a4a1 100644
--- a/lucene/test-framework/src/test/org/apache/lucene/util/TestWorstCaseTestBehavior.java
+++ b/lucene/test-framework/src/test/org/apache/lucene/util/TestWorstCaseTestBehavior.java
@@ -16,24 +16,24 @@
  */
 package org.apache.lucene.util;
 
-import org.junit.Ignore;
-
 import com.carrotsearch.randomizedtesting.RandomizedTest;
 import com.carrotsearch.randomizedtesting.annotations.Timeout;
+import org.junit.Ignore;
 
 public class TestWorstCaseTestBehavior extends LuceneTestCase {
   @Ignore
   public void testThreadLeak() {
-    Thread t = new Thread() {
-      @Override
-      public void run() {
-        try {
-          Thread.sleep(10000);
-        } catch (InterruptedException e) {
-          // Ignore.
-        }
-      }
-    };
+    Thread t =
+        new Thread() {
+          @Override
+          public void run() {
+            try {
+              Thread.sleep(10000);
+            } catch (InterruptedException e) {
+              // Ignore.
+            }
+          }
+        };
     t.start();
 
     while (!t.isAlive()) {
@@ -68,29 +68,31 @@ public class TestWorstCaseTestBehavior extends LuceneTestCase {
 
   @Ignore
   public void testUncaughtException() throws Exception {
-    Thread t = new Thread() {
-      @Override
-      public void run() {
-        throw new RuntimeException("foobar");
-      }
-    };
+    Thread t =
+        new Thread() {
+          @Override
+          public void run() {
+            throw new RuntimeException("foobar");
+          }
+        };
     t.start();
     t.join();
   }
-  
+
   @Ignore
   @Timeout(millis = 500)
   public void testTimeout() throws Exception {
     Thread.sleep(5000);
   }
-  
+
   @Ignore
   @Timeout(millis = 1000)
   public void testZombie() throws Exception {
     while (true) {
       try {
         Thread.sleep(1000);
-      } catch (InterruptedException e) {}
+      } catch (InterruptedException e) {
+      }
     }
   }
 }
diff --git a/lucene/test-framework/src/test/org/apache/lucene/util/WithNestedTests.java b/lucene/test-framework/src/test/org/apache/lucene/util/WithNestedTests.java
index e3d03df684f..4e88e5c68f7 100644
--- a/lucene/test-framework/src/test/org/apache/lucene/util/WithNestedTests.java
+++ b/lucene/test-framework/src/test/org/apache/lucene/util/WithNestedTests.java
@@ -16,12 +16,15 @@
  */
 package org.apache.lucene.util;
 
+import com.carrotsearch.randomizedtesting.RandomizedRunner;
+import com.carrotsearch.randomizedtesting.RandomizedTest;
+import com.carrotsearch.randomizedtesting.SysGlobals;
+import com.carrotsearch.randomizedtesting.rules.TestRuleAdapter;
 import java.io.ByteArrayOutputStream;
 import java.io.PrintStream;
 import java.io.UnsupportedEncodingException;
 import java.nio.charset.StandardCharsets;
 import java.util.List;
-
 import org.apache.lucene.util.LuceneTestCase.SuppressSysoutChecks;
 import org.junit.After;
 import org.junit.Assert;
@@ -34,27 +37,20 @@ import org.junit.rules.TestRule;
 import org.junit.runner.Result;
 import org.junit.runner.notification.Failure;
 
-import com.carrotsearch.randomizedtesting.RandomizedRunner;
-import com.carrotsearch.randomizedtesting.RandomizedTest;
-import com.carrotsearch.randomizedtesting.SysGlobals;
-import com.carrotsearch.randomizedtesting.rules.TestRuleAdapter;
-
 /**
- * An abstract test class that prepares nested test classes to run.
- * A nested test class will assume it's executed under control of this
- * class and be ignored otherwise.
- *
- * <p>The purpose of this is so that nested test suites don't run from
- * IDEs like Eclipse (where they are automatically detected).
- *
- * <p>This class cannot extend {@link LuceneTestCase} because in case
- * there's a nested {@link LuceneTestCase} afterclass hooks run twice and
- * cause havoc (static fields).
+ * An abstract test class that prepares nested test classes to run. A nested test class will assume
+ * it's executed under control of this class and be ignored otherwise.
+ *
+ * <p>The purpose of this is so that nested test suites don't run from IDEs like Eclipse (where they
+ * are automatically detected).
+ *
+ * <p>This class cannot extend {@link LuceneTestCase} because in case there's a nested {@link
+ * LuceneTestCase} afterclass hooks run twice and cause havoc (static fields).
 */
 public abstract class WithNestedTests {
   @SuppressSysoutChecks(bugUrl = "WithNestedTests has its own stream capture.")
-  public static abstract class AbstractNestedTest extends LuceneTestCase
-    implements TestRuleIgnoreTestSuites.NestedTestSuite {
+  public abstract static class AbstractNestedTest extends LuceneTestCase
+      implements TestRuleIgnoreTestSuites.NestedTestSuite {
     protected static boolean isRunningNested() {
       return TestRuleIgnoreTestSuites.isRunningNested();
     }
@@ -65,68 +61,72 @@ public abstract class WithNestedTests {
   protected WithNestedTests(boolean suppressOutputStreams) {
     this.suppressOutputStreams = suppressOutputStreams;
   }
-  
+
   protected PrintStream prevSysErr;
   protected PrintStream prevSysOut;
   private ByteArrayOutputStream sysout;
   private ByteArrayOutputStream syserr;
 
   @ClassRule
-  public static final TestRule classRules = RuleChain.outerRule(new TestRuleAdapter() {
-    private TestRuleIgnoreAfterMaxFailures prevRule;
+  public static final TestRule classRules =
+      RuleChain.outerRule(
+          new TestRuleAdapter() {
+            private TestRuleIgnoreAfterMaxFailures prevRule;
 
-    protected void before() throws Throwable {
-      if (!isPropertyEmpty(SysGlobals.SYSPROP_TESTFILTER()) ||
-          !isPropertyEmpty(SysGlobals.SYSPROP_TESTCLASS()) ||
-          !isPropertyEmpty(SysGlobals.SYSPROP_TESTMETHOD()) ||
-          !isPropertyEmpty(SysGlobals.SYSPROP_ITERATIONS())) {
-        // We're running with a complex test filter that is properly handled by classes
-        // which are executed by RandomizedRunner. The "outer" classes testing LuceneTestCase
-        // itself are executed by the default JUnit runner and would be always executed.
-        // We thus always skip execution if any filtering is detected.
-        Assume.assumeTrue(false);
-      }
-
-      // Check zombie threads from previous suites. Don't run if zombies are around.
-      RandomizedTest.assumeFalse(RandomizedRunner.hasZombieThreads());
+            protected void before() throws Throwable {
+              if (!isPropertyEmpty(SysGlobals.SYSPROP_TESTFILTER())
+                  || !isPropertyEmpty(SysGlobals.SYSPROP_TESTCLASS())
+                  || !isPropertyEmpty(SysGlobals.SYSPROP_TESTMETHOD())
+                  || !isPropertyEmpty(SysGlobals.SYSPROP_ITERATIONS())) {
+                // We're running with a complex test filter that is properly handled by classes
+                // which are executed by RandomizedRunner. The "outer" classes testing
+                // LuceneTestCase
+                // itself are executed by the default JUnit runner and would be always executed.
+                // We thus always skip execution if any filtering is detected.
+                Assume.assumeTrue(false);
+              }
 
-      TestRuleIgnoreAfterMaxFailures newRule = new TestRuleIgnoreAfterMaxFailures(Integer.MAX_VALUE);
-      prevRule = LuceneTestCase.replaceMaxFailureRule(newRule);
-      RandomizedTest.assumeFalse(FailureMarker.hadFailures());
-    }
+              // Check zombie threads from previous suites. Don't run if zombies are around.
+              RandomizedTest.assumeFalse(RandomizedRunner.hasZombieThreads());
 
-    protected void afterAlways(List<Throwable> errors) throws Throwable {
-      if (prevRule != null) {
-        LuceneTestCase.replaceMaxFailureRule(prevRule);
-      }
-      FailureMarker.resetFailures();
-    }
+              TestRuleIgnoreAfterMaxFailures newRule =
+                  new TestRuleIgnoreAfterMaxFailures(Integer.MAX_VALUE);
+              prevRule = LuceneTestCase.replaceMaxFailureRule(newRule);
+              RandomizedTest.assumeFalse(FailureMarker.hadFailures());
+            }
 
-    private boolean isPropertyEmpty(String propertyName) {
-      String value = System.getProperty(propertyName);
-      return value == null || value.trim().isEmpty();
-    }
-  });
+            protected void afterAlways(List<Throwable> errors) throws Throwable {
+              if (prevRule != null) {
+                LuceneTestCase.replaceMaxFailureRule(prevRule);
+              }
+              FailureMarker.resetFailures();
+            }
+
+            private boolean isPropertyEmpty(String propertyName) {
+              String value = System.getProperty(propertyName);
+              return value == null || value.trim().isEmpty();
+            }
+          });
+
+  /** Restore properties after test. */
+  @Rule public final TestRule rules;
 
-  /**
-   * Restore properties after test.
-   */
-  @Rule
-  public final TestRule rules;
   {
     final TestRuleMarkFailure marker = new TestRuleMarkFailure();
-    rules = RuleChain
-      .outerRule(new TestRuleRestoreSystemProperties(TestRuleIgnoreTestSuites.PROPERTY_RUN_NESTED))
-      .around(new TestRuleAdapter() {
-        @Override
-        protected void afterAlways(List<Throwable> errors) throws Throwable {
-          if (marker.hadFailures() && suppressOutputStreams) {
-            System.out.println("sysout from nested test: " + getSysOut() + "\n");
-            System.out.println("syserr from nested test: " + getSysErr());
-          }
-        }
-      })
-      .around(marker);
+    rules =
+        RuleChain.outerRule(
+                new TestRuleRestoreSystemProperties(TestRuleIgnoreTestSuites.PROPERTY_RUN_NESTED))
+            .around(
+                new TestRuleAdapter() {
+                  @Override
+                  protected void afterAlways(List<Throwable> errors) throws Throwable {
+                    if (marker.hadFailures() && suppressOutputStreams) {
+                      System.out.println("sysout from nested test: " + getSysOut() + "\n");
+                      System.out.println("syserr from nested test: " + getSysErr());
+                    }
+                  }
+                })
+            .around(marker);
   }
 
   @Before
@@ -169,8 +169,14 @@
       b.append("\n");
       b.append(f.getTrace());
     }
-      Assert.assertFalse("Expected failures: " + expected + " but was " +
-          result.getFailureCount() + ", failures below: " + b.toString(), true);
+      Assert.assertFalse(
+          "Expected failures: "
+              + expected
+              + " but was "
+              + result.getFailureCount()
+              + ", failures below: "
+              + b.toString(),
+          true);
     }
   }
 
@@ -184,5 +190,5 @@
     Assert.assertTrue(suppressOutputStreams);
     System.err.flush();
     return new String(syserr.toByteArray(), StandardCharsets.UTF_8);
-  } 
+  }
 }